This is so that flag setting can be reused later in other functions, to
reduce code duplication (including the s390 exception).

hugepage_set_vmflags() keeps the vma parameter so that the s390
mm_has_pgste() check still compiles, and returns -EPERM when the flags
must not be changed (the s390 pgste case) so that callers can skip
khugepaged registration. hugepage_madvise() calls the helper for both
MADV_HUGEPAGE and MADV_NOHUGEPAGE so that MADV_NOHUGEPAGE keeps setting
VM_NOHUGEPAGE as before.

No functional change intended with this patch.

Signed-off-by: Usama Arif <usamaarif642@xxxxxxxxx>
---
 include/linux/huge_mm.h |  2 ++
 mm/khugepaged.c         | 29 ++++++++++++++++++-----------
 2 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 2f190c90192d..23580a43787c 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -431,6 +431,8 @@ change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		__split_huge_pud(__vma, __pud, __address);	\
 	}  while (0)
 
+int hugepage_set_vmflags(struct vm_area_struct *vma, unsigned long *vm_flags,
+			 int advice);
 int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
 		     int advice);
 int madvise_collapse(struct vm_area_struct *vma,
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b04b6a770afe..ab3427c87422 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -346,8 +346,8 @@ struct attribute_group khugepaged_attr_group = {
 };
 #endif /* CONFIG_SYSFS */
 
-int hugepage_madvise(struct vm_area_struct *vma,
-		     unsigned long *vm_flags, int advice)
+int hugepage_set_vmflags(struct vm_area_struct *vma,
+			 unsigned long *vm_flags, int advice)
 {
 	switch (advice) {
 	case MADV_HUGEPAGE:
@@ -358,16 +358,10 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
 		 */
 		if (mm_has_pgste(vma->vm_mm))
-			return 0;
+			return -EPERM;
 #endif
 		*vm_flags &= ~VM_NOHUGEPAGE;
 		*vm_flags |= VM_HUGEPAGE;
-		/*
-		 * If the vma become good for khugepaged to scan,
-		 * register it here without waiting a page fault that
-		 * may not happen any time soon.
-		 */
-		khugepaged_enter_vma(vma, *vm_flags);
 		break;
 	case MADV_NOHUGEPAGE:
 		*vm_flags &= ~VM_HUGEPAGE;
@@ -383,6 +377,23 @@ int hugepage_madvise(struct vm_area_struct *vma,
 	return 0;
 }
 
+int hugepage_madvise(struct vm_area_struct *vma,
+		     unsigned long *vm_flags, int advice)
+{
+	int ret = hugepage_set_vmflags(vma, vm_flags, advice);
+
+	if (advice == MADV_HUGEPAGE && !ret) {
+		/*
+		 * If the vma become good for khugepaged to scan,
+		 * register it here without waiting a page fault that
+		 * may not happen any time soon.
+		 */
+		khugepaged_enter_vma(vma, *vm_flags);
+	}
+
+	return 0;
+}
+
 int __init khugepaged_init(void)
 {
 	mm_slot_cache = KMEM_CACHE(khugepaged_mm_slot, 0);
-- 
2.47.1