A new hook, bpf_thp_allocator(), is added to determine whether a THP is
allocated by khugepaged or by the current task.

Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx>
---
 include/linux/huge_mm.h | 10 ++++++++++
 mm/khugepaged.c         |  2 ++
 2 files changed, 12 insertions(+)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 2f190c90192d..db2eadd3f65b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -190,6 +190,14 @@ static inline bool hugepage_global_always(void)
 			(1<<TRANSPARENT_HUGEPAGE_FLAG);
 }
 
+#define THP_ALLOC_KHUGEPAGED	(1 << 1)
+#define THP_ALLOC_CURRENT	(1 << 2)
+static inline int bpf_thp_allocator(unsigned long vm_flags,
+				    unsigned long tva_flags)
+{
+	return THP_ALLOC_KHUGEPAGED | THP_ALLOC_CURRENT;
+}
+
 static inline int highest_order(unsigned long orders)
 {
 	return fls_long(orders) - 1;
@@ -290,6 +298,8 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
 	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
 		unsigned long mask = READ_ONCE(huge_anon_orders_always);
 
+		if (!(bpf_thp_allocator(vm_flags, tva_flags) & THP_ALLOC_CURRENT))
+			return 0;
 		if (vm_flags & VM_HUGEPAGE)
 			mask |= READ_ONCE(huge_anon_orders_madvise);
 		if (hugepage_global_always() ||
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 79e208999ddb..18f800fe7335 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -472,6 +472,8 @@ void __khugepaged_enter(struct mm_struct *mm)
 void khugepaged_enter_vma(struct vm_area_struct *vma,
 			  unsigned long vm_flags)
 {
+	if (!(bpf_thp_allocator(vm_flags, 0) & THP_ALLOC_KHUGEPAGED))
+		return;
 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
 	    hugepage_pmd_enabled()) {
 		if (__thp_vma_allowable_orders(vma, vm_flags, TVA_ENFORCE_SYSFS,
-- 
2.43.5