Currently, THP allocation cannot be restricted to khugepaged alone while
being disabled in the page fault path. This limitation exists because
disabling THP allocation during page faults also prevents
khugepaged_enter_vma() from being called in that path.

With the introduction of BPF, we can now implement THP policies based on
different TVA types, so adjust the logic to support this capability: move
the khugepaged_enter_vma() call out of do_huge_pmd_anonymous_page() and
into __handle_mm_fault(), so the VMA is registered with khugepaged even
when PMD allocation is not allowed at fault time. While we could also
extend prctl() to utilize this new policy, such a change would require a
uAPI modification.

Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx>
---
 mm/huge_memory.c |  1 -
 mm/memory.c      | 13 ++++++++-----
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 523153d21a41..1e9e7b32e2cf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1346,7 +1346,6 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 	ret = vmf_anon_prepare(vmf);
 	if (ret)
 		return ret;
-	khugepaged_enter_vma(vma, vma->vm_flags);
 
 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
 			!mm_forbids_zeropage(vma->vm_mm) &&
diff --git a/mm/memory.c b/mm/memory.c
index d8819cac7930..d0609dc1e371 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6289,11 +6289,14 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	if (pud_trans_unstable(vmf.pud))
 		goto retry_pud;
 
-	if (pmd_none(*vmf.pmd) &&
-	    thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PMD_ORDER)) {
-		ret = create_huge_pmd(&vmf);
-		if (!(ret & VM_FAULT_FALLBACK))
-			return ret;
+	if (pmd_none(*vmf.pmd)) {
+		if (vma_is_anonymous(vma))
+			khugepaged_enter_vma(vma, vm_flags);
+		if (thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PMD_ORDER)) {
+			ret = create_huge_pmd(&vmf);
+			if (!(ret & VM_FAULT_FALLBACK))
+				return ret;
+		}
 	} else {
 		vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
 
-- 
2.47.3
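
For anyone who wants to poke at the khugepaged-only path this change
enables, a rough userspace sketch is below. It is not part of the patch:
it assumes fault-time PMD allocation is disallowed by whatever policy is
under test, that khugepaged is enabled, and the mapping size and wait are
arbitrary. After the range is faulted in with base pages, a later rise in
AnonHugePages for the mapping in /proc/<pid>/smaps indicates khugepaged
collapsed it.

/*
 * Rough userspace sketch (not part of the patch) for observing a
 * khugepaged collapse of an anonymous mapping.  Assumes fault-time PMD
 * allocation is disallowed by the policy under test and khugepaged is
 * running.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#define SZ	(16UL << 20)	/* 16 MiB of anonymous memory */

int main(void)
{
	char *buf = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Mark the range as a collapse candidate for khugepaged. */
	madvise(buf, SZ, MADV_HUGEPAGE);

	/* Fault the range in; with PMD faults disallowed these are base pages. */
	memset(buf, 1, SZ);

	printf("mapped %p; watch AnonHugePages in /proc/%d/smaps\n",
	       (void *)buf, (int)getpid());

	/* Keep the mapping alive so khugepaged has time to scan and collapse. */
	pause();
	return 0;
}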