This function is pretty handy for any type of VMA that wants a size-aligned
virtual address at mmap() time.  Rename the function and export it.

About the rename:

- Drop "THP", because the function internally doesn't really have much to
  do with THP.

- The suffix "_aligned" implies it is a helper that generates an aligned
  virtual address based on the size specified (which may not be PMD_SIZE).

Cc: Zi Yan <ziy@xxxxxxxxxx>
Cc: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
Cc: Lorenzo Stoakes <lorenzo.stoakes@xxxxxxxxxx>
Cc: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx>
Cc: Ryan Roberts <ryan.roberts@xxxxxxx>
Cc: Dev Jain <dev.jain@xxxxxxx>
Cc: Barry Song <baohua@xxxxxxxxxx>
Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
---
 include/linux/huge_mm.h | 14 +++++++++++++-
 mm/huge_memory.c        |  6 ++++--
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 2f190c90192d..706488d92bb6 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -339,7 +339,10 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags,
 		vm_flags_t vm_flags);
-
+unsigned long mm_get_unmapped_area_aligned(struct file *filp,
+		unsigned long addr, unsigned long len,
+		loff_t off, unsigned long flags, unsigned long size,
+		vm_flags_t vm_flags);
 bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		unsigned int new_order);
@@ -543,6 +546,15 @@ thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
 	return 0;
 }
 
+static inline unsigned long
+mm_get_unmapped_area_aligned(struct file *filp,
+		unsigned long addr, unsigned long len,
+		loff_t off, unsigned long flags, unsigned long size,
+		vm_flags_t vm_flags)
+{
+	return 0;
+}
+
 static inline bool
 can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
 {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4734de1dc0ae..52f13a70562f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1088,7 +1088,7 @@ static inline bool is_transparent_hugepage(const struct folio *folio)
 		folio_test_large_rmappable(folio);
 }
 
-static unsigned long __thp_get_unmapped_area(struct file *filp,
+unsigned long mm_get_unmapped_area_aligned(struct file *filp,
 		unsigned long addr, unsigned long len,
 		loff_t off, unsigned long flags, unsigned long size,
 		vm_flags_t vm_flags)
@@ -1132,6 +1132,7 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
 	ret += off_sub;
 	return ret;
 }
+EXPORT_SYMBOL_GPL(mm_get_unmapped_area_aligned);
 
 unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags,
@@ -1140,7 +1141,8 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
 	unsigned long ret;
 	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
 
-	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
+	ret = mm_get_unmapped_area_aligned(filp, addr, len, off, flags,
+					   PMD_SIZE, vm_flags);
 	if (ret)
 		return ret;
 
-- 
2.49.0
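
As a reference for how a potential user of the newly exported helper might
look, here is a minimal sketch of a ->get_unmapped_area() callback; the
EXAMPLE_ALIGN_SIZE constant and example_get_unmapped_area() name are made
up for illustration and are not part of this series, and the fallback
assumes mm_get_unmapped_area() is available to the caller:

#include <linux/fs.h>
#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sizes.h>

/* Illustrative alignment only; a real user would pick its own size. */
#define EXAMPLE_ALIGN_SIZE	SZ_2M

static unsigned long example_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
	unsigned long ret;

	/* First try to find a slot aligned to EXAMPLE_ALIGN_SIZE. */
	ret = mm_get_unmapped_area_aligned(filp, addr, len, off, flags,
					   EXAMPLE_ALIGN_SIZE, 0);
	if (ret)
		return ret;

	/* No aligned slot found; fall back to the default search. */
	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
}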