On Mon, Jul 07, 2025 at 04:23:15PM +0200, Pankaj Raghav (Samsung) wrote: > From: Pankaj Raghav <p.raghav@xxxxxxxxxxx> > > Move the declaration associated with huge_zero_page from huge_mm.h to > mm.h. This patch is in preparation for adding static PMD zero page as we > will be reusing some of the huge_zero_page infrastructure. Hmm this is really iffy. The whole purpose of huge_mm.h is to handle huge page stuff, and now you're moving it to a general header... not a fan of this - now we have _some_ huge stuff in mm.h and some stuff here. Yes this might be something we screwed up already, but that's not a reason to perpetuate mistakes. Surely you don't _need_ to do this and this is a question of fixing up header includes, right? Or is there some horrible cyclical header issue here? Also, your commit message doesn't give any reason as to why you _need_ to do this. For something like this where you're doing something that at face value seems to contradict the purpose of these headers, you need to explain why. > > No functional changes. 
> > Signed-off-by: Pankaj Raghav <p.raghav@xxxxxxxxxxx> > --- > include/linux/huge_mm.h | 31 ------------------------------- > include/linux/mm.h | 34 ++++++++++++++++++++++++++++++++++ > 2 files changed, 34 insertions(+), 31 deletions(-) > > diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h > index 2f190c90192d..3e887374892c 100644 > --- a/include/linux/huge_mm.h > +++ b/include/linux/huge_mm.h > @@ -478,22 +478,6 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, > > vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf); > > -extern struct folio *huge_zero_folio; > -extern unsigned long huge_zero_pfn; > - > -static inline bool is_huge_zero_folio(const struct folio *folio) > -{ > - return READ_ONCE(huge_zero_folio) == folio; > -} > - > -static inline bool is_huge_zero_pmd(pmd_t pmd) > -{ > - return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd); > -} > - > -struct folio *mm_get_huge_zero_folio(struct mm_struct *mm); > -void mm_put_huge_zero_folio(struct mm_struct *mm); > - > static inline bool thp_migration_supported(void) > { > return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION); > @@ -631,21 +615,6 @@ static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) > return 0; > } > > -static inline bool is_huge_zero_folio(const struct folio *folio) > -{ > - return false; > -} > - > -static inline bool is_huge_zero_pmd(pmd_t pmd) > -{ > - return false; > -} > - > -static inline void mm_put_huge_zero_folio(struct mm_struct *mm) > -{ > - return; > -} > - > static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, > unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) > { > diff --git a/include/linux/mm.h b/include/linux/mm.h > index 0ef2ba0c667a..c8fbeaacf896 100644 > --- a/include/linux/mm.h > +++ b/include/linux/mm.h > @@ -4018,6 +4018,40 @@ static inline bool vma_is_special_huge(const struct vm_area_struct *vma) > > #endif /* CONFIG_TRANSPARENT_HUGEPAGE || 
CONFIG_HUGETLBFS */ > > +#ifdef CONFIG_TRANSPARENT_HUGEPAGE > +extern struct folio *huge_zero_folio; > +extern unsigned long huge_zero_pfn; > + > +static inline bool is_huge_zero_folio(const struct folio *folio) > +{ > + return READ_ONCE(huge_zero_folio) == folio; > +} > + > +static inline bool is_huge_zero_pmd(pmd_t pmd) > +{ > + return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd); > +} > + > +struct folio *mm_get_huge_zero_folio(struct mm_struct *mm); > +void mm_put_huge_zero_folio(struct mm_struct *mm); > + > +#else > +static inline bool is_huge_zero_folio(const struct folio *folio) > +{ > + return false; > +} > + > +static inline bool is_huge_zero_pmd(pmd_t pmd) > +{ > + return false; > +} > + > +static inline void mm_put_huge_zero_folio(struct mm_struct *mm) > +{ > + return; > +} > +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ > + > #if MAX_NUMNODES > 1 > void __init setup_nr_node_ids(void); > #else > -- > 2.49.0 >