On Wed, Aug 27, 2025 at 07:39:55PM +0800, Yafang Shao wrote: > On Wed, Aug 27, 2025 at 10:58 AM kernel test robot <lkp@xxxxxxxxx> wrote: > > > > Hi Yafang, > > > > kernel test robot noticed the following build warnings: > > > > [auto build test WARNING on akpm-mm/mm-everything] > > > > url: https://github.com/intel-lab-lkp/linux/commits/Yafang-Shao/mm-thp-add-support-for-BPF-based-THP-order-selection/20250826-152415 > > base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything > > patch link: https://lore.kernel.org/r/20250826071948.2618-2-laoar.shao%40gmail.com > > patch subject: [PATCH v6 mm-new 01/10] mm: thp: add support for BPF based THP order selection > > config: loongarch-randconfig-r113-20250827 (https://download.01.org/0day-ci/archive/20250827/202508271009.5neOZ0OG-lkp@xxxxxxxxx/config) > > compiler: clang version 18.1.8 (https://github.com/llvm/llvm-project 3b5b5c1ec4a3095ab096dd780e84d7ab81f3d7ff) > > reproduce: (https://download.01.org/0day-ci/archive/20250827/202508271009.5neOZ0OG-lkp@xxxxxxxxx/reproduce) > > > > If you fix the issue in a separate patch/commit (i.e. not just a new version of > > the same patch/commit), kindly add following tags > > | Reported-by: kernel test robot <lkp@xxxxxxxxx> > > | Closes: https://lore.kernel.org/oe-kbuild-all/202508271009.5neOZ0OG-lkp@xxxxxxxxx/ > > Thanks for the report . > It seems this sparse warning can be fixed with the below additional > change, would you please test it again? 
> > diff --git a/mm/bpf_thp.c b/mm/bpf_thp.c > index 46b3bc96359e..b2f97f9e930d 100644 > --- a/mm/bpf_thp.c > +++ b/mm/bpf_thp.c > @@ -5,27 +5,32 @@ > #include <linux/huge_mm.h> > #include <linux/khugepaged.h> > > +/** > + * @get_suggested_order: Get the suggested THP orders for allocation > + * @mm: mm_struct associated with the THP allocation > + * @vma__nullable: vm_area_struct associated with the THP allocation > (may be NULL) > + * When NULL, the decision should be based on @mm (i.e., when > + * triggered from an mm-scope hook rather than a VMA-specific > + * context). > + * Must belong to @mm (guaranteed by the caller). > + * @vma_flags: use these vm_flags instead of @vma->vm_flags (0 if @vma is NULL) > + * @tva_flags: TVA flags for current @vma (-1 if @vma is NULL) > + * @orders: Bitmask of requested THP orders for this allocation > + * - PMD-mapped allocation if PMD_ORDER is set > + * - mTHP allocation otherwise > + * > + * Return: Bitmask of suggested THP orders for allocation. The highest > + * suggested order will not exceed the highest requested order > + * in @orders. > + */ > +typedef int suggested_order_fn_t(struct mm_struct *mm, > + struct vm_area_struct *vma__nullable, > + u64 vma_flags, > + enum tva_type tva_flags, > + int orders); Hm you are doing part of my review here as part of the fix :) I think a respin is in order anyway, so we can tackle this in a future version. Not sure the test bot can try out patches though? Not seen that before (nice if it or somebody on the other end does though! :) > + > struct bpf_thp_ops { > - /** > - * @get_suggested_order: Get the suggested THP orders for allocation > - * @mm: mm_struct associated with the THP allocation > - * @vma__nullable: vm_area_struct associated with the THP > allocation (may be NULL) > - * When NULL, the decision should be based on > @mm (i.e., when > - * triggered from an mm-scope hook rather than > a VMA-specific > - * context). > - * Must belong to @mm (guaranteed by the caller). 
> - * @vma_flags: use these vm_flags instead of @vma->vm_flags (0 > if @vma is NULL) > - * @tva_flags: TVA flags for current @vma (-1 if @vma is NULL) > - * @orders: Bitmask of requested THP orders for this allocation > - * - PMD-mapped allocation if PMD_ORDER is set > - * - mTHP allocation otherwise > - * > - * Return: Bitmask of suggested THP orders for allocation. The highest > - * suggested order will not exceed the highest requested order > - * in @orders. > - */ > - int (*get_suggested_order)(struct mm_struct *mm, struct > vm_area_struct *vma__nullable, > - u64 vma_flags, enum tva_type > tva_flags, int orders) __rcu; > + suggested_order_fn_t __rcu *get_suggested_order; > }; > > static struct bpf_thp_ops bpf_thp; > @@ -34,8 +39,7 @@ static DEFINE_SPINLOCK(thp_ops_lock); > int get_suggested_order(struct mm_struct *mm, struct vm_area_struct > *vma__nullable, > u64 vma_flags, enum tva_type tva_flags, int orders) > { > - int (*bpf_suggested_order)(struct mm_struct *mm, struct > vm_area_struct *vma__nullable, > - u64 vma_flags, enum tva_type > tva_flags, int orders); > + suggested_order_fn_t *bpf_suggested_order; > int suggested_orders = orders; > > /* No BPF program is attached */ > @@ -106,10 +110,12 @@ static int bpf_thp_reg(void *kdata, struct bpf_link *link) > > static void bpf_thp_unreg(void *kdata, struct bpf_link *link) > { > + suggested_order_fn_t *old_fn; > + > spin_lock(&thp_ops_lock); > clear_bit(TRANSPARENT_HUGEPAGE_BPF_ATTACHED, > &transparent_hugepage_flags); > - WARN_ON_ONCE(!rcu_access_pointer(bpf_thp.get_suggested_order)); > - rcu_replace_pointer(bpf_thp.get_suggested_order, NULL, > lockdep_is_held(&thp_ops_lock)); > + old_fn = rcu_replace_pointer(bpf_thp.get_suggested_order, > NULL, lockdep_is_held(&thp_ops_lock)); > + WARN_ON_ONCE(!old_fn); > spin_unlock(&thp_ops_lock); > > synchronize_rcu(); > @@ -117,8 +123,9 @@ static void bpf_thp_unreg(void *kdata, struct > bpf_link *link) > > static int bpf_thp_update(void *kdata, void *old_kdata, struct 
bpf_link *link) > { > - struct bpf_thp_ops *ops = kdata; > + suggested_order_fn_t *old_fn, *new_fn; > struct bpf_thp_ops *old = old_kdata; > + struct bpf_thp_ops *ops = kdata; > int ret = 0; > > if (!ops || !old) > @@ -130,9 +137,10 @@ static int bpf_thp_update(void *kdata, void > *old_kdata, struct bpf_link *link) > ret = -ENOENT; > goto out; > } > - WARN_ON_ONCE(!rcu_access_pointer(bpf_thp.get_suggested_order)); > - rcu_replace_pointer(bpf_thp.get_suggested_order, > ops->get_suggested_order, > - lockdep_is_held(&thp_ops_lock)); > + > + new_fn = rcu_dereference(ops->get_suggested_order); > + old_fn = rcu_replace_pointer(bpf_thp.get_suggested_order, > new_fn, lockdep_is_held(&thp_ops_lock)); > + WARN_ON_ONCE(!old_fn || !new_fn); > > out: > spin_unlock(&thp_ops_lock); > @@ -159,7 +167,7 @@ static int suggested_order(struct mm_struct *mm, > struct vm_area_struct *vma__nul > } > > static struct bpf_thp_ops __bpf_thp_ops = { > - .get_suggested_order = suggested_order, > + .get_suggested_order = (suggested_order_fn_t __rcu *)suggested_order, > }; > > static struct bpf_struct_ops bpf_bpf_thp_ops = { > > > -- > Regards > > Yafang