On Tue, Aug 19, 2025 at 8:32 PM Peter Zijlstra <peterz@xxxxxxxxxxxxx> wrote:
>
> On Tue, Aug 19, 2025 at 09:58:31AM +0800, Menglong Dong wrote:
>
> > diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> > index be00629f0ba4..00383fed9f63 100644
> > --- a/kernel/sched/core.c
> > +++ b/kernel/sched/core.c
> > @@ -119,6 +119,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
> >  EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
> >
> >  DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
> > +EXPORT_SYMBOL_GPL(runqueues);
>
> Oh no, absolutely not.
>
> You never, ever, export a variable, and certainly not this one.
>
> How about something like so?
>
> I tried 'clever' things with export inline, but the compiler hates me,
> so the below is the best I could make work.

I see. You mean that we don't export the variable, and instead use the
inlined version in vmlinux and the out-of-line, exported version in
modules, which I think is nice ;)

(I wasn't aware that we shouldn't export variables :/)

I'll try your advice. Thanks!

(To double-check my understanding, I put a small self-contained sketch
of the pattern at the end of this mail.)

Menglong Dong

>
> ---
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -2315,6 +2315,7 @@ static __always_inline void alloc_tag_re
>  #define alloc_tag_restore(_tag, _old) do {} while (0)
>  #endif
>
> +#ifndef MODULE
>  #ifndef COMPILE_OFFSETS
>
>  extern void __migrate_enable(void);
> @@ -2328,7 +2329,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq
>  #define this_rq_raw() PERCPU_PTR(&runqueues)
>  #endif
>
> -static inline void migrate_enable(void)
> +static inline void _migrate_enable(void)
>  {
>  	struct task_struct *p = current;
>
> @@ -2363,7 +2364,7 @@ static inline void migrate_enable(void)
>  	(*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned))--;
>  }
>
> -static inline void migrate_disable(void)
> +static inline void _migrate_disable(void)
>  {
>  	struct task_struct *p = current;
>
> @@ -2382,10 +2383,30 @@ static inline void migrate_disable(void)
>  	(*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned))++;
>  	p->migration_disabled = 1;
>  }
> -#else
> -static inline void migrate_disable(void) { }
> -static inline void migrate_enable(void) { }
> -#endif
> +#else /* !COMPILE_OFFSETS */
> +static inline void _migrate_disable(void) { }
> +static inline void _migrate_enable(void) { }
> +#endif /* !COMPILE_OFFSETS */
> +
> +#ifndef CREATE_MIGRATE_DISABLE
> +static inline void migrate_disable(void)
> +{
> +	_migrate_disable();
> +}
> +
> +static inline void migrate_enable(void)
> +{
> +	_migrate_enable();
> +}
> +#else /* CREATE_MIGRATE_DISABLE */
> +extern void migrate_disable(void);
> +extern void migrate_enable(void);
> +#endif /* CREATE_MIGRATE_DISABLE */
> +
> +#else /* !MODULE */
> +extern void migrate_disable(void);
> +extern void migrate_enable(void);
> +#endif /* !MODULE */
>
>  DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
>
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -7,6 +7,9 @@
>   * Copyright (C) 1991-2002 Linus Torvalds
>   * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
>   */
> +#define CREATE_MIGRATE_DISABLE
> +#include <linux/sched.h>
> +
>  #include <linux/highmem.h>
>  #include <linux/hrtimer_api.h>
>  #include <linux/ktime_api.h>
> @@ -119,7 +122,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_updat
>  EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
>
>  DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
> -EXPORT_SYMBOL_GPL(runqueues);
>
>  #ifdef CONFIG_SCHED_PROXY_EXEC
>  DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
>
> @@ -2382,6 +2384,11 @@ static void migrate_disable_switch(struc
>  	__do_set_cpus_allowed(p, &ac);
>  }
>
> +void migrate_disable(void)
> +{
> +	_migrate_disable();
> +}
> +
>  void __migrate_enable(void)
>  {
>  	struct task_struct *p = current;
>
> @@ -2392,7 +2399,11 @@ void __migrate_enable(void)
>
>  	__set_cpus_allowed_ptr(p, &ac);
>  }
> -EXPORT_SYMBOL_GPL(__migrate_enable);
> +
> +void migrate_enable(void)
> +{
> +	_migrate_enable();
> +}
>
>  static inline bool rq_has_pinned_tasks(struct rq *rq)
>  {
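
Here is the toy user-space sketch of how I read the pattern. The file
names, the nr_pinned counter and everything else here are made up for
illustration (no per-cpu machinery, no real kernel headers); only the
#ifndef MODULE / CREATE_MIGRATE_DISABLE split mirrors your diff:

/* pattern.h -- stand-in for the include/linux/sched.h part */
#ifndef PATTERN_H
#define PATTERN_H

#ifndef MODULE
/*
 * Built-in ("vmlinux") side: the fast path is inlined and touches the
 * private counter directly, so the counter itself is never exported.
 */
extern unsigned int nr_pinned;	/* toy stand-in for rq::nr_pinned */

static inline void _migrate_disable(void) { nr_pinned++; }
static inline void _migrate_enable(void)  { nr_pinned--; }

#ifndef CREATE_MIGRATE_DISABLE
/* Normal built-in users get the inline wrappers... */
static inline void migrate_disable(void) { _migrate_disable(); }
static inline void migrate_enable(void)  { _migrate_enable(); }
#else
/* ...while the one file that emits the out-of-line bodies only sees
 * declarations, so its definitions below don't clash. */
void migrate_disable(void);
void migrate_enable(void);
#endif

#else /* MODULE */
/* Module side: declarations only; the data stays private to the core. */
void migrate_disable(void);
void migrate_enable(void);
#endif /* MODULE */

#endif /* PATTERN_H */

/* core.c -- stand-in for kernel/sched/core.c */
#define CREATE_MIGRATE_DISABLE
#include "pattern.h"

unsigned int nr_pinned;

/* These two are what would carry EXPORT_SYMBOL_GPL() for modules; they
 * just wrap the same inline helpers the built-in fast path uses. */
void migrate_disable(void) { _migrate_disable(); }
void migrate_enable(void)  { _migrate_enable(); }

So modules only link against migrate_disable()/migrate_enable(), and
the variable itself never has to show up in the export table, while
built-in users keep the inlined fast path. If I got that wrong, please
correct me.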