On Fri 05-09-25 13:16:06, Shakeel Butt wrote:
> Generally memcg charging is allowed from all the contexts including NMI
> where even spinning on a spinlock can cause locking issues. However one
> call chain was missed during the addition of memcg charging from any
> context support. That is try_charge_memcg() -> memcg_memory_event() ->
> cgroup_file_notify().
> 
> The possible function call tree under cgroup_file_notify() can acquire
> many different spin locks in spinning mode. Some of them are
> cgroup_file_kn_lock, kernfs_notify_lock, and pool_workqueue's lock. So,
> let's just skip cgroup_file_notify() from memcg charging if the context
> does not allow spinning.
> 
> Signed-off-by: Shakeel Butt <shakeel.butt@xxxxxxxxx>

Acked-by: Michal Hocko <mhocko@xxxxxxxx>

> ---
>  include/linux/memcontrol.h | 23 ++++++++++++++++-------
>  mm/memcontrol.c            |  7 ++++---
>  2 files changed, 20 insertions(+), 10 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 9dc5b52672a6..054fa34c936a 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -993,22 +993,25 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
>  		count_memcg_events_mm(mm, idx, 1);
>  }
>  
> -static inline void memcg_memory_event(struct mem_cgroup *memcg,
> -				      enum memcg_memory_event event)
> +static inline void __memcg_memory_event(struct mem_cgroup *memcg,
> +					enum memcg_memory_event event,
> +					bool allow_spinning)
>  {
>  	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
>  			  event == MEMCG_SWAP_FAIL;
>  
>  	atomic_long_inc(&memcg->memory_events_local[event]);

Doesn't this involve locking on 32b? I guess we do not care all that much
but we might want to bail out early on those arches for !allow_spinning.
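For illustration, such an early bail out could look something like the
sketch below (completely untested; CONFIG_ARCH_ATOMIC_USES_LOCKS is a made
up opt-in symbol standing for arches whose atomic_t operations are
implemented with spinlocks, e.g. sparc32 in arch/sparc/lib/atomic32.c):

	static inline void __memcg_memory_event(struct mem_cgroup *memcg,
						enum memcg_memory_event event,
						bool allow_spinning)
	{
		/*
		 * On arches where atomic_t ops take hashed spinlocks do
		 * not even touch the event counters if the context does
		 * not allow spinning. CONFIG_ARCH_ATOMIC_USES_LOCKS is
		 * made up for this sketch.
		 */
		if (IS_ENABLED(CONFIG_ARCH_ATOMIC_USES_LOCKS) && !allow_spinning)
			return;

		/* ... rest of the function as in the patch ... */
	}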
> -	if (!swap_event)
> +	if (!swap_event && allow_spinning)
>  		cgroup_file_notify(&memcg->events_local_file);
>  
>  	do {
>  		atomic_long_inc(&memcg->memory_events[event]);
> -		if (swap_event)
> -			cgroup_file_notify(&memcg->swap_events_file);
> -		else
> -			cgroup_file_notify(&memcg->events_file);
> +		if (allow_spinning) {
> +			if (swap_event)
> +				cgroup_file_notify(&memcg->swap_events_file);
> +			else
> +				cgroup_file_notify(&memcg->events_file);
> +		}
>  
>  		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
>  			break;
> @@ -1018,6 +1021,12 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg,
>  		       !mem_cgroup_is_root(memcg));
>  }
>  
> +static inline void memcg_memory_event(struct mem_cgroup *memcg,
> +				      enum memcg_memory_event event)
> +{
> +	__memcg_memory_event(memcg, event, true);
> +}
> +
>  static inline void memcg_memory_event_mm(struct mm_struct *mm,
>  					 enum memcg_memory_event event)
>  {
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 257d2c76b730..dd5cd9d352f3 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -2306,12 +2306,13 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
>  	bool drained = false;
>  	bool raised_max_event = false;
>  	unsigned long pflags;
> +	bool allow_spinning = gfpflags_allow_spinning(gfp_mask);
>  
>  retry:
>  	if (consume_stock(memcg, nr_pages))
>  		return 0;
>  
> -	if (!gfpflags_allow_spinning(gfp_mask))
> +	if (!allow_spinning)
>  		/* Avoid the refill and flush of the older stock */
>  		batch = nr_pages;
>  
> @@ -2347,7 +2348,7 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
>  	if (!gfpflags_allow_blocking(gfp_mask))
>  		goto nomem;
>  
> -	memcg_memory_event(mem_over_limit, MEMCG_MAX);
> +	__memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
>  	raised_max_event = true;
>  
>  	psi_memstall_enter(&pflags);
> @@ -2414,7 +2415,7 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
>  	 * a MEMCG_MAX event.
>  	 */
>  	if (!raised_max_event)
> -		memcg_memory_event(mem_over_limit, MEMCG_MAX);
> +		__memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
>  
>  	/*
>  	 * The allocation either can't fail or will lead to more memory
> -- 
> 2.47.3
> 

-- 
Michal Hocko
SUSE Labs