Add the necessary infrastructure to enable NMI-safe execution of
css_rstat_updated().

Currently css_rstat_updated() takes a per-cpu per-css raw spinlock to
add the given css to the per-cpu per-css update tree. However, the
kernel cannot spin in NMI context, so the plan is to replace spinning
on the raw spinlock with a trylock and, on failure, add the given css
to a per-cpu lockless backlog which will be processed later from a
context that is allowed to spin on the raw spinlock. For now, this
patch only adds the necessary data structures to the css and subsystem
(ss) structures.

Signed-off-by: Shakeel Butt <shakeel.butt@xxxxxxxxx>
---
 include/linux/cgroup-defs.h |  4 ++++
 kernel/cgroup/rstat.c       | 13 +++++++++++--
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 560582c4dbeb..f7b680f853ea 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -370,6 +370,9 @@ struct css_rstat_cpu {
 	 */
 	struct cgroup_subsys_state *updated_children;	/* terminated by self cgroup */
 	struct cgroup_subsys_state *updated_next;	/* NULL iff not on the list */
+
+	struct llist_node lnode;		/* lockless backlog node */
+	struct cgroup_subsys_state *owner;	/* back pointer */
 };
 
 /*
@@ -800,6 +803,7 @@ struct cgroup_subsys {
 
 	spinlock_t rstat_ss_lock;
 	raw_spinlock_t __percpu *rstat_ss_cpu_lock;
+	struct llist_head __percpu *lhead;	/* lockless backlog list */
 };
 
 extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index a30bcc4d4f48..d3092b4c85d7 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -419,7 +419,8 @@ int css_rstat_init(struct cgroup_subsys_state *css)
 	for_each_possible_cpu(cpu) {
 		struct css_rstat_cpu *rstatc = css_rstat_cpu(css, cpu);
 
-		rstatc->updated_children = css;
+		rstatc->owner = rstatc->updated_children = css;
+		init_llist_node(&rstatc->lnode);
 
 		if (css_is_cgroup(css)) {
 			struct cgroup_rstat_base_cpu *rstatbc;
@@ -484,8 +485,16 @@ int __init ss_rstat_init(struct cgroup_subsys *ss)
 	if (!ss->rstat_ss_cpu_lock)
 		return -ENOMEM;
 
-	for_each_possible_cpu(cpu)
+	ss->lhead = alloc_percpu(struct llist_head);
+	if (!ss->lhead) {
+		free_percpu(ss->rstat_ss_cpu_lock);
+		return -ENOMEM;
+	}
+
+	for_each_possible_cpu(cpu) {
 		raw_spin_lock_init(per_cpu_ptr(ss->rstat_ss_cpu_lock, cpu));
+		init_llist_head(per_cpu_ptr(ss->lhead, cpu));
+	}
 
 	return 0;
 }
-- 
2.47.1
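
As a rough illustration of the direction described in the changelog
(not part of this patch, and not necessarily how the actual follow-up
will look), css_rstat_updated() could use the new fields roughly as
sketched below. The control flow and the use of
raw_spin_trylock_irqsave() are assumptions for illustration; handling
of the base css (whose css->ss is NULL) and the re-entrancy needed to
avoid double-inserting the same lnode are deliberately glossed over.

	/* Illustrative sketch only, not the actual follow-up patch. */
	void css_rstat_updated(struct cgroup_subsys_state *css, int cpu)
	{
		raw_spinlock_t *cpu_lock;
		unsigned long flags;

		/* Sketch assumes css->ss is non-NULL. */
		cpu_lock = per_cpu_ptr(css->ss->rstat_ss_cpu_lock, cpu);

		if (!raw_spin_trylock_irqsave(cpu_lock, flags)) {
			struct css_rstat_cpu *rstatc = css_rstat_cpu(css, cpu);

			/*
			 * Cannot spin here (e.g. NMI context): queue the css
			 * on the per-cpu lockless backlog instead.  llist_add()
			 * is NMI safe.  A real implementation would also need
			 * an atomic claim of rstatc->lnode so the same node is
			 * not added twice when this path is re-entered from
			 * irq/NMI context.
			 */
			if (!llist_on_list(&rstatc->lnode))
				llist_add(&rstatc->lnode,
					  per_cpu_ptr(css->ss->lhead, cpu));
			return;
		}

		/* ... existing insertion into the per-cpu update tree ... */

		raw_spin_unlock_irqrestore(cpu_lock, flags);
	}

The flush side would then drain the backlog with llist_del_all(),
using each lnode's owner back pointer to recover the css, from a
context that is allowed to spin on the raw spinlock, before walking
the regular per-cpu update tree.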