Signed-off-by: Alexei Starovoitov <ast@xxxxxxxxxx>
---
 mm/slab.h | 1 +
 mm/slub.c | 5 +++++
 2 files changed, 6 insertions(+)

diff --git a/mm/slab.h b/mm/slab.h
index 65f4616b41de..165737accb20 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -262,6 +262,7 @@ struct kmem_cache_order_objects {
 struct kmem_cache {
 #ifndef CONFIG_SLUB_TINY
 	struct kmem_cache_cpu __percpu *cpu_slab;
+	struct lock_class_key lock_key;
 #endif
 	/* Used for retrieving partial slabs, etc. */
 	slab_flags_t flags;
diff --git a/mm/slub.c b/mm/slub.c
index 2f30b85fbf68..ca7f6a3d5db4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3080,9 +3080,13 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 	int cpu;
 	struct kmem_cache_cpu *c;
 
+	if (!init_section_contains(s, 1))
+		/* register lockdep key for non-boot kmem caches */
+		lockdep_register_key(&s->lock_key);
 	for_each_possible_cpu(cpu) {
 		c = per_cpu_ptr(s->cpu_slab, cpu);
 		local_trylock_init(&c->lock);
+		lockdep_set_class(&c->lock, &s->lock_key);
 		c->tid = init_tid(cpu);
 	}
 }
@@ -5953,6 +5957,7 @@ void __kmem_cache_release(struct kmem_cache *s)
 {
 	cache_random_seq_destroy(s);
 #ifndef CONFIG_SLUB_TINY
+	lockdep_unregister_key(&s->lock_key);
 	free_percpu(s->cpu_slab);
 #endif
 	free_kmem_cache_nodes(s);
-- 
2.47.1
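
[Editor's note, not part of the patch: the diff above gives each kmem_cache its own
dynamically registered lockdep key and assigns that class to every per-CPU
cpu_slab lock, so lockdep distinguishes caches instead of lumping all the locks
into one class. As a minimal sketch of the same dynamic-key pattern outside the
patch context, the snippet below applies it to a hypothetical heap-allocated
object with an ordinary spinlock; "demo_obj" and its functions are illustrative
names, not anything from mm/slub.c. lockdep_register_key(),
lockdep_set_class(), and lockdep_unregister_key() are the real lockdep APIs.]

/*
 * Sketch: a per-object lockdep class for a dynamically allocated object.
 * The key must be registered before it is used as a lock class, and
 * unregistered before the memory that holds it is freed -- the same
 * ordering the patch follows in init_kmem_cache_cpus() and
 * __kmem_cache_release().
 */
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {			/* hypothetical example struct */
	spinlock_t lock;
	struct lock_class_key key;	/* one lockdep class per object */
};

static struct demo_obj *demo_obj_create(void)
{
	struct demo_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;
	/* Register the dynamic key, then bind the lock to that class. */
	lockdep_register_key(&o->key);
	spin_lock_init(&o->lock);
	lockdep_set_class(&o->lock, &o->key);
	return o;
}

static void demo_obj_destroy(struct demo_obj *o)
{
	/* Unregister before freeing the memory containing the key. */
	lockdep_unregister_key(&o->key);
	kfree(o);
}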