On 2025-07-08 18:53:03 [-0700], Alexei Starovoitov wrote:
> @@ -4555,6 +4707,53 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
>  	discard_slab(s, slab);
>  }
>  
> +static DEFINE_PER_CPU(struct llist_head, defer_free_objects);
> +static DEFINE_PER_CPU(struct irq_work, defer_free_work);

        static DEFINE_PER_CPU(struct llist_head, defer_free_objects) =
                LLIST_HEAD_INIT(defer_free_objects);
        static DEFINE_PER_CPU(struct irq_work, defer_free_work) =
                IRQ_WORK_INIT(free_deferred_objects);

would allow you to avoid init_defer_work().

> +static void free_deferred_objects(struct irq_work *work)
> +{
> +	struct llist_head *llhead = this_cpu_ptr(&defer_free_objects);
> +	struct llist_node *llnode, *pos, *t;
…
> +}
> +
> +static int __init init_defer_work(void)
> +{
> +	int cpu;
> +
> +	for_each_possible_cpu(cpu) {
> +		init_llist_head(per_cpu_ptr(&defer_free_objects, cpu));
> +		init_irq_work(per_cpu_ptr(&defer_free_work, cpu),
> +			      free_deferred_objects);
> +	}
> +	return 0;
> +}
> +late_initcall(init_defer_work);
> +
> +static void defer_free(void *head)
> +{
> +	if (llist_add(head, this_cpu_ptr(&defer_free_objects)))
> +		irq_work_queue(this_cpu_ptr(&defer_free_work));

If you group &defer_free_objects and &defer_free_work into a struct, you
could avoid using this_cpu_ptr() twice. Having both in one struct would
also allow you to use container_of() in free_deferred_objects() to get
the free list (rough sketch at the end of this mail).

> +}
…
> @@ -4844,6 +5064,62 @@ void kfree(const void *object)
>  }
>  EXPORT_SYMBOL(kfree);
>  
> +/*
> + * Can be called while holding raw_spin_lock or from IRQ and NMI,

raw_spinlock_t

> + * but only for objects allocated by kmalloc_nolock(),
> + * since some debug checks (like kmemleak and kfence) were
> + * skipped on allocation. large_kmalloc is not supported either.
> + */
> +void kfree_nolock(const void *object)
> +{
…

Sebastian
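
Rough, untested sketch of what the two suggestions above could look like
when combined. The struct name, field names and the per-CPU variable name
are only illustrative, and the per-object freeing inside the loop is
elided just like in the quoted hunk; this is not the actual reworked
patch:

        #include <linux/irq_work.h>
        #include <linux/llist.h>
        #include <linux/percpu.h>

        /* One per-CPU instance carries both the free list and its irq_work. */
        struct defer_free {
                struct llist_head objects;
                struct irq_work work;
        };

        static void free_deferred_objects(struct irq_work *work);

        /* Static initializers, so no init_defer_work()/late_initcall() is needed. */
        static DEFINE_PER_CPU(struct defer_free, defer_free_objects) = {
                .objects = LLIST_HEAD_INIT(objects),
                .work    = IRQ_WORK_INIT(free_deferred_objects),
        };

        static void free_deferred_objects(struct irq_work *work)
        {
                /* container_of() recovers the per-CPU instance from the irq_work. */
                struct defer_free *df = container_of(work, struct defer_free, work);
                struct llist_node *llnode, *pos, *t;

                llnode = llist_del_all(&df->objects);
                llist_for_each_safe(pos, t, llnode) {
                        /* actual freeing of each object elided, as in the quote */
                }
        }

        static void defer_free(void *head)
        {
                /* A single this_cpu_ptr() now covers both the list and the irq_work. */
                struct defer_free *df = this_cpu_ptr(&defer_free_objects);

                if (llist_add(head, &df->objects))
                        irq_work_queue(&df->work);
        }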