> -----Original Message-----
> From: Nhat Pham <nphamcs@xxxxxxxxx>
> Sent: Friday, July 4, 2025 11:39 AM
> To: Sridhar, Kanchana P <kanchana.p.sridhar@xxxxxxxxx>
> Cc: linux-kernel@xxxxxxxxxxxxxxx; linux-mm@xxxxxxxxx;
> hannes@xxxxxxxxxxx; yosry.ahmed@xxxxxxxxx; chengming.zhou@xxxxxxxxx;
> usamaarif642@xxxxxxxxx; ryan.roberts@xxxxxxx; 21cnbao@xxxxxxxxx;
> ying.huang@xxxxxxxxxxxxxxxxx; akpm@xxxxxxxxxxxxxxxxxxxx;
> senozhatsky@xxxxxxxxxxxx; linux-crypto@xxxxxxxxxxxxxxx;
> herbert@xxxxxxxxxxxxxxxxxxx; davem@xxxxxxxxxxxxx;
> clabbe@xxxxxxxxxxxx; ardb@xxxxxxxxxx; ebiggers@xxxxxxxxxx;
> surenb@xxxxxxxxxx; Accardi, Kristen C <kristen.c.accardi@xxxxxxxxx>;
> Gomes, Vinicius <vinicius.gomes@xxxxxxxxx>; Feghali, Wajdi K
> <wajdi.k.feghali@xxxxxxxxx>; Gopal, Vinodh <vinodh.gopal@xxxxxxxxx>
> Subject: Re: [PATCH v10 20/25] mm: zswap: Move the CPU hotplug
> procedures under "pool functions".
>
> On Thu, Jul 3, 2025 at 9:23 PM Kanchana P Sridhar
> <kanchana.p.sridhar@xxxxxxxxx> wrote:
> >
> > This patch merely moves zswap_cpu_comp_prepare() and
> > zswap_cpu_comp_dead() to be in the "pool functions" section because
> > these functions are invoked upon pool creation/deletion.
>
> Hmm idk, "compressed storage" section seems fitting for
> zswap_cpu_comp_prepare() and zswap_cpu_comp_dead().
>
> Is this patch necessary? Not too sure.

I moved them under "pool functions" because the next patch (#21) ties
the lifetime of the acomp_ctx resources to pool creation/deletion, and
no longer registers a teardown callback. I can move the
zswap_cpu_comp_prepare() and zswap_cpu_comp_dead() changes back to
"compressed storage" if you think that's where they belong.
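To make that dependency concrete, the lifetime model patch #21 moves
to is roughly the sketch below. This is for discussion only, not the
patch's actual code: the cpuhp_*() calls, CPUHP_MM_ZSWP_POOL_PREPARE
and zswap_cpu_comp_prepare() are real, but zswap_hotplug_setup(),
zswap_pool_alloc_percpu(), zswap_pool_free_percpu() and
zswap_cpu_comp_dealloc() are illustrative names I am using here.

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

/*
 * Sketch: register the cpuhp state with a NULL teardown callback, so
 * a CPU going offline no longer frees its acomp_ctx. The per-CPU
 * resources instead live from pool creation to pool deletion.
 */
static int __init zswap_hotplug_setup(void)
{
	/* NULL teardown: nothing runs when a CPU goes offline. */
	return cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				       "mm/zswap_pool:prepare",
				       zswap_cpu_comp_prepare, NULL);
}

static int zswap_pool_alloc_percpu(struct zswap_pool *pool)
{
	/* Runs zswap_cpu_comp_prepare() for each online CPU. */
	return cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
					&pool->node);
}

static void zswap_pool_free_percpu(struct zswap_pool *pool)
{
	int cpu;

	/* Drop the instance without invoking any teardown callback. */
	cpuhp_state_remove_instance_nocalls(CPUHP_MM_ZSWP_POOL_PREPARE,
					    &pool->node);

	/* Free each CPU's acomp_ctx here, at pool deletion. */
	for_each_possible_cpu(cpu)
		zswap_cpu_comp_dealloc(cpu, pool); /* hypothetical helper */
}

With no teardown callback, the prepare/free pair is only reachable
from pool creation and deletion, which is why grouping the functions
under "pool functions" seemed more natural to me.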
Thanks,
Kanchana

> >
> > Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@xxxxxxxxx>
> > ---
> >  mm/zswap.c | 188 ++++++++++++++++++++++++++---------------------------
> >  1 file changed, 94 insertions(+), 94 deletions(-)
> >
> > diff --git a/mm/zswap.c b/mm/zswap.c
> > index 3c0fd8a137182..3538ecaed5e16 100644
> > --- a/mm/zswap.c
> > +++ b/mm/zswap.c
> > @@ -248,6 +248,100 @@ static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
> >   **********************************/
> >  static void __zswap_pool_empty(struct percpu_ref *ref);
> >
> > +static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
> > +{
> > +	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
> > +	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
> > +	struct crypto_acomp *acomp = NULL;
> > +	struct acomp_req *req = NULL;
> > +	u8 *buffer = NULL;
> > +	int ret;
> > +
> > +	buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
> > +	if (!buffer) {
> > +		ret = -ENOMEM;
> > +		goto fail;
> > +	}
> > +
> > +	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
> > +	if (IS_ERR(acomp)) {
> > +		pr_err("could not alloc crypto acomp %s : %ld\n",
> > +		       pool->tfm_name, PTR_ERR(acomp));
> > +		ret = PTR_ERR(acomp);
> > +		goto fail;
> > +	}
> > +
> > +	req = acomp_request_alloc(acomp);
> > +	if (!req) {
> > +		pr_err("could not alloc crypto acomp_request %s\n",
> > +		       pool->tfm_name);
> > +		ret = -ENOMEM;
> > +		goto fail;
> > +	}
> > +
> > +	/*
> > +	 * Only hold the mutex after completing allocations, otherwise we may
> > +	 * recurse into zswap through reclaim and attempt to hold the mutex
> > +	 * again resulting in a deadlock.
> > +	 */
> > +	mutex_lock(&acomp_ctx->mutex);
> > +	crypto_init_wait(&acomp_ctx->wait);
> > +
> > +	/*
> > +	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
> > +	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
> > +	 * won't be called, crypto_wait_req() will return without blocking.
> > +	 */
> > +	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
> > +				   crypto_req_done, &acomp_ctx->wait);
> > +
> > +	acomp_ctx->buffer = buffer;
> > +	acomp_ctx->acomp = acomp;
> > +	acomp_ctx->is_sleepable = acomp_is_async(acomp);
> > +	acomp_ctx->req = req;
> > +	mutex_unlock(&acomp_ctx->mutex);
> > +	return 0;
> > +
> > +fail:
> > +	if (acomp)
> > +		crypto_free_acomp(acomp);
> > +	kfree(buffer);
> > +	return ret;
> > +}
> > +
> > +static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
> > +{
> > +	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
> > +	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
> > +	struct acomp_req *req;
> > +	struct crypto_acomp *acomp;
> > +	u8 *buffer;
> > +
> > +	if (IS_ERR_OR_NULL(acomp_ctx))
> > +		return 0;
> > +
> > +	mutex_lock(&acomp_ctx->mutex);
> > +	req = acomp_ctx->req;
> > +	acomp = acomp_ctx->acomp;
> > +	buffer = acomp_ctx->buffer;
> > +	acomp_ctx->req = NULL;
> > +	acomp_ctx->acomp = NULL;
> > +	acomp_ctx->buffer = NULL;
> > +	mutex_unlock(&acomp_ctx->mutex);
> > +
> > +	/*
> > +	 * Do the actual freeing after releasing the mutex to avoid subtle
> > +	 * locking dependencies causing deadlocks.
> > +	 */
> > +	if (!IS_ERR_OR_NULL(req))
> > +		acomp_request_free(req);
> > +	if (!IS_ERR_OR_NULL(acomp))
> > +		crypto_free_acomp(acomp);
> > +	kfree(buffer);
> > +
> > +	return 0;
> > +}
> > +
> >  static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
> >  {
> >  	struct zswap_pool *pool;
> > @@ -818,100 +912,6 @@ static void zswap_entry_free(struct zswap_entry *entry)
> >  /*********************************
> >  * compressed storage functions
> >  **********************************/
> > -static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
> > -{
> > -	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
> > -	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
> > -	struct crypto_acomp *acomp = NULL;
> > -	struct acomp_req *req = NULL;
> > -	u8 *buffer = NULL;
> > -	int ret;
> > -
> > -	buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
> > -	if (!buffer) {
> > -		ret = -ENOMEM;
> > -		goto fail;
> > -	}
> > -
> > -	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
> > -	if (IS_ERR(acomp)) {
> > -		pr_err("could not alloc crypto acomp %s : %ld\n",
> > -		       pool->tfm_name, PTR_ERR(acomp));
> > -		ret = PTR_ERR(acomp);
> > -		goto fail;
> > -	}
> > -
> > -	req = acomp_request_alloc(acomp);
> > -	if (!req) {
> > -		pr_err("could not alloc crypto acomp_request %s\n",
> > -		       pool->tfm_name);
> > -		ret = -ENOMEM;
> > -		goto fail;
> > -	}
> > -
> > -	/*
> > -	 * Only hold the mutex after completing allocations, otherwise we may
> > -	 * recurse into zswap through reclaim and attempt to hold the mutex
> > -	 * again resulting in a deadlock.
> > -	 */
> > -	mutex_lock(&acomp_ctx->mutex);
> > -	crypto_init_wait(&acomp_ctx->wait);
> > -
> > -	/*
> > -	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
> > -	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
> > -	 * won't be called, crypto_wait_req() will return without blocking.
> > -	 */
> > -	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
> > -				   crypto_req_done, &acomp_ctx->wait);
> > -
> > -	acomp_ctx->buffer = buffer;
> > -	acomp_ctx->acomp = acomp;
> > -	acomp_ctx->is_sleepable = acomp_is_async(acomp);
> > -	acomp_ctx->req = req;
> > -	mutex_unlock(&acomp_ctx->mutex);
> > -	return 0;
> > -
> > -fail:
> > -	if (acomp)
> > -		crypto_free_acomp(acomp);
> > -	kfree(buffer);
> > -	return ret;
> > -}
> > -
> > -static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
> > -{
> > -	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
> > -	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
> > -	struct acomp_req *req;
> > -	struct crypto_acomp *acomp;
> > -	u8 *buffer;
> > -
> > -	if (IS_ERR_OR_NULL(acomp_ctx))
> > -		return 0;
> > -
> > -	mutex_lock(&acomp_ctx->mutex);
> > -	req = acomp_ctx->req;
> > -	acomp = acomp_ctx->acomp;
> > -	buffer = acomp_ctx->buffer;
> > -	acomp_ctx->req = NULL;
> > -	acomp_ctx->acomp = NULL;
> > -	acomp_ctx->buffer = NULL;
> > -	mutex_unlock(&acomp_ctx->mutex);
> > -
> > -	/*
> > -	 * Do the actual freeing after releasing the mutex to avoid subtle
> > -	 * locking dependencies causing deadlocks.
> > -	 */
> > -	if (!IS_ERR_OR_NULL(req))
> > -		acomp_request_free(req);
> > -	if (!IS_ERR_OR_NULL(acomp))
> > -		crypto_free_acomp(acomp);
> > -	kfree(buffer);
> > -
> > -	return 0;
> > -}
> > -
> >  static struct crypto_acomp_ctx *acomp_ctx_get_cpu_lock(struct zswap_pool *pool)
> >  {
> >  	struct crypto_acomp_ctx *acomp_ctx;
> > --
> > 2.27.0
> >