Re: [PATCH net-next 2/9] page_pool: rename page_pool_return_page() to page_pool_return_netmem()

On Mon, 9 Jun 2025 at 07:32, Byungchul Park <byungchul@xxxxxx> wrote:
>
> Now that page_pool_return_page() is for returning netmem, not struct
> page, rename it to page_pool_return_netmem() to reflect what it does.
>
> Signed-off-by: Byungchul Park <byungchul@xxxxxx>
> Reviewed-by: Mina Almasry <almasrymina@xxxxxxxxxx>
> Reviewed-by: Toke Høiland-Jørgensen <toke@xxxxxxxxxx>
> Reviewed-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
> ---

Reviewed-by: Ilias Apalodimas <ilias.apalodimas@xxxxxxxxxx>
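
For the archive, a quick before/after of the prototype, copied from the hunks
below; the helper already took a netmem_ref, so the change is essentially just
the name (the definition also picks up the static that the forward declaration
already carried):

    /* before */
    static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem);

    /* after */
    static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem);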

>  net/core/page_pool.c | 22 +++++++++++-----------
>  1 file changed, 11 insertions(+), 11 deletions(-)
>
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 4011eb305cee..460d11a31fbc 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -371,7 +371,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
>  }
>  EXPORT_SYMBOL(page_pool_create);
>
> -static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem);
> +static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem);
>
>  static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
>  {
> @@ -409,7 +409,7 @@ static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
>                          * (2) break out to fallthrough to alloc_pages_node.
>                          * This limits stress on the page buddy allocator.
>                          */
> -                       page_pool_return_page(pool, netmem);
> +                       page_pool_return_netmem(pool, netmem);
>                         alloc_stat_inc(pool, waive);
>                         netmem = 0;
>                         break;
> @@ -712,7 +712,7 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
>   * a regular page (that will eventually be returned to the normal
>   * page-allocator via put_page).
>   */
> -void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
> +static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem)
>  {
>         int count;
>         bool put;
> @@ -829,7 +829,7 @@ __page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
>          * will be invoking put_page.
>          */
>         recycle_stat_inc(pool, released_refcnt);
> -       page_pool_return_page(pool, netmem);
> +       page_pool_return_netmem(pool, netmem);
>
>         return 0;
>  }
> @@ -872,7 +872,7 @@ void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
>         if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
>                 /* Cache full, fallback to free pages */
>                 recycle_stat_inc(pool, ring_full);
> -               page_pool_return_page(pool, netmem);
> +               page_pool_return_netmem(pool, netmem);
>         }
>  }
>  EXPORT_SYMBOL(page_pool_put_unrefed_netmem);
> @@ -915,7 +915,7 @@ static void page_pool_recycle_ring_bulk(struct page_pool *pool,
>          * since put_page() with refcnt == 1 can be an expensive operation.
>          */
>         for (; i < bulk_len; i++)
> -               page_pool_return_page(pool, bulk[i]);
> +               page_pool_return_netmem(pool, bulk[i]);
>  }
>
>  /**
> @@ -998,7 +998,7 @@ static netmem_ref page_pool_drain_frag(struct page_pool *pool,
>                 return netmem;
>         }
>
> -       page_pool_return_page(pool, netmem);
> +       page_pool_return_netmem(pool, netmem);
>         return 0;
>  }
>
> @@ -1012,7 +1012,7 @@ static void page_pool_free_frag(struct page_pool *pool)
>         if (!netmem || page_pool_unref_netmem(netmem, drain_count))
>                 return;
>
> -       page_pool_return_page(pool, netmem);
> +       page_pool_return_netmem(pool, netmem);
>  }
>
>  netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
> @@ -1079,7 +1079,7 @@ static void page_pool_empty_ring(struct page_pool *pool)
>                         pr_crit("%s() page_pool refcnt %d violation\n",
>                                 __func__, netmem_ref_count(netmem));
>
> -               page_pool_return_page(pool, netmem);
> +               page_pool_return_netmem(pool, netmem);
>         }
>  }
>
> @@ -1112,7 +1112,7 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
>          */
>         while (pool->alloc.count) {
>                 netmem = pool->alloc.cache[--pool->alloc.count];
> -               page_pool_return_page(pool, netmem);
> +               page_pool_return_netmem(pool, netmem);
>         }
>  }
>
> @@ -1252,7 +1252,7 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid)
>         /* Flush pool alloc cache, as refill will check NUMA node */
>         while (pool->alloc.count) {
>                 netmem = pool->alloc.cache[--pool->alloc.count];
> -               page_pool_return_page(pool, netmem);
> +               page_pool_return_netmem(pool, netmem);
>         }
>  }
>  EXPORT_SYMBOL(page_pool_update_nid);
> --
> 2.17.1
>
