commit 8fc2347fb3ff4a3fc7929c70a5a21e1128935d4a
Author: Pavel Begunkov <asml.silence@xxxxxxxxx>
Date: Sat Jul 12 14:29:52 2025 +0100
net/mm: use PGTY for tracking page pool pages
Currently, we use page->pp_magic to determine whether a page belongs to
a page pool. It's not ideal as the field is aliased with other page
types, and thus needs to rely on elaborate rules to work. Add a new
page type for page pool.
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0ef2ba0c667a..975a013f1f17 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4175,7 +4175,7 @@ int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
#ifdef CONFIG_PAGE_POOL
static inline bool page_pool_page_is_pp(struct page *page)
{
- return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
+ return PageNetpp(page);
}
#else
static inline bool page_pool_page_is_pp(struct page *page)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 4fe5ee67535b..9bd1dfded2fc 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -957,6 +957,7 @@ enum pagetype {
PGTY_zsmalloc = 0xf6,
PGTY_unaccepted = 0xf7,
PGTY_large_kmalloc = 0xf8,
+ PGTY_netpp = 0xf9,
PGTY_mapcount_underflow = 0xff
};
@@ -1101,6 +1102,11 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc)
+/*
+ * Marks page_pool allocated pages
+ */
+PAGE_TYPE_OPS(Netpp, netpp, netpp)
+
/**
* PageHuge - Determine if the page belongs to hugetlbfs
* @page: The page to test.
diff --git a/include/net/netmem.h b/include/net/netmem.h
index de1d95f04076..20f5dbb08149 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -113,6 +113,8 @@ static inline bool netmem_is_net_iov(const netmem_ref netmem)
*/
static inline struct page *__netmem_to_page(netmem_ref netmem)
{
+ DEBUG_NET_WARN_ON_ONCE(netmem_is_net_iov(netmem));
+
return (__force struct page *)netmem;
}
diff --git a/net/core/netmem_priv.h b/net/core/netmem_priv.h
index cd95394399b4..e38c64da1a78 100644
--- a/net/core/netmem_priv.h
+++ b/net/core/netmem_priv.h
@@ -13,16 +13,11 @@ static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic)
__netmem_clear_lsb(netmem)->pp_magic |= pp_magic;
}
-static inline void netmem_clear_pp_magic(netmem_ref netmem)
-{
- WARN_ON_ONCE(__netmem_clear_lsb(netmem)->pp_magic & PP_DMA_INDEX_MASK);
-
- __netmem_clear_lsb(netmem)->pp_magic = 0;
-}
-
static inline bool netmem_is_pp(netmem_ref netmem)
{
- return (netmem_get_pp_magic(netmem) & PP_MAGIC_MASK) == PP_SIGNATURE;
+ if (netmem_is_net_iov(netmem))
+ return true;
+ return page_pool_page_is_pp(netmem_to_page(netmem));
}
static inline void netmem_set_pp(netmem_ref netmem, struct page_pool *pool)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 05e2e22a8f7c..52120e2912a6 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -371,6 +371,13 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
}
EXPORT_SYMBOL(page_pool_create);
+static void page_pool_set_page_pp_info(struct page_pool *pool,
+ struct page *page)
+{
+ __SetPageNetpp(page);
+ page_pool_set_pp_info(page_to_netmem(page));
+}
+
static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem);
static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
@@ -534,7 +541,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
}
alloc_stat_inc(pool, slow_high_order);
- page_pool_set_pp_info(pool, page_to_netmem(page));
+ page_pool_set_page_pp_info(pool, page);
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
@@ -579,7 +586,7 @@ static noinline netmem_ref __page_pool_alloc_netmems_slow(struct page_pool *pool
continue;
}
- page_pool_set_pp_info(pool, netmem);
+ page_pool_set_page_pp_info(pool, __netmem_to_page(netmem));
pool->alloc.cache[pool->alloc.count++] = netmem;
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
@@ -654,7 +661,6 @@ s32 page_pool_inflight(const struct page_pool *pool, bool strict)
void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
{
netmem_set_pp(netmem, pool);
- netmem_or_pp_magic(netmem, PP_SIGNATURE);
/* Ensuring all pages have been split into one fragment initially:
* page_pool_set_pp_info() is only called once for every page when it
@@ -669,7 +675,6 @@ void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
void page_pool_clear_pp_info(netmem_ref netmem)
{
- netmem_clear_pp_magic(netmem);
netmem_set_pp(netmem, NULL);
}
@@ -730,8 +735,11 @@ static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem)
trace_page_pool_state_release(pool, netmem, count);
if (put) {
+ struct page *page = netmem_to_page(netmem);
+
page_pool_clear_pp_info(netmem);
- put_page(netmem_to_page(netmem));
+ __ClearPageNetpp(page);
+ put_page(page);
}
/* An optimization would be to call __free_pages(page, pool->p.order)
* knowing page is not part of page-cache (thus avoiding a