page_pool::p is driver-defined params, copied directly from the
structure passed via page_pool_create(). The structure isn't meant
to be modified by the Page Pool core code and this might even look
confusing[0][1].
In order to be able to alter some flags, let's define our own, internal
fields. Use the slot freed earlier to stay within the same cacheline as
before (or almost if it's shorter than 64 bytes).
The flag indicating whether to perform DMA mapping can be bool; as for
DMA sync, define it as an enum to be able to extend it later on. They
are defined as bits in the driver-set params; keep them as bits here as
well, so as not to waste a byte per bit. Now there are 29 free bits left
in those 4 bytes + 4 free bytes more before the cacheline boundary.
in those 4 bytes + 4 free bytes more before the cacheline boundary.
We could have defined only the new flags here, or only the ones we may
need to alter, but checking some flags in one place and others in
another doesn't sound convenient or intuitive.
Suggested-by: Jakub Kicinski <kuba@kernel.org>
Link[0]: https://lore.kernel.org/netdev/20230703133207.4f0c54ce@kernel.org
Suggested-by: Alexander Duyck <alexanderduyck@fb.com>
Link[1]: https://lore.kernel.org/netdev/CAKgT0UfZCGnWgOH96E4GV3ZP6LLbROHM7SHE8NKwq+exX+Gk_Q@mail.gmail.com
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
include/net/page_pool.h | 7 ++++++-
net/core/page_pool.c | 26 ++++++++++++++------------
2 files changed, 20 insertions(+), 13 deletions(-)
@@ -129,7 +129,12 @@ static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
struct page_pool {
struct page_pool_params p;
- long pad;
+
+ bool dma_map:1; /* Perform DMA mapping */
+ enum {
+ PP_DMA_SYNC_ACT_DISABLED = 0, /* Driver didn't ask to sync */
+ PP_DMA_SYNC_ACT_DO, /* Perform DMA sync ops */
+ } dma_sync_act:2;
long frag_users;
struct page *frag_page;
@@ -182,6 +182,8 @@ static int page_pool_init(struct page_pool *pool,
if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
(pool->p.dma_dir != DMA_BIDIRECTIONAL))
return -EINVAL;
+
+ pool->dma_map = true;
}
if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
@@ -194,6 +196,8 @@ static int page_pool_init(struct page_pool *pool,
if (!pool->p.max_len)
return -EINVAL;
+ pool->dma_sync_act = PP_DMA_SYNC_ACT_DO;
+
/* pool->p.offset has to be set according to the address
* offset used by the DMA engine to start copying rx data
*/
@@ -213,7 +217,7 @@ static int page_pool_init(struct page_pool *pool,
/* Driver calling page_pool_create() also call page_pool_destroy() */
refcount_set(&pool->user_cnt, 1);
- if (pool->p.flags & PP_FLAG_DMA_MAP)
+ if (pool->dma_map)
get_device(pool->p.dev);
return 0;
@@ -341,7 +345,7 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
page_pool_set_dma_addr(page, dma);
- if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+ if (pool->dma_sync_act == PP_DMA_SYNC_ACT_DO)
page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
return true;
@@ -380,8 +384,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
if (unlikely(!page))
return NULL;
- if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
- unlikely(!page_pool_dma_map(pool, page))) {
+ if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page))) {
put_page(page);
return NULL;
}
@@ -401,8 +404,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
gfp_t gfp)
{
const int bulk = PP_ALLOC_CACHE_REFILL;
- unsigned int pp_flags = pool->p.flags;
unsigned int pp_order = pool->p.order;
+ bool dma_map = pool->dma_map;
struct page *page;
int i, nr_pages;
@@ -427,8 +430,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
*/
for (i = 0; i < nr_pages; i++) {
page = pool->alloc.cache[i];
- if ((pp_flags & PP_FLAG_DMA_MAP) &&
- unlikely(!page_pool_dma_map(pool, page))) {
+ if (dma_map && unlikely(!page_pool_dma_map(pool, page))) {
put_page(page);
continue;
}
@@ -500,7 +502,7 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
dma_addr_t dma;
int count;
- if (!(pool->p.flags & PP_FLAG_DMA_MAP))
+ if (!pool->dma_map)
/* Always account for inflight pages, even if we didn't
* map them
*/
@@ -573,7 +575,7 @@ static bool page_pool_recycle_in_cache(struct page *page,
}
/* If the page refcnt == 1, this will try to recycle the page.
- * if PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
+ * if pool->dma_sync_act is set, we'll try to sync the DMA area for
* the configured size min(dma_sync_size, pool->max_len).
* If the page refcnt != 1, then the page will be returned to memory
* subsystem.
@@ -594,7 +596,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
/* Read barrier done in page_ref_count / READ_ONCE */
- if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+ if (pool->dma_sync_act == PP_DMA_SYNC_ACT_DO)
page_pool_dma_sync_for_device(pool, page,
dma_sync_size);
@@ -695,7 +697,7 @@ static struct page *page_pool_drain_frag(struct page_pool *pool,
return NULL;
if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
- if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+ if (pool->dma_sync_act == PP_DMA_SYNC_ACT_DO)
page_pool_dma_sync_for_device(pool, page, -1);
return page;
@@ -781,7 +783,7 @@ static void __page_pool_destroy(struct page_pool *pool)
ptr_ring_cleanup(&pool->ring, NULL);
- if (pool->p.flags & PP_FLAG_DMA_MAP)
+ if (pool->dma_map)
put_device(pool->p.dev);
#ifdef CONFIG_PAGE_POOL_STATS