[RFC v3 Optimizing veth xsk performance 2/9] xsk: add dma_check_skip for skipping dma check
When a virtual net device such as veth supports zero copy, there is no
real DMA mapping, so the check for pool->dma_pages in xp_assign_dev()
does not apply. Add a dma_check_skip flag so such drivers can opt out
of the check, and place it after 'unaligned' because there is a 4-byte
hole there, as shown by
pahole -V ./net/xdp/xsk_buff_pool.o:
-----------
...
/* --- cacheline 3 boundary (192 bytes) --- */
u32                        chunk_size;           /*   192     4 */
u32                        frame_len;            /*   196     4 */
u8                         cached_need_wakeup;   /*   200     1 */
bool                       uses_need_wakeup;     /*   201     1 */
bool                       dma_need_sync;        /*   202     1 */
bool                       unaligned;            /*   203     1 */

/* XXX 4 bytes hole, try to pack */

void *                     addrs;                /*   208     8 */
spinlock_t                 cq_lock;              /*   216     4 */
...
-----------
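Placed right after 'unaligned', the new flag slots into that hole at
offset 204 and shrinks it to 3 bytes. The expected layout after this
patch (derived from the offsets above, not re-measured with pahole) is:
-----------
...
bool                       unaligned;            /*   203     1 */
bool                       dma_check_skip;       /*   204     1 */

/* XXX 3 bytes hole, try to pack */

void *                     addrs;                /*   208     8 */
...
-----------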
Signed-off-by: Albert Huang <huangjie.albert@bytedance.com>
---
include/net/xsk_buff_pool.h | 1 +
net/xdp/xsk_buff_pool.c | 3 ++-
2 files changed, 3 insertions(+), 1 deletion(-)
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -81,6 +81,7 @@ struct xsk_buff_pool {
bool uses_need_wakeup;
bool dma_need_sync;
bool unaligned;
+ bool dma_check_skip;
void *addrs;
/* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
* NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -85,6 +85,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
XDP_PACKET_HEADROOM;
pool->umem = umem;
pool->addrs = umem->addrs;
+ pool->dma_check_skip = false;
INIT_LIST_HEAD(&pool->free_list);
INIT_LIST_HEAD(&pool->xskb_list);
INIT_LIST_HEAD(&pool->xsk_tx_list);
@@ -202,7 +203,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
if (err)
goto err_unreg_pool;

- if (!pool->dma_pages) {
+ if (!pool->dma_pages && !pool->dma_check_skip) {
WARN(1, "Driver did not DMA map zero-copy buffers");
err = -EINVAL;
goto err_unreg_xsk;
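
For context, a minimal sketch of how a virtual driver could opt in. The
helper name veth_xsk_pool_enable and its signature are hypothetical
(the actual veth wiring lands later in this series); only the
dma_check_skip field and the XDP_SETUP_XSK_POOL path are taken from
this patch and the existing kernel API:

#include <linux/netdevice.h>
#include <net/xsk_buff_pool.h>

/* Hypothetical pool-enable path for a virtual device: veth has no DMA
 * engine, so it never calls xsk_pool_dma_map(). Setting dma_check_skip
 * from the XDP_SETUP_XSK_POOL handler tells xp_assign_dev() not to
 * WARN about the missing pool->dma_pages mapping.
 */
static int veth_xsk_pool_enable(struct net_device *dev,
				struct xsk_buff_pool *pool, u16 qid)
{
	pool->dma_check_skip = true;	/* buffers are plain kernel memory */

	/* ... bind the pool to rx/tx queue qid and enable zero copy ... */
	return 0;
}

Setting the flag from the driver's setup call is sufficient because, as
the last hunk shows, the dma_pages check in xp_assign_dev() only runs
after the driver has had a chance to set the flag during pool setup.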