[RFC,v3,Optimizing,veth,xsk,performance,3/9] veth: add support for send queue
Commit Message
In order to support native AF_XDP for veth, we need a per-queue
send-queue structure for NAPI TX. An upcoming patch in this series
will make use of it.
Signed-off-by: Albert Huang <huangjie.albert@bytedance.com>
---
drivers/net/veth.c | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
@@ -56,6 +56,11 @@ struct veth_rq_stats {
struct u64_stats_sync syncp;
};
+struct veth_sq_stats {	/* per-TX-queue stats; mirrors struct veth_rq_stats above */
+	struct veth_stats vs;	/* counter values */
+	struct u64_stats_sync syncp;	/* u64_stats sync for readers of vs */
+};
+
struct veth_rq {
struct napi_struct xdp_napi;
struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
@@ -69,11 +74,25 @@ struct veth_rq {
struct page_pool *page_pool;
};
+struct veth_sq {	/* per-TX-queue state; allocated in veth_alloc_queues() */
+	struct napi_struct xdp_napi;	/* NAPI context for TX-side processing */
+	struct net_device *dev;	/* owning netdev; set in veth_alloc_queues() */
+	struct xdp_mem_info xdp_mem;	/* XDP memory model registration info */
+	struct veth_sq_stats stats;	/* syncp initialized in veth_alloc_queues() */
+	u32 queue_index;	/* NOTE(review): not initialized in this patch — presumably set by a later patch in the series */
+	/* for xsk */
+	struct {
+		struct xsk_buff_pool __rcu *pool;	/* AF_XDP buffer pool, RCU-protected */
+		u32 last_cpu;	/* NOTE(review): presumably last CPU that ran xsk TX — confirm in later patches */
+	} xsk;
+};
+
struct veth_priv {
struct net_device __rcu *peer;
atomic64_t dropped;
struct bpf_prog *_xdp_prog;
struct veth_rq *rq;
+	struct veth_sq *sq;	/* per-TX-queue state array; kcalloc'ed in veth_alloc_queues() */
unsigned int requested_headroom;
};
@@ -1495,6 +1514,20 @@ static int veth_alloc_queues(struct net_device *dev)
u64_stats_init(&priv->rq[i].stats.syncp);
}
+	/* TX counterpart of the rq array above: one veth_sq per TX queue. */
+	priv->sq = kcalloc(dev->num_tx_queues, sizeof(*priv->sq), GFP_KERNEL);
+	if (!priv->sq) {
+		/* undo the rq allocation above so the error path doesn't leak it */
+		kfree(priv->rq);
+		priv->rq = NULL;
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		priv->sq[i].dev = dev;
+		u64_stats_init(&priv->sq[i].stats.syncp);
+	}
+
return 0;
}
@@ -1503,6 +1531,7 @@ static void veth_free_queues(struct net_device *dev)
struct veth_priv *priv = netdev_priv(dev);
kfree(priv->rq);
+	kfree(priv->sq);	/* counterpart of the kcalloc() in veth_alloc_queues() */
}
static int veth_dev_init(struct net_device *dev)