[RFC,Optimizing,veth,xsk,performance,06/10] veth: add ndo_xsk_wakeup callback for veth
Commit Message
Signed-off-by: huangjie.albert <huangjie.albert@bytedance.com>
---
drivers/net/veth.c | 40 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
Comments
On Thu, Aug 03, 2023 at 10:04:32PM +0800, huangjie.albert wrote:
> Signed-off-by: huangjie.albert <huangjie.albert@bytedance.com>
> ---
> drivers/net/veth.c | 40 ++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 40 insertions(+)
>
> diff --git a/drivers/net/veth.c b/drivers/net/veth.c
> index 944761807ca4..600225e27e9e 100644
> --- a/drivers/net/veth.c
> +++ b/drivers/net/veth.c
> @@ -1840,6 +1840,45 @@ static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
> rcu_read_unlock();
> }
>
> +static void veth_xsk_remote_trigger_napi(void *info)
> +{
> + struct veth_sq *sq = info;
> +
> + napi_schedule(&sq->xdp_napi);
> +}
> +
> +static int veth_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
> +{
> + struct veth_priv *priv;
> + struct veth_sq *sq;
> + u32 last_cpu, cur_cpu;
> +
> + if (!netif_running(dev))
> + return -ENETDOWN;
> +
> + if (qid >= dev->real_num_rx_queues)
> + return -EINVAL;
> +
> + priv = netdev_priv(dev);
> + sq = &priv->sq[qid];
> +
> + if (napi_if_scheduled_mark_missed(&sq->xdp_napi))
> + return 0;
> +
> + last_cpu = sq->xsk.last_cpu;
> + cur_cpu = get_cpu();
> +
> + /* raise a napi */
> + if (last_cpu == cur_cpu) {
> + napi_schedule(&sq->xdp_napi);
> + } else {
> + smp_call_function_single(last_cpu, veth_xsk_remote_trigger_napi, sq, true);
> + }
nit: no need for braces in the above.
if (last_cpu == cur_cpu)
napi_schedule(&sq->xdp_napi);
else
smp_call_function_single(last_cpu, veth_xsk_remote_trigger_napi, sq, true);
...
@@ -1840,6 +1840,45 @@ static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
rcu_read_unlock();
}
+/* IPI callback: runs on the CPU that last serviced this queue (see
+ * smp_call_function_single() in veth_xsk_wakeup) and schedules the
+ * queue's NAPI there, preserving cache locality.
+ */
+static void veth_xsk_remote_trigger_napi(void *info)
+{
+ struct veth_sq *sq = info;
+
+ napi_schedule(&sq->xdp_napi);
+}
+
+/* .ndo_xsk_wakeup handler: kick NAPI for an AF_XDP socket bound to
+ * queue @qid. Returns 0 on success, -ENETDOWN if the device is not
+ * running, -EINVAL for an out-of-range queue id. @flag is unused.
+ */
+static int veth_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
+{
+	struct veth_priv *priv;
+	struct veth_sq *sq;
+	u32 last_cpu, cur_cpu;
+
+	if (!netif_running(dev))
+		return -ENETDOWN;
+
+	/* NOTE(review): qid is checked against real_num_rx_queues but then
+	 * indexes priv->sq[], a TX ring array — confirm veth guarantees
+	 * matching rx/tx queue counts, or check real_num_tx_queues instead.
+	 */
+	if (qid >= dev->real_num_rx_queues)
+		return -EINVAL;
+
+	priv = netdev_priv(dev);
+	sq = &priv->sq[qid];
+
+	/* NAPI already running: just mark it missed so it reschedules. */
+	if (napi_if_scheduled_mark_missed(&sq->xdp_napi))
+		return 0;
+
+	last_cpu = sq->xsk.last_cpu;
+	cur_cpu = get_cpu();
+
+	/* Schedule NAPI on the CPU that last ran it (cache locality);
+	 * IPI over if we are currently on a different CPU.
+	 */
+	if (last_cpu == cur_cpu)
+		napi_schedule(&sq->xdp_napi);
+	else
+		smp_call_function_single(last_cpu, veth_xsk_remote_trigger_napi, sq, true);
+
+	put_cpu();
+	return 0;
+}
+
static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
@@ -2054,6 +2093,7 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_set_rx_headroom = veth_set_rx_headroom,
.ndo_bpf = veth_xdp,
.ndo_xdp_xmit = veth_ndo_xdp_xmit,
+ .ndo_xsk_wakeup = veth_xsk_wakeup,
.ndo_get_peer_dev = veth_peer_dev,
};