[v3,net-next] xsk: avoid starving the xsk further down the list
Commit Message
In the previous implementation, when multiple xsk sockets were
associated with a single xsk_buff_pool, a situation could arise
where the xsk_tx_list maintained data at the front for one xsk
socket while starving the xsk sockets at the back of the list.
This could result in issues such as the inability to transmit packets,
increased latency, and jitter. To address this problem, we introduced
a new variable called tx_budget_cache, which limits each xsk to transmit
a maximum of MAX_PER_SOCKET_BUDGET tx descriptors. This allocation ensures
equitable opportunities for subsequent xsk sockets to send tx descriptors.
The value of MAX_PER_SOCKET_BUDGET is temporarily set to TX_BATCH_SIZE(32).
Signed-off-by: Albert Huang <huangjie.albert@bytedance.com>
---
include/net/xdp_sock.h |  5 +++++
net/xdp/xsk.c          | 19 +++++++++++++++++++
2 files changed, 24 insertions(+)
Comments
On Mon, 23 Oct 2023 at 13:53, Albert Huang
<huangjie.albert@bytedance.com> wrote:
>
> In the previous implementation, when multiple xsk sockets were
> associated with a single xsk_buff_pool, a situation could arise
> where the xsk_tx_list maintained data at the front for one xsk
> socket while starving the xsk sockets at the back of the list.
> This could result in issues such as the inability to transmit packets,
> increased latency, and jitter. To address this problem, we introduced
we introduced -> introduce
> a new variable called tx_budget_cache, which limits each xsk to transmit
> a maximum of MAX_PER_SOCKET_BUDGET tx descriptors. This allocation ensures
> equitable opportunities for subsequent xsk sockets to send tx descriptors.
> The value of MAX_PER_SOCKET_BUDGET is temporarily set to TX_BATCH_SIZE(32).
It is not temporary I hope ;-). Just say "The value of
MAX_PER_SOCKET_BUDGET is set to 32."
>
> Signed-off-by: Albert Huang <huangjie.albert@bytedance.com>
> ---
> include/net/xdp_sock.h |  5 +++++
> net/xdp/xsk.c          | 19 +++++++++++++++++++
> 2 files changed, 24 insertions(+)
>
> diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
> index 69b472604b86..08cbdf6fca85 100644
> --- a/include/net/xdp_sock.h
> +++ b/include/net/xdp_sock.h
> @@ -63,6 +63,11 @@ struct xdp_sock {
>
> struct xsk_queue *tx ____cacheline_aligned_in_smp;
> struct list_head tx_list;
> + /* Record the actual number of times xsk has transmitted a tx
> + * descriptor, with a maximum limit not exceeding MAX_PER_SOCKET_BUDGET
> + */
> + u32 tx_budget_cache;
Since this is not a cache, I would prefer a name like tx_budget_spent.
Reflects more what the purpose is of this variable. Do not forget to
change this in the commit message too.
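For illustration only, the member (and its comment, reworded to match)
could then look something like:

	/* Amount of the TX budget this socket has already spent, never
	 * exceeding MAX_PER_SOCKET_BUDGET.
	 */
	u32 tx_budget_spent;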
> +
> /* Protects generic receive. */
> spinlock_t rx_lock;
>
> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index f5e96e0d6e01..fd0d54b7c046 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -33,6 +33,7 @@
> #include "xsk.h"
>
> #define TX_BATCH_SIZE 32
> +#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
>
> static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
>
> @@ -413,16 +414,25 @@ EXPORT_SYMBOL(xsk_tx_release);
>
> bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
> {
> + bool xsk_cache_full = false;
Same comment here, it is not a cache. How about just budget_exhausted?
No need for the xsk_ since this is a local variable.
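Roughly like this (also assuming the tx_budget_spent rename suggested
above):

	bool budget_exhausted = false;
	...
	if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
		budget_exhausted = true;
		continue;
	}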
> struct xdp_sock *xs;
>
> rcu_read_lock();
> +again:
> list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
> + if (xs->tx_budget_cache >= MAX_PER_SOCKET_BUDGET) {
> + xsk_cache_full = true;
> + continue;
> + }
> +
> if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
> if (xskq_has_descs(xs->tx))
> xskq_cons_release(xs->tx);
> continue;
> }
>
> + xs->tx_budget_cache++;
> +
> /* This is the backpressure mechanism for the Tx path.
> * Reserve space in the completion queue and only proceed
> * if there is space in it. This avoids having to implement
> @@ -436,6 +446,14 @@ bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
> return true;
> }
>
> + if (xsk_cache_full) {
> + list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
> + xs->tx_budget_cache = 0;
> + }
No need for the {} here since there is only a single line in the loop.
Please remove them.
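I.e., something along these lines (again with the renames applied):

	if (budget_exhausted) {
		list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
			xs->tx_budget_spent = 0;

		budget_exhausted = false;
		goto again;
	}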
> + xsk_cache_full = false;
> + goto again;
> + }
> +
> out:
> rcu_read_unlock();
> return false;
> @@ -1230,6 +1248,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
> xs->zc = xs->umem->zc;
> xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
> xs->queue_id = qid;
> + xs->tx_budget_cache = 0;
This is not needed. The struct is zeroed at allocation.
> xp_add_xsk(xs->pool, xs);
>
> out_unlock:
> --
> 2.20.1
>
>