> On Fri, 21 Jul 2023 14:21:53 +0800 Wei Fang wrote:
> > + /* Tx processing cannot call any XDP (or page pool) APIs if
> > + * the "budget" is 0. A NAPI call with a budget of 0 (such as
> > + * from netpoll) indicates we may be in an IRQ context, and the
> > + * page pool cannot be used from an IRQ context.
> > + */
> > + if (unlikely(!budget))
> > + break;
> > +
> > xdpf = txq->tx_buf[index].xdp;
> > - if (bdp->cbd_bufaddr)
> > + if (bdp->cbd_bufaddr &&
> > + txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO)
> > dma_unmap_single(&fep->pdev->dev,
> > fec32_to_cpu(bdp->cbd_bufaddr),
> > fec16_to_cpu(bdp->cbd_datlen),
> > @@ -1474,7 +1486,10 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
> > /* Free the sk buffer associated with this last transmit */
> > dev_kfree_skb_any(skb);
> > } else {
> > - xdp_return_frame(xdpf);
> > + if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO)
> > + xdp_return_frame(xdpf);
> > + else
> > + xdp_return_frame_rx_napi(xdpf);
>
> I think that you need to also break if (!budget) here,
I think there is no need to break again here: if the "budget" is 0, the earlier
break already exits the loop, so execution never reaches this point.
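To make that concrete, here is a minimal, self-contained model of the reclaim
loop (userspace C, illustrative names only; tx_reclaim and buf_type are not
the driver's). The !budget break sits at the top of the XDP branch, so
nothing below it, including the frame-return call, can execute when the
budget is 0:

#include <stdio.h>

enum buf_type { BUF_SKB, BUF_XDP };

/* Models the shape of fec_enet_tx_queue(): the !budget check precedes
 * every XDP (page pool) call in the XDP branch.
 */
static void tx_reclaim(enum buf_type type, int budget)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (type == BUF_SKB) {
			printf("dev_kfree_skb_any(%d)\n", i); /* safe in IRQ context */
			continue;
		}
		if (!budget)
			break; /* leaves the loop before any page pool API */
		printf("xdp_return_frame(%d)\n", i); /* NAPI context only */
	}
}

int main(void)
{
	tx_reclaim(BUF_XDP, 0);  /* netpoll case: prints nothing */
	tx_reclaim(BUF_XDP, 64); /* normal NAPI: returns all frames */
	return 0;
}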
> xdp_return_frame() may call page pool APIs to return the frame to a page pool
> owned by another driver. And that needs to be fixed in net/main already?
Yes, you are right, I should fix it in the net tree first, and when the fix is merged
into the net-next tree, I will send a 3rd version of this patch.
@@ -547,6 +547,7 @@ enum {
enum fec_txbuf_type {
FEC_TXBUF_T_SKB,
FEC_TXBUF_T_XDP_NDO,
+ FEC_TXBUF_T_XDP_TX,
};
struct fec_tx_buffer {
@@ -75,6 +75,8 @@
static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_set(struct net_device *ndev);
+static int fec_enet_xdp_tx_xmit(struct net_device *ndev,
+ struct xdp_buff *xdp);
#define DRIVER_NAME "fec"
@@ -960,7 +962,8 @@ static void fec_enet_bd_init(struct net_device *dev)
txq->tx_buf[i].skb = NULL;
}
} else {
- if (bdp->cbd_bufaddr)
+ if (bdp->cbd_bufaddr &&
+ txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO)
dma_unmap_single(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
@@ -1370,7 +1373,7 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
}
static void
-fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
struct fec_enet_private *fep;
struct xdp_frame *xdpf;
@@ -1414,8 +1417,17 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
if (!skb)
goto tx_buf_done;
} else {
+ /* Tx processing cannot call any XDP (or page pool) APIs if
+ * the "budget" is 0. A NAPI call with a budget of 0 (such as
+ * from netpoll) indicates we may be in an IRQ context, and the
+ * page pool cannot be used from an IRQ context.
+ */
+ if (unlikely(!budget))
+ break;
+
xdpf = txq->tx_buf[index].xdp;
- if (bdp->cbd_bufaddr)
+ if (bdp->cbd_bufaddr &&
+ txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO)
dma_unmap_single(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
@@ -1474,7 +1486,10 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
} else {
- xdp_return_frame(xdpf);
+ if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO)
+ xdp_return_frame(xdpf);
+ else
+ xdp_return_frame_rx_napi(xdpf);
txq->tx_buf[index].xdp = NULL;
/* restore default tx buffer type: FEC_TXBUF_T_SKB */
@@ -1506,14 +1521,14 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
writel(0, txq->bd.reg_desc_active);
}
-static void fec_enet_tx(struct net_device *ndev)
+static void fec_enet_tx(struct net_device *ndev, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
int i;
/* Make sure that AVB queues are processed first. */
for (i = fep->num_tx_queues - 1; i >= 0; i--)
- fec_enet_tx_queue(ndev, i);
+ fec_enet_tx_queue(ndev, i, budget);
}
static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
@@ -1565,11 +1580,18 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
}
break;
- default:
- bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
- fallthrough;
-
case XDP_TX:
+ err = fec_enet_xdp_tx_xmit(fep->netdev, xdp);
+ if (err) {
+ ret = FEC_ENET_XDP_CONSUMED;
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
+ } else {
+ ret = FEC_ENET_XDP_TX;
+ }
+ break;
+
+ default:
bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
fallthrough;
@@ -1856,7 +1878,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
do {
done += fec_enet_rx(ndev, budget - done);
- fec_enet_tx(ndev);
+ fec_enet_tx(ndev, budget);
} while ((done < budget) && fec_enet_collect_events(fep));
if (done < budget) {
@@ -3785,7 +3807,8 @@ fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
struct fec_enet_priv_tx_q *txq,
- struct xdp_frame *frame)
+ struct xdp_frame *frame,
+ bool ndo_xmit)
{
unsigned int index, status, estatus;
struct bufdesc *bdp;
@@ -3805,10 +3828,24 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
index = fec_enet_get_bd_index(bdp, &txq->bd);
- dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
- frame->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&fep->pdev->dev, dma_addr))
- return -ENOMEM;
+ if (ndo_xmit) {
+ dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
+ frame->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, dma_addr))
+ return -ENOMEM;
+
+ txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+ } else {
+ struct page *page = virt_to_page(frame->data);
+
+ dma_addr = page_pool_get_dma_addr(page) + sizeof(*frame) +
+ frame->headroom;
+ dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
+ frame->len, DMA_BIDIRECTIONAL);
+ txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
+ }
+
+ txq->tx_buf[index].xdp = frame;
status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
if (fep->bufdesc_ex)
@@ -3827,9 +3864,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
ebdp->cbd_esc = cpu_to_fec32(estatus);
}
- txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
- txq->tx_buf[index].xdp = frame;
-
/* Make sure the updates to rest of the descriptor are performed before
* transferring ownership.
*/
@@ -3855,6 +3889,31 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
return 0;
}
+static int fec_enet_xdp_tx_xmit(struct net_device *ndev,
+ struct xdp_buff *xdp)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_tx_q *txq;
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ int queue, ret;
+
+ queue = fec_enet_xdp_get_tx_queue(fep, cpu);
+ txq = fep->tx_queue[queue];
+ nq = netdev_get_tx_queue(fep->netdev, queue);
+
+ __netif_tx_lock(nq, cpu);
+
+ /* Avoid tx timeout as XDP shares the queue with kernel stack */
+ txq_trans_cond_update(nq);
+ ret = fec_enet_txq_xmit_frame(fep, txq, xdpf, false);
+
+ __netif_tx_unlock(nq);
+
+ return ret;
+}
+
static int fec_enet_xdp_xmit(struct net_device *dev,
int num_frames,
struct xdp_frame **frames,
@@ -3875,7 +3934,7 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
__netif_tx_lock(nq, cpu);
for (i = 0; i < num_frames; i++) {
- if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0)
+ if (fec_enet_txq_xmit_frame(fep, txq, frames[i], true) < 0)
break;
sent_frames++;
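
As an aside, a quick way to exercise the new XDP_TX path once this lands
(a minimal sketch, not part of the patch) is to attach an XDP program that
returns XDP_TX for every packet, e.g. with
ip link set dev <iface> xdp obj reflect.o sec xdp:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Bounce every received frame back out the same interface, driving the
 * driver's XDP_TX transmit path. A real reflector would also swap the
 * Ethernet addresses; for exercising the driver path this is enough.
 */
SEC("xdp")
int xdp_reflect(struct xdp_md *ctx)
{
	return XDP_TX;
}

char _license[] SEC("license") = "GPL";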
}