On Wed, Mar 15, 2023 at 12:05:45PM +0100, Markus Schneider-Pargmann wrote:
> Implement byte queue limiting in preparation for the use of xmit_more().
>
> Signed-off-by: Markus Schneider-Pargmann <msp@baylibre.com>
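Some context for other reviewers: BQL works by pairing each
netdev_sent_queue() on the xmit path with a matching
netdev_completed_queue() on TX completion, plus a netdev_reset_queue()
whenever in-flight state is discarded. Roughly (sketch only, not the
exact driver code; dev/frame_len/pkts/bytes are placeholders):

	/* xmit path: account the frame's length before queueing it */
	netdev_sent_queue(dev, frame_len);

	/* TX completion path: report finished packets and bytes */
	netdev_completed_queue(dev, pkts, bytes);

	/* reset/cleanup path: drop all in-flight accounting */
	netdev_reset_queue(dev);

That is the pattern this patch follows below.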
Nits below aside, this looks good to me.
Reviewed-by: Simon Horman <simon.horman@corigine.com>
> ---
> drivers/net/can/m_can/m_can.c | 49 +++++++++++++++++++++++++----------
> 1 file changed, 35 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
> index 3cb3d01e1a61..63d6e95717e3 100644
> --- a/drivers/net/can/m_can/m_can.c
> +++ b/drivers/net/can/m_can/m_can.c
...
> @@ -999,29 +1001,34 @@ static int m_can_poll(struct napi_struct *napi, int quota)
> * echo. timestamp is used for peripherals to ensure correct ordering
> * by rx-offload, and is ignored for non-peripherals.
> */
> -static void m_can_tx_update_stats(struct m_can_classdev *cdev,
> - unsigned int msg_mark,
> - u32 timestamp)
> +static unsigned int m_can_tx_update_stats(struct m_can_classdev *cdev,
> + unsigned int msg_mark, u32 timestamp)
> {
> struct net_device *dev = cdev->net;
> struct net_device_stats *stats = &dev->stats;
> + unsigned int frame_len;
>
> if (cdev->is_peripheral)
> stats->tx_bytes +=
> can_rx_offload_get_echo_skb(&cdev->offload,
> msg_mark,
> timestamp,
> - NULL);
> + &frame_len);
> else
> - stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
> + stats->tx_bytes += can_get_echo_skb(dev, msg_mark, &frame_len);
>
> stats->tx_packets++;
> +
> + return frame_len;
> }
>
> -static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted)
> +static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted,
> + int transmitted_frame_len)
nit: I think unsigned int would be a better type for transmitted_frame_len,
as that is the type of the 3rd argument to netdev_completed_queue()
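i.e. something like this, untested:

	static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted,
				    unsigned int transmitted_frame_len)

That also avoids an implicit int-to-unsigned conversion when the value
is passed through to netdev_completed_queue().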
> {
> unsigned long irqflags;
>
> + netdev_completed_queue(cdev->net, transmitted, transmitted_frame_len);
> +
> spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
> if (cdev->tx_fifo_in_flight >= cdev->tx_fifo_size && transmitted > 0)
> netif_wake_queue(cdev->net);
> @@ -1060,6 +1067,7 @@ static int m_can_echo_tx_event(struct net_device *dev)
> int err = 0;
> unsigned int msg_mark;
> int processed = 0;
> + int processed_frame_len = 0;
Likewise, here.
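i.e.:

	unsigned int processed_frame_len = 0;

which also matches the unsigned int returned by m_can_tx_update_stats()
above.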
> struct m_can_classdev *cdev = netdev_priv(dev);
>
> @@ -1088,7 +1096,9 @@ static int m_can_echo_tx_event(struct net_device *dev)
> fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi);
>
> /* update stats */
> - m_can_tx_update_stats(cdev, msg_mark, timestamp);
> + processed_frame_len += m_can_tx_update_stats(cdev, msg_mark,
> + timestamp);
> +
> ++processed;
> }
>
> @@ -1096,7 +1106,7 @@ static int m_can_echo_tx_event(struct net_device *dev)
> m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
> ack_fgi));
>
> - m_can_finish_tx(cdev, processed);
> + m_can_finish_tx(cdev, processed, processed_frame_len);
>
> return err;
> }
...
> @@ -445,6 +445,8 @@ static void m_can_clean(struct net_device *net)
> for (int i = 0; i != cdev->can.echo_skb_max; ++i)
> can_free_echo_skb(cdev->net, i, NULL);
>
> + netdev_reset_queue(cdev->net);
> +
> spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
> cdev->tx_fifo_in_flight = 0;
> spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
> @@ -1187,11 +1197,12 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
> if (ir & IR_TC) {
> /* Transmission Complete Interrupt*/
> u32 timestamp = 0;
> + unsigned int frame_len;
>
> if (cdev->is_peripheral)
> timestamp = m_can_get_timestamp(cdev);
> - m_can_tx_update_stats(cdev, 0, timestamp);
> - m_can_finish_tx(cdev, 1);
> + frame_len = m_can_tx_update_stats(cdev, 0, timestamp);
> + m_can_finish_tx(cdev, 1, frame_len);
> }
> } else {
> if (ir & (IR_TEFN | IR_TEFW)) {
> @@ -1720,6 +1731,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
> u32 cccr, fdflags;
> int err;
> int putidx;
> + unsigned int frame_len = can_skb_get_frame_len(skb);
>
> /* Generate ID field for TX buffer Element */
> /* Common to all supported M_CAN versions */
> @@ -1765,7 +1777,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
> }
> m_can_write(cdev, M_CAN_TXBTIE, 0x1);
>
> - can_put_echo_skb(skb, dev, 0, 0);
> + can_put_echo_skb(skb, dev, 0, frame_len);
>
> m_can_write(cdev, M_CAN_TXBAR, 0x1);
> /* End of xmit function for version 3.0.x */
> @@ -1804,7 +1816,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
> /* Push loopback echo.
> * Will be looped back on TX interrupt based on message marker
> */
> - can_put_echo_skb(skb, dev, putidx, 0);
> + can_put_echo_skb(skb, dev, putidx, frame_len);
>
> /* Enable TX FIFO element to start transfer */
> m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
> @@ -1875,14 +1887,23 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
> struct net_device *dev)
> {
> struct m_can_classdev *cdev = netdev_priv(dev);
> + netdev_tx_t ret;
> + unsigned int frame_len;
>
> if (can_dev_dropped_skb(dev, skb))
> return NETDEV_TX_OK;
>
> + frame_len = can_skb_get_frame_len(skb);
> +
> if (cdev->is_peripheral)
> - return m_can_start_peripheral_xmit(cdev, skb);
> + ret = m_can_start_peripheral_xmit(cdev, skb);
> else
> - return m_can_start_fast_xmit(cdev, skb);
> + ret = m_can_start_fast_xmit(cdev, skb);
> +
> + if (ret == NETDEV_TX_OK)
> + netdev_sent_queue(dev, frame_len);
> +
> + return ret;
> }
>
> static int m_can_open(struct net_device *dev)