--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1508,6 +1508,9 @@ static int m_can_start(struct net_device *dev)
if (ret)
return ret;

+ netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0),
+ cdev->tx_max_coalesced_frames);
+
cdev->can.state = CAN_STATE_ERROR_ACTIVE;

m_can_enable_all_interrupts(cdev);
@@ -1818,8 +1821,13 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
*/
can_put_echo_skb(skb, dev, putidx, frame_len);

- /* Enable TX FIFO element to start transfer */
- m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
+ if (cdev->is_peripheral) {
+ /* Delay enabling TX FIFO element */
+ cdev->tx_peripheral_submit |= BIT(putidx);
+ } else {
+ /* Enable TX FIFO element to start transfer */
+ m_can_write(cdev, M_CAN_TXBAR, BIT(putidx));
+ }
cdev->tx_fifo_putidx = (++cdev->tx_fifo_putidx >= cdev->can.echo_skb_max ?
0 : cdev->tx_fifo_putidx);
}
@@ -1832,6 +1840,17 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
return NETDEV_TX_BUSY;
}

+static void m_can_tx_submit(struct m_can_classdev *cdev)
+{
+ if (cdev->version == 30)
+ return;
+ if (!cdev->is_peripheral)
+ return;
+
+ m_can_write(cdev, M_CAN_TXBAR, cdev->tx_peripheral_submit);
+ cdev->tx_peripheral_submit = 0;
+}
+
static void m_can_tx_work_queue(struct work_struct *ws)
{
struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work);
@@ -1840,11 +1859,15 @@ static void m_can_tx_work_queue(struct work_struct *ws)
op->skb = NULL;
m_can_tx_handler(cdev, skb);
+ if (op->submit)
+ m_can_tx_submit(cdev);
}

-static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb)
+static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb,
+ bool submit)
{
cdev->tx_ops[cdev->next_tx_op].skb = skb;
+ cdev->tx_ops[cdev->next_tx_op].submit = submit;
queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work);

++cdev->next_tx_op;
@@ -1856,6 +1879,7 @@ static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev,
struct sk_buff *skb)
{
netdev_tx_t err;
+ bool submit;

if (cdev->can.state == CAN_STATE_BUS_OFF) {
m_can_clean(cdev->net);
@@ -1866,7 +1890,15 @@ static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev,
if (err != NETDEV_TX_OK)
return err;

- m_can_tx_queue_skb(cdev, skb);
+ ++cdev->nr_txs_without_submit;
+ if (cdev->nr_txs_without_submit >= cdev->tx_max_coalesced_frames ||
+ !netdev_xmit_more()) {
+ cdev->nr_txs_without_submit = 0;
+ submit = true;
+ } else {
+ submit = false;
+ }
+ m_can_tx_queue_skb(cdev, skb, submit);

return NETDEV_TX_OK;
}
@@ -1998,6 +2030,7 @@ static int m_can_get_coalesce(struct net_device *dev,
ec->rx_max_coalesced_frames_irq = cdev->rx_max_coalesced_frames_irq;
ec->rx_coalesce_usecs_irq = cdev->rx_coalesce_usecs_irq;
+ ec->tx_max_coalesced_frames = cdev->tx_max_coalesced_frames;
ec->tx_max_coalesced_frames_irq = cdev->tx_max_coalesced_frames_irq;
ec->tx_coalesce_usecs_irq = cdev->tx_coalesce_usecs_irq;
@@ -2042,6 +2075,18 @@ static int m_can_set_coalesce(struct net_device *dev,
netdev_err(dev, "tx-frames-irq and tx-usecs-irq can only be set together\n");
return -EINVAL;
}
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXE].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX event FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXE].num);
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXB].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXB].num);
+ return -EINVAL;
+ }
if (ec->rx_coalesce_usecs_irq != 0 && ec->tx_coalesce_usecs_irq != 0 &&
ec->rx_coalesce_usecs_irq != ec->tx_coalesce_usecs_irq) {
netdev_err(dev, "rx-usecs-irq %u needs to be equal to tx-usecs-irq %u if both are enabled\n",
@@ -2052,6 +2097,7 @@ static int m_can_set_coalesce(struct net_device *dev,
cdev->rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
cdev->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+ cdev->tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
cdev->tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
@@ -2069,6 +2115,7 @@ static const struct ethtool_ops m_can_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES |
ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
.get_ts_info = ethtool_op_get_ts_info,
.get_coalesce = m_can_get_coalesce,
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -74,6 +74,7 @@ struct m_can_tx_op {
struct m_can_classdev *cdev;
struct work_struct work;
struct sk_buff *skb;
+ bool submit;
};

struct m_can_classdev {
@@ -103,6 +104,7 @@ struct m_can_classdev {
u32 active_interrupts;
u32 rx_max_coalesced_frames_irq;
u32 rx_coalesce_usecs_irq;
+ u32 tx_max_coalesced_frames;
u32 tx_max_coalesced_frames_irq;
u32 tx_coalesce_usecs_irq;
@@ -117,6 +119,10 @@ struct m_can_classdev {
int tx_fifo_size;
int next_tx_op;
+ int nr_txs_without_submit;
+ /* bitfield of fifo elements that will be submitted together */
+ u32 tx_peripheral_submit;
+
struct mram_cfg mcfg[MRAM_CFG_NUM];
};
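For reference, the ETHTOOL_COALESCE_TX_MAX_FRAMES capability advertised above maps to the standard tx-frames coalescing parameter, so the new knob can be set from the shell with "ethtool -C can0 tx-frames 8". The sketch below is not part of the patch: it is a minimal user-space example, using the legacy SIOCETHTOOL ioctl, of how the parameter could be exercised. The interface name can0 and the value 8 are assumptions; as the checks in m_can_set_coalesce() show, the value must not exceed the TX FIFO or TX event FIFO depth.

/* Hypothetical user-space sketch (not part of the patch): read the current
 * coalescing settings, then raise tx_max_coalesced_frames ("tx-frames").
 * Roughly what "ethtool -C can0 tx-frames 8" does over the ioctl interface.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr = { 0 };
	int fd;

	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);	/* assumed interface */
	ifr.ifr_data = (void *)&ec;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Fetch the current settings first so the other parameters are kept */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GCOALESCE");
		close(fd);
		return 1;
	}

	/* Ask the driver to batch up to 8 TX submissions; a value larger than
	 * the TX FIFO or TX event FIFO depth is rejected with -EINVAL.
	 */
	ec.cmd = ETHTOOL_SCOALESCE;
	ec.tx_max_coalesced_frames = 8;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_SCOALESCE");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}

The same validation in m_can_set_coalesce() applies when the parameter is set through the ethtool netlink interface; the ioctl path is used here only to keep the example self-contained.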