[6/6] r8152: Convert from tasklet to BH workqueue
Commit Message
tasklet is being replaced by BH workqueue. No noticeable behavior or
performance changes are expected. The following is how the two APIs map:
- tasklet_setup/init() -> INIT_WORK()
- tasklet_schedule() -> queue_work(system_bh_wq, ...)
- tasklet_hi_schedule() -> queue_work(system_bh_highpri_wq, ...)
- tasklet_disable_nosync() -> disable_work()
- tasklet_disable[_in_atomic]() -> disable_work_sync()
- tasklet_enable() -> enable_work() + queue_work()
- tasklet_kill() -> cancel_work_sync()
Note that, unlike tasklet_enable(), enable_work() doesn't automatically queue a
work item that was queued while it was disabled.
While the caller can track this separately, unconditionally scheduling the
work item after enable_work() returns %true should work for most users.
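For illustration, a conversion following the mapping above (using made-up names
rather than this driver's) typically ends up as:

	INIT_WORK(&dev->work, work_fn);		/* was tasklet_setup() */
	...
	disable_work_sync(&dev->work);		/* was tasklet_disable() */
	/* reconfigure while the work item can't run */
	if (enable_work(&dev->work))		/* was tasklet_enable() */
		queue_work(system_bh_wq, &dev->work);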
r8152 conversion has been tested by repeatedly forcing the device to go
through resets using usbreset under iperf3 generated traffic.
Signed-off-by: Tejun Heo <tj@kernel.org>
---
drivers/net/usb/r8152.c | 44 ++++++++++++++++++++++-------------------
1 file changed, 24 insertions(+), 20 deletions(-)
Comments
> tasklet is being replaced by BH workqueue. No noticeable behavior or
> performance changes are expected. The following is how the two APIs map:
>
> - tasklet_setup/init() -> INIT_WORK()
> - tasklet_schedule() -> queue_work(system_bh_wq, ...)
> - tasklet_hi_schedule() -> queue_work(system_bh_highpri_wq, ...)
> - tasklet_disable_nosync() -> disable_work()
> - tasklet_disable[_in_atomic]() -> disable_work_sync()
> - tasklet_enable() -> enable_work() + queue_work()
> - tasklet_kill() -> cancel_work_sync()
>
> Note that, unlike tasklet_enable(), enable_work() doesn't automatically queue a
> work item that was queued while it was disabled.
> While the caller can track this separately, unconditionally scheduling the
> work item after enable_work() returns %true should work for most users.
>
> r8152 conversion has been tested by repeatedly forcing the device to go
> through resets using usbreset under iperf3 generated traffic.
>
> Signed-off-by: Tejun Heo <tj@kernel.org>
> ---
> drivers/net/usb/r8152.c | 44 ++++++++++++++++++++++-------------------
> 1 file changed, 24 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
> index 9bf2140fd0a1..24e284b9eb38 100644
> --- a/drivers/net/usb/r8152.c
> +++ b/drivers/net/usb/r8152.c
> @@ -882,7 +882,7 @@ struct r8152 {
> #ifdef CONFIG_PM_SLEEP
> struct notifier_block pm_notifier;
> #endif
> - struct tasklet_struct tx_tl;
> + struct work_struct tx_work;
>
> struct rtl_ops {
> void (*init)(struct r8152 *tp);
> @@ -1948,7 +1948,7 @@ static void write_bulk_callback(struct urb *urb)
> return;
>
> if (!skb_queue_empty(&tp->tx_queue))
> - tasklet_schedule(&tp->tx_tl);
> + queue_work(system_bh_wq, &tp->tx_work);
> }
>
> static void intr_callback(struct urb *urb)
> @@ -2746,9 +2746,9 @@ static void tx_bottom(struct r8152 *tp)
> } while (res == 0);
> }
>
> -static void bottom_half(struct tasklet_struct *t)
> +static void bottom_half(struct work_struct *work)
> {
> - struct r8152 *tp = from_tasklet(tp, t, tx_tl);
> + struct r8152 *tp = container_of(work, struct r8152, tx_work);
>
> if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
> return;
> @@ -2942,7 +2942,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
> schedule_delayed_work(&tp->schedule, 0);
> } else {
> usb_mark_last_busy(tp->udev);
> - tasklet_schedule(&tp->tx_tl);
> + queue_work(system_bh_wq, &tp->tx_work);
> }
> } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) {
> netif_stop_queue(netdev);
> @@ -6824,11 +6824,12 @@ static void set_carrier(struct r8152 *tp)
> } else {
> if (netif_carrier_ok(netdev)) {
> netif_carrier_off(netdev);
> - tasklet_disable(&tp->tx_tl);
> + disable_work_sync(&tp->tx_work);
> napi_disable(napi);
> tp->rtl_ops.disable(tp);
> napi_enable(napi);
> - tasklet_enable(&tp->tx_tl);
> + enable_work(&tp->tx_work);
> + queue_work(system_bh_wq, &tp->tx_work);
> netif_info(tp, link, netdev, "carrier off\n");
> }
> }
> @@ -6864,7 +6865,7 @@ static void rtl_work_func_t(struct work_struct *work)
> /* don't schedule tasket before linking */
> if (test_and_clear_bit(SCHEDULE_TASKLET, &tp->flags) &&
> netif_carrier_ok(tp->netdev))
> - tasklet_schedule(&tp->tx_tl);
> + queue_work(system_bh_wq, &tp->tx_work);
>
> if (test_and_clear_bit(RX_EPROTO, &tp->flags) &&
> !list_empty(&tp->rx_done))
> @@ -6971,7 +6972,7 @@ static int rtl8152_open(struct net_device *netdev)
> goto out_unlock;
> }
> napi_enable(&tp->napi);
> - tasklet_enable(&tp->tx_tl);
> + enable_work(&tp->tx_work);
I think we are missing queue_work() above, right?
To avoid such situations, could we combine enable_work() + queue_work()
into a single API?
Perhaps, something like:
static inline bool enable_and_queue_work(struct workqueue_struct *wq,
					 struct work_struct *work)
{
	if (enable_work(work)) {
		queue_work(wq, work);
		return true;
	}
	return false;
}
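
With such a helper, the enable + queue pairs added in this patch would collapse
into a single call, e.g.:

	enable_and_queue_work(system_bh_wq, &tp->tx_work);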
Thanks,
Allen
> mutex_unlock(&tp->control);
>
> @@ -6999,7 +7000,7 @@ static int rtl8152_close(struct net_device *netdev)
> #ifdef CONFIG_PM_SLEEP
> unregister_pm_notifier(&tp->pm_notifier);
> #endif
> - tasklet_disable(&tp->tx_tl);
> + disable_work_sync(&tp->tx_work);
> clear_bit(WORK_ENABLE, &tp->flags);
> usb_kill_urb(tp->intr_urb);
> cancel_delayed_work_sync(&tp->schedule);
> @@ -8421,7 +8422,7 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
> return 0;
>
> netif_stop_queue(netdev);
> - tasklet_disable(&tp->tx_tl);
> + disable_work_sync(&tp->tx_work);
> clear_bit(WORK_ENABLE, &tp->flags);
> usb_kill_urb(tp->intr_urb);
> cancel_delayed_work_sync(&tp->schedule);
> @@ -8466,7 +8467,8 @@ static int rtl8152_post_reset(struct usb_interface *intf)
> }
>
> napi_enable(&tp->napi);
> - tasklet_enable(&tp->tx_tl);
> + enable_work(&tp->tx_work);
> + queue_work(system_bh_wq, &tp->tx_work);
> netif_wake_queue(netdev);
> usb_submit_urb(tp->intr_urb, GFP_KERNEL);
>
> @@ -8625,12 +8627,13 @@ static int rtl8152_system_suspend(struct r8152 *tp)
>
> clear_bit(WORK_ENABLE, &tp->flags);
> usb_kill_urb(tp->intr_urb);
> - tasklet_disable(&tp->tx_tl);
> + disable_work_sync(&tp->tx_work);
> napi_disable(napi);
> cancel_delayed_work_sync(&tp->schedule);
> tp->rtl_ops.down(tp);
> napi_enable(napi);
> - tasklet_enable(&tp->tx_tl);
> + enable_work(&tp->tx_work);
> + queue_work(system_bh_wq, &tp->tx_work);
> }
>
> return 0;
> @@ -9387,11 +9390,12 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
> if (netif_carrier_ok(dev)) {
> netif_stop_queue(dev);
> napi_disable(&tp->napi);
> - tasklet_disable(&tp->tx_tl);
> + disable_work_sync(&tp->tx_work);
> tp->rtl_ops.disable(tp);
> tp->rtl_ops.enable(tp);
> rtl_start_rx(tp);
> - tasklet_enable(&tp->tx_tl);
> + enable_work(&tp->tx_work);
> + queue_work(system_bh_wq, &tp->tx_work);
> napi_enable(&tp->napi);
> rtl8152_set_rx_mode(dev);
> netif_wake_queue(dev);
> @@ -9819,8 +9823,8 @@ static int rtl8152_probe_once(struct usb_interface *intf,
> mutex_init(&tp->control);
> INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
> INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
> - tasklet_setup(&tp->tx_tl, bottom_half);
> - tasklet_disable(&tp->tx_tl);
> + INIT_WORK(&tp->tx_work, bottom_half);
> + disable_work(&tp->tx_work);
>
> netdev->netdev_ops = &rtl8152_netdev_ops;
> netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
> @@ -9954,7 +9958,7 @@ static int rtl8152_probe_once(struct usb_interface *intf,
> unregister_netdev(netdev);
>
> out1:
> - tasklet_kill(&tp->tx_tl);
> + cancel_work_sync(&tp->tx_work);
> cancel_delayed_work_sync(&tp->hw_phy_work);
> if (tp->rtl_ops.unload)
> tp->rtl_ops.unload(tp);
> @@ -10010,7 +10014,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
> rtl_set_unplug(tp);
>
> unregister_netdev(tp->netdev);
> - tasklet_kill(&tp->tx_tl);
> + cancel_work_sync(&tp->tx_work);
> cancel_delayed_work_sync(&tp->hw_phy_work);
> if (tp->rtl_ops.unload)
> tp->rtl_ops.unload(tp);
> --
> 2.43.2
>
Hello,
On Wed, Feb 28, 2024 at 09:33:40AM -0800, Allen wrote:
> > @@ -6971,7 +6972,7 @@ static int rtl8152_open(struct net_device *netdev)
> > goto out_unlock;
> > }
> > napi_enable(&tp->napi);
> > - tasklet_enable(&tp->tx_tl);
> > + enable_work(&tp->tx_work);
>
> I think we are missing queue_work() above, right?
>
> To avoid such situations, could we combine enable_work() + queue_work()
> into a single API?
Here, the device is newly being opened and the work item is just disabled
from the init. So, it doesn't need queueing.
> Perhaps, something like:
>
> static inline bool enable_and_queue_work(struct workqueue_struct *wq,
> 					 struct work_struct *work)
> {
> 	if (enable_work(work)) {
> 		queue_work(wq, work);
> 		return true;
> 	}
> 	return false;
> }
That said, this may still be nice to have.
Thanks.
> > > @@ -6971,7 +6972,7 @@ static int rtl8152_open(struct net_device *netdev)
> > > goto out_unlock;
> > > }
> > > napi_enable(&tp->napi);
> > > - tasklet_enable(&tp->tx_tl);
> > > + enable_work(&tp->tx_work);
> >
> > I think we are missing queue_work() above, right?
> >
> > To avoid such situations, could we combine enable_work() + queue_work()
> > into a single API?
>
> Here, the device is newly being opened and the work item is just disabled
> from the init. So, it doesn't need queueing.
>
Ah, my bad. Thanks for pointing it out.
> > Perhaps, something like:
> >
> > static inline bool enable_and_queue_work(struct workqueue_struct *wq,
> > 					 struct work_struct *work)
> > {
> > 	if (enable_work(work)) {
> > 		queue_work(wq, work);
> > 		return true;
> > 	}
> > 	return false;
> > }
>
> That said, this may still be nice to have.
>
If the above is okay, I could send out a patch. Please let me know.
Thanks.
On Wed, Feb 28, 2024 at 10:00:08AM -0800, Allen wrote:
> > > static inline bool enable_and_queue_work(struct workqueue_struct *wq,
> > > 					 struct work_struct *work)
> > > {
> > > 	if (enable_work(work)) {
> > > 		queue_work(wq, work);
> > > 		return true;
> > > 	}
> > > 	return false;
> > > }
> >
> > That said, this may still be nice to have.
>
> If the above is okay, I could send out a patch. Please let me know.
Yes, please.
Thanks.
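
For reference, a kernel-style sketch of the helper as it might eventually look
(same semantics as discussed above; the final name and location are up to the
follow-up patch):

/**
 * enable_and_queue_work - enable a work item and queue it if it became enabled
 * @wq: workqueue to queue @work on
 * @work: work item to enable and queue
 *
 * Call enable_work() on @work and, if it returns %true, queue @work on @wq
 * unconditionally, mirroring the tasklet_enable() replacement pattern
 * described in the commit message.
 *
 * Returns %true if @work was enabled and queued, %false otherwise.
 */
static inline bool enable_and_queue_work(struct workqueue_struct *wq,
					 struct work_struct *work)
{
	if (enable_work(work)) {
		queue_work(wq, work);
		return true;
	}
	return false;
}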