[1/3] mei: vsc: Call wake_up() and event handler in a workqueue

Message ID 20240212094618.344921-2-sakari.ailus@linux.intel.com
State New
Series MEI VSC fixes and cleanups

Commit Message

Sakari Ailus Feb. 12, 2024, 9:46 a.m. UTC
  The event handler, in this case that of mei_vsc_event_cb() of
platform-vsc.c, is called from a threaded interrupt handler in
uninterruptible context. However there are multiple places where the
handler may sleep. This patch creates a per-device workqueue and calls
wake_up() and the event callback from queued work where sleeping is
allowed.

Fixes: 566f5ca97680 ("mei: Add transport driver for IVSC device")
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
---
 drivers/misc/mei/vsc-tp.c | 35 ++++++++++++++++++++++++-----------
 1 file changed, 24 insertions(+), 11 deletions(-)
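
In short, the approach described in the commit message boils down to the following pattern; this is an illustrative sketch distilled from the diff below, not the literal patch:

static irqreturn_t vsc_tp_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	atomic_inc(&tp->assert_cnt);
	/* Defer everything that may sleep to the per-device workqueue. */
	queue_work(tp->event_workqueue, &tp->event_work);

	return IRQ_HANDLED;
}

static void vsc_tp_event_work(struct work_struct *work)
{
	struct vsc_tp *tp = container_of(work, struct vsc_tp, event_work);

	/* The work item runs in process context, so the callback is free to sleep. */
	wake_up(&tp->xfer_wait);

	if (tp->event_notify)
		tp->event_notify(tp->event_notify_context);
}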
  

Comments

Wu, Wentong Feb. 18, 2024, 1:23 a.m. UTC | #1
Hi Sakari,

Thanks, and sorry for the late response because I'm on vacation this week.

> From: Sakari Ailus <sakari.ailus@linux.intel.com>
> 
> The event handler, in this case that of mei_vsc_event_cb() of platform-vsc.c,
> is called from a threaded interrupt handler in uninterruptible context.

But why is this thread uninterruptible?

https://github.com/torvalds/linux/blob/master/kernel/irq/manage.c#L1294

https://lwn.net/Articles/302043/
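
(For reference: the thread_fn registered with request_threaded_irq() runs in a dedicated kernel thread, i.e. ordinary process context, so it is allowed to sleep. A minimal illustrative sketch, with hypothetical names:)

#include <linux/interrupt.h>
#include <linux/mutex.h>

/* Hypothetical example device, for illustration only. */
struct demo_dev {
	struct mutex lock;
};

static irqreturn_t demo_hard_isr(int irq, void *data)
{
	/* Hard IRQ context: must not sleep, just kick the IRQ thread. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_isr(int irq, void *data)
{
	struct demo_dev *dd = data;

	/* IRQ thread: process context, sleeping (e.g. mutex_lock()) is allowed. */
	mutex_lock(&dd->lock);
	/* ... talk to the device over a slow bus, allocate with GFP_KERNEL, ... */
	mutex_unlock(&dd->lock);

	return IRQ_HANDLED;
}

/* Registered e.g. with:
 * devm_request_threaded_irq(dev, irq, demo_hard_isr, demo_thread_isr,
 *			     IRQF_ONESHOT, dev_name(dev), dd);
 */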

BR,
Wentong
> However there are multiple places where the handler may sleep. This patch
> creates a per-device workqueue and calls
> wake_up() and the event callback from queued work where sleeping is
> allowed.
> 
> Fixes: 566f5ca97680 ("mei: Add transport driver for IVSC device")
> Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
> ---
>  drivers/misc/mei/vsc-tp.c | 35 ++++++++++++++++++++++++-----------
>  1 file changed, 24 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
> index 6f4a4be6ccb5..264ece72f0bf 100644
> --- a/drivers/misc/mei/vsc-tp.c
> +++ b/drivers/misc/mei/vsc-tp.c
> @@ -72,6 +72,8 @@ struct vsc_tp {
>  	atomic_t assert_cnt;
>  	wait_queue_head_t xfer_wait;
> 
> +	struct workqueue_struct *event_workqueue;
> +	struct work_struct event_work;
>  	vsc_tp_event_cb_t event_notify;
>  	void *event_notify_context;
> 
> @@ -416,19 +418,19 @@ static irqreturn_t vsc_tp_isr(int irq, void *data)
> 
>  	atomic_inc(&tp->assert_cnt);
> 
> -	wake_up(&tp->xfer_wait);
> +	queue_work(tp->event_workqueue, &tp->event_work);
> 
> -	return IRQ_WAKE_THREAD;
> +	return IRQ_HANDLED;
>  }
> 
> -static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
> +static void vsc_tp_event_work(struct work_struct *work)
>  {
> -	struct vsc_tp *tp = data;
> +	struct vsc_tp *tp = container_of(work, struct vsc_tp, event_work);
> +
> +	wake_up(&tp->xfer_wait);
> 
>  	if (tp->event_notify)
>  		tp->event_notify(tp->event_notify_context);
> -
> -	return IRQ_HANDLED;
>  }
> 
>  static int vsc_tp_match_any(struct acpi_device *adev, void *data)
> @@ -481,13 +483,18 @@ static int vsc_tp_probe(struct spi_device *spi)
>  	init_waitqueue_head(&tp->xfer_wait);
>  	tp->spi = spi;
> 
> +	tp->event_workqueue = create_singlethread_workqueue(dev_name(dev));
> +	if (!tp->event_workqueue)
> +		return -ENOMEM;
> +
> +	INIT_WORK(&tp->event_work, vsc_tp_event_work);
> +
>  	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
> -	ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
> -					vsc_tp_thread_isr,
> -					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
> -					dev_name(dev), tp);
> +	ret = devm_request_irq(dev, spi->irq, vsc_tp_isr,
> +			       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
> +			       dev_name(dev), tp);
>  	if (ret)
> -		return ret;
> +		goto err_destroy_workqueue;
> 
>  	mutex_init(&tp->mutex);
> 
> @@ -516,6 +523,10 @@ static int vsc_tp_probe(struct spi_device *spi)
> 
>  	return 0;
> 
> +err_destroy_workqueue:
> +	destroy_workqueue(tp->event_workqueue);
> +	kfree(tp->event_workqueue);
> +
>  err_destroy_lock:
>  	mutex_destroy(&tp->mutex);
> 
> @@ -528,6 +539,8 @@ static void vsc_tp_remove(struct spi_device *spi)
> 
>  	platform_device_unregister(tp->pdev);
> 
> +	destroy_workqueue(tp->event_workqueue);
> +	kfree(tp->event_workqueue);
>  	mutex_destroy(&tp->mutex);
>  }
> 
> --
> 2.39.2
  
Sakari Ailus Feb. 19, 2024, 6:18 p.m. UTC | #2
Hi Wentong,

On Sun, Feb 18, 2024 at 01:23:30AM +0000, Wu, Wentong wrote:
> Hi Sakari,
> 
> Thanks, and sorry for the late response because I'm on vacation this week.

No worries and thanks for the review.

> 
> > From: Sakari Ailus <sakari.ailus@linux.intel.com>
> > 
> > The event handler, in this case that of mei_vsc_event_cb() of platform-vsc.c,
> > is called from a threaded interrupt handler in uninterruptible context.
> 
> But why is this thread uninterruptible?
> 
> https://github.com/torvalds/linux/blob/master/kernel/irq/manage.c#L1294
> 
> https://lwn.net/Articles/302043/

I guess I sent this too hastily. You can indeed sleep there.

Moving wake_up() to the threaded handler should thus be enough. I'll send
v2.
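
(For context, the direction hinted at here keeps the threaded handler and only moves the wake_up() call from the hard handler into it; a rough sketch of that idea, not the actual v2:)

static irqreturn_t vsc_tp_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	atomic_inc(&tp->assert_cnt);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	/* Wake waiters from the IRQ thread instead of the hard handler. */
	wake_up(&tp->xfer_wait);

	/* The IRQ thread runs in process context, so the callback may sleep. */
	if (tp->event_notify)
		tp->event_notify(tp->event_notify_context);

	return IRQ_HANDLED;
}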
  

Patch

diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index 6f4a4be6ccb5..264ece72f0bf 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -72,6 +72,8 @@  struct vsc_tp {
 	atomic_t assert_cnt;
 	wait_queue_head_t xfer_wait;
 
+	struct workqueue_struct *event_workqueue;
+	struct work_struct event_work;
 	vsc_tp_event_cb_t event_notify;
 	void *event_notify_context;
 
@@ -416,19 +418,19 @@  static irqreturn_t vsc_tp_isr(int irq, void *data)
 
 	atomic_inc(&tp->assert_cnt);
 
-	wake_up(&tp->xfer_wait);
+	queue_work(tp->event_workqueue, &tp->event_work);
 
-	return IRQ_WAKE_THREAD;
+	return IRQ_HANDLED;
 }
 
-static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
+static void vsc_tp_event_work(struct work_struct *work)
 {
-	struct vsc_tp *tp = data;
+	struct vsc_tp *tp = container_of(work, struct vsc_tp, event_work);
+
+	wake_up(&tp->xfer_wait);
 
 	if (tp->event_notify)
 		tp->event_notify(tp->event_notify_context);
-
-	return IRQ_HANDLED;
 }
 
 static int vsc_tp_match_any(struct acpi_device *adev, void *data)
@@ -481,13 +483,18 @@  static int vsc_tp_probe(struct spi_device *spi)
 	init_waitqueue_head(&tp->xfer_wait);
 	tp->spi = spi;
 
+	tp->event_workqueue = create_singlethread_workqueue(dev_name(dev));
+	if (!tp->event_workqueue)
+		return -ENOMEM;
+
+	INIT_WORK(&tp->event_work, vsc_tp_event_work);
+
 	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
-	ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
-					vsc_tp_thread_isr,
-					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-					dev_name(dev), tp);
+	ret = devm_request_irq(dev, spi->irq, vsc_tp_isr,
+			       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+			       dev_name(dev), tp);
 	if (ret)
-		return ret;
+		goto err_destroy_workqueue;
 
 	mutex_init(&tp->mutex);
 
@@ -516,6 +523,10 @@  static int vsc_tp_probe(struct spi_device *spi)
 
 	return 0;
 
+err_destroy_workqueue:
+	destroy_workqueue(tp->event_workqueue);
+	kfree(tp->event_workqueue);
+
 err_destroy_lock:
 	mutex_destroy(&tp->mutex);
 
@@ -528,6 +539,8 @@  static void vsc_tp_remove(struct spi_device *spi)
 
 	platform_device_unregister(tp->pdev);
 
+	destroy_workqueue(tp->event_workqueue);
+	kfree(tp->event_workqueue);
 	mutex_destroy(&tp->mutex);
 }