[15/17] workqueue: Update how start_flush_work() is called
Commit Message
In preparation for in-BH canceling of BH work items, update start_flush_work()
so that:
- rcu_read_lock()'ing is moved to the caller.
- Instead of true or false, it now returns the worker_pool associated with
  the work item if the work item needs to be waited for, or NULL if waiting
  is not needed.
- A WARN is added for the case where it encounters a queued work item while
  @from_cancel is set, which shouldn't happen.
No behavior changes are intended.
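For illustration, returning the pool rather than a bool lets the caller
inspect pool properties while still inside the RCU read-side critical
section. A rough sketch of how a later patch in this series might use that
for in-BH canceling; the POOL_BH test and the busy-wait branch are
assumptions about the follow-up, not part of this patch:

	struct wq_barrier barr;
	struct worker_pool *pool;
	bool is_bh;

	rcu_read_lock();
	pool = start_flush_work(work, &barr, from_cancel);
	/* pool is only valid under RCU; sample what's needed before unlocking */
	is_bh = pool && (pool->flags & POOL_BH);
	rcu_read_unlock();
	if (!pool)
		return false;

	if (is_bh && from_cancel) {
		/* hypothetical: spin until the BH execution finishes */
		while (!try_wait_for_completion(&barr.done))
			cpu_relax();
	} else {
		wait_for_completion(&barr.done);
	}
	destroy_work_on_stack(&barr.work);
	return true;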
Signed-off-by: Tejun Heo <tj@kernel.org>
---
kernel/workqueue.c | 39 ++++++++++++++++++++++-----------------
1 file changed, 22 insertions(+), 17 deletions(-)
@@ -3995,8 +3995,9 @@ void drain_workqueue(struct workqueue_struct *wq)
}
EXPORT_SYMBOL_GPL(drain_workqueue);
-static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
- bool from_cancel)
+static struct worker_pool *start_flush_work(struct work_struct *work,
+ struct wq_barrier *barr,
+ bool from_cancel)
{
struct worker *worker = NULL;
struct worker_pool *pool;
@@ -4005,12 +4006,9 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
might_sleep();
- rcu_read_lock();
pool = get_work_pool(work);
- if (!pool) {
- rcu_read_unlock();
- return false;
- }
+ if (!pool)
+ return NULL;
raw_spin_lock_irq(&pool->lock);
/* see the comment in try_to_grab_pending() with the same code */
@@ -4018,6 +4016,12 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
if (pwq) {
if (unlikely(pwq->pool != pool))
goto already_gone;
+ /*
+ * Cancel path should already have removed @work from worklist
+ * in try_to_grab_pending(). Control should get here iff we need
+ * to wait for the current execution to finish.
+ */
+ WARN_ON_ONCE(from_cancel);
} else {
worker = find_worker_executing_work(pool, work);
if (!worker)
@@ -4045,17 +4049,16 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
if (!from_cancel && (wq->saved_max_active == 1 || wq->rescuer))
touch_wq_lockdep_map(wq);
- rcu_read_unlock();
- return true;
+ return pool;
already_gone:
raw_spin_unlock_irq(&pool->lock);
- rcu_read_unlock();
- return false;
+ return NULL;
}
static bool __flush_work(struct work_struct *work, bool from_cancel)
{
struct wq_barrier barr;
+ struct worker_pool *pool;
if (WARN_ON(!wq_online))
return false;
@@ -4063,13 +4066,15 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
if (WARN_ON(!work->func))
return false;
- if (start_flush_work(work, &barr, from_cancel)) {
- wait_for_completion(&barr.done);
- destroy_work_on_stack(&barr.work);
- return true;
- } else {
+ rcu_read_lock();
+ pool = start_flush_work(work, &barr, from_cancel);
+ rcu_read_unlock();
+ if (!pool)
return false;
- }
+
+ wait_for_completion(&barr.done);
+ destroy_work_on_stack(&barr.work);
+ return true;
}
/**