From: Valentin Schneider <valentin.schneider@arm.com>
Add wrappers for setting and reading task_struct::blocked_on.

This lets us assert p->blocked_lock is held whenever we access
p->blocked_on, as well as warn us about unexpected state changes.
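For illustration, the intended usage pattern is roughly the
following (a minimal sketch, not lifted verbatim from the call
sites touched below):

	raw_spin_lock(&p->blocked_lock);
	set_task_blocked_on(p, mutex);	/* NULL -> mutex, else WARNs */
	...
	set_task_blocked_on(p, NULL);	/* mutex -> NULL */
	raw_spin_unlock(&p->blocked_lock);

	/* Without blocked_lock held, only a racy snapshot is possible: */
	struct mutex *m = get_task_blocked_on_once(p);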
Cc: Joel Fernandes <joelaf@google.com>
Cc: Qais Yousef <qyousef@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Zimuzo Ezeozue <zezeozue@google.com>
Cc: Youssef Esmat <youssefesmat@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Will Deacon <will@kernel.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: "Paul E . McKenney" <paulmck@kernel.org>
Cc: kernel-team@android.com
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
[fix conflicts, call in more places]
Signed-off-by: Connor O'Brien <connoro@google.com>
[jstultz: tweaked commit subject, added get_task_blocked_on() as well]
Signed-off-by: John Stultz <jstultz@google.com>
---
v2:
* Added get_task_blocked_on() accessor
v4:
* Address READ_ONCE usage that was dropped in v2
* Reordered to be a later add-on to the main patch series, as
  Peter was unhappy with similar wrappers in other patches.
v5:
* Added some extra correctness checking in wrappers
---
include/linux/sched.h | 22 ++++++++++++++++++++++
kernel/locking/mutex-debug.c | 4 ++--
kernel/locking/mutex.c | 10 +++++-----
kernel/locking/ww_mutex.h | 4 ++--
4 files changed, 31 insertions(+), 9 deletions(-)
@@ -2248,6 +2248,28 @@ static inline int rwlock_needbreak(rwlock_t *lock)
#endif
}
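+
+/*
+ * Set (or clear, with m == NULL) p->blocked_on with p->blocked_lock held,
+ * warning if blocked_on does not make a NULL <-> mutex transition.
+ */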
+static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
+{
+ lockdep_assert_held(&p->blocked_lock);
+
+ /* We should only go from NULL to a mutex, or from a mutex back to NULL */
+ WARN_ON((!m && !p->blocked_on) || (m && p->blocked_on));
+
+ p->blocked_on = m;
+}
+
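+/*
+ * Locked read of p->blocked_on; callers must hold p->blocked_lock.
+ */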
+static inline struct mutex *get_task_blocked_on(struct task_struct *p)
+{
+ lockdep_assert_held(&p->blocked_lock);
+
+ return p->blocked_on;
+}
+
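+/*
+ * Lockless peek for callers that cannot take p->blocked_lock; the
+ * result is only a snapshot and may be stale as soon as it is read.
+ */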
+static inline struct mutex *get_task_blocked_on_once(struct task_struct *p)
+{
+ return READ_ONCE(p->blocked_on);
+}
+
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
@@ -53,13 +53,13 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
lockdep_assert_held(&lock->wait_lock);
/* Current thread can't be already blocked (since it's executing!) */
- DEBUG_LOCKS_WARN_ON(task->blocked_on);
+ DEBUG_LOCKS_WARN_ON(get_task_blocked_on(task));
}
void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
- struct mutex *blocked_on = READ_ONCE(task->blocked_on);
+ struct mutex *blocked_on = get_task_blocked_on_once(task);
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
DEBUG_LOCKS_WARN_ON(waiter->task != task);
@@ -623,7 +623,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
goto err_early_kill;
}
- current->blocked_on = lock;
+ set_task_blocked_on(current, lock);
set_current_state(state);
trace_contention_begin(lock, LCB_F_MUTEX);
for (;;) {
@@ -670,7 +670,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
/*
* Gets reset by the unlock path.
*/
- current->blocked_on = lock;
+ set_task_blocked_on(current, lock);
set_current_state(state);
/*
* Here we order against unlock; we must either see it change
@@ -699,7 +699,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
}
}
acquired:
- current->blocked_on = NULL;
+ set_task_blocked_on(current, NULL);
__set_current_state(TASK_RUNNING);
if (ww_ctx) {
@@ -731,7 +731,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
return 0;
err:
- current->blocked_on = NULL;
+ set_task_blocked_on(current, NULL);
__set_current_state(TASK_RUNNING);
__mutex_remove_waiter(lock, &waiter);
err_early_kill:
@@ -950,7 +950,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
debug_mutex_wake_waiter(lock, waiter);
raw_spin_lock(&next->blocked_lock);
WARN_ON(next->blocked_on != lock);
- next->blocked_on = NULL;
+ set_task_blocked_on(next, NULL);
raw_spin_unlock(&next->blocked_lock);
wake_q_add(&wake_q, next);
}
@@ -292,7 +292,7 @@ __ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
* blocked_on relationships that can't resolve.
*/
WARN_ON(waiter->task->blocked_on != lock);
- waiter->task->blocked_on = NULL;
+ set_task_blocked_on(waiter->task, NULL);
wake_q_add(wake_q, waiter->task);
raw_spin_unlock(&waiter->task->blocked_lock);
}
@@ -349,7 +349,7 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
* blocked_on pointer. Otherwise we can see circular
* blocked_on relationships that can't resolve.
*/
- owner->blocked_on = NULL;
+ set_task_blocked_on(owner, NULL);
wake_q_add(wake_q, owner);
raw_spin_unlock(&owner->blocked_lock);
}