From: Peter Zijlstra <peterz@infradead.org>
Add a link to the task that this task is proxying for, and use it
to do an intelligent hand-off of the owned mutex to the task we
are running on behalf of.
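
To illustrate the idea, here is a standalone, simplified sketch
(toy types only: the task/mutex structs below stand in for the
kernel's task_struct and mutex, and pick_next_owner() is a
hypothetical helper mirroring the donor check this patch adds to
__mutex_unlock_slowpath(); no locking or wake_q is modeled):

  #include <stdio.h>

  /* Toy stand-ins for task_struct and mutex; only the fields
   * relevant to the donor hand-off are modeled. */
  struct mutex;
  struct task {
          const char *name;
          struct mutex *blocked_on;   /* lock this task is blocked on */
          struct task *blocked_donor; /* task that is boosting this task */
  };
  struct mutex {
          struct task *owner;
  };

  /* Prefer handing the lock to our donor, but only if the donor
   * is blocked on *this* lock; otherwise the caller falls back
   * to the first waiter on the lock's wait list. */
  static struct task *pick_next_owner(struct task *curr, struct mutex *lock)
  {
          struct task *donor = curr->blocked_donor;

          if (donor && donor->blocked_on == lock) {
                  curr->blocked_donor = NULL;
                  return donor;
          }
          return NULL; /* fall back to lock->wait_list */
  }

  int main(void)
  {
          struct mutex m = { 0 };
          struct task owner = { "owner", NULL, NULL };
          struct task donor = { "donor", &m, NULL };

          m.owner = &owner;
          owner.blocked_donor = &donor; /* donor has been boosting owner */

          struct task *next = pick_next_owner(&owner, &m);
          printf("hand lock to: %s\n", next ? next->name : "first waiter");
          return 0;
  }

In the real unlock slowpath below, the same decision is made under
lock->wait_lock and the relevant blocked_lock(s), and the chosen
donor is woken via the wake_q.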
Cc: Joel Fernandes <joelaf@google.com>
Cc: Qais Yousef <qyousef@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Zimuzo Ezeozue <zezeozue@google.com>
Cc: Youssef Esmat <youssefesmat@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Will Deacon <will@kernel.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Metin Kaya <Metin.Kaya@arm.com>
Cc: Xuewen Yan <xuewen.yan94@gmail.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kernel-team@android.com
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Connor O'Brien <connoro@google.com>
[jstultz: This patch was split out from larger proxy patch]
Signed-off-by: John Stultz <jstultz@google.com>
---
v5:
* Split out from larger proxy patch
v6:
* Moved proxied value from earlier patch to this one where it
is actually used
* Rework logic to check sched_proxy_exec() instead of using ifdefs
* Moved comment change to this patch where it makes sense
v7:
* Use a more descriptive term than "us" in comments, as suggested
by Metin Kaya.
* Minor typo fixup from Metin Kaya
* Reworked proxied variable to prev_not_proxied to simplify usage
---
 include/linux/sched.h  |  1 +
 kernel/fork.c          |  1 +
 kernel/locking/mutex.c | 35 ++++++++++++++++++++++++++++++++---
 kernel/sched/core.c    | 19 +++++++++++++++++--
 4 files changed, 51 insertions(+), 5 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1157,6 +1157,7 @@ struct task_struct {
enum blocked_on_state blocked_on_state;
struct mutex *blocked_on; /* lock we're blocked on */
+ struct task_struct *blocked_donor; /* task that is boosting this task */
raw_spinlock_t blocked_lock;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
diff --git a/kernel/fork.c b/kernel/fork.c
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2459,6 +2459,7 @@ __latent_entropy struct task_struct *copy_process(
p->blocked_on_state = BO_RUNNABLE;
p->blocked_on = NULL; /* not blocked yet */
+ p->blocked_donor = NULL; /* nobody is boosting p yet */
#ifdef CONFIG_BCACHE
p->sequential_io = 0;
p->sequential_io_avg = 0;
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -910,7 +910,7 @@ EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
*/
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
- struct task_struct *next = NULL;
+ struct task_struct *donor, *next = NULL;
DEFINE_WAKE_Q(wake_q);
unsigned long owner;
unsigned long flags;
@@ -948,7 +948,34 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
preempt_disable();
raw_spin_lock_irqsave(&lock->wait_lock, flags);
debug_mutex_unlock(lock);
- if (!list_empty(&lock->wait_list)) {
+
+ if (sched_proxy_exec()) {
+ raw_spin_lock(&current->blocked_lock);
+ /*
+ * If we have a task boosting current, and that task was boosting
+ * current through this lock, hand the lock to that task, as it is
+ * the highest waiter as selected by the scheduling function.
+ */
+ donor = current->blocked_donor;
+ if (donor) {
+ struct mutex *next_lock;
+
+ raw_spin_lock_nested(&donor->blocked_lock, SINGLE_DEPTH_NESTING);
+ next_lock = get_task_blocked_on(donor);
+ if (next_lock == lock) {
+ next = donor;
+ donor->blocked_on_state = BO_WAKING;
+ wake_q_add(&wake_q, donor);
+ current->blocked_donor = NULL;
+ }
+ raw_spin_unlock(&donor->blocked_lock);
+ }
+ }
+
+ /*
+ * Failing that, pick the first waiter on the wait list.
+ */
+ if (!next && !list_empty(&lock->wait_list)) {
/* get the first entry from the wait-list: */
struct mutex_waiter *waiter =
list_first_entry(&lock->wait_list,
@@ -956,7 +983,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
next = waiter->task;
- raw_spin_lock(&next->blocked_lock);
+ raw_spin_lock_nested(&next->blocked_lock, SINGLE_DEPTH_NESTING);
debug_mutex_wake_waiter(lock, waiter);
WARN_ON(next->blocked_on != lock);
next->blocked_on_state = BO_WAKING;
@@ -967,6 +994,8 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
if (owner & MUTEX_FLAG_HANDOFF)
__mutex_handoff(lock, next);
+ if (sched_proxy_exec())
+ raw_spin_unlock(&current->blocked_lock);
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
wake_up_q(&wake_q);
preempt_enable();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6765,7 +6765,17 @@ proxy_migrate_task(struct rq *rq, struct rq_flags *rf,
* Find who @next (currently blocked on a mutex) can proxy for.
*
* Follow the blocked-on relation:
- * task->blocked_on -> mutex->owner -> task...
+ *
+ * ,-> task
+ * | | blocked-on
+ * | v
+ * blocked_donor | mutex
+ * | | owner
+ * | v
+ * `-- task
+ *
+ * and set the blocked_donor relation; the latter is used by the mutex
+ * code to find which (blocked) task to hand the lock off to.
*
* Lock order:
*
@@ -6897,6 +6907,8 @@ find_proxy_task(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
*/
raw_spin_unlock(&p->blocked_lock);
raw_spin_unlock(&mutex->wait_lock);
+
+ owner->blocked_donor = p;
}
WARN_ON_ONCE(owner && !owner->on_rq);
@@ -6979,6 +6991,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
unsigned long prev_state;
struct rq_flags rf;
struct rq *rq;
+ bool prev_not_proxied;
int cpu;
bool preserve_need_resched = false;
@@ -7030,9 +7043,11 @@ static void __sched notrace __schedule(unsigned int sched_mode)
switch_count = &prev->nvcsw;
}
+ prev_not_proxied = !prev->blocked_donor;
pick_again:
next = pick_next_task(rq, rq_selected(rq), &rf);
rq_set_selected(rq, next);
+ next->blocked_donor = NULL;
if (unlikely(task_is_blocked(next))) {
next = find_proxy_task(rq, next, &rf);
if (!next) {
@@ -7088,7 +7103,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
rq = context_switch(rq, prev, next, &rf);
} else {
/* In case next was already curr but just got blocked_donor */
- if (!task_current_selected(rq, next))
+ if (prev_not_proxied && next->blocked_donor)
proxy_tag_curr(rq, next);
rq_unpin_lock(rq, &rf);