[v4,3/8] sched: Tighten unpinned rq lock window in newidle_balance()
In newidle_balance(), we may drop and reacquire the rq lock in the
load-balance phase of the function. We currently unpin the lock with
rq_unpin_lock() before we check rq->rd->overload and rq->avg_idle,
which is unnecessary: on the early-exit paths the lock is never
actually dropped, yet we still pay for the unpin/repin pair. Let's
tighten the window where the lock is unpinned to cover only the region
where it may actually be dropped.
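
As a self-contained illustration of the resulting discipline, here is a
minimal user-space sketch. This is not kernel code: struct toy_rq,
toy_unpin(), toy_repin() and toy_newidle_balance() are hypothetical
stand-ins, with pinning modelled as a plain assertion flag the way
rq_pin_lock()/rq_unpin_lock() model it via lockdep. The early bail-out
returns with the lock still pinned; the unpin/repin pair brackets only
the window where the lock is really dropped.

/*
 * Toy model of a runqueue with a pinnable lock (hypothetical, not the
 * kernel's struct rq): "pinned" records the holder's promise not to
 * drop the lock.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_rq {
	pthread_mutex_t lock;
	bool pinned;	/* set while the holder promises not to drop the lock */
	bool overload;
};

static void toy_unpin(struct toy_rq *rq) { assert(rq->pinned); rq->pinned = false; }
static void toy_repin(struct toy_rq *rq) { assert(!rq->pinned); rq->pinned = true; }

/* Called with rq->lock held and pinned, like newidle_balance(). */
static int toy_newidle_balance(struct toy_rq *rq)
{
	int pulled = 0;

	/* Early exit: the lock is never dropped, so it stays pinned. */
	if (!rq->overload)
		return 0;

	toy_unpin(rq);			/* unpin only once we will drop the lock */
	pthread_mutex_unlock(&rq->lock);

	pulled = 1;			/* stand-in for the load-balance work */

	pthread_mutex_lock(&rq->lock);
	toy_repin(rq);			/* repin immediately after reacquiring */

	return pulled;
}

int main(void)
{
	struct toy_rq rq = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.pinned = false,
		.overload = true,
	};

	pthread_mutex_lock(&rq.lock);
	toy_repin(&rq);			/* caller enters with the lock pinned */
	printf("pulled=%d\n", toy_newidle_balance(&rq));
	toy_unpin(&rq);
	pthread_mutex_unlock(&rq.lock);
	return 0;
}

Note that rq_unpin_lock()/rq_repin_lock() are annotation helpers
(lockdep pinning plus clock-update tracking) rather than the lock
itself, so the point of the change is to keep the annotation honest
about where the lock can actually be dropped.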
Suggested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: David Vernet <void@manifault.com>
---
kernel/sched/fair.c | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
@@ -12296,14 +12296,6 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	if (!cpu_active(this_cpu))
 		return 0;
 
-	/*
-	 * This is OK, because current is on_cpu, which avoids it being picked
-	 * for load-balance and preemption/IRQs are still disabled avoiding
-	 * further scheduler activity on it and we're being very careful to
-	 * re-start the picking loop.
-	 */
-	rq_unpin_lock(this_rq, rf);
-
 	rcu_read_lock();
 	sd = rcu_dereference_check_sched_domain(this_rq->sd);
 
@@ -12318,6 +12310,13 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	}
 	rcu_read_unlock();
 
+	/*
+	 * This is OK, because current is on_cpu, which avoids it being picked
+	 * for load-balance and preemption/IRQs are still disabled avoiding
+	 * further scheduler activity on it and we're being very careful to
+	 * re-start the picking loop.
+	 */
+	rq_unpin_lock(this_rq, rf);
 	raw_spin_rq_unlock(this_rq);
 
 	t0 = sched_clock_cpu(this_cpu);
@@ -12358,6 +12357,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	rcu_read_unlock();
 
 	raw_spin_rq_lock(this_rq);
+	rq_repin_lock(this_rq, rf);
 
 	if (curr_cost > this_rq->max_idle_balance_cost)
 		this_rq->max_idle_balance_cost = curr_cost;
@@ -12384,8 +12384,6 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	else
 		nohz_newidle_balance(this_rq);
 
-	rq_repin_lock(this_rq, rf);
-
 	return pulled_task;
 }
 