[21/30] sched: schedule eagerly in resched_cpu()

Message ID 20240213055554.1802415-22-ankur.a.arora@oracle.com
State New
Series PREEMPT_AUTO: support lazy rescheduling

Commit Message

Ankur Arora Feb. 13, 2024, 5:55 a.m. UTC
resched_cpu() is used as an RCU hammer of last resort. Force
rescheduling eagerly with tif_resched(NR_now).

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 kernel/sched/core.c  | 15 +++++++++++----
 kernel/sched/sched.h |  1 +
 2 files changed, 12 insertions(+), 4 deletions(-)
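
[Editorial note: the policy table this patch produces is small, and the hunks
below are easier to follow with it spelled out. The following standalone C
model is an illustration, not kernel code: the real resched_opt_translate()
takes (struct task_struct *, enum resched_opt) and checks
IS_ENABLED(CONFIG_PREEMPT_AUTO) and is_idle_task(); here those checks are
folded into boolean parameters, and the main() harness is invented, so the
decision order compiles and runs on its own.]

/*
 * Standalone model of the resched_opt_translate() decision table
 * after this patch. Illustration only; see note above.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum { NR_now, NR_lazy } resched_t;

enum resched_opt {
	RESCHED_DEFAULT,
	RESCHED_FORCE,		/* new in this patch */
};

static resched_t resched_opt_translate(bool preempt_auto, bool idle,
				       enum resched_opt opt)
{
	if (!preempt_auto)		/* other preemption models: eager */
		return NR_now;

	if (opt == RESCHED_FORCE)	/* RCU hammer of last resort: eager */
		return NR_now;

	if (idle)			/* never keep the idle task running */
		return NR_now;

	return NR_lazy;			/* default: resched at exit to user */
}

int main(void)
{
	printf("PREEMPT_AUTO + RESCHED_FORCE   -> %s\n",
	       resched_opt_translate(true, false, RESCHED_FORCE) == NR_now ?
	       "NR_now" : "NR_lazy");
	printf("PREEMPT_AUTO + RESCHED_DEFAULT -> %s\n",
	       resched_opt_translate(true, false, RESCHED_DEFAULT) == NR_now ?
	       "NR_now" : "NR_lazy");
	return 0;
}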
  

Patch

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5e3dd95efb73..de963e8e2bee 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1035,9 +1035,9 @@  void wake_up_q(struct wake_q_head *head)
  * For preemption models other than PREEMPT_AUTO: always schedule
  * eagerly.
  *
- * For PREEMPT_AUTO: allow everything, whether running in user or
- * kernel context, to finish its time quanta, and mark for
- * rescheduling at the next exit to user.
+ * For PREEMPT_AUTO: schedule idle threads eagerly, allow everything
+ * else, whether running in user or kernel context, to finish its time
+ * quanta, and mark for rescheduling at the next exit to user.
  */
 static resched_t resched_opt_translate(struct task_struct *curr,
 				       enum resched_opt opt)
@@ -1045,6 +1045,9 @@  static resched_t resched_opt_translate(struct task_struct *curr,
 	if (!IS_ENABLED(CONFIG_PREEMPT_AUTO))
 		return NR_now;
 
+	if (opt == RESCHED_FORCE)
+		return NR_now;
+
 	if (is_idle_task(curr))
 		return NR_now;
 
@@ -1106,7 +1109,11 @@  void resched_cpu(int cpu)
 
 	raw_spin_rq_lock_irqsave(rq, flags);
 	if (cpu_online(cpu) || cpu == smp_processor_id())
-		resched_curr(rq);
+		/*
+		 * resched_cpu() is typically used as an RCU hammer.
+		 * Mark for imminent resched.
+		 */
+		__resched_curr(rq, RESCHED_FORCE);
 	raw_spin_rq_unlock_irqrestore(rq, flags);
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c3ae70ad23ec..3600a8673d08 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2464,6 +2464,7 @@  extern void reweight_task(struct task_struct *p, int prio);
 
 enum resched_opt {
 	RESCHED_DEFAULT,
+	RESCHED_FORCE,
 };
 
 extern void __resched_curr(struct rq *rq, enum resched_opt opt);
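
[Editorial note: for context on how the forced value takes effect. Elsewhere
in the series, resched_curr() stays the common entry point and forwards
RESCHED_DEFAULT, while this patch makes resched_cpu() pass RESCHED_FORCE; the
translated resched_t then selects between TIF_NEED_RESCHED and
TIF_NEED_RESCHED_LAZY via tif_resched(). The sketch below models that call
path under stated assumptions: the flag bit values, the reduced struct task,
and the harness are invented, and the translation is collapsed to the
FORCE-vs-default case this patch covers.]

/*
 * Standalone model of the call path this patch changes. The kernel
 * task is reduced to an int flag word; TIF bit values are invented.
 */
#include <stdio.h>

#define TIF_NEED_RESCHED	(1 << 3)	/* illustrative bit values */
#define TIF_NEED_RESCHED_LAZY	(1 << 4)

typedef enum { NR_now, NR_lazy } resched_t;
enum resched_opt { RESCHED_DEFAULT, RESCHED_FORCE };

struct task { int flags; };

static int tif_resched(resched_t rs)
{
	return rs == NR_now ? TIF_NEED_RESCHED : TIF_NEED_RESCHED_LAZY;
}

static void __resched_curr(struct task *curr, enum resched_opt opt)
{
	/* Translation collapsed to the case this patch adds. */
	resched_t rs = (opt == RESCHED_FORCE) ? NR_now : NR_lazy;

	curr->flags |= tif_resched(rs);
}

/* Common entry point: ordinary callers keep the default policy. */
static void resched_curr(struct task *curr)
{
	__resched_curr(curr, RESCHED_DEFAULT);
}

int main(void)
{
	struct task a = { 0 }, b = { 0 };

	resched_curr(&a);			/* ordinary path: lazy */
	__resched_curr(&b, RESCHED_FORCE);	/* resched_cpu() path: now */

	printf("default -> lazy flag set:  %d\n",
	       !!(a.flags & TIF_NEED_RESCHED_LAZY));
	printf("force   -> eager flag set: %d\n",
	       !!(b.flags & TIF_NEED_RESCHED));
	return 0;
}

Running the model prints the default path setting only the lazy flag while
the resched_cpu() path sets the eager one, which is exactly the "imminent
resched" distinction the new comment in resched_cpu() describes.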