[v2,2/7] sched: Prepare sched_asym_prefer() to handle idle state of SMT siblings

Message ID 20221122203532.15013-3-ricardo.neri-calderon@linux.intel.com
State New
Series x86/sched: Avoid unnecessary migrations within SMT domains

Commit Message

Ricardo Neri Nov. 22, 2022, 8:35 p.m. UTC
The throughput of a CPU decreases if one or more of its SMT siblings are
also busy. An idle, lower-priority core can then offer better throughput
than the busy, higher-priority core. Thus, the idle state of the SMT
siblings of a CPU must be considered when selecting CPUs by priority.

In some cases, sched_asym_prefer() does not need the idle state: when
building sched domains, and when comparing against the priority of the
preferred CPU of a sched group.

Add a new parameter to check the state of the SMT siblings of a CPU when
applicable.
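
For reference, a sketch of the resulting helper (its body is unchanged
by this patch; the new flag is only plumbed through here and, presumably,
acted upon later in the series):

  /*
   * Call-site convention: pass check_smt == true when comparing two
   * specific CPUs during load balancing, where the idle state of SMT
   * siblings matters; pass false when only static priority is relevant
   * (building sched domains, or the preferred CPU of a sched group).
   */
  static inline bool sched_asym_prefer(int a, int b, bool check_smt)
  {
  	/* For now, priority alone decides; check_smt is not yet used. */
  	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
  }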

While here, remove a spurious newline.

Cc: Ben Segall <bsegall@google.com>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim C. Chen <tim.c.chen@intel.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: x86@kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
---
Changes since v1:
 * Introduced this patch
---
 kernel/sched/fair.c     | 17 ++++++++++-------
 kernel/sched/sched.h    |  8 ++++++--
 kernel/sched/topology.c |  6 +++++-
 3 files changed, 21 insertions(+), 10 deletions(-)
  

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 18c672ff39ef..d18947a9c03e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8921,7 +8921,7 @@ static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds,
 		 * can help if it has higher priority and is idle (i.e.,
 		 * it has no running tasks).
 		 */
-		return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
+		return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu, false);
 	}
 
 	/*
@@ -8934,7 +8934,7 @@ static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds,
 	 * exactly one busy CPU. This covers SMT and non-SMT sched groups.
 	 */
 	if (sg_busy_cpus == 1 && !sds->local_stat.sum_nr_running)
-		return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
+		return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu, false);
 
 	return false;
 #else
@@ -8952,7 +8952,8 @@ sched_asym(struct lb_env *env, struct sd_lb_stats *sds,  struct sg_lb_stats *sgs
 	    (group->flags & SD_SHARE_CPUCAPACITY))
 		return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group);
 
-	return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
+	/* Neither env::dst_cpu nor group::asym_prefer_cpu has SMT siblings. */
+	return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu, false);
 }
 
 static inline bool
@@ -9118,7 +9119,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 
 	case group_asym_packing:
 		/* Prefer to move from lowest priority CPU's work */
-		if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
+		if (sched_asym_prefer(sg->asym_prefer_cpu,
+				      sds->busiest->asym_prefer_cpu,
+				      false))
 			return false;
 		break;
 
@@ -10060,7 +10063,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 
 		/* Make sure we only pull tasks from a CPU of lower priority */
 		if ((env->sd->flags & SD_ASYM_PACKING) &&
-		    sched_asym_prefer(i, env->dst_cpu) &&
+		    sched_asym_prefer(i, env->dst_cpu, true) &&
 		    nr_running == 1)
 			continue;
 
@@ -10153,7 +10156,7 @@ asym_active_balance(struct lb_env *env)
 	 * highest priority CPUs.
 	 */
 	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
-	       sched_asym_prefer(env->dst_cpu, env->src_cpu);
+	       sched_asym_prefer(env->dst_cpu, env->src_cpu, true);
 }
 
 static inline bool
@@ -10889,7 +10892,7 @@ static void nohz_balancer_kick(struct rq *rq)
 		 * around.
 		 */
 		for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
-			if (sched_asym_prefer(i, cpu)) {
+			if (sched_asym_prefer(i, cpu, true)) {
 				flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
 				goto unlock;
 			}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a4a20046e586..0fc7c0130755 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -795,8 +795,12 @@ static inline long se_weight(struct sched_entity *se)
 	return scale_load_down(se->load.weight);
 }
 
-
-static inline bool sched_asym_prefer(int a, int b)
+/*
+ * Compare the asym-packing priorities of two CPUs. Checking the idle state
+ * of SMT siblings, if any, is not needed when building the sched domains or
+ * when comparing against the priority of the preferred CPU of a sched group.
+ */
+static inline bool sched_asym_prefer(int a, int b, bool check_smt)
 {
 	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
 }
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8739c2a5a54e..8154ef590b9f 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1282,7 +1282,11 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 		for_each_cpu(cpu, sched_group_span(sg)) {
 			if (max_cpu < 0)
 				max_cpu = cpu;
-			else if (sched_asym_prefer(cpu, max_cpu))
+			/*
+			 * The priority of a CPU must be unaffected by the
+			 * idle state of its SMT siblings, if any.
+			 */
+			else if (sched_asym_prefer(cpu, max_cpu, false))
 				max_cpu = cpu;
 		}
 		sg->asym_prefer_cpu = max_cpu;
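
Illustration only: a minimal, self-contained userspace model of the
decision this series builds toward. Every name below (asym_prefer(),
core_is_idle(), the prio/sibling/busy tables) is a toy stand-in, not a
kernel API, and the SMT-idle rule sketched here is an assumption about
where the series is headed, not this patch's behavior:

  #include <stdbool.h>
  #include <stdio.h>

  #define NR_CPUS 4

  /*
   * CPUs 0 and 1 are SMT siblings on a high-priority core;
   * CPUs 2 and 3 are single-threaded, lower-priority cores.
   */
  static const int prio[NR_CPUS]    = { 2, 2, 1, 1 };
  static const int sibling[NR_CPUS] = { 1, 0, -1, -1 }; /* -1: none */
  static bool busy[NR_CPUS];

  /* True if the CPU and its SMT sibling, if any, are both idle. */
  static bool core_is_idle(int cpu)
  {
  	if (busy[cpu])
  		return false;
  	return sibling[cpu] < 0 || !busy[sibling[cpu]];
  }

  /* Toy counterpart of sched_asym_prefer(): prefer @a over @b? */
  static bool asym_prefer(int a, int b, bool check_smt)
  {
  	/*
  	 * An idle lower-priority core beats a higher-priority CPU whose
  	 * core is partially busy, the situation the commit message
  	 * describes.
  	 */
  	if (check_smt && !core_is_idle(a) && core_is_idle(b))
  		return false;
  	return prio[a] > prio[b];
  }

  int main(void)
  {
  	busy[1] = true; /* the sibling of CPU 0 is running a task */

  	/* Priority only: CPU 0 wins. Prints 1. */
  	printf("%d\n", asym_prefer(0, 2, false));
  	/* With the SMT idle check: the idle core 2 wins. Prints 0. */
  	printf("%d\n", asym_prefer(0, 2, true));
  	return 0;
  }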