[v3,3/4] sched/fair: packing func sched_use_asym_prio()/sched_asym_prefer()
Commit Message
From: Alex Shi <alexs@kernel.org>
Consolidate the functions sched_use_asym_prio() and sched_asym_prefer()
into one, and rename sched_asym() to sched_group_asym().
This makes the code easier to read. No functional changes.
Signed-off-by: Alex Shi <alexs@kernel.org>
To: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
To: Valentin Schneider <vschneid@redhat.com>
To: Vincent Guittot <vincent.guittot@linaro.org>
To: Peter Zijlstra <peterz@infradead.org>
To: Ingo Molnar <mingo@redhat.com>
---
kernel/sched/fair.c | 35 ++++++++++++++++++-----------------
1 file changed, 18 insertions(+), 17 deletions(-)
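
The shape of the change, condensed from the diff quoted in the comments below (an excerpt from find_busiest_queue(), not a standalone build):

	/* Before: call sites open-code the two checks back to back: */
	if ((env->sd->flags & SD_ASYM_PACKING) &&
	    sched_use_asym_prio(env->sd, i) &&
	    sched_asym_prefer(i, env->dst_cpu) &&
	    nr_running == 1)
		continue;

	/* After: the pair collapses into the new sched_asym() helper: */
	if ((env->sd->flags & SD_ASYM_PACKING) &&
	    sched_asym(env->sd, i, env->dst_cpu) &&
	    nr_running == 1)
		continue;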
Comments
Hi Ricardo,
Since your suggestions were taken into this and the next patch, would you mind giving your Reviewed-by for both of them?
Thanks
Alex
On 2/1/24 7:54 PM, alexs@kernel.org wrote:
> From: Alex Shi <alexs@kernel.org>
>
> Consolidate the functions sched_use_asym_prio() and sched_asym_prefer()
> into one. and rename sched_asym() as sched_group_asym().
> This makes the code easier to read. No functional changes.
>
> Signed-off-by: Alex Shi <alexs@kernel.org>
> To: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
> To: Valentin Schneider <vschneid@redhat.com>
> To: Vincent Guittot <vincent.guittot@linaro.org>
> To: Peter Zijlstra <peterz@infradead.org>
> To: Ingo Molnar <mingo@redhat.com>
> ---
> kernel/sched/fair.c | 35 ++++++++++++++++++-----------------
> 1 file changed, 18 insertions(+), 17 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 8d70417f5125..44fd5e2ca642 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9747,8 +9747,15 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
> return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
> }
>
> +static inline bool sched_asym(struct sched_domain *sd, int dst_cpu, int src_cpu)
> +{
> + /* Check if asym balance applicable, then check priorities.*/
> + return sched_use_asym_prio(sd, dst_cpu) &&
> + sched_asym_prefer(dst_cpu, src_cpu);
> +}
> +
> /**
> - * sched_asym - Check if the destination CPU can do asym_packing load balance
> + * sched_group_asym - Check if the destination CPU can do asym_packing balance
> * @env: The load balancing environment
> * @sgs: Load-balancing statistics of the candidate busiest group
> * @group: The candidate busiest group
> @@ -9768,22 +9775,18 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
> * otherwise.
> */
> static inline bool
> -sched_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
> +sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
> {
> - /* Ensure that the whole local core is idle, if applicable. */
> - if (!sched_use_asym_prio(env->sd, env->dst_cpu))
> - return false;
> -
> /*
> - * CPU priorities does not make sense for SMT cores with more than one
> + * CPU priorities do not make sense for SMT cores with more than one
> * busy sibling.
> */
> - if (group->flags & SD_SHARE_CPUCAPACITY) {
> - if (sgs->group_weight - sgs->idle_cpus != 1)
> - return false;
> - }
>
> - return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
> + if ((group->flags & SD_SHARE_CPUCAPACITY) &&
> + (sgs->group_weight - sgs->idle_cpus != 1))
> + return false;
> +
> + return sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu);
> }
>
> /* One group has more than one SMT CPU while the other group does not */
> @@ -9939,7 +9942,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
> /* Check if dst CPU is idle and preferred to this group */
> if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
> env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
> - sched_asym(env, sgs, group)) {
> + sched_group_asym(env, sgs, group)) {
> sgs->group_asym_packing = 1;
> }
>
> @@ -11038,8 +11041,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
> * SMT cores with more than one busy sibling.
> */
> if ((env->sd->flags & SD_ASYM_PACKING) &&
> - sched_use_asym_prio(env->sd, i) &&
> - sched_asym_prefer(i, env->dst_cpu) &&
> + sched_asym(env->sd, i, env->dst_cpu) &&
> nr_running == 1)
> continue;
>
> @@ -11909,8 +11911,7 @@ static void nohz_balancer_kick(struct rq *rq)
> * preferred CPU must be idle.
> */
> for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
> - if (sched_use_asym_prio(sd, i) &&
> - sched_asym_prefer(i, cpu)) {
> + if (sched_asym(sd, i, cpu)) {
> flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
> goto unlock;
> }
On Thu, Feb 01, 2024 at 07:54:46PM +0800, alexs@kernel.org wrote:
> From: Alex Shi <alexs@kernel.org>
subject:
sched/fair: packing func sched_use_asym_prio()/sched_asym_prefer()
Do not use a gerund in the subject. Better to say:
sched/fair: Rework sched_use_asym_prio() and sched_asym_prefer()
>
> Consolidate the functions sched_use_asym_prio() and sched_asym_prefer()
> into one. and rename sched_asym() as sched_group_asym().
> This makes the code easier to read. No functional changes.
Maybe give more reasons?
sched_use_asym_prio() and sched_asym_prefer() are used together in various
places. Consolidate them into a single function sched_asym().
The existing sched_asym() is only used when collecting statistics
of a scheduling group. Rename it as sched_group_asym().
>
> Signed-off-by: Alex Shi <alexs@kernel.org>
> To: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
> To: Valentin Schneider <vschneid@redhat.com>
> To: Vincent Guittot <vincent.guittot@linaro.org>
> To: Peter Zijlstra <peterz@infradead.org>
> To: Ingo Molnar <mingo@redhat.com>
> ---
> kernel/sched/fair.c | 35 ++++++++++++++++++-----------------
> 1 file changed, 18 insertions(+), 17 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 8d70417f5125..44fd5e2ca642 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9747,8 +9747,15 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
> return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
> }
>
> +static inline bool sched_asym(struct sched_domain *sd, int dst_cpu, int src_cpu)
> +{
> + /* Check if asym balance applicable, then check priorities.*/
Perhaps the comment can be made more descriptive:
/*
* First check if @dst_cpu can do asym_packing load balance. Only do it
* if it has higher priority than @src_cpu.
*/
> + return sched_use_asym_prio(sd, dst_cpu) &&
> + sched_asym_prefer(dst_cpu, src_cpu);
> +}
> +
> /**
> - * sched_asym - Check if the destination CPU can do asym_packing load balance
> + * sched_group_asym - Check if the destination CPU can do asym_packing balance
> * @env: The load balancing environment
> * @sgs: Load-balancing statistics of the candidate busiest group
> * @group: The candidate busiest group
> @@ -9768,22 +9775,18 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
> * otherwise.
> */
> static inline bool
> -sched_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
> +sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
> {
> - /* Ensure that the whole local core is idle, if applicable. */
> - if (!sched_use_asym_prio(env->sd, env->dst_cpu))
> - return false;
> -
> /*
> - * CPU priorities does not make sense for SMT cores with more than one
> + * CPU priorities do not make sense for SMT cores with more than one
> * busy sibling.
> */
> - if (group->flags & SD_SHARE_CPUCAPACITY) {
> - if (sgs->group_weight - sgs->idle_cpus != 1)
> - return false;
> - }
>
> - return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
After applying this patch there is a blank line between the comment and the
return statement. Can you remove it?
> + if ((group->flags & SD_SHARE_CPUCAPACITY) &&
> + (sgs->group_weight - sgs->idle_cpus != 1))
> + return false;
> +
> + return sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu);
> }
>
> /* One group has more than one SMT CPU while the other group does not */
> @@ -9939,7 +9942,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
> /* Check if dst CPU is idle and preferred to this group */
> if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
> env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
> - sched_asym(env, sgs, group)) {
> + sched_group_asym(env, sgs, group)) {
> sgs->group_asym_packing = 1;
> }
>
> @@ -11038,8 +11041,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
> * SMT cores with more than one busy sibling.
> */
> if ((env->sd->flags & SD_ASYM_PACKING) &&
> - sched_use_asym_prio(env->sd, i) &&
> - sched_asym_prefer(i, env->dst_cpu) &&
> + sched_asym(env->sd, i, env->dst_cpu) &&
> nr_running == 1)
> continue;
>
> @@ -11909,8 +11911,7 @@ static void nohz_balancer_kick(struct rq *rq)
> * preferred CPU must be idle.
> */
> for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
> - if (sched_use_asym_prio(sd, i) &&
> - sched_asym_prefer(i, cpu)) {
> + if (sched_asym(sd, i, cpu)) {
> flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
> goto unlock;
> }
> --
> 2.43.0
>
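
Putting both review suggestions together, the two helpers would read roughly as follows after the rework (a sketch assembled from this thread, not necessarily the committed version):

	static inline bool sched_asym(struct sched_domain *sd, int dst_cpu, int src_cpu)
	{
		/*
		 * First check if @dst_cpu can do asym_packing load balance. Only do it
		 * if it has higher priority than @src_cpu.
		 */
		return sched_use_asym_prio(sd, dst_cpu) &&
		       sched_asym_prefer(dst_cpu, src_cpu);
	}

	static inline bool
	sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
	{
		/*
		 * CPU priorities do not make sense for SMT cores with more than one
		 * busy sibling.
		 */
		if ((group->flags & SD_SHARE_CPUCAPACITY) &&
		    (sgs->group_weight - sgs->idle_cpus != 1))
			return false;

		return sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu);
	}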