[v4,4/4] sched/fair: Check the SD_ASYM_PACKING flag in sched_use_asym_prio()

Message ID 20240207034704.935774-4-alexs@kernel.org
State New
Headers
Series [v4,1/4] sched/topology: Remove duplicate descriptions from TOPOLOGY_SD_FLAGS |

Commit Message

alexs@kernel.org Feb. 7, 2024, 3:47 a.m. UTC
  From: Alex Shi <alexs@kernel.org>

sched_use_asym_prio() checks whether CPU priorities should be used. It
makes sense to check for the SD_ASYM_PACKING flag inside the function.
Since both sched_asym() and sched_group_asym() use sched_use_asym_prio(),
remove the now superfluous checks for the flag in various places.

Signed-off-by: Alex Shi <alexs@kernel.org>
To: linux-kernel@vger.kernel.org
To: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
To: Ben Segall <bsegall@google.com>
To: Steven Rostedt <rostedt@goodmis.org>
To: Dietmar Eggemann <dietmar.eggemann@arm.com>
To: Valentin Schneider <vschneid@redhat.com>
To: Daniel Bristot de Oliveira <bristot@redhat.com>
To: Vincent Guittot <vincent.guittot@linaro.org>
To: Juri Lelli <juri.lelli@redhat.com>
To: Peter Zijlstra <peterz@infradead.org>
To: Ingo Molnar <mingo@redhat.com>
---
 kernel/sched/fair.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)
  

Comments

kuiliang Shi Feb. 9, 2024, 11:12 a.m. UTC | #1
Hi Valentin&Ricardo,

Any more comments on this patch? Or a Reviewed-by from you as a Chinese new year gift for this patch? :)

Thanks
Alex

On 2/7/24 11:47 AM, alexs@kernel.org wrote:
> From: Alex Shi <alexs@kernel.org>
> 
> sched_use_asym_prio() checks whether CPU priorities should be used. It
> makes sense to check for the SD_ASYM_PACKING() inside the function.
> Since both sched_asym() and sched_group_asym() use sched_use_asym_prio(),
> remove the now superfluous checks for the flag in various places.
> 
> Signed-off-by: Alex Shi <alexs@kernel.org>
> To: linux-kernel@vger.kernel.org
> To: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
> To: Ben Segall <bsegall@google.com>
> To: Steven Rostedt <rostedt@goodmis.org>
> To: Dietmar Eggemann <dietmar.eggemann@arm.com>
> To: Valentin Schneider <vschneid@redhat.com>
> To: Daniel Bristot de Oliveira <bristot@redhat.com>
> To: Vincent Guittot <vincent.guittot@linaro.org>
> To: Juri Lelli <juri.lelli@redhat.com>
> To: Peter Zijlstra <peterz@infradead.org>
> To: Ingo Molnar <mingo@redhat.com>
> ---
>  kernel/sched/fair.c | 16 +++++++---------
>  1 file changed, 7 insertions(+), 9 deletions(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 942b6358f683..10ae28e1c088 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9740,6 +9740,9 @@ group_type group_classify(unsigned int imbalance_pct,
>   */
>  static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
>  {
> +	if (!(sd->flags & SD_ASYM_PACKING))
> +		return false;
> +
>  	if (!sched_smt_active())
>  		return true;
>  
> @@ -9941,11 +9944,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
>  	sgs->group_weight = group->group_weight;
>  
>  	/* Check if dst CPU is idle and preferred to this group */
> -	if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
> -	    env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
> -	    sched_group_asym(env, sgs, group)) {
> +	if (!local_group && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
> +	    sched_group_asym(env, sgs, group))
>  		sgs->group_asym_packing = 1;
> -	}
>  
>  	/* Check for loaded SMT group to be balanced to dst CPU */
>  	if (!local_group && smt_balance(env, sgs, group))
> @@ -11041,9 +11042,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
>  		 * If balancing between cores, let lower priority CPUs help
>  		 * SMT cores with more than one busy sibling.
>  		 */
> -		if ((env->sd->flags & SD_ASYM_PACKING) &&
> -		    sched_asym(env->sd, i, env->dst_cpu) &&
> -		    nr_running == 1)
> +		if (sched_asym(env->sd, i, env->dst_cpu) && nr_running == 1)
>  			continue;
>  
>  		switch (env->migration_type) {
> @@ -11139,8 +11138,7 @@ asym_active_balance(struct lb_env *env)
>  	 * the lower priority @env::dst_cpu help it. Do not follow
>  	 * CPU priority.
>  	 */
> -	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
> -	       sched_use_asym_prio(env->sd, env->dst_cpu) &&
> +	return env->idle != CPU_NOT_IDLE && sched_use_asym_prio(env->sd, env->dst_cpu) &&
>  	       (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
>  		!sched_use_asym_prio(env->sd, env->src_cpu));
>  }
  
Ricardo Neri Feb. 9, 2024, 1:28 p.m. UTC | #2
On Fri, Feb 09, 2024 at 07:12:10PM +0800, kuiliang Shi wrote:
> Hi Valentin&Ricardo,
> 
> Any more comment for this patch? or Reviewed-by from you as a Chinese new year gift for this patch? :)

I will give you a Tested-by tag ;). I have started testing your patches but
I am not done yet.

> 
> Thanks
> Alex
> 
> On 2/7/24 11:47 AM, alexs@kernel.org wrote:
> > From: Alex Shi <alexs@kernel.org>
> > 
> > sched_use_asym_prio() checks whether CPU priorities should be used. It
> > makes sense to check for the SD_ASYM_PACKING() inside the function.
> > Since both sched_asym() and sched_group_asym() use sched_use_asym_prio(),
> > remove the now superfluous checks for the flag in various places.
> > 
> > Signed-off-by: Alex Shi <alexs@kernel.org>
> > To: linux-kernel@vger.kernel.org
> > To: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
> > To: Ben Segall <bsegall@google.com>
> > To: Steven Rostedt <rostedt@goodmis.org>
> > To: Dietmar Eggemann <dietmar.eggemann@arm.com>
> > To: Valentin Schneider <vschneid@redhat.com>
> > To: Daniel Bristot de Oliveira <bristot@redhat.com>
> > To: Vincent Guittot <vincent.guittot@linaro.org>
> > To: Juri Lelli <juri.lelli@redhat.com>
> > To: Peter Zijlstra <peterz@infradead.org>
> > To: Ingo Molnar <mingo@redhat.com>
> > ---
> >  kernel/sched/fair.c | 16 +++++++---------
> >  1 file changed, 7 insertions(+), 9 deletions(-)
> > 
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 942b6358f683..10ae28e1c088 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -9740,6 +9740,9 @@ group_type group_classify(unsigned int imbalance_pct,
> >   */
> >  static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
> >  {
> > +	if (!(sd->flags & SD_ASYM_PACKING))
> > +		return false;
> > +
> >  	if (!sched_smt_active())
> >  		return true;
> >  
> > @@ -9941,11 +9944,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
> >  	sgs->group_weight = group->group_weight;
> >  
> >  	/* Check if dst CPU is idle and preferred to this group */
> > -	if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
> > -	    env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
> > -	    sched_group_asym(env, sgs, group)) {
> > +	if (!local_group && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
> > +	    sched_group_asym(env, sgs, group))
> >  		sgs->group_asym_packing = 1;
> > -	}
> >  
> >  	/* Check for loaded SMT group to be balanced to dst CPU */
> >  	if (!local_group && smt_balance(env, sgs, group))
> > @@ -11041,9 +11042,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
> >  		 * If balancing between cores, let lower priority CPUs help
> >  		 * SMT cores with more than one busy sibling.
> >  		 */
> > -		if ((env->sd->flags & SD_ASYM_PACKING) &&
> > -		    sched_asym(env->sd, i, env->dst_cpu) &&
> > -		    nr_running == 1)
> > +		if (sched_asym(env->sd, i, env->dst_cpu) && nr_running == 1)
> >  			continue;
> >  
> >  		switch (env->migration_type) {
> > @@ -11139,8 +11138,7 @@ asym_active_balance(struct lb_env *env)
> >  	 * the lower priority @env::dst_cpu help it. Do not follow
> >  	 * CPU priority.
> >  	 */
> > -	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
> > -	       sched_use_asym_prio(env->sd, env->dst_cpu) &&
> > +	return env->idle != CPU_NOT_IDLE && sched_use_asym_prio(env->sd, env->dst_cpu) &&
> >  	       (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
> >  		!sched_use_asym_prio(env->sd, env->src_cpu));
> >  }
  
Ricardo Neri Feb. 10, 2024, 1:12 a.m. UTC | #3
On Wed, Feb 07, 2024 at 11:47:04AM +0800, alexs@kernel.org wrote:
> From: Alex Shi <alexs@kernel.org>
> 
> sched_use_asym_prio() checks whether CPU priorities should be used. It
> makes sense to check for the SD_ASYM_PACKING() inside the function.
> Since both sched_asym() and sched_group_asym() use sched_use_asym_prio(),
> remove the now superfluous checks for the flag in various places.

Reviewed-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>

Tested on Alder Lake and Meteor Lake, which use asym_packing.

Tested-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>

> 
> Signed-off-by: Alex Shi <alexs@kernel.org>
> To: linux-kernel@vger.kernel.org
> To: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
> To: Ben Segall <bsegall@google.com>
> To: Steven Rostedt <rostedt@goodmis.org>
> To: Dietmar Eggemann <dietmar.eggemann@arm.com>
> To: Valentin Schneider <vschneid@redhat.com>
> To: Daniel Bristot de Oliveira <bristot@redhat.com>
> To: Vincent Guittot <vincent.guittot@linaro.org>
> To: Juri Lelli <juri.lelli@redhat.com>
> To: Peter Zijlstra <peterz@infradead.org>
> To: Ingo Molnar <mingo@redhat.com>
> ---
>  kernel/sched/fair.c | 16 +++++++---------
>  1 file changed, 7 insertions(+), 9 deletions(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 942b6358f683..10ae28e1c088 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9740,6 +9740,9 @@ group_type group_classify(unsigned int imbalance_pct,
>   */
>  static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
>  {
> +	if (!(sd->flags & SD_ASYM_PACKING))
> +		return false;
> +
>  	if (!sched_smt_active())
>  		return true;
>  
> @@ -9941,11 +9944,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
>  	sgs->group_weight = group->group_weight;
>  
>  	/* Check if dst CPU is idle and preferred to this group */
> -	if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
> -	    env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
> -	    sched_group_asym(env, sgs, group)) {
> +	if (!local_group && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
> +	    sched_group_asym(env, sgs, group))
>  		sgs->group_asym_packing = 1;
> -	}
>  
>  	/* Check for loaded SMT group to be balanced to dst CPU */
>  	if (!local_group && smt_balance(env, sgs, group))
> @@ -11041,9 +11042,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
>  		 * If balancing between cores, let lower priority CPUs help
>  		 * SMT cores with more than one busy sibling.
>  		 */
> -		if ((env->sd->flags & SD_ASYM_PACKING) &&
> -		    sched_asym(env->sd, i, env->dst_cpu) &&
> -		    nr_running == 1)
> +		if (sched_asym(env->sd, i, env->dst_cpu) && nr_running == 1)
>  			continue;
>  
>  		switch (env->migration_type) {
> @@ -11139,8 +11138,7 @@ asym_active_balance(struct lb_env *env)
>  	 * the lower priority @env::dst_cpu help it. Do not follow
>  	 * CPU priority.
>  	 */
> -	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
> -	       sched_use_asym_prio(env->sd, env->dst_cpu) &&
> +	return env->idle != CPU_NOT_IDLE && sched_use_asym_prio(env->sd, env->dst_cpu) &&
>  	       (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
>  		!sched_use_asym_prio(env->sd, env->src_cpu));
>  }
> -- 
> 2.43.0
>
  
kuiliang Shi Feb. 10, 2024, 11:08 a.m. UTC | #4
Ricardo Neri <ricardo.neri-calderon@linux.intel.com> 于2024年2月10日周六 09:11写道:
>
> On Wed, Feb 07, 2024 at 11:47:04AM +0800, alexs@kernel.org wrote:
> > From: Alex Shi <alexs@kernel.org>
> >
> > sched_use_asym_prio() checks whether CPU priorities should be used. It
> > makes sense to check for the SD_ASYM_PACKING() inside the function.
> > Since both sched_asym() and sched_group_asym() use sched_use_asym_prio(),
> > remove the now superfluous checks for the flag in various places.
>
> Reviewed-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
>
> Tested on Alder Lake and Meteor Lake, which use asym_packing.
>
> Tested-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>

It's the best gift for my lunar new year! :)
Next version with your Tested and Reviewed is coming.

Thanks a lot!
Alex
>
> >
> > Signed-off-by: Alex Shi <alexs@kernel.org>
> > To: linux-kernel@vger.kernel.org
> > To: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
> > To: Ben Segall <bsegall@google.com>
> > To: Steven Rostedt <rostedt@goodmis.org>
> > To: Dietmar Eggemann <dietmar.eggemann@arm.com>
> > To: Valentin Schneider <vschneid@redhat.com>
> > To: Daniel Bristot de Oliveira <bristot@redhat.com>
> > To: Vincent Guittot <vincent.guittot@linaro.org>
> > To: Juri Lelli <juri.lelli@redhat.com>
> > To: Peter Zijlstra <peterz@infradead.org>
> > To: Ingo Molnar <mingo@redhat.com>
> > ---
> >  kernel/sched/fair.c | 16 +++++++---------
> >  1 file changed, 7 insertions(+), 9 deletions(-)
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 942b6358f683..10ae28e1c088 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -9740,6 +9740,9 @@ group_type group_classify(unsigned int imbalance_pct,
> >   */
> >  static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
> >  {
> > +     if (!(sd->flags & SD_ASYM_PACKING))
> > +             return false;
> > +
> >       if (!sched_smt_active())
> >               return true;
> >
> > @@ -9941,11 +9944,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
> >       sgs->group_weight = group->group_weight;
> >
> >       /* Check if dst CPU is idle and preferred to this group */
> > -     if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
> > -         env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
> > -         sched_group_asym(env, sgs, group)) {
> > +     if (!local_group && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
> > +         sched_group_asym(env, sgs, group))
> >               sgs->group_asym_packing = 1;
> > -     }
> >
> >       /* Check for loaded SMT group to be balanced to dst CPU */
> >       if (!local_group && smt_balance(env, sgs, group))
> > @@ -11041,9 +11042,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
> >                * If balancing between cores, let lower priority CPUs help
> >                * SMT cores with more than one busy sibling.
> >                */
> > -             if ((env->sd->flags & SD_ASYM_PACKING) &&
> > -                 sched_asym(env->sd, i, env->dst_cpu) &&
> > -                 nr_running == 1)
> > +             if (sched_asym(env->sd, i, env->dst_cpu) && nr_running == 1)
> >                       continue;
> >
> >               switch (env->migration_type) {
> > @@ -11139,8 +11138,7 @@ asym_active_balance(struct lb_env *env)
> >        * the lower priority @env::dst_cpu help it. Do not follow
> >        * CPU priority.
> >        */
> > -     return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
> > -            sched_use_asym_prio(env->sd, env->dst_cpu) &&
> > +     return env->idle != CPU_NOT_IDLE && sched_use_asym_prio(env->sd, env->dst_cpu) &&
> >              (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
> >               !sched_use_asym_prio(env->sd, env->src_cpu));
> >  }
> > --
> > 2.43.0
> >
  

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 942b6358f683..10ae28e1c088 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9740,6 +9740,9 @@  group_type group_classify(unsigned int imbalance_pct,
  */
 static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
 {
+	if (!(sd->flags & SD_ASYM_PACKING))
+		return false;
+
 	if (!sched_smt_active())
 		return true;
 
@@ -9941,11 +9944,9 @@  static inline void update_sg_lb_stats(struct lb_env *env,
 	sgs->group_weight = group->group_weight;
 
 	/* Check if dst CPU is idle and preferred to this group */
-	if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
-	    env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
-	    sched_group_asym(env, sgs, group)) {
+	if (!local_group && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
+	    sched_group_asym(env, sgs, group))
 		sgs->group_asym_packing = 1;
-	}
 
 	/* Check for loaded SMT group to be balanced to dst CPU */
 	if (!local_group && smt_balance(env, sgs, group))
@@ -11041,9 +11042,7 @@  static struct rq *find_busiest_queue(struct lb_env *env,
 		 * If balancing between cores, let lower priority CPUs help
 		 * SMT cores with more than one busy sibling.
 		 */
-		if ((env->sd->flags & SD_ASYM_PACKING) &&
-		    sched_asym(env->sd, i, env->dst_cpu) &&
-		    nr_running == 1)
+		if (sched_asym(env->sd, i, env->dst_cpu) && nr_running == 1)
 			continue;
 
 		switch (env->migration_type) {
@@ -11139,8 +11138,7 @@  asym_active_balance(struct lb_env *env)
 	 * the lower priority @env::dst_cpu help it. Do not follow
 	 * CPU priority.
 	 */
-	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
-	       sched_use_asym_prio(env->sd, env->dst_cpu) &&
+	return env->idle != CPU_NOT_IDLE && sched_use_asym_prio(env->sd, env->dst_cpu) &&
 	       (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
 		!sched_use_asym_prio(env->sd, env->src_cpu));
 }