[4/5] sched/fair: add a helper function _sched_asym()
Commit Message
From: Alex Shi <alexs@kernel.org>
Use this function in sched_asym() and other paths to simplify the code.
No functional change.
Signed-off-by: Alex Shi <alexs@kernel.org>
To: Valentin Schneider <vschneid@redhat.com>
To: Vincent Guittot <vincent.guittot@linaro.org>
To: Peter Zijlstra <peterz@infradead.org>
To: Ingo Molnar <mingo@redhat.com>
---
kernel/sched/fair.c | 27 +++++++++++++--------------
1 file changed, 13 insertions(+), 14 deletions(-)
Comments
On Wed, Jan 17, 2024 at 04:57:14PM +0800, alexs@kernel.org wrote:
> From: Alex Shi <alexs@kernel.org>
>
> Use this function in sched_asym() and other paths to simplify the code.
> No functional change.
>
> Signed-off-by: Alex Shi <alexs@kernel.org>
> To: Valentin Schneider <vschneid@redhat.com>
> To: Vincent Guittot <vincent.guittot@linaro.org>
> To: Peter Zijlstra <peterz@infradead.org>
> To: Ingo Molnar <mingo@redhat.com>
> ---
> kernel/sched/fair.c | 27 +++++++++++++--------------
> 1 file changed, 13 insertions(+), 14 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index ebd659af2d78..96163ab69ae0 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9745,6 +9745,14 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
> (sd->flags & SD_SHARE_CPUCAPACITY) || is_core_idle(cpu);
> }
>
> +static inline bool _sched_asym(struct sched_domain *sd,
> + int dst_cpu, int repl_cpu)
What does repl_cpu mean? Maybe rename it to src_cpu?
> +{
> + /* Ensure that the whole local core is idle, if applicable. */
> + return sched_use_asym_prio(sd, dst_cpu) &&
> + sched_asym_prefer(dst_cpu, repl_cpu);
The comment no longer applies to the whole expression. Perhaps rewording is
in order. First we check that the whole local core is idle, if applicable
(i.e., when not balancing among SMT siblings). Then we check priorities.
Also, indentation should be aligned with `return`, IMO.
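
Something along these lines, perhaps (an untested sketch, using the src_cpu
rename suggested above; the comment wording is only a suggestion):

	static inline bool _sched_asym(struct sched_domain *sd,
				       int dst_cpu, int src_cpu)
	{
		/*
		 * First check that the whole of dst_cpu's core is idle, if
		 * applicable (i.e., when not balancing among SMT siblings);
		 * only then compare CPU priorities.
		 */
		return sched_use_asym_prio(sd, dst_cpu) &&
		       sched_asym_prefer(dst_cpu, src_cpu);
	}
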
> +}
> +
> /**
> * sched_asym - Check if the destination CPU can do asym_packing load balance
> * @env: The load balancing environment
> @@ -9768,20 +9776,13 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
> static inline bool
> sched_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
> {
> - /* Ensure that the whole local core is idle, if applicable. */
> - if (!sched_use_asym_prio(env->sd, env->dst_cpu))
> - return false;
> -
> /*
> * CPU priorities does not make sense for SMT cores with more than one
> * busy sibling.
While here, it might be good to fix a syntax error above: s/does/do/.
> */
> - if (group->flags & SD_SHARE_CPUCAPACITY) {
> - if (sgs->group_weight - sgs->idle_cpus != 1)
> - return false;
> - }
> -
> - return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
> + return !(group->flags & SD_SHARE_CPUCAPACITY &&
> + sgs->group_weight - sgs->idle_cpus != 1) &&
> + _sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu);
Perhaps we can come up with a better name than _sched_asym(). After this
patch the difference between sched_asym() and _sched_asym() is that the
former considers the stats of the source group. Maybe sched_asym() can be
renamed as sched_group_asym(); it is only used in update_sg_lb_stats().
Your _sched_asym() can become sched_asym().
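
To illustrate the suggested naming (a rough sketch, untested; the bodies are
taken from this patch, with the SMT check kept as an early return):

	/* Was _sched_asym(): the plain dst_cpu vs. src_cpu check. */
	static inline bool sched_asym(struct sched_domain *sd,
				      int dst_cpu, int src_cpu)
	{
		return sched_use_asym_prio(sd, dst_cpu) &&
		       sched_asym_prefer(dst_cpu, src_cpu);
	}

	/* Was sched_asym(): additionally considers the source group's stats. */
	static inline bool
	sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs,
			 struct sched_group *group)
	{
		/*
		 * CPU priorities do not make sense for SMT cores with more
		 * than one busy sibling.
		 */
		if ((group->flags & SD_SHARE_CPUCAPACITY) &&
		    (sgs->group_weight - sgs->idle_cpus != 1))
			return false;

		return sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu);
	}
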
> }
>
> /* One group has more than one SMT CPU while the other group does not */
> @@ -11036,8 +11037,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
> * SMT cores with more than one busy sibling.
> */
> if ((env->sd->flags & SD_ASYM_PACKING) &&
> - sched_use_asym_prio(env->sd, i) &&
> - sched_asym_prefer(i, env->dst_cpu) &&
> + _sched_asym(env->sd, i, env->dst_cpu) &&
> nr_running == 1)
> continue;
>
> @@ -11907,8 +11907,7 @@ static void nohz_balancer_kick(struct rq *rq)
> * preferred CPU must be idle.
> */
> for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
> - if (sched_use_asym_prio(sd, i) &&
> - sched_asym_prefer(i, cpu)) {
> + if (_sched_asym(sd, i, cpu)) {
> flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
> goto unlock;
> }
> --
> 2.43.0
>
On 1/26/24 5:56 AM, Ricardo Neri wrote:
> On Wed, Jan 17, 2024 at 04:57:14PM +0800, alexs@kernel.org wrote:
>> From: Alex Shi <alexs@kernel.org>
>>
>> Use this function in sched_asym() and other paths to simplify the code.
>> No functional change.
>>
>> Signed-off-by: Alex Shi <alexs@kernel.org>
>> To: Valentin Schneider <vschneid@redhat.com>
>> To: Vincent Guittot <vincent.guittot@linaro.org>
>> To: Peter Zijlstra <peterz@infradead.org>
>> To: Ingo Molnar <mingo@redhat.com>
>> ---
>> kernel/sched/fair.c | 27 +++++++++++++--------------
>> 1 file changed, 13 insertions(+), 14 deletions(-)
>>
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index ebd659af2d78..96163ab69ae0 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -9745,6 +9745,14 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
>> (sd->flags & SD_SHARE_CPUCAPACITY) || is_core_idle(cpu);
>> }
>>
>> +static inline bool _sched_asym(struct sched_domain *sd,
>> + int dst_cpu, int repl_cpu)
>
> What does repl_cpu mean? Maybe rename it to src_cpu?
Right, src_cpu is better than a 'replacing' CPU. Thanks!
>
>> +{
>> + /* Ensure that the whole local core is idle, if applicable. */
>> + return sched_use_asym_prio(sd, dst_cpu) &&
>> + sched_asym_prefer(dst_cpu, repl_cpu);
>
> The comment no longer applies to the whole expression. Perhaps rewording is
> in order. First we check that the whole local core is idle, if applicable
> (i.e., when not balancing among SMT siblings). Then we check priorities.
Right, will fix this in v2.
>
> Also, indentation should be aligned with `return`, IMO.
>
>> +}
>> +
>> /**
>> * sched_asym - Check if the destination CPU can do asym_packing load balance
>> * @env: The load balancing environment
>> @@ -9768,20 +9776,13 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
>> static inline bool
>> sched_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
>> {
>> - /* Ensure that the whole local core is idle, if applicable. */
>> - if (!sched_use_asym_prio(env->sd, env->dst_cpu))
>> - return false;
>> -
>> /*
>> * CPU priorities does not make sense for SMT cores with more than one
>> * busy sibling.
>
> While here, it might be good to fix a syntax error above: s/does/do/.
>
Yes, thanks.
>> */
>> - if (group->flags & SD_SHARE_CPUCAPACITY) {
>> - if (sgs->group_weight - sgs->idle_cpus != 1)
>> - return false;
>> - }
>> -
>> - return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
>> + return !(group->flags & SD_SHARE_CPUCAPACITY &&
>> + sgs->group_weight - sgs->idle_cpus != 1) &&
>> + _sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu);
>
> Perhaps we can come up with a better name than _sched_asym(). After this
> patch the difference between sched_asym() and _sched_asym() is that the
> former considers the stats of the source group. Maybe sched_asym() can be
> renamed as sched_group_asym(); it is only used in update_sg_lb_stats().
> Your _sched_asym() can become sched_asym().
Thanks for the good suggestion! Will send v2 accordingly.
Alex
>
>> }
>>
>> /* One group has more than one SMT CPU while the other group does not */
>> @@ -11036,8 +11037,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
>> * SMT cores with more than one busy sibling.
>> */
>> if ((env->sd->flags & SD_ASYM_PACKING) &&
>> - sched_use_asym_prio(env->sd, i) &&
>> - sched_asym_prefer(i, env->dst_cpu) &&
>> + _sched_asym(env->sd, i, env->dst_cpu) &&
>> nr_running == 1)
>> continue;
>>
>> @@ -11907,8 +11907,7 @@ static void nohz_balancer_kick(struct rq *rq)
>> * preferred CPU must be idle.
>> */
>> for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
>> - if (sched_use_asym_prio(sd, i) &&
>> - sched_asym_prefer(i, cpu)) {
>> + if (_sched_asym(sd, i, cpu)) {
>> flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
>> goto unlock;
>> }
>> --
>> 2.43.0
>>