[1/2] sched/fair: Fix value reported by hot tasks pulled in /proc/schedstat

Message ID 20230614102224.12555-2-swapnil.sapkal@amd.com
State New
Series Cleanup and Fix for wrong accounting of migrated cache hot tasks

Commit Message

Swapnil Sapkal June 14, 2023, 10:22 a.m. UTC
In /proc/schedstat, lb_hot_gained reports the number of hot tasks pulled
during load balance. This value is incremented in can_migrate_task()
if the task is migratable and hot. After incrementing the value, the
load balancer can still decide not to migrate this task, leading to
wrong accounting. Fix this by incrementing the stats when hot tasks
are detached. This issue only exists in detach_tasks(), where we can
decide not to migrate a hot task even if it is migratable. However, in
detach_one_task(), we migrate it unconditionally.
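To see the skew concretely, here is a user-space toy model of the
pre-patch flow (hypothetical names, not kernel code):

#include <stdio.h>
#include <stdbool.h>

static unsigned int lb_hot_gained, detached;

/* Mirrors the pre-patch can_migrate_task(): a hot task is counted as
 * "pulled" as soon as it is deemed migratable. */
static bool can_migrate(bool hot, bool over_nice_tries)
{
	if (!hot || over_nice_tries) {
		if (hot)
			lb_hot_gained++;	/* counted here... */
		return true;
	}
	return false;
}

int main(void)
{
	/* One hot, migratable task that the load check then rejects,
	 * as detach_tasks() may do. */
	if (can_migrate(true, true)) {
		bool load_fits = false;
		if (load_fits)
			detached++;		/* ...but never detached */
	}
	printf("lb_hot_gained=%u detached=%u\n", lb_hot_gained, detached);
	return 0;
}

This prints lb_hot_gained=1 detached=0, which is exactly the mismatch
the patch removes.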

Fixes: d31980846f96 ("sched: Move up affinity check to mitigate useless redoing overhead")
Reported-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
---
 kernel/sched/fair.c | 47 +++++++++++++++++++++++++++++----------------
 1 file changed, 30 insertions(+), 17 deletions(-)
  

Comments

Peter Zijlstra June 19, 2023, 9:22 a.m. UTC | #1
On Wed, Jun 14, 2023 at 10:22:23AM +0000, Swapnil Sapkal wrote:
> In /proc/schedstat, lb_hot_gained reports the number of hot tasks pulled
> during load balance. This value is incremented in can_migrate_task()
> if the task is migratable and hot. After incrementing the value, the
> load balancer can still decide not to migrate this task, leading to
> wrong accounting. Fix this by incrementing the stats when hot tasks
> are detached. This issue only exists in detach_tasks(), where we can
> decide not to migrate a hot task even if it is migratable. However, in
> detach_one_task(), we migrate it unconditionally.
> 
> Fixes: d31980846f96 ("sched: Move up affinity check to mitigate useless redoing overhead")
> Reported-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
> Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
> ---
>  kernel/sched/fair.c | 47 +++++++++++++++++++++++++++++----------------
>  1 file changed, 30 insertions(+), 17 deletions(-)

All this for just a number hardly anybody looks at :-(

Does this also work?

Please double check the order of the task_struct::sched_bitfield thing,
I've not had much wake-up juice.
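
To spell the worry out: bitfields sharing a machine word are updated
with plain read-modify-write, so a bit must sit in a group whose
writers are all serialized the same way. The group above the
'unsigned :0' is written under scheduler locks, and sched_task_hot is
only set/cleared with the src_rq lock held (both sites below assert
it), so placing it there, rather than after the ':0' with the
unserialized strictly-'current' bits, should be safe. A user-space toy
of the lost-update hazard (hypothetical, not kernel code; build with
cc -pthread):

#include <pthread.h>
#include <stdio.h>

struct flags {
	unsigned a:1;
	unsigned b:1;	/* shares a word with 'a': updates are RMW */
};

static struct flags f;
static volatile int done;

static void *flip_b(void *arg)
{
	while (!done)
		f.b ^= 1;	/* read word, flip bit, write word back */
	return NULL;
}

int main(void)
{
	unsigned long lost = 0;
	pthread_t t;

	pthread_create(&t, NULL, flip_b, NULL);
	for (int i = 0; i < 10000000; i++) {
		f.a = 1;
		if (f.a != 1)	/* flip_b()'s write-back clobbered us */
			lost++;
		f.a = 0;
	}
	done = 1;
	pthread_join(t, NULL);
	printf("lost updates to f.a: %lu\n", lost);	/* usually nonzero */
	return 0;
}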

---
 include/linux/sched.h |  1 +
 kernel/sched/fair.c   | 14 ++++++++++----
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1292d38d66cc..eba0a78ac2a9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -887,6 +887,7 @@ struct task_struct {
 	unsigned			sched_reset_on_fork:1;
 	unsigned			sched_contributes_to_load:1;
 	unsigned			sched_migrated:1;
+	unsigned			sched_task_hot:1;
 
 	/* Force alignment to the next boundary: */
 	unsigned			:0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6189d1a45635..a88577132b20 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8569,6 +8569,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	int tsk_cache_hot;
 
 	lockdep_assert_rq_held(env->src_rq);
+	if (p->sched_task_hot)
+		p->sched_task_hot = 0;
 
 	/*
 	 * We do not migrate tasks that are:
@@ -8641,10 +8643,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 
 	if (tsk_cache_hot <= 0 ||
 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
-		if (tsk_cache_hot == 1) {
-			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
-			schedstat_inc(p->stats.nr_forced_migrations);
-		}
+		if (tsk_cache_hot == 1)
+			p->sched_task_hot = 1;
 		return 1;
 	}
 
@@ -8659,6 +8659,12 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
 {
 	lockdep_assert_rq_held(env->src_rq);
 
+	if (p->sched_task_hot) {
+		p->sched_task_hot = 0;
+		schedstat_inc(env->sd->lb_hot_gained[env->idle]);
+		schedstat_inc(p->stats.nr_forced_migrations);
+	}
+
 	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
 	set_task_cpu(p, env->dst_cpu);
 }
  
Swapnil Sapkal June 21, 2023, 4:38 a.m. UTC | #2
Hello Peter,

On 6/19/2023 2:52 PM, Peter Zijlstra wrote:
> On Wed, Jun 14, 2023 at 10:22:23AM +0000, Swapnil Sapkal wrote:
>> In /proc/schedstat, lb_hot_gained reports the number of hot tasks pulled
>> during load balance. This value is incremented in can_migrate_task()
>> if the task is migratable and hot. After incrementing the value, the
>> load balancer can still decide not to migrate this task, leading to
>> wrong accounting. Fix this by incrementing the stats when hot tasks
>> are detached. This issue only exists in detach_tasks(), where we can
>> decide not to migrate a hot task even if it is migratable. However, in
>> detach_one_task(), we migrate it unconditionally.
>>
>> Fixes: d31980846f96 ("sched: Move up affinity check to mitigate useless redoing overhead")
>> Reported-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
>> Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
>> ---
>>   kernel/sched/fair.c | 47 +++++++++++++++++++++++++++++----------------
>>   1 file changed, 30 insertions(+), 17 deletions(-)
> 
> All this for just a number hardly anybody looks at :-(
> 
> Does this also work?

Thank you for this patch; it looks much simpler.
This will work. In addition, we need to handle the following case: when the
task is hot and migratable, but detach_tasks() decides not to migrate it, we
should increment nr_failed_migrations_hot. I am appending a hunk for this at
the end of this mail.

> 
> Please double check the order of the task_struct::sched_bitfield thing,
> I've not had much wake-up juice.
> 
> ---
>   include/linux/sched.h |  1 +
>   kernel/sched/fair.c   | 14 ++++++++++----
>   2 files changed, 11 insertions(+), 4 deletions(-)
> 
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 1292d38d66cc..eba0a78ac2a9 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -887,6 +887,7 @@ struct task_struct {
>   	unsigned			sched_reset_on_fork:1;
>   	unsigned			sched_contributes_to_load:1;
>   	unsigned			sched_migrated:1;
> +	unsigned			sched_task_hot:1;
>   
>   	/* Force alignment to the next boundary: */
>   	unsigned			:0;
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 6189d1a45635..a88577132b20 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -8569,6 +8569,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
>   	int tsk_cache_hot;
>   
>   	lockdep_assert_rq_held(env->src_rq);
> +	if (p->sched_task_hot)
> +		p->sched_task_hot = 0;
>   
>   	/*
>   	 * We do not migrate tasks that are:
> @@ -8641,10 +8643,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
>   
>   	if (tsk_cache_hot <= 0 ||
>   	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
> -		if (tsk_cache_hot == 1) {
> -			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
> -			schedstat_inc(p->stats.nr_forced_migrations);
> -		}
> +		if (tsk_cache_hot == 1)
> +			p->sched_task_hot = 1;
>   		return 1;
>   	}
>   
> @@ -8659,6 +8659,12 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
>   {
>   	lockdep_assert_rq_held(env->src_rq);
>   
> +	if (p->sched_task_hot) {
> +		p->sched_task_hot = 0;
> +		schedstat_inc(env->sd->lb_hot_gained[env->idle]);
> +		schedstat_inc(p->stats.nr_forced_migrations);
> +	}
> +
>   	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
>   	set_task_cpu(p, env->dst_cpu);
>   }
---
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b6a738514047..eb836629560f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8772,6 +8772,9 @@ static int detach_tasks(struct lb_env *env)
 
 		continue;
 next:
+		if (p->sched_task_hot)
+			schedstat_inc(p->stats.nr_failed_migrations_hot);
+
 		list_move(&p->se.group_node, tasks);
 	}
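
For completeness, the counters can be watched from user space. Each
domain line in /proc/schedstat carries eight load_balance() fields per
idle type, printed in enum cpu_idle_type order (CPU_IDLE first in
kernels of this vintage) with lb_hot_gained sixth in each group; see
Documentation/scheduler/sched-stats.rst, version 15. A minimal reader
for the CPU_IDLE group, assuming that layout:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/schedstat", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		char name[32], mask[128];
		unsigned int lb[8];	/* lb_count .. lb_nobusyg */

		if (sscanf(line, "%31s %127s %u %u %u %u %u %u %u %u",
			   name, mask, &lb[0], &lb[1], &lb[2], &lb[3],
			   &lb[4], &lb[5], &lb[6], &lb[7]) == 10 &&
		    !strncmp(name, "domain", 6))
			printf("%s lb_hot_gained[CPU_IDLE]=%u\n", name, lb[5]);
	}
	fclose(f);
	return 0;
}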
  

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 373ff5f55884..9a8e5dcbe7e6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8507,9 +8507,9 @@ static inline int migrate_degrades_locality(struct task_struct *p,
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
 static
-int can_migrate_task(struct task_struct *p, struct lb_env *env)
+int can_migrate_task(struct task_struct *p, struct lb_env *env, int *tsk_cache_hot)
 {
-	int tsk_cache_hot;
+	int degrades_locality;
 
 	lockdep_assert_rq_held(env->src_rq);
 
@@ -8578,18 +8578,19 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	if (env->flags & LBF_ACTIVE_LB)
 		return 1;
 
-	tsk_cache_hot = migrate_degrades_locality(p, env);
-	if (tsk_cache_hot == -1)
-		tsk_cache_hot = task_hot(p, env);
+	degrades_locality = migrate_degrades_locality(p, env);
+	if (degrades_locality == -1)
+		*tsk_cache_hot = task_hot(p, env);
+	else
+		*tsk_cache_hot = degrades_locality;
 
-	if (tsk_cache_hot <= 0 ||
-	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
-		if (tsk_cache_hot == 1) {
-			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
-			schedstat_inc(p->stats.nr_forced_migrations);
-		}
+	/*
+	 * Can migrate a hot task only after the attempts to reach balance
+	 * without the task have exceeded the cache_nice_tries threshold.
+	 */
+	if (!(*tsk_cache_hot) ||
+		env->sd->nr_balance_failed > env->sd->cache_nice_tries)
 		return 1;
-	}
 
 	schedstat_inc(p->stats.nr_failed_migrations_hot);
 	return 0;
@@ -8598,10 +8599,15 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 /*
  * detach_task() -- detach the task for the migration specified in env
  */
-static void detach_task(struct task_struct *p, struct lb_env *env)
+static void detach_task(struct task_struct *p, struct lb_env *env, int tsk_cache_hot)
 {
 	lockdep_assert_rq_held(env->src_rq);
 
+	if (tsk_cache_hot == 1) {
+		schedstat_inc(env->sd->lb_hot_gained[env->idle]);
+		schedstat_inc(p->stats.nr_forced_migrations);
+	}
+
 	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
 	set_task_cpu(p, env->dst_cpu);
 }
@@ -8620,10 +8626,12 @@ static struct task_struct *detach_one_task(struct lb_env *env)
 
 	list_for_each_entry_reverse(p,
 			&env->src_rq->cfs_tasks, se.group_node) {
-		if (!can_migrate_task(p, env))
+		int tsk_cache_hot = 0;
+
+		if (!can_migrate_task(p, env, &tsk_cache_hot))
 			continue;
 
-		detach_task(p, env);
+		detach_task(p, env, tsk_cache_hot);
 
 		/*
 		 * Right now, this is only the second place where
@@ -8665,6 +8673,8 @@ static int detach_tasks(struct lb_env *env)
 		return 0;
 
 	while (!list_empty(tasks)) {
+		int tsk_cache_hot = 0;
+
 		/*
 		 * We don't want to steal all, otherwise we may be treated likewise,
 		 * which could at worst lead to a livelock crash.
@@ -8690,7 +8700,7 @@ static int detach_tasks(struct lb_env *env)
 
 		p = list_last_entry(tasks, struct task_struct, se.group_node);
 
-		if (!can_migrate_task(p, env))
+		if (!can_migrate_task(p, env, &tsk_cache_hot))
 			goto next;
 
 		switch (env->migration_type) {
@@ -8742,7 +8752,7 @@ static int detach_tasks(struct lb_env *env)
 			break;
 		}
 
-		detach_task(p, env);
+		detach_task(p, env, tsk_cache_hot);
 		list_add(&p->se.group_node, &env->tasks);
 
 		detached++;
@@ -8766,6 +8776,9 @@ static int detach_tasks(struct lb_env *env)
 
 		continue;
 next:
+		if (tsk_cache_hot == 1)
+			schedstat_inc(p->stats.nr_failed_migrations_hot);
+
 		list_move(&p->se.group_node, tasks);
 	}