[v2] sched: fix warning in bandwidth distribution

Message ID 20230922013750.874131-1-joshdon@google.com
State New
Series [v2] sched: fix warning in bandwidth distribution

Commit Message

Josh Don Sept. 22, 2023, 1:37 a.m. UTC
  We've observed the following warning being hit in
distribute_cfs_runtime():
	SCHED_WARN_ON(cfs_rq->runtime_remaining > 0)

We have the following race:

- cpu0: running bandwidth distribution (distribute_cfs_runtime).
  Inspects the local cfs_rq and makes its runtime_remaining positive.
  However, we defer unthrottling the local cfs_rq until after
  considering all remote cfs_rq's.
- cpu1: starts running bandwidth distribution from the slack timer. When
  it finds the cfs_rq for cpu 0 on the throttled list, it observes that
  the cfs_rq is throttled, yet is not on the CSD list, and has a
  positive runtime_remaining, thus triggering the warning in
  distribute_cfs_runtime.
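
Roughly, the interleaving looks like this (the rq0/cfs_rq0 names are just
for illustration):

  cpu0: distribute_cfs_runtime()
          rq_lock(rq0)
          cfs_rq0->runtime_remaining becomes positive
          rq_unlock(rq0)
          /* unthrottling of the local cfs_rq0 deferred until after the loop */

  cpu1: distribute_cfs_runtime()   /* from the slack timer */
          rq_lock(rq0)
          cfs_rq0 is still throttled, not on the CSD list, and has
          runtime_remaining > 0
          => SCHED_WARN_ON(cfs_rq->runtime_remaining > 0) fires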

To fix this, we can rework the local unthrottling logic to put the local
cfs_rq on a local list, so that any future bandwidth distributions will
realize that the cfs_rq is about to be unthrottled.

Signed-off-by: Josh Don <joshdon@google.com>
---
v2: Fix build error on !CONFIG_SMP

 kernel/sched/fair.c | 43 +++++++++++++++++++++++++++++++++----------
 1 file changed, 33 insertions(+), 10 deletions(-)
  

Comments

Ingo Molnar Sept. 22, 2023, 8:13 a.m. UTC | #1
* Josh Don <joshdon@google.com> wrote:

> We've observed the following warning being hit in
> distribute_cfs_runtime():
> 	SCHED_WARN_ON(cfs_rq->runtime_remaining > 0)
> 
> We have the following race:
> 
> - cpu0: running bandwidth distribution (distribute_cfs_runtime).
>   Inspects the local cfs_rq and makes its runtime_remaining positive.
>   However, we defer unthrottling the local cfs_rq until after
>   considering all remote cfs_rq's.
> - cpu1: starts running bandwidth distribution from the slack timer. When
>   it finds the cfs_rq for cpu 0 on the throttled list, it observes that
>   the cfs_rq is throttled, yet is not on the CSD list, and has a
>   positive runtime_remaining, thus triggering the warning in
>   distribute_cfs_runtime.
> 
> To fix this, we can rework the local unthrottling logic to put the local
> cfs_rq on a local list, so that any future bandwidth distributions will
> realize that the cfs_rq is about to be unthrottled.
> 
> Signed-off-by: Josh Don <joshdon@google.com>
> ---
> v2: Fix build error on !CONFIG_SMP
> 
>  kernel/sched/fair.c | 43 +++++++++++++++++++++++++++++++++----------
>  1 file changed, 33 insertions(+), 10 deletions(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 384900bf87eb..3d1886ea18fe 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -5743,13 +5743,16 @@ static void unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
>  
>  static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
>  {
> -	struct cfs_rq *local_unthrottle = NULL;
>  	int this_cpu = smp_processor_id();
>  	u64 runtime, remaining = 1;
>  	bool throttled = false;
>  	struct cfs_rq *cfs_rq;
>  	struct rq_flags rf;
>  	struct rq *rq;
> +#ifdef CONFIG_SMP
> +	struct cfs_rq *tmp;
> +	LIST_HEAD(local_unthrottle);
> +#endif
>  
>  	rcu_read_lock();
>  	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
> @@ -5786,11 +5789,21 @@ static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
>  
>  		/* we check whether we're throttled above */
>  		if (cfs_rq->runtime_remaining > 0) {
> -			if (cpu_of(rq) != this_cpu ||
> -			    SCHED_WARN_ON(local_unthrottle))
> +#ifdef CONFIG_SMP
> +			if (cpu_of(rq) != this_cpu) {
>  				unthrottle_cfs_rq_async(cfs_rq);
> -			else
> -				local_unthrottle = cfs_rq;
> +			} else {
> +				/*
> +				 * We currently only expect to be unthrottling
> +				 * a single cfs_rq locally.
> +				 */
> +				SCHED_WARN_ON(!list_empty(&local_unthrottle));
> +				list_add_tail(&cfs_rq->throttled_csd_list,
> +					      &local_unthrottle);
> +			}
> +#else
> +			unthrottle_cfs_rq_async(cfs_rq);
> +#endif
>  		} else {
>  			throttled = true;
>  		}
> @@ -5798,15 +5811,25 @@ static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
>  next:
>  		rq_unlock_irqrestore(rq, &rf);
>  	}
> -	rcu_read_unlock();
>  
> -	if (local_unthrottle) {
> -		rq = cpu_rq(this_cpu);
> +#ifdef CONFIG_SMP
> +	list_for_each_entry_safe(cfs_rq, tmp, &local_unthrottle,
> +				 throttled_csd_list) {
> +		struct rq *rq = rq_of(cfs_rq);
> +
>  		rq_lock_irqsave(rq, &rf);
> -		if (cfs_rq_throttled(local_unthrottle))
> -			unthrottle_cfs_rq(local_unthrottle);
> +
> +		list_del_init(&cfs_rq->throttled_csd_list);
> +
> +		if (cfs_rq_throttled(cfs_rq))
> +			unthrottle_cfs_rq(cfs_rq);
> +
>  		rq_unlock_irqrestore(rq, &rf);
>  	}
> +	SCHED_WARN_ON(!list_empty(&local_unthrottle));
> +#endif

So instead of uglifying the code with seldom-tested !CONFIG_SMP #ifdefs, 
please fold the !SMP code into SMP code: make ->throttled_csd_list 
unconditional in a preparatory patch (even if it's essentially unused on 
!SMP), then add what is basically your v1 patch as a second patch in the 
series to fix the bug.

We want to unify as much of the SMP and !SMP codepaths as possible, even 
when it causes a bit of runtime overhead for !SMP.
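
Something like the following, as a rough sketch (assuming ->throttled_csd_list
and its INIT_LIST_HEAD() currently sit under #ifdef CONFIG_SMP in struct cfs_rq
and in init_cfs_rq_runtime(); exact context may differ):

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@
 	struct list_head	throttled_list;
-#ifdef CONFIG_SMP
 	struct list_head	throttled_csd_list;
-#endif

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@
 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
-#ifdef CONFIG_SMP
 	INIT_LIST_HEAD(&cfs_rq->throttled_csd_list);
-#endif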

Thanks,

	Ingo
  
Josh Don Sept. 22, 2023, 10:58 p.m. UTC | #2
On Fri, Sep 22, 2023 at 1:13 AM Ingo Molnar <mingo@kernel.org> wrote:
>
> So instead of uglifying the code with seldom-tested !CONFIG_SMP #ifdefs,
> please fold the !SMP code into SMP code: make ->throttled_csd_list
> unconditional in a preparatory patch (even if it's essentially unused on
> !SMP), then add what is basically your v1 patch as a second patch in the
> series to fix the bug.
>
> We want to unify as much of the SMP and !SMP codepaths as possible, even
> when it casuses a bit of runtime overhead for !SMP.

Yep, makes perfect sense, I'll respin this patch.

Thanks,
Josh
  

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 384900bf87eb..3d1886ea18fe 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5743,13 +5743,16 @@  static void unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
 
 static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
 {
-	struct cfs_rq *local_unthrottle = NULL;
 	int this_cpu = smp_processor_id();
 	u64 runtime, remaining = 1;
 	bool throttled = false;
 	struct cfs_rq *cfs_rq;
 	struct rq_flags rf;
 	struct rq *rq;
+#ifdef CONFIG_SMP
+	struct cfs_rq *tmp;
+	LIST_HEAD(local_unthrottle);
+#endif
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
@@ -5786,11 +5789,21 @@  static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
 
 		/* we check whether we're throttled above */
 		if (cfs_rq->runtime_remaining > 0) {
-			if (cpu_of(rq) != this_cpu ||
-			    SCHED_WARN_ON(local_unthrottle))
+#ifdef CONFIG_SMP
+			if (cpu_of(rq) != this_cpu) {
 				unthrottle_cfs_rq_async(cfs_rq);
-			else
-				local_unthrottle = cfs_rq;
+			} else {
+				/*
+				 * We currently only expect to be unthrottling
+				 * a single cfs_rq locally.
+				 */
+				SCHED_WARN_ON(!list_empty(&local_unthrottle));
+				list_add_tail(&cfs_rq->throttled_csd_list,
+					      &local_unthrottle);
+			}
+#else
+			unthrottle_cfs_rq_async(cfs_rq);
+#endif
 		} else {
 			throttled = true;
 		}
@@ -5798,15 +5811,25 @@  static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
 next:
 		rq_unlock_irqrestore(rq, &rf);
 	}
-	rcu_read_unlock();
 
-	if (local_unthrottle) {
-		rq = cpu_rq(this_cpu);
+#ifdef CONFIG_SMP
+	list_for_each_entry_safe(cfs_rq, tmp, &local_unthrottle,
+				 throttled_csd_list) {
+		struct rq *rq = rq_of(cfs_rq);
+
 		rq_lock_irqsave(rq, &rf);
-		if (cfs_rq_throttled(local_unthrottle))
-			unthrottle_cfs_rq(local_unthrottle);
+
+		list_del_init(&cfs_rq->throttled_csd_list);
+
+		if (cfs_rq_throttled(cfs_rq))
+			unthrottle_cfs_rq(cfs_rq);
+
 		rq_unlock_irqrestore(rq, &rf);
 	}
+	SCHED_WARN_ON(!list_empty(&local_unthrottle));
+#endif
+
+	rcu_read_unlock();
 
 	return throttled;
 }