[v3,3/4] sched/core: Avoid multiple calls to update_rq_clock() in __cfsb_csd_unthrottle()

Message ID 20230515063848.77947-4-jiahao.os@bytedance.com
State New
Series Fix some warnings about rq clock

Commit Message

Hao Jia May 15, 2023, 6:38 a.m. UTC
After commit 8ad075c2eb1f ("sched: Async unthrottling for cfs
bandwidth"), we may update the rq clock multiple times in the loop of
__cfsb_csd_unthrottle(), which triggers the following warning:
------------[ cut here ]------------
rq->clock_update_flags & RQCF_UPDATED
WARNING: CPU: 54 PID: 0 at kernel/sched/core.c:741
update_rq_clock+0xaf/0x180
Call Trace:
 <TASK>
 unthrottle_cfs_rq+0x4b/0x300
 __cfsb_csd_unthrottle+0xe0/0x100
 __flush_smp_call_function_queue+0xaf/0x1d0
 flush_smp_call_function_queue+0x49/0x90
 do_idle+0x17c/0x270
 cpu_startup_entry+0x19/0x20
 start_secondary+0xfa/0x120
 secondary_startup_64_no_verify+0xce/0xdb
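
For context, the warning comes from the CONFIG_SCHED_DEBUG check in
update_rq_clock() (kernel/sched/core.c), which also returns early when
RQCF_ACT_SKIP is set. A simplified sketch of that function (details
trimmed; not the verbatim kernel source):

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_rq_held(rq);

	/* With RQCF_ACT_SKIP set, the update is skipped entirely. */
	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	/* The warning above: a second update before the flags are reset. */
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}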

Before the loop starts, update the rq clock once and call
rq_clock_start_loop_update() to prevent the rq clock from being
updated again inside the loop. After the loop, call
rq_clock_stop_loop_update() to clear RQCF_ACT_SKIP from
rq->clock_update_flags.

Fixes: 8ad075c2eb1f ("sched: Async unthrottling for cfs bandwidth")
Suggested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Hao Jia <jiahao.os@bytedance.com>
---
 kernel/sched/fair.c  |  9 +++++++++
 kernel/sched/sched.h | 21 +++++++++++++++++++++
 2 files changed, 30 insertions(+)
  

Comments

Vincent Guittot May 15, 2023, 1:30 p.m. UTC | #1
On Mon, 15 May 2023 at 08:39, Hao Jia <jiahao.os@bytedance.com> wrote:
>
> After commit 8ad075c2eb1f ("sched: Async unthrottling for cfs
> bandwidth"), we may update the rq clock multiple times in the loop of
> __cfsb_csd_unthrottle(), which triggers the following warning:
> ------------[ cut here ]------------
> rq->clock_update_flags & RQCF_UPDATED
> WARNING: CPU: 54 PID: 0 at kernel/sched/core.c:741
> update_rq_clock+0xaf/0x180
> Call Trace:
>  <TASK>
>  unthrottle_cfs_rq+0x4b/0x300
>  __cfsb_csd_unthrottle+0xe0/0x100
>  __flush_smp_call_function_queue+0xaf/0x1d0
>  flush_smp_call_function_queue+0x49/0x90
>  do_idle+0x17c/0x270
>  cpu_startup_entry+0x19/0x20
>  start_secondary+0xfa/0x120
>  secondary_startup_64_no_verify+0xce/0xdb
>
> Before the loop starts, update the rq clock once and call
> rq_clock_start_loop_update() to prevent the rq clock from being
> updated again inside the loop. After the loop, call
> rq_clock_stop_loop_update() to clear RQCF_ACT_SKIP from
> rq->clock_update_flags.
>
> Fixes: 8ad075c2eb1f ("sched: Async unthrottling for cfs bandwidth")
> Suggested-by: Vincent Guittot <vincent.guittot@linaro.org>
> Signed-off-by: Hao Jia <jiahao.os@bytedance.com>

Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>

> ---
>  kernel/sched/fair.c  |  9 +++++++++
>  kernel/sched/sched.h | 21 +++++++++++++++++++++
>  2 files changed, 30 insertions(+)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 373ff5f55884..af9604f4b135 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -5576,6 +5576,14 @@ static void __cfsb_csd_unthrottle(void *arg)
>
>         rq_lock(rq, &rf);
>
> +       /*
> +        * Iterating over the list can trigger several calls to
> +        * update_rq_clock() in unthrottle_cfs_rq().
> +        * Do it once here and skip the subsequent ones.
> +        */
> +       update_rq_clock(rq);
> +       rq_clock_start_loop_update(rq);
> +
>         /*
>          * Since we hold rq lock we're safe from concurrent manipulation of
>          * the CSD list. However, this RCU critical section annotates the
> @@ -5595,6 +5603,7 @@ static void __cfsb_csd_unthrottle(void *arg)
>
>         rcu_read_unlock();
>
> +       rq_clock_stop_loop_update(rq);
>         rq_unlock(rq, &rf);
>  }
>
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index ec7b3e0a2b20..50446e401b9f 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1546,6 +1546,27 @@ static inline void rq_clock_cancel_skipupdate(struct rq *rq)
>         rq->clock_update_flags &= ~RQCF_REQ_SKIP;
>  }
>
> +/*
> + * During cpu offlining and rq-wide unthrottling, we can trigger
> + * update_rq_clock() for several cfs and rt runqueues (typically
> + * when using list_for_each_entry_*).
> + * rq_clock_start_loop_update() can be called after updating the clock
> + * once, and before iterating over the list, to prevent multiple updates.
> + * After the iteration, call rq_clock_stop_loop_update() to clear
> + * RQCF_ACT_SKIP from rq->clock_update_flags.
> + */
> +static inline void rq_clock_start_loop_update(struct rq *rq)
> +{
> +       lockdep_assert_rq_held(rq);
> +       rq->clock_update_flags |= RQCF_ACT_SKIP;
> +}
> +
> +static inline void rq_clock_stop_loop_update(struct rq *rq)
> +{
> +       lockdep_assert_rq_held(rq);
> +       rq->clock_update_flags &= ~RQCF_ACT_SKIP;
> +}
> +
>  struct rq_flags {
>         unsigned long flags;
>         struct pin_cookie cookie;
> --
> 2.37.0
>
  

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 373ff5f55884..af9604f4b135 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5576,6 +5576,14 @@ static void __cfsb_csd_unthrottle(void *arg)
 
 	rq_lock(rq, &rf);
 
+	/*
+	 * Iterating over the list can trigger several calls to
+	 * update_rq_clock() in unthrottle_cfs_rq().
+	 * Do it once here and skip the subsequent ones.
+	 */
+	update_rq_clock(rq);
+	rq_clock_start_loop_update(rq);
+
 	/*
 	 * Since we hold rq lock we're safe from concurrent manipulation of
 	 * the CSD list. However, this RCU critical section annotates the
@@ -5595,6 +5603,7 @@  static void __cfsb_csd_unthrottle(void *arg)
 
 	rcu_read_unlock();
 
+	rq_clock_stop_loop_update(rq);
 	rq_unlock(rq, &rf);
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ec7b3e0a2b20..50446e401b9f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1546,6 +1546,27 @@ static inline void rq_clock_cancel_skipupdate(struct rq *rq)
 	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
 }
 
+/*
+ * During cpu offlining and rq-wide unthrottling, we can trigger
+ * update_rq_clock() for several cfs and rt runqueues (typically
+ * when using list_for_each_entry_*).
+ * rq_clock_start_loop_update() can be called after updating the clock
+ * once, and before iterating over the list, to prevent multiple updates.
+ * After the iteration, call rq_clock_stop_loop_update() to clear
+ * RQCF_ACT_SKIP from rq->clock_update_flags.
+ */
+static inline void rq_clock_start_loop_update(struct rq *rq)
+{
+	lockdep_assert_rq_held(rq);
+	rq->clock_update_flags |= RQCF_ACT_SKIP;
+}
+
+static inline void rq_clock_stop_loop_update(struct rq *rq)
+{
+	lockdep_assert_rq_held(rq);
+	rq->clock_update_flags &= ~RQCF_ACT_SKIP;
+}
+
 struct rq_flags {
 	unsigned long flags;
 	struct pin_cookie cookie;
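
For readers less familiar with the clock_update_flags machinery, here is
a sketch of how the new helpers compose with the existing flags from
kernel/sched/sched.h (flag values as in the mainline tree; the lifecycle
comment is illustrative and not part of the patch):

#define RQCF_REQ_SKIP	0x01	/* request to skip the next clock update */
#define RQCF_ACT_SKIP	0x02	/* clock updates are actively being skipped */
#define RQCF_UPDATED	0x04	/* debug flag: clock updated since rq_pin_lock() */

/*
 * Intended call pattern, with rq->lock held throughout:
 *
 *	update_rq_clock(rq);		// refreshes rq->clock, sets RQCF_UPDATED
 *	rq_clock_start_loop_update(rq);	// sets RQCF_ACT_SKIP
 *	list_for_each_entry(...) {
 *		...			// nested update_rq_clock() calls now
 *	}				// return early instead of warning
 *	rq_clock_stop_loop_update(rq);	// clears RQCF_ACT_SKIP so later
 *					// updates proceed normally again
 */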