[mm-unstable,RFC,5/5] cgroup: remove cgroup_rstat_flush_atomic()

Message ID: 20230403220337.443510-6-yosryahmed@google.com
State: New
Series: cgroup: eliminate atomic rstat

Commit Message

Yosry Ahmed April 3, 2023, 10:03 p.m. UTC
Previous patches removed the only caller of cgroup_rstat_flush_atomic().
Remove the function and simplify the code.

Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
---
 include/linux/cgroup.h |  1 -
 kernel/cgroup/rstat.c  | 26 +++++---------------------
 2 files changed, 5 insertions(+), 22 deletions(-)
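
For reference, after this patch cgroup_rstat_flush() is the only
remaining flush entry point, and it may always sleep. A minimal sketch
of the resulting function, condensed from the diff below (kerneldoc
elided):

    __bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
    {
            /* The atomic variant is gone, so flushing may always sleep. */
            might_sleep();

            spin_lock_irq(&cgroup_rstat_lock);
            cgroup_rstat_flush_locked(cgrp);  /* may drop/retake the lock to yield */
            spin_unlock_irq(&cgroup_rstat_lock);
    }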
  

Comments

Shakeel Butt April 20, 2023, 7:40 p.m. UTC | #1
+Tejun


On Mon, Apr 3, 2023 at 3:03 PM Yosry Ahmed <yosryahmed@google.com> wrote:
>
> Previous patches removed the only caller of cgroup_rstat_flush_atomic().
> Remove the function and simplify the code.


I would say let the cgroup maintainers decide this; this patch can be
decoupled from the series.

  
Tejun Heo April 20, 2023, 7:48 p.m. UTC | #2
On Thu, Apr 20, 2023 at 12:40:24PM -0700, Shakeel Butt wrote:
> +Tejun
> 
> 
> On Mon, Apr 3, 2023 at 3:03 PM Yosry Ahmed <yosryahmed@google.com> wrote:
> >
> > Previous patches removed the only caller of cgroup_rstat_flush_atomic().
> > Remove the function and simplify the code.
> 
> 
> I would say let the cgroup maintainers decide this; this patch can be
> decoupled from the series.

Looks fine to me, but yeah, please cc me on the whole series from the
next round.

Thanks.
  
Yosry Ahmed April 20, 2023, 8:19 p.m. UTC | #3
On Thu, Apr 20, 2023 at 12:48 PM Tejun Heo <tj@kernel.org> wrote:
>
> On Thu, Apr 20, 2023 at 12:40:24PM -0700, Shakeel Butt wrote:
> > +Tejun
> >
> >
> > On Mon, Apr 3, 2023 at 3:03 PM Yosry Ahmed <yosryahmed@google.com> wrote:
> > >
> > > Previous patches removed the only caller of cgroup_rstat_flush_atomic().
> > > Remove the function and simplify the code.
> >
> >
> > I would say let the cgroup maintainers decide this; this patch can be
> > decoupled from the series.
>
> Looks fine to me, but yeah, please cc me on the whole series from the
> next round.


Thanks for taking a look; I don't know how I missed CC'ing you on this
RFC. If I had to guess, my initial draft did not have this patch, so I
did not include you or linux-cgroups, and then I added this patch
later. Sorry about that :)

  

Patch

diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 885f5395fcd04..567c547cf371f 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -692,7 +692,6 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
  */
 void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
 void cgroup_rstat_flush(struct cgroup *cgrp);
-void cgroup_rstat_flush_atomic(struct cgroup *cgrp);
 void cgroup_rstat_flush_hold(struct cgroup *cgrp);
 void cgroup_rstat_flush_release(void);
 
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index d3252b0416b69..f9ad33f117c82 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -171,7 +171,7 @@ __weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
 __diag_pop();
 
 /* see cgroup_rstat_flush() */
-static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
+static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
 	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
 {
 	int cpu;
@@ -207,9 +207,8 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
 		}
 		raw_spin_unlock_irqrestore(cpu_lock, flags);
 
-		/* if @may_sleep, play nice and yield if necessary */
-		if (may_sleep && (need_resched() ||
-				  spin_needbreak(&cgroup_rstat_lock))) {
+		/* play nice and yield if necessary */
+		if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
 			spin_unlock_irq(&cgroup_rstat_lock);
 			if (!cond_resched())
 				cpu_relax();
@@ -236,25 +235,10 @@ __bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
 	might_sleep();
 
 	spin_lock_irq(&cgroup_rstat_lock);
-	cgroup_rstat_flush_locked(cgrp, true);
+	cgroup_rstat_flush_locked(cgrp);
 	spin_unlock_irq(&cgroup_rstat_lock);
 }
 
-/**
- * cgroup_rstat_flush_atomic- atomic version of cgroup_rstat_flush()
- * @cgrp: target cgroup
- *
- * This function can be called from any context.
- */
-void cgroup_rstat_flush_atomic(struct cgroup *cgrp)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cgroup_rstat_lock, flags);
-	cgroup_rstat_flush_locked(cgrp, false);
-	spin_unlock_irqrestore(&cgroup_rstat_lock, flags);
-}
-
 /**
  * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
  * @cgrp: target cgroup
@@ -269,7 +253,7 @@ void cgroup_rstat_flush_hold(struct cgroup *cgrp)
 {
 	might_sleep();
 	spin_lock_irq(&cgroup_rstat_lock);
-	cgroup_rstat_flush_locked(cgrp, true);
+	cgroup_rstat_flush_locked(cgrp);
 }
 
 /**
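
As a usage note, former cgroup_rstat_flush_atomic() callers are now
expected to flush from sleepable context. A hedged sketch of the two
remaining patterns; example_flush() and example_read_stats() are
hypothetical helpers for illustration, not part of the patch:

    /* Hypothetical caller: plain flush from sleepable context.
     * cgroup_rstat_flush() calls might_sleep() and may yield
     * cgroup_rstat_lock while walking the per-CPU updated lists.
     */
    static void example_flush(struct cgroup *cgrp)
    {
            cgroup_rstat_flush(cgrp);
    }

    /* Hypothetical reader: flush, then read stats while still holding
     * cgroup_rstat_lock so no concurrent flusher runs in between.
     */
    static void example_read_stats(struct cgroup *cgrp)
    {
            cgroup_rstat_flush_hold(cgrp);     /* returns with the lock held */
            /* ... read the freshly flushed statistics ... */
            cgroup_rstat_flush_release();      /* drops cgroup_rstat_lock */
    }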