[mm-unstable,v1] mm/mglru: make memcg_lru->lock irq safe

Message ID 20230619193821.2710944-1-yuzhao@google.com
State New
Series [mm-unstable,v1] mm/mglru: make memcg_lru->lock irq safe

Commit Message

Yu Zhao June 19, 2023, 7:38 p.m. UTC
lru_gen_rotate_memcg() can be called from softirq context when
memory.soft_limit_in_bytes is set. This requires memcg_lru->lock to be
irq safe.

This problem only affects memcg v1.

Reported-by: syzbot+87c490fd2be656269b6a@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=87c490fd2be656269b6a
Fixes: e4dde56cd208 ("mm: multi-gen LRU: per-node lru_gen_folio lists")
Signed-off-by: Yu Zhao <yuzhao@google.com>
---
 mm/vmscan.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
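
For context, the rule being applied here is the usual one for a spinlock that can be taken from both process context and softirq context on the same CPU: the critical section must run with IRQs disabled, otherwise a softirq that interrupts the lock holder and tries to take the same lock will spin forever. Below is a minimal, hypothetical sketch of the two flavors the patch uses; the names example_lock, example_rotate and example_online are made up for illustration and are not the mm/vmscan.c code.

/* Hypothetical sketch of the locking rules applied by this patch. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* stands in for pgdat->memcg_lru.lock */

/*
 * Reachable from softirq as well as process context (like
 * lru_gen_rotate_memcg()): the caller's IRQ state is unknown, so the
 * flags must be saved and restored.
 */
static void example_rotate(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... move the entry to another generation/bin ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/*
 * Only reached from process context with IRQs enabled, as the patch's
 * use of spin_lock_irq() in lru_gen_online_memcg() and
 * lru_gen_release_memcg() implies: plain _irq is sufficient and avoids
 * saving/restoring flags.
 */
static void example_online(void)
{
	spin_lock_irq(&example_lock);
	/* ... add or remove the entry ... */
	spin_unlock_irq(&example_lock);
}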
  

Comments

Yosry Ahmed June 20, 2023, 8:19 p.m. UTC | #1
On Mon, Jun 19, 2023 at 12:38 PM Yu Zhao <yuzhao@google.com> wrote:
>
> lru_gen_rotate_memcg() can be called from softirq context when
> memory.soft_limit_in_bytes is set. This requires memcg_lru->lock to be
> irq safe.
>
> This problem only affects memcg v1.
>
> Reported-by: syzbot+87c490fd2be656269b6a@syzkaller.appspotmail.com
> Closes: https://syzkaller.appspot.com/bug?extid=87c490fd2be656269b6a
> Fixes: e4dde56cd208 ("mm: multi-gen LRU: per-node lru_gen_folio lists")
> Signed-off-by: Yu Zhao <yuzhao@google.com>
> ---
>  mm/vmscan.c | 13 +++++++------
>  1 file changed, 7 insertions(+), 6 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 45d17c7cc555..27f90896f789 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -4759,10 +4759,11 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
>  {
>         int seg;
>         int old, new;
> +       unsigned long flags;
>         int bin = get_random_u32_below(MEMCG_NR_BINS);
>         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
>
> -       spin_lock(&pgdat->memcg_lru.lock);
> +       spin_lock_irqsave(&pgdat->memcg_lru.lock, flags);

Nit: I think it might be useful to add a comment here that this is
needed due to the call path from memcg_check_events() ->
mem_cgroup_update_tree() -- so that if that call path changes we can
come back and remove the irq-disablement.

FWIW:
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>

>
>         VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
>
> @@ -4797,7 +4798,7 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
>         if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
>                 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
>
> -       spin_unlock(&pgdat->memcg_lru.lock);
> +       spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
>  }
>
>  void lru_gen_online_memcg(struct mem_cgroup *memcg)
> @@ -4810,7 +4811,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg)
>                 struct pglist_data *pgdat = NODE_DATA(nid);
>                 struct lruvec *lruvec = get_lruvec(memcg, nid);
>
> -               spin_lock(&pgdat->memcg_lru.lock);
> +               spin_lock_irq(&pgdat->memcg_lru.lock);
>
>                 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));
>
> @@ -4821,7 +4822,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg)
>
>                 lruvec->lrugen.gen = gen;
>
> -               spin_unlock(&pgdat->memcg_lru.lock);
> +               spin_unlock_irq(&pgdat->memcg_lru.lock);
>         }
>  }
>
> @@ -4845,7 +4846,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg)
>                 struct pglist_data *pgdat = NODE_DATA(nid);
>                 struct lruvec *lruvec = get_lruvec(memcg, nid);
>
> -               spin_lock(&pgdat->memcg_lru.lock);
> +               spin_lock_irq(&pgdat->memcg_lru.lock);
>
>                 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
>
> @@ -4857,7 +4858,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg)
>                 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
>                         WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
>
> -               spin_unlock(&pgdat->memcg_lru.lock);
> +               spin_unlock_irq(&pgdat->memcg_lru.lock);
>         }
>  }
>
> --
> 2.41.0.185.g7c58973941-goog
>
>
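
Regarding Yosry's nit above, one way such a comment could read at the lru_gen_rotate_memcg() lock site is sketched below; the wording is purely illustrative and is not part of the posted patch.

	/*
	 * Can be reached from softirq context via memcg_check_events() ->
	 * mem_cgroup_update_tree() when memory.soft_limit_in_bytes is set
	 * (memcg v1 only). If that call path ever goes away, this can be
	 * relaxed back to a plain spin_lock().
	 */
	spin_lock_irqsave(&pgdat->memcg_lru.lock, flags);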
  

Patch

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 45d17c7cc555..27f90896f789 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4759,10 +4759,11 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
 {
 	int seg;
 	int old, new;
+	unsigned long flags;
 	int bin = get_random_u32_below(MEMCG_NR_BINS);
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 
-	spin_lock(&pgdat->memcg_lru.lock);
+	spin_lock_irqsave(&pgdat->memcg_lru.lock, flags);
 
 	VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
 
@@ -4797,7 +4798,7 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
 	if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
 		WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
 
-	spin_unlock(&pgdat->memcg_lru.lock);
+	spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
 }
 
 void lru_gen_online_memcg(struct mem_cgroup *memcg)
@@ -4810,7 +4811,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg)
 		struct pglist_data *pgdat = NODE_DATA(nid);
 		struct lruvec *lruvec = get_lruvec(memcg, nid);
 
-		spin_lock(&pgdat->memcg_lru.lock);
+		spin_lock_irq(&pgdat->memcg_lru.lock);
 
 		VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));
 
@@ -4821,7 +4822,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg)
 
 		lruvec->lrugen.gen = gen;
 
-		spin_unlock(&pgdat->memcg_lru.lock);
+		spin_unlock_irq(&pgdat->memcg_lru.lock);
 	}
 }
 
@@ -4845,7 +4846,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg)
 		struct pglist_data *pgdat = NODE_DATA(nid);
 		struct lruvec *lruvec = get_lruvec(memcg, nid);
 
-		spin_lock(&pgdat->memcg_lru.lock);
+		spin_lock_irq(&pgdat->memcg_lru.lock);
 
 		VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
 
@@ -4857,7 +4858,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg)
 		if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
 			WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
 
-		spin_unlock(&pgdat->memcg_lru.lock);
+		spin_unlock_irq(&pgdat->memcg_lru.lock);
 	}
 }