[v3,5/5] percpu: scoped objcg protection

Message ID 20231016221900.4031141-6-roman.gushchin@linux.dev
State New
Headers
Series mm: improve performance of accounted kernel memory allocations

Commit Message

Roman Gushchin Oct. 16, 2023, 10:19 p.m. UTC
  Similar to slab and kmem, switch to a scope-based protection of the
objcg pointer to avoid the overhead of taking and releasing an objcg
reference on every percpu allocation and release.

Signed-off-by: Roman Gushchin (Cruise) <roman.gushchin@linux.dev>
Tested-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Acked-by: Shakeel Butt <shakeelb@google.com>
---
 mm/percpu.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)
  

Comments

Vlastimil Babka Oct. 18, 2023, 2:23 p.m. UTC | #1
On 10/17/23 00:19, Roman Gushchin wrote:
> Similar to slab and kmem, switch to a scope-based protection of the
> objcg pointer to avoid.
> 
> Signed-off-by: Roman Gushchin (Cruise) <roman.gushchin@linux.dev>
> Tested-by: Naresh Kamboju <naresh.kamboju@linaro.org>
> Acked-by: Shakeel Butt <shakeelb@google.com>

Reviewed-by: Vlastimil Babka <vbabka@suse.cz>

Do you plan to convert also the bpf users of get_obj_cgroup_from_current()
so it could be removed?

Thanks!

> ---
>  mm/percpu.c | 8 +++-----
>  1 file changed, 3 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/percpu.c b/mm/percpu.c
> index a7665de8485f..f53ba692d67a 100644
> --- a/mm/percpu.c
> +++ b/mm/percpu.c
> @@ -1628,14 +1628,12 @@ static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
>  	if (!memcg_kmem_online() || !(gfp & __GFP_ACCOUNT))
>  		return true;
>  
> -	objcg = get_obj_cgroup_from_current();
> +	objcg = current_obj_cgroup();
>  	if (!objcg)
>  		return true;
>  
> -	if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size))) {
> -		obj_cgroup_put(objcg);
> +	if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size)))
>  		return false;
> -	}
>  
>  	*objcgp = objcg;
>  	return true;
> @@ -1649,6 +1647,7 @@ static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
>  		return;
>  
>  	if (likely(chunk && chunk->obj_cgroups)) {
> +		obj_cgroup_get(objcg);
>  		chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
>  
>  		rcu_read_lock();
> @@ -1657,7 +1656,6 @@ static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
>  		rcu_read_unlock();
>  	} else {
>  		obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
> -		obj_cgroup_put(objcg);
>  	}
>  }
>
  

Patch

diff --git a/mm/percpu.c b/mm/percpu.c
index a7665de8485f..f53ba692d67a 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1628,14 +1628,12 @@  static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
 	if (!memcg_kmem_online() || !(gfp & __GFP_ACCOUNT))
 		return true;
 
-	objcg = get_obj_cgroup_from_current();
+	objcg = current_obj_cgroup();
 	if (!objcg)
 		return true;
 
-	if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size))) {
-		obj_cgroup_put(objcg);
+	if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size)))
 		return false;
-	}
 
 	*objcgp = objcg;
 	return true;
@@ -1649,6 +1647,7 @@  static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
 		return;
 
 	if (likely(chunk && chunk->obj_cgroups)) {
+		obj_cgroup_get(objcg);
 		chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
 
 		rcu_read_lock();
@@ -1657,7 +1656,6 @@  static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
 		rcu_read_unlock();
 	} else {
 		obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
-		obj_cgroup_put(objcg);
 	}
 }