[v3,1/3] memcontrol: add helpers for hugetlb memcg accounting
Commit Message
This patch exposes charge committing and cancelling as parts of the
memory controller interface. These functionalities are useful when the
try_charge() and commit_charge() stages have to be separated by other
actions in between (which can fail). One such example is the new hugetlb
accounting behavior in the following patch.
The patch also adds a helper function to obtain a reference to the
current task's memcg.
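
To illustrate the intended call pattern, here is a rough, illustrative sketch
(not part of this patch) of a caller that has a fallible step between charging
and committing. hugetlb_try_charge_placeholder() is a hypothetical stand-in for
an exported try-charge entry point (the following patch adds the real one for
hugetlb), and folio_alloc() simply plays the role of the action in between that
can fail:

    #include <linux/gfp.h>
    #include <linux/memcontrol.h>

    /*
     * Illustrative only: charge first, allocate second, then either commit
     * the charge to the new folio or cancel it if the allocation failed.
     */
    static struct folio *charge_then_alloc_sketch(gfp_t gfp, unsigned int order)
    {
            unsigned int nr_pages = 1U << order;
            struct mem_cgroup *memcg;
            struct folio *folio;

            /* Reference on the current task's memcg; NULL if memcg is disabled. */
            memcg = get_mem_cgroup_from_current();

            /* Hypothetical exported try-charge entry point (see next patch). */
            if (memcg && hugetlb_try_charge_placeholder(memcg, gfp, nr_pages)) {
                    mem_cgroup_put(memcg);
                    return NULL;    /* charge failed, nothing to undo */
            }

            /* The action in between that can fail. */
            folio = folio_alloc(gfp, order);
            if (!folio) {
                    if (memcg)
                            mem_cgroup_cancel_charge(memcg, nr_pages);
                    mem_cgroup_put(memcg);
                    return NULL;
            }

            /* Bind the charge to the folio and update memcg statistics. */
            if (memcg)
                    mem_cgroup_commit_charge(folio, memcg);
            mem_cgroup_put(memcg);
            return folio;
    }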
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
 include/linux/memcontrol.h | 21 ++++++++++++++
 mm/memcontrol.c            | 59 ++++++++++++++++++++++++++++++--------
 2 files changed, 68 insertions(+), 12 deletions(-)
Comments
On Mon 02-10-23 17:18:26, Nhat Pham wrote:
> This patch exposes charge committing and cancelling as parts of the
> memory controller interface. These functionalities are useful when the
> try_charge() and commit_charge() stages have to be separated by other
> actions in between (which can fail). One such example is the new hugetlb
> accounting behavior in the following patch.
>
> The patch also adds a helper function to obtain a reference to the
> current task's memcg.
>
> Signed-off-by: Nhat Pham <nphamcs@gmail.com>
OK, makes sense.
Acked-by: Michal Hocko <mhocko@suse.com>
Usually we prefer to have newly introduced functions along with their
users but I do understand that this split just makes it easier to review
the main patch for the feature.
> ---
>  include/linux/memcontrol.h | 21 ++++++++++++++
>  mm/memcontrol.c            | 59 ++++++++++++++++++++++++++++++--------
>  2 files changed, 68 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index e0cfab58ab71..42bf7e9b1a2f 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -653,6 +653,8 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
> page_counter_read(&memcg->memory);
> }
>
> +void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
> +
> int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
>
> /**
> @@ -704,6 +706,8 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
> __mem_cgroup_uncharge_list(page_list);
> }
>
> +void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
> +
> void mem_cgroup_migrate(struct folio *old, struct folio *new);
>
> /**
> @@ -760,6 +764,8 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
>
> struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
>
> +struct mem_cgroup *get_mem_cgroup_from_current(void);
> +
> struct lruvec *folio_lruvec_lock(struct folio *folio);
> struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
> struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
> @@ -1245,6 +1251,11 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
> return false;
> }
>
> +static inline void mem_cgroup_commit_charge(struct folio *folio,
> + struct mem_cgroup *memcg)
> +{
> +}
> +
> static inline int mem_cgroup_charge(struct folio *folio,
> struct mm_struct *mm, gfp_t gfp)
> {
> @@ -1269,6 +1280,11 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
> {
> }
>
> +static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
> + unsigned int nr_pages)
> +{
> +}
> +
> static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
> {
> }
> @@ -1306,6 +1322,11 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
> return NULL;
> }
>
> +static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
> +{
> + return NULL;
> +}
> +
> static inline
> struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
> {
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index d1a322a75172..0219befeae38 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1086,6 +1086,27 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
> }
> EXPORT_SYMBOL(get_mem_cgroup_from_mm);
>
> +/**
> + * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
> + */
> +struct mem_cgroup *get_mem_cgroup_from_current(void)
> +{
> + struct mem_cgroup *memcg;
> +
> + if (mem_cgroup_disabled())
> + return NULL;
> +
> +again:
> + rcu_read_lock();
> + memcg = mem_cgroup_from_task(current);
> + if (!css_tryget(&memcg->css)) {
> + rcu_read_unlock();
> + goto again;
> + }
> + rcu_read_unlock();
> + return memcg;
> +}
> +
> static __always_inline bool memcg_kmem_bypass(void)
> {
> /* Allow remote memcg charging from any context. */
> @@ -2873,7 +2894,12 @@ static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
> return try_charge_memcg(memcg, gfp_mask, nr_pages);
> }
>
> -static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
> +/**
> + * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
> + * @memcg: memcg previously charged.
> + * @nr_pages: number of pages previously charged.
> + */
> +void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
> {
> if (mem_cgroup_is_root(memcg))
> return;
> @@ -2898,6 +2924,22 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
> folio->memcg_data = (unsigned long)memcg;
> }
>
> +/**
> + * mem_cgroup_commit_charge - commit a previously successful try_charge().
> + * @folio: folio to commit the charge to.
> + * @memcg: memcg previously charged.
> + */
> +void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
> +{
> + css_get(&memcg->css);
> + commit_charge(folio, memcg);
> +
> + local_irq_disable();
> + mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
> + memcg_check_events(memcg, folio_nid(folio));
> + local_irq_enable();
> +}
> +
> #ifdef CONFIG_MEMCG_KMEM
> /*
> * The allocated objcg pointers array is not accounted directly.
> @@ -6105,7 +6147,7 @@ static void __mem_cgroup_clear_mc(void)
>
> /* we must uncharge all the leftover precharges from mc.to */
> if (mc.precharge) {
> - cancel_charge(mc.to, mc.precharge);
> + mem_cgroup_cancel_charge(mc.to, mc.precharge);
> mc.precharge = 0;
> }
> /*
> @@ -6113,7 +6155,7 @@ static void __mem_cgroup_clear_mc(void)
> * we must uncharge here.
> */
> if (mc.moved_charge) {
> - cancel_charge(mc.from, mc.moved_charge);
> + mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
> mc.moved_charge = 0;
> }
> /* we must fixup refcnts and charges */
> @@ -7020,20 +7062,13 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root,
> static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
> gfp_t gfp)
> {
> - long nr_pages = folio_nr_pages(folio);
> int ret;
>
> - ret = try_charge(memcg, gfp, nr_pages);
> + ret = try_charge(memcg, gfp, folio_nr_pages(folio));
> if (ret)
> goto out;
>
> - css_get(&memcg->css);
> - commit_charge(folio, memcg);
> -
> - local_irq_disable();
> - mem_cgroup_charge_statistics(memcg, nr_pages);
> - memcg_check_events(memcg, folio_nid(folio));
> - local_irq_enable();
> + mem_cgroup_commit_charge(folio, memcg);
> out:
> return ret;
> }
> --
> 2.34.1
On Mon, Oct 02, 2023 at 05:18:26PM -0700, Nhat Pham wrote:
> This patch exposes charge committing and cancelling as parts of the
> memory controller interface. These functionalities are useful when the
> try_charge() and commit_charge() stages have to be separated by other
> actions in between (which can fail). One such example is the new hugetlb
> accounting behavior in the following patch.
>
> The patch also adds a helper function to obtain a reference to the
> current task's memcg.
>
> Signed-off-by: Nhat Pham <nphamcs@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>