[mm-unstable,3/4] mm: multi-gen LRU: add helpers in page table walks
Commit Message
Add helpers to page table walking code:
- Clarifies intent via the names "should_walk_mmu" and "should_clear_pmd_young"
- Avoids repeating the same logic in two places
Signed-off-by: T.J. Alumbaugh <talumbau@google.com>
---
mm/vmscan.c | 20 +++++++++++++++-----
1 file changed, 15 insertions(+), 5 deletions(-)
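To make the consolidation easy to follow, here is a sketch restating the diff below, using only names that already appear in it: each helper folds the architecture capability check and the corresponding runtime cap into a single predicate, so positive call sites become one call, and the negated call site in try_to_inc_max_seq() is an exact replacement by De Morgan's law, !(A && B) == (!A || !B).

	/* helper: should the aging walk page tables? (HW sets the PTE
	 * accessed bit and the MM walk cap is enabled)
	 */
	static bool should_walk_mmu(void)
	{
		return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK);
	}

	/* positive call site, e.g. enabled_show(): */
	if (should_walk_mmu())
		caps |= BIT(LRU_GEN_MM_WALK);

	/* negated call site in try_to_inc_max_seq(), equivalent by De Morgan: */
	if (!should_walk_mmu()) {
		success = iterate_mm_list_nowalk(lruvec, max_seq);
		goto done;
	}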
Comments
On Mon, May 22, 2023 at 7:21 PM T.J. Alumbaugh <talumbau@google.com> wrote:
>
> Add helpers to page table walking code:
> - Clarifies intent via name "should_walk_mmu" and "should_clear_pmd_young"
I wonder if these should be called "can_walk_mmu" and
"can_clear_pmd_young", but that's rather minor.
> - Avoids repeating same logic in two places
>
> Signed-off-by: T.J. Alumbaugh <talumbau@google.com>
> ---
> mm/vmscan.c | 20 +++++++++++++++-----
> 1 file changed, 15 insertions(+), 5 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index e088db138f5f..ad0f589d32e6 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -3234,6 +3234,16 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
> #define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap])
> #endif
>
> +static bool should_walk_mmu(void)
> +{
> + return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK);
> +}
> +
> +static bool should_clear_pmd_young(void)
> +{
> + return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG);
> +}
> +
> /******************************************************************************
> * shorthand helpers
> ******************************************************************************/
> @@ -4098,7 +4108,7 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area
> goto next;
>
> if (!pmd_trans_huge(pmd[i])) {
> - if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG))
> + if (should_clear_pmd_young())
> pmdp_test_and_clear_young(vma, addr, pmd + i);
> goto next;
> }
> @@ -4191,7 +4201,7 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
> #endif
> walk->mm_stats[MM_NONLEAF_TOTAL]++;
>
> - if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG)) {
> + if (should_clear_pmd_young()) {
> if (!pmd_young(val))
> continue;
>
> @@ -4493,7 +4503,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
> * handful of PTEs. Spreading the work out over a period of time usually
> * is less efficient, but it avoids bursty page faults.
> */
> - if (!arch_has_hw_pte_young() || !get_cap(LRU_GEN_MM_WALK)) {
> + if (!should_walk_mmu()) {
> success = iterate_mm_list_nowalk(lruvec, max_seq);
> goto done;
> }
> @@ -5730,10 +5740,10 @@ static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, c
> if (get_cap(LRU_GEN_CORE))
> caps |= BIT(LRU_GEN_CORE);
>
> - if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))
> + if (should_walk_mmu())
> caps |= BIT(LRU_GEN_MM_WALK);
>
> - if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG))
> + if (should_clear_pmd_young())
> caps |= BIT(LRU_GEN_NONLEAF_YOUNG);
>
> return sysfs_emit(buf, "0x%04x\n", caps);
> --
> 2.40.1.698.g37aff9b760-goog
>
Other than that,
Reviewed-by: Yuanchu Xie <yuanchu@google.com>
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e088db138f5f..ad0f589d32e6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3234,6 +3234,16 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
 #define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap])
 #endif
 
+static bool should_walk_mmu(void)
+{
+	return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK);
+}
+
+static bool should_clear_pmd_young(void)
+{
+	return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG);
+}
+
 /******************************************************************************
  * shorthand helpers
  ******************************************************************************/
@@ -4098,7 +4108,7 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area
 			goto next;
 
 		if (!pmd_trans_huge(pmd[i])) {
-			if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG))
+			if (should_clear_pmd_young())
 				pmdp_test_and_clear_young(vma, addr, pmd + i);
 			goto next;
 		}
@@ -4191,7 +4201,7 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
 #endif
 		walk->mm_stats[MM_NONLEAF_TOTAL]++;
 
-		if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG)) {
+		if (should_clear_pmd_young()) {
 			if (!pmd_young(val))
 				continue;
 
@@ -4493,7 +4503,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
 	 * handful of PTEs. Spreading the work out over a period of time usually
 	 * is less efficient, but it avoids bursty page faults.
 	 */
-	if (!arch_has_hw_pte_young() || !get_cap(LRU_GEN_MM_WALK)) {
+	if (!should_walk_mmu()) {
 		success = iterate_mm_list_nowalk(lruvec, max_seq);
 		goto done;
 	}
@@ -5730,10 +5740,10 @@ static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, c
 	if (get_cap(LRU_GEN_CORE))
 		caps |= BIT(LRU_GEN_CORE);
 
-	if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))
+	if (should_walk_mmu())
 		caps |= BIT(LRU_GEN_MM_WALK);
 
-	if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG))
+	if (should_clear_pmd_young())
 		caps |= BIT(LRU_GEN_NONLEAF_YOUNG);
 
 	return sysfs_emit(buf, "0x%04x\n", caps);
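The caps value that enabled_show() emits is the hex bitmask userspace reads back. Below is a minimal, illustrative userspace decoder; it is not part of the patch, and it assumes the usual lru_gen sysfs location (/sys/kernel/mm/lru_gen/enabled) and the LRU_GEN_* bit order in mm/vmscan.c (core = 0x1, mm walk = 0x2, non-leaf PMD young = 0x4).

	/* Illustrative only: decode the bitmask printed by enabled_show(). */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/mm/lru_gen/enabled", "r");
		unsigned int caps = 0;

		if (!f || fscanf(f, "%x", &caps) != 1) {
			perror("lru_gen/enabled");
			return 1;
		}
		fclose(f);

		printf("core:               %s\n", caps & 0x1 ? "on" : "off");
		printf("mm walk:            %s\n", caps & 0x2 ? "on" : "off");
		printf("non-leaf PMD young: %s\n", caps & 0x4 ? "on" : "off");
		return 0;
	}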