[1/3] mm: memory-failure: make put_ref_page() more useful
Commit Message
Pass pfn/flags to put_ref_page(), and let it check MF_COUNT_INCREASED
and drop the refcount, to make the code look cleaner.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
mm/memory-failure.c | 34 +++++++++++++++++-----------------
1 file changed, 17 insertions(+), 17 deletions(-)
Comments
On Fri, Oct 21, 2022 at 04:46:09PM +0800, Kefeng Wang wrote:
> Pass pfn/flags to put_ref_page(), and let it check MF_COUNT_INCREASED
> and drop the refcount, to make the code look cleaner.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Looks good to me, thank you.
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
On 2022/10/21 16:46, Kefeng Wang wrote:
> Pass pfn/flags to put_ref_page(), and let it check MF_COUNT_INCREASED
> and drop the refcount, to make the code look cleaner.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
> mm/memory-failure.c | 34 +++++++++++++++++-----------------
> 1 file changed, 17 insertions(+), 17 deletions(-)
>
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index bead6bccc7f2..b94152abb1c9 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -1913,17 +1913,25 @@ static inline unsigned long free_raw_hwp_pages(struct page *hpage, bool flag)
>  }
>  #endif /* CONFIG_HUGETLB_PAGE */
>  
> +/* Drop the extra refcount in case we come from madvise() */
> +static void put_ref_page(unsigned long pfn, int flags)
> +{
> +	struct page *page;
> +
> +	if (!(flags & MF_COUNT_INCREASED))
> +		return;
> +
> +	page = pfn_to_page(pfn);
> +	if (page)
IMO the above check is unneeded: the page can't be NULL since the pfn is valid. But this is trivial, and this patch looks
good to me. Thanks.
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Thanks,
Miaohe Lin
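For reference, a minimal sketch of what the helper would look like with that check dropped, as suggested above (illustrative only; the applied patch keeps the NULL test):

/* Drop the extra refcount in case we come from madvise() */
static void put_ref_page(unsigned long pfn, int flags)
{
	/* pfn is already known to be valid here, so pfn_to_page() cannot return NULL */
	if (flags & MF_COUNT_INCREASED)
		put_page(pfn_to_page(pfn));
}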
@@ -1913,17 +1913,25 @@ static inline unsigned long free_raw_hwp_pages(struct page *hpage, bool flag)
 }
 #endif /* CONFIG_HUGETLB_PAGE */
 
+/* Drop the extra refcount in case we come from madvise() */
+static void put_ref_page(unsigned long pfn, int flags)
+{
+	struct page *page;
+
+	if (!(flags & MF_COUNT_INCREASED))
+		return;
+
+	page = pfn_to_page(pfn);
+	if (page)
+		put_page(page);
+}
+
 static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
 		struct dev_pagemap *pgmap)
 {
-	struct page *page = pfn_to_page(pfn);
 	int rc = -ENXIO;
 
-	if (flags & MF_COUNT_INCREASED)
-		/*
-		 * Drop the extra refcount in case we come from madvise().
-		 */
-		put_page(page);
+	put_ref_page(pfn, flags);
 
 	/* device metadata space is not recoverable */
 	if (!pgmap_pfn_valid(pgmap, pfn))
@@ -2516,12 +2524,6 @@ static int soft_offline_in_use_page(struct page *page)
 	return ret;
 }
 
-static void put_ref_page(struct page *page)
-{
-	if (page)
-		put_page(page);
-}
-
 /**
  * soft_offline_page - Soft offline a page.
  * @pfn: pfn to soft-offline
@@ -2550,19 +2552,17 @@ int soft_offline_page(unsigned long pfn, int flags)
 {
 	int ret;
 	bool try_again = true;
-	struct page *page, *ref_page = NULL;
+	struct page *page;
 
 	WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));
 
 	if (!pfn_valid(pfn))
 		return -ENXIO;
-	if (flags & MF_COUNT_INCREASED)
-		ref_page = pfn_to_page(pfn);
 
 	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
 	page = pfn_to_online_page(pfn);
 	if (!page) {
-		put_ref_page(ref_page);
+		put_ref_page(pfn, flags);
 		return -EIO;
 	}
 
@@ -2570,7 +2570,7 @@ int soft_offline_page(unsigned long pfn, int flags)
 
 	if (PageHWPoison(page)) {
 		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
-		put_ref_page(ref_page);
+		put_ref_page(pfn, flags);
 		mutex_unlock(&mf_mutex);
 		return 0;
 	}