[v2,2/4] mm: change to return bool for isolate_lru_page()
Commit Message
isolate_lru_page() can only return 0 or -EBUSY, and most callers do not care about the specific negative error value, except for one caller in add_page_for_migration(). So convert isolate_lru_page() to return a boolean value, which makes the code clearer when checking the return value of isolate_lru_page().

Also convert all callers' checks of the isolation result accordingly.

No functional changes intended.
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
mm/folio-compat.c | 12 +++---------
mm/internal.h | 2 +-
mm/khugepaged.c | 2 +-
mm/memcontrol.c | 4 ++--
mm/memory-failure.c | 4 ++--
mm/memory_hotplug.c | 2 +-
mm/migrate.c | 9 ++++++---
mm/migrate_device.c | 2 +-
8 files changed, 17 insertions(+), 20 deletions(-)
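
For illustration, a minimal sketch of the caller-side change this conversion implies (hypothetical caller and 'out' label, not taken from the patch):

	/* Before: isolate_lru_page() returns 0 on success and -EBUSY on failure. */
	if (isolate_lru_page(page))
		goto out;	/* isolation failed */

	/* After: it returns true on success and false on failure. */
	if (!isolate_lru_page(page))
		goto out;	/* isolation failed */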
Comments
On Tue, 14 Feb 2023 21:59:30 +0800 Baolin Wang <baolin.wang@linux.alibaba.com> wrote:
> The isolate_lru_page() can only return 0 or -EBUSY, and most users did
> not care about the negative error of isolate_lru_page(), except one user
> in add_page_for_migration(). So we can convert the isolate_lru_page() to
> return a boolean value, which can help to make the code more clear when
> checking the return value of isolate_lru_page().
>
> Also convert all users' logic of checking the isolation state.
>
> No functional changes intended.
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
> mm/folio-compat.c | 12 +++---------
> mm/internal.h | 2 +-
> mm/khugepaged.c | 2 +-
> mm/memcontrol.c | 4 ++--
> mm/memory-failure.c | 4 ++--
> mm/memory_hotplug.c | 2 +-
> mm/migrate.c | 9 ++++++---
> mm/migrate_device.c | 2 +-
> 8 files changed, 17 insertions(+), 20 deletions(-)
>
[...]
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index a1e8c3e9ab08..17ed80707518 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -1668,7 +1668,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
> * LRU and non-lru movable pages.
> */
> if (PageLRU(page))
> - ret = isolate_lru_page(page);
> + ret = !isolate_lru_page(page);
This may change the return value of this function: after this change it will return 1 instead of -EBUSY. It's not a real issue, though, as no caller of this function cares about the return value.
Thanks,
SJ
> else
> ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
> if (!ret) { /* Success */
[...]
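
To make the point concrete, a worked illustration (not part of the patch) of how the failure value seen by do_migrate_range() shifts:

	/*
	 * On an isolation failure in do_migrate_range():
	 *   before: ret = isolate_lru_page(page);   ret == -EBUSY
	 *   after:  ret = !isolate_lru_page(page);   ret == 1
	 * Success gives ret == 0 in both cases, so only the error value
	 * that do_migrate_range() may end up returning is different.
	 */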
On 2/15/2023 3:32 AM, SeongJae Park wrote:
> On Tue, 14 Feb 2023 21:59:30 +0800 Baolin Wang <baolin.wang@linux.alibaba.com> wrote:
>
>> The isolate_lru_page() can only return 0 or -EBUSY, and most users did
>> not care about the negative error of isolate_lru_page(), except one user
>> in add_page_for_migration(). So we can convert the isolate_lru_page() to
>> return a boolean value, which can help to make the code more clear when
>> checking the return value of isolate_lru_page().
>>
>> Also convert all users' logic of checking the isolation state.
>>
>> No functional changes intended.
>>
>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> ---
>> mm/folio-compat.c | 12 +++---------
>> mm/internal.h | 2 +-
>> mm/khugepaged.c | 2 +-
>> mm/memcontrol.c | 4 ++--
>> mm/memory-failure.c | 4 ++--
>> mm/memory_hotplug.c | 2 +-
>> mm/migrate.c | 9 ++++++---
>> mm/migrate_device.c | 2 +-
>> 8 files changed, 17 insertions(+), 20 deletions(-)
>>
> [...]
>> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
>> index a1e8c3e9ab08..17ed80707518 100644
>> --- a/mm/memory_hotplug.c
>> +++ b/mm/memory_hotplug.c
>> @@ -1668,7 +1668,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
>> * LRU and non-lru movable pages.
>> */
>> if (PageLRU(page))
>> - ret = isolate_lru_page(page);
>> + ret = !isolate_lru_page(page);
>
> This may change return value of this function. That is, this function will
> return 1 instead of -EBUSY after this change. It's not a real issue as no
> caller of this function takes care of the return value, though.
Yes, I've also thought about this. OK, I can keep the original logic
here by adding a new variable. Thanks.
isolated = isolate_lru_page(page);
ret = isolated ? 0 : -EBUSY;
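
Sketched in context, that spot in do_migrate_range() could then look roughly like this (a sketch of the suggested follow-up, not the posted v2 diff):

	if (PageLRU(page)) {
		bool isolated = isolate_lru_page(page);

		ret = isolated ? 0 : -EBUSY;
	} else {
		ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
	}

This keeps the function's original 0 / -EBUSY convention while still using the boolean isolate_lru_page().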
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -113,17 +113,11 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
-int isolate_lru_page(struct page *page)
+bool isolate_lru_page(struct page *page)
{
- bool ret;
-
if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
- return -EBUSY;
- ret = folio_isolate_lru((struct folio *)page);
- if (ret)
- return 0;
-
- return -EBUSY;
+ return false;
+ return folio_isolate_lru((struct folio *)page);
}
void putback_lru_page(struct page *page)
diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -187,7 +187,7 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
/*
* in mm/vmscan.c:
*/
-int isolate_lru_page(struct page *page);
+bool isolate_lru_page(struct page *page);
bool folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -659,7 +659,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
* Isolate the page to avoid collapsing an hugepage
* currently in use by the VM.
*/
- if (isolate_lru_page(page)) {
+ if (!isolate_lru_page(page)) {
unlock_page(page);
result = SCAN_DEL_PAGE_LRU;
goto out;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6176,7 +6176,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
if (target_type == MC_TARGET_PAGE) {
page = target.page;
- if (!isolate_lru_page(page)) {
+ if (isolate_lru_page(page)) {
if (!mem_cgroup_move_account(page, true,
mc.from, mc.to)) {
mc.precharge -= HPAGE_PMD_NR;
@@ -6226,7 +6226,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
*/
if (PageTransCompound(page))
goto put;
- if (!device && isolate_lru_page(page))
+ if (!device && !isolate_lru_page(page))
goto put;
if (!mem_cgroup_move_account(page, false,
mc.from, mc.to)) {
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -846,7 +846,7 @@ static const char * const action_page_types[] = {
*/
static int delete_from_lru_cache(struct page *p)
{
- if (!isolate_lru_page(p)) {
+ if (isolate_lru_page(p)) {
/*
* Clear sensible page flags, so that the buddy system won't
* complain when the page is unpoison-and-freed.
@@ -2513,7 +2513,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
bool lru = !__PageMovable(page);
if (lru)
- isolated = !isolate_lru_page(page);
+ isolated = isolate_lru_page(page);
else
isolated = !isolate_movable_page(page,
ISOLATE_UNEVICTABLE);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a1e8c3e9ab08..17ed80707518 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1668,7 +1668,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
* LRU and non-lru movable pages.
*/
if (PageLRU(page))
- ret = isolate_lru_page(page);
+ ret = !isolate_lru_page(page);
else
ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
if (!ret) { /* Success */
diff --git a/mm/migrate.c b/mm/migrate.c
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2132,11 +2132,14 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
}
} else {
struct page *head;
+ bool isolated;
head = compound_head(page);
- err = isolate_lru_page(head);
- if (err)
+ isolated = isolate_lru_page(head);
+ if (!isolated) {
+ err = -EBUSY;
goto out_putpage;
+ }
err = 1;
list_add_tail(&head->lru, pagelist);
@@ -2541,7 +2544,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
return 0;
}
- if (isolate_lru_page(page))
+ if (!isolate_lru_page(page))
return 0;
mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -388,7 +388,7 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
allow_drain = false;
}
- if (isolate_lru_page(page)) {
+ if (!isolate_lru_page(page)) {
src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
restore++;
continue;