[v3,4/4] mm: change to return bool for isolate_movable_page()
Commit Message
isolate_movable_page() can now only return 0 or -EBUSY, and no caller
cares about the specific negative error value, so convert
isolate_movable_page() to return a boolean value to make the code
clearer when checking the movable-page isolation state.
No functional changes intended.
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
---
include/linux/migrate.h | 6 +++---
mm/compaction.c | 2 +-
mm/memory-failure.c | 4 ++--
mm/memory_hotplug.c | 10 +++++-----
mm/migrate.c | 6 +++---
5 files changed, 14 insertions(+), 14 deletions(-)
Comments
On Wed, Feb 15, 2023 at 06:39:37PM +0800, Baolin Wang wrote:
> Now the isolate_movable_page() can only return 0 or -EBUSY, and no users
> will care about the negative return value, thus we can convert the
> isolate_movable_page() to return a boolean value to make the code more
> clear when checking the movable page isolation state.
>
> No functional changes intended.
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
A couple of nits below, not worth respinning the patch series for:
> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> index c88b96b48be7..6b252f519c86 100644
> --- a/include/linux/migrate.h
> +++ b/include/linux/migrate.h
> @@ -71,7 +71,7 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
> unsigned long private, enum migrate_mode mode, int reason,
> unsigned int *ret_succeeded);
> extern struct page *alloc_migration_target(struct page *page, unsigned long private);
> -extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
> +extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
You can drop the 'extern' here.
> +++ b/mm/memory_hotplug.c
> @@ -1668,18 +1668,18 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
> * We can skip free pages. And we can deal with pages on
> * LRU and non-lru movable pages.
> */
> - if (PageLRU(page)) {
> + if (PageLRU(page))
> isolated = isolate_lru_page(page);
> - ret = isolated ? 0 : -EBUSY;
> - } else
> - ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
> - if (!ret) { /* Success */
> + else
> + isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
> + if (isolated) { /* Success */
I would have dropped the "/* Success */" here. Before, commenting
"!ret" is quite sensible, but "isolated" seems obviously success to me.
Thanks for doing all this.
On 2/15/2023 11:44 PM, Matthew Wilcox wrote:
> On Wed, Feb 15, 2023 at 06:39:37PM +0800, Baolin Wang wrote:
>> Now the isolate_movable_page() can only return 0 or -EBUSY, and no users
>> will care about the negative return value, thus we can convert the
>> isolate_movable_page() to return a boolean value to make the code more
>> clear when checking the movable page isolation state.
>>
>> No functional changes intended.
>>
>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> Acked-by: David Hildenbrand <david@redhat.com>
>
> Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
>
> A couple of nits below, not worth respinning the patch series for:
>
>> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
>> index c88b96b48be7..6b252f519c86 100644
>> --- a/include/linux/migrate.h
>> +++ b/include/linux/migrate.h
>> @@ -71,7 +71,7 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
>> unsigned long private, enum migrate_mode mode, int reason,
>> unsigned int *ret_succeeded);
>> extern struct page *alloc_migration_target(struct page *page, unsigned long private);
>> -extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
>> +extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
>
> You can drop the 'extern' here.
>
>> +++ b/mm/memory_hotplug.c
>> @@ -1668,18 +1668,18 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
>> * We can skip free pages. And we can deal with pages on
>> * LRU and non-lru movable pages.
>> */
>> - if (PageLRU(page)) {
>> + if (PageLRU(page))
>> isolated = isolate_lru_page(page);
>> - ret = isolated ? 0 : -EBUSY;
>> - } else
>> - ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
>> - if (!ret) { /* Success */
>> + else
>> + isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
>> + if (isolated) { /* Success */
>
> I would have dropped the "/* Success */" here. Before, commenting
> "!ret" is quite sensible, but "isolated" seems obviously success to me.
Right. Hopefully Andrew can help drop this unnecessary comment. :)
Thanks for reviewing.
On Wed, 15 Feb 2023 15:44:22 +0000 Matthew Wilcox <willy@infradead.org> wrote:
> > extern struct page *alloc_migration_target(struct page *page, unsigned long private);
> > -extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
> > +extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
>
> You can drop the 'extern' here.
There are a bunch of them, so a separate patch would be better.
--- a/include/linux/migrate.h~a
+++ a/include/linux/migrate.h
@@ -62,16 +62,16 @@ extern const char *migrate_reason_names[
#ifdef CONFIG_MIGRATION
-extern void putback_movable_pages(struct list_head *l);
+void putback_movable_pages(struct list_head *l);
int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode, int extra_count);
int migrate_folio(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode);
-extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
- unsigned long private, enum migrate_mode mode, int reason,
- unsigned int *ret_succeeded);
-extern struct page *alloc_migration_target(struct page *page, unsigned long private);
-extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
+ unsigned long private, enum migrate_mode mode, int reason,
+ unsigned int *ret_succeeded);
+struct page *alloc_migration_target(struct page *page, unsigned long private);
+bool isolate_movable_page(struct page *page, isolate_mode_t mode);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
@@ -142,8 +142,8 @@ const struct movable_operations *page_mo
}
#ifdef CONFIG_NUMA_BALANCING
-extern int migrate_misplaced_page(struct page *page,
- struct vm_area_struct *vma, int node);
+int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
+ int node);
#else
static inline int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node)
@@ -71,7 +71,7 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason,
unsigned int *ret_succeeded);
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
-extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
+extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
@@ -92,8 +92,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
static inline struct page *alloc_migration_target(struct page *page,
unsigned long private)
{ return NULL; }
-static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
- { return -EBUSY; }
+static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+ { return false; }
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src)
@@ -976,7 +976,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
locked = NULL;
}
- if (!isolate_movable_page(page, mode))
+ if (isolate_movable_page(page, mode))
goto isolate_success;
}
@@ -2515,8 +2515,8 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
if (lru)
isolated = isolate_lru_page(page);
else
- isolated = !isolate_movable_page(page,
- ISOLATE_UNEVICTABLE);
+ isolated = isolate_movable_page(page,
+ ISOLATE_UNEVICTABLE);
if (isolated) {
list_add(&page->lru, pagelist);
@@ -1668,18 +1668,18 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
* We can skip free pages. And we can deal with pages on
* LRU and non-lru movable pages.
*/
- if (PageLRU(page)) {
+ if (PageLRU(page))
isolated = isolate_lru_page(page);
- ret = isolated ? 0 : -EBUSY;
- } else
- ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
- if (!ret) { /* Success */
+ else
+ isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
+ if (isolated) { /* Success */
list_add_tail(&page->lru, &source);
if (!__PageMovable(page))
inc_node_page_state(page, NR_ISOLATED_ANON +
page_is_file_lru(page));
} else {
+ ret = -EBUSY;
if (__ratelimit(&migrate_rs)) {
pr_warn("failed to isolate pfn %lx\n", pfn);
dump_page(page, "isolation failed");
@@ -58,7 +58,7 @@
#include "internal.h"
-int isolate_movable_page(struct page *page, isolate_mode_t mode)
+bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
struct folio *folio = folio_get_nontail_page(page);
const struct movable_operations *mops;
@@ -119,14 +119,14 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
folio_set_isolated(folio);
folio_unlock(folio);
- return 0;
+ return true;
out_no_isolated:
folio_unlock(folio);
out_putfolio:
folio_put(folio);
out:
- return -EBUSY;
+ return false;
}
static void putback_movable_folio(struct folio *folio)