mm/mm_init: use helper macro BITS_PER_LONG

Message ID 20230803114051.637709-1-linmiaohe@huawei.com
State New
Series mm/mm_init: use helper macro BITS_PER_LONG

Commit Message

Miaohe Lin Aug. 3, 2023, 11:40 a.m. UTC
  It's more readable to use helper macro BITS_PER_LONG. No functional
change intended.

Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
 mm/mm_init.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
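
For reference, a minimal sketch of the macros involved, paraphrased from
include/asm-generic/bitsperlong.h and include/linux/bits.h (not the verbatim
headers):

	/* Word size in bits, per kernel configuration. */
	#ifdef CONFIG_64BIT
	#define BITS_PER_LONG 64
	#else
	#define BITS_PER_LONG 32
	#endif

	/* Bits in a byte. */
	#define BITS_PER_BYTE 8

Since sizeof(unsigned long) is 8 on 64-bit kernels and 4 on 32-bit kernels,
BITS_PER_LONG equals the open-coded 8 * sizeof(unsigned long) in both cases,
so the substitution is a pure readability change.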
  

Comments

David Hildenbrand Aug. 3, 2023, 1:08 p.m. UTC | #1
On 03.08.23 13:40, Miaohe Lin wrote:
> It's more readable to use helper macro BITS_PER_LONG. No functional
> change intended.
> 
> Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
> ---
>   mm/mm_init.c | 4 ++--
>   1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index 66aca3f6accd..2f37dbb5ff9a 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -79,7 +79,7 @@ void __init mminit_verify_pageflags_layout(void)
>   	int shift, width;
>   	unsigned long or_mask, add_mask;
>   
> -	shift = 8 * sizeof(unsigned long);
> +	shift = BITS_PER_LONG;
>   	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
>   		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
>   	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
> @@ -1431,7 +1431,7 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
>   	usemapsize = roundup(zonesize, pageblock_nr_pages);
>   	usemapsize = usemapsize >> pageblock_order;
>   	usemapsize *= NR_PAGEBLOCK_BITS;
> -	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
> +	usemapsize = roundup(usemapsize, BITS_PER_LONG);
>   
>   	return usemapsize / 8;
>   }

Reviewed-by: David Hildenbrand <david@redhat.com>
  
Mike Rapoport Aug. 3, 2023, 1:33 p.m. UTC | #2
On Thu, Aug 03, 2023 at 07:40:51PM +0800, Miaohe Lin wrote:
> It's more readable to use helper macro BITS_PER_LONG. No functional
> change intended.
> 
> Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
> ---
>  mm/mm_init.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index 66aca3f6accd..2f37dbb5ff9a 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -79,7 +79,7 @@ void __init mminit_verify_pageflags_layout(void)
>  	int shift, width;
>  	unsigned long or_mask, add_mask;
>  
> -	shift = 8 * sizeof(unsigned long);
> +	shift = BITS_PER_LONG;
>  	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
>  		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
>  	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
> @@ -1431,7 +1431,7 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
>  	usemapsize = roundup(zonesize, pageblock_nr_pages);
>  	usemapsize = usemapsize >> pageblock_order;
>  	usemapsize *= NR_PAGEBLOCK_BITS;
> -	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
> +	usemapsize = roundup(usemapsize, BITS_PER_LONG);
>  
>  	return usemapsize / 8;

BITS_PER_BYTE instead of 8 here?

>  }
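
For context on the trailing / 8: usemap_size() returns a size in bytes, so
the pageblock flag bits are first rounded up to a whole number of unsigned
longs and then converted from bits to bytes. As a worked example (assuming
NR_PAGEBLOCK_BITS == 4), a zone spanning 100 pageblocks needs 400 bits, which
rounds up to 448 bits (seven 64-bit longs) and yields 56 bytes; the divisor
really is bits-per-byte, as Mike suggests.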
  
Miaohe Lin Aug. 4, 2023, 1:53 a.m. UTC | #3
On 2023/8/3 21:33, Mike Rapoport wrote:
> On Thu, Aug 03, 2023 at 07:40:51PM +0800, Miaohe Lin wrote:
>> It's more readable to use helper macro BITS_PER_LONG. No functional
>> change intended.
>>
>> Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
>> ---
>>  mm/mm_init.c | 4 ++--
>>  1 file changed, 2 insertions(+), 2 deletions(-)
>>
>> diff --git a/mm/mm_init.c b/mm/mm_init.c
>> index 66aca3f6accd..2f37dbb5ff9a 100644
>> --- a/mm/mm_init.c
>> +++ b/mm/mm_init.c
>> @@ -79,7 +79,7 @@ void __init mminit_verify_pageflags_layout(void)
>>  	int shift, width;
>>  	unsigned long or_mask, add_mask;
>>  
>> -	shift = 8 * sizeof(unsigned long);
>> +	shift = BITS_PER_LONG;
>>  	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
>>  		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
>>  	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
>> @@ -1431,7 +1431,7 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
>>  	usemapsize = roundup(zonesize, pageblock_nr_pages);
>>  	usemapsize = usemapsize >> pageblock_order;
>>  	usemapsize *= NR_PAGEBLOCK_BITS;
>> -	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
>> +	usemapsize = roundup(usemapsize, BITS_PER_LONG);
>>  
>>  	return usemapsize / 8;
> 
> BITS_PER_BYTE instead of 8 here?

Sure, this is even better. Will do.

Thanks.
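
For completeness, a sketch of what the agreed follow-up would look like on
top of this patch (assumed; the v2 posting is not shown here):

	-	return usemapsize / 8;
	+	return usemapsize / BITS_PER_BYTE;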
  

Patch

diff --git a/mm/mm_init.c b/mm/mm_init.c
index 66aca3f6accd..2f37dbb5ff9a 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -79,7 +79,7 @@ void __init mminit_verify_pageflags_layout(void)
 	int shift, width;
 	unsigned long or_mask, add_mask;
 
-	shift = 8 * sizeof(unsigned long);
+	shift = BITS_PER_LONG;
 	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
 		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
@@ -1431,7 +1431,7 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
 	usemapsize = roundup(zonesize, pageblock_nr_pages);
 	usemapsize = usemapsize >> pageblock_order;
 	usemapsize *= NR_PAGEBLOCK_BITS;
-	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
+	usemapsize = roundup(usemapsize, BITS_PER_LONG);
 
 	return usemapsize / 8;
 }
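
As a quick sanity check that the conversion is a no-op, the identity can be
verified with a hypothetical standalone userspace program (a sketch, not part
of the patch; it assumes CHAR_BIT == 8, which holds on every platform Linux
supports):

	#include <assert.h>
	#include <limits.h>	/* CHAR_BIT */
	#include <stdio.h>

	/* Userspace stand-ins for the kernel macros. */
	#define BITS_PER_BYTE	CHAR_BIT
	#define BITS_PER_LONG	(BITS_PER_BYTE * (int)sizeof(unsigned long))

	int main(void)
	{
		/* The macro must match the open-coded expression it replaces. */
		assert(8 * sizeof(unsigned long) == (size_t)BITS_PER_LONG);
		printf("BITS_PER_LONG = %d\n", BITS_PER_LONG);
		return 0;
	}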