[RFC,2/4] filemap: use minimum order while allocating folios

Message ID: 20230621083823.1724337-3-p.raghav@samsung.com
State: New
Series: minimum folio order support in filemap

Commit Message

Pankaj Raghav June 21, 2023, 8:38 a.m. UTC
Add support to filemap and readahead to use the folio order set by
mapping_min_folio_order().

Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
---
 mm/filemap.c   |  9 ++++++---
 mm/readahead.c | 34 ++++++++++++++++++++++++----------
 2 files changed, 30 insertions(+), 13 deletions(-)
  

Comments

Hannes Reinecke June 21, 2023, 8:59 a.m. UTC | #1
On 6/21/23 10:38, Pankaj Raghav wrote:
> Add support to filemap and readahead to use the folio order set by
> mapping_min_folio_order().
> 
> Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
> ---
>   mm/filemap.c   |  9 ++++++---
>   mm/readahead.c | 34 ++++++++++++++++++++++++----------
>   2 files changed, 30 insertions(+), 13 deletions(-)
> 
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 3b73101f9f86..9dc8568e9336 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -1936,7 +1936,8 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
>   			gfp |= GFP_NOWAIT | __GFP_NOWARN;
>   		}
>   
> -		folio = filemap_alloc_folio(gfp, 0);
> +		folio = filemap_alloc_folio(gfp,
> +					    mapping_min_folio_order(mapping));
>   		if (!folio)
>   			return ERR_PTR(-ENOMEM);
>   
> @@ -2495,7 +2496,8 @@ static int filemap_create_folio(struct file *file,
>   	struct folio *folio;
>   	int error;
>   
> -	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
> +	folio = filemap_alloc_folio(mapping_gfp_mask(mapping),
> +				    mapping_min_folio_order(mapping));
>   	if (!folio)
>   		return -ENOMEM;
>   
> @@ -3663,7 +3665,8 @@ static struct folio *do_read_cache_folio(struct address_space *mapping,
>   repeat:
>   	folio = filemap_get_folio(mapping, index);
>   	if (IS_ERR(folio)) {
> -		folio = filemap_alloc_folio(gfp, 0);
> +		folio = filemap_alloc_folio(gfp,
> +					    mapping_min_folio_order(mapping));
>   		if (!folio)
>   			return ERR_PTR(-ENOMEM);
>   		err = filemap_add_folio(mapping, folio, index, gfp);
> diff --git a/mm/readahead.c b/mm/readahead.c
> index 47afbca1d122..090b810ddeed 100644
> --- a/mm/readahead.c
> +++ b/mm/readahead.c
> @@ -245,7 +245,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
>   			continue;
>   		}
>   
> -		folio = filemap_alloc_folio(gfp_mask, 0);
> +		folio = filemap_alloc_folio(gfp_mask,
> +					    mapping_min_folio_order(mapping));
>   		if (!folio)
>   			break;
>   		if (filemap_add_folio(mapping, folio, index + i,
> @@ -259,7 +260,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
>   		if (i == nr_to_read - lookahead_size)
>   			folio_set_readahead(folio);
>   		ractl->_workingset |= folio_test_workingset(folio);
> -		ractl->_nr_pages++;
> +		ractl->_nr_pages += folio_nr_pages(folio);
> +		i += folio_nr_pages(folio) - 1;
>   	}
>   
>   	/*
This is incomplete, as the loop above has some exit statements which 
blindly step backwards by one page.

I found it better to rework the 'for' into a 'while' loop; please check 
the attached patch.
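
A minimal sketch of that rework (not the attached patch itself; it follows
the excerpt quoted further down the thread, with the upstream
page_cache_ra_unbounded() local names):

	unsigned long i = 0;

	while (i < nr_to_read) {
		struct folio *folio = xa_load(&mapping->i_pages, index + i);

		if (folio && !xa_is_value(folio)) {
			/*
			 * Folio already in the cache: submit what we have
			 * batched so far, then skip ahead by however many
			 * pages that folio covers instead of a single page.
			 */
			read_pages(ractl);
			ractl->_index += folio_nr_pages(folio);
			i = ractl->_index + ractl->_nr_pages - index;
			continue;
		}

		/* ... allocation and filemap_add_folio() as in the patch ... */

		ractl->_nr_pages += folio_nr_pages(folio);
		i += folio_nr_pages(folio);
	}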

Cheers,

Hannes
  
Pankaj Raghav June 21, 2023, 10:25 a.m. UTC | #2
>> index 47afbca1d122..090b810ddeed 100644
>> --- a/mm/readahead.c
>> +++ b/mm/readahead.c
>> @@ -245,7 +245,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
>>               continue;
>>           }
>>   -        folio = filemap_alloc_folio(gfp_mask, 0);
>> +        folio = filemap_alloc_folio(gfp_mask,
>> +                        mapping_min_folio_order(mapping));
>>           if (!folio)
>>               break;
>>           if (filemap_add_folio(mapping, folio, index + i,
>> @@ -259,7 +260,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
>>           if (i == nr_to_read - lookahead_size)
>>               folio_set_readahead(folio);
>>           ractl->_workingset |= folio_test_workingset(folio);
>> -        ractl->_nr_pages++;
>> +        ractl->_nr_pages += folio_nr_pages(folio);
>> +        i += folio_nr_pages(folio) - 1;
>>       }
>>         /*
> This is incomplete, as the loop above has some exit statements which blindly step backwards by one
> page.
> 
> I found it better to rework the 'for' into a 'while' loop; please check the attached patch.
> 
Taken from your patch:

@@ -240,8 +240,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 			 * not worth getting one just for that.
 			 */
 			read_pages(ractl);
-			ractl->_index++;
-			i = ractl->_index + ractl->_nr_pages - index - 1;
+			ractl->_index += folio_nr_pages(folio);
+			i = ractl->_index + ractl->_nr_pages - index;

IIUC, we don't need to update _index after read_pages(), as read_pages()
already modifies it. We just need to advance ractl->_index by 1 to move to
the next index.
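
A minimal sketch of that exit path (untested; assuming read_pages() has
consumed the batch and left _index pointing at the folio found in the
cache):

	if (folio && !xa_is_value(folio)) {
		read_pages(ractl);	/* already advances _index past the batch */
		ractl->_index++;	/* step over the folio found in the cache */
		i = ractl->_index + ractl->_nr_pages - index;
		continue;
	}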


> Cheers,
> 
> Hannes
  

Patch

diff --git a/mm/filemap.c b/mm/filemap.c
index 3b73101f9f86..9dc8568e9336 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1936,7 +1936,8 @@  struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 			gfp |= GFP_NOWAIT | __GFP_NOWARN;
 		}
 
-		folio = filemap_alloc_folio(gfp, 0);
+		folio = filemap_alloc_folio(gfp,
+					    mapping_min_folio_order(mapping));
 		if (!folio)
 			return ERR_PTR(-ENOMEM);
 
@@ -2495,7 +2496,8 @@  static int filemap_create_folio(struct file *file,
 	struct folio *folio;
 	int error;
 
-	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
+	folio = filemap_alloc_folio(mapping_gfp_mask(mapping),
+				    mapping_min_folio_order(mapping));
 	if (!folio)
 		return -ENOMEM;
 
@@ -3663,7 +3665,8 @@  static struct folio *do_read_cache_folio(struct address_space *mapping,
 repeat:
 	folio = filemap_get_folio(mapping, index);
 	if (IS_ERR(folio)) {
-		folio = filemap_alloc_folio(gfp, 0);
+		folio = filemap_alloc_folio(gfp,
+					    mapping_min_folio_order(mapping));
 		if (!folio)
 			return ERR_PTR(-ENOMEM);
 		err = filemap_add_folio(mapping, folio, index, gfp);
diff --git a/mm/readahead.c b/mm/readahead.c
index 47afbca1d122..090b810ddeed 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -245,7 +245,8 @@  void page_cache_ra_unbounded(struct readahead_control *ractl,
 			continue;
 		}
 
-		folio = filemap_alloc_folio(gfp_mask, 0);
+		folio = filemap_alloc_folio(gfp_mask,
+					    mapping_min_folio_order(mapping));
 		if (!folio)
 			break;
 		if (filemap_add_folio(mapping, folio, index + i,
@@ -259,7 +260,8 @@  void page_cache_ra_unbounded(struct readahead_control *ractl,
 		if (i == nr_to_read - lookahead_size)
 			folio_set_readahead(folio);
 		ractl->_workingset |= folio_test_workingset(folio);
-		ractl->_nr_pages++;
+		ractl->_nr_pages += folio_nr_pages(folio);
+		i += folio_nr_pages(folio) - 1;
 	}
 
 	/*
@@ -311,6 +313,8 @@  void force_page_cache_ra(struct readahead_control *ractl,
 	struct file_ra_state *ra = ractl->ra;
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages, index;
+	unsigned int folio_order = mapping_min_folio_order(mapping);
+	unsigned int nr_of_pages = (1 << folio_order);
 
 	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
 		return;
@@ -320,6 +324,13 @@  void force_page_cache_ra(struct readahead_control *ractl,
 	 * be up to the optimal hardware IO size
 	 */
 	index = readahead_index(ractl);
+	if (folio_order && (index & (nr_of_pages - 1))) {
+		unsigned long old_index = index;
+
+		index = round_down(index, nr_of_pages);
+		nr_to_read += (old_index - index);
+	}
+
 	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
 	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
 	while (nr_to_read) {
@@ -795,18 +806,20 @@  void readahead_expand(struct readahead_control *ractl,
 	struct file_ra_state *ra = ractl->ra;
 	pgoff_t new_index, new_nr_pages;
 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
+	unsigned min_folio_count = 1U << mapping_min_folio_order(mapping);
 
-	new_index = new_start / PAGE_SIZE;
+	new_index = new_start / (min_folio_count * PAGE_SIZE);
 
 	/* Expand the leading edge downwards */
 	while (ractl->_index > new_index) {
-		unsigned long index = ractl->_index - 1;
+		unsigned long index = ractl->_index - min_folio_count;
 		struct folio *folio = xa_load(&mapping->i_pages, index);
 
 		if (folio && !xa_is_value(folio))
 			return; /* Folio apparently present */
 
-		folio = filemap_alloc_folio(gfp_mask, 0);
+		folio = filemap_alloc_folio(gfp_mask,
+					    mapping_min_folio_order(mapping));
 		if (!folio)
 			return;
 		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
@@ -818,7 +831,7 @@  void readahead_expand(struct readahead_control *ractl,
 			ractl->_workingset = true;
 			psi_memstall_enter(&ractl->_pflags);
 		}
-		ractl->_nr_pages++;
+		ractl->_nr_pages += folio_nr_pages(folio);
 		ractl->_index = folio->index;
 	}
 
@@ -833,7 +846,8 @@  void readahead_expand(struct readahead_control *ractl,
 		if (folio && !xa_is_value(folio))
 			return; /* Folio apparently present */
 
-		folio = filemap_alloc_folio(gfp_mask, 0);
+		folio = filemap_alloc_folio(gfp_mask,
+					    mapping_min_folio_order(mapping));
 		if (!folio)
 			return;
 		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
@@ -845,10 +859,10 @@  void readahead_expand(struct readahead_control *ractl,
 			ractl->_workingset = true;
 			psi_memstall_enter(&ractl->_pflags);
 		}
-		ractl->_nr_pages++;
+		ractl->_nr_pages += folio_nr_pages(folio);
 		if (ra) {
-			ra->size++;
-			ra->async_size++;
+			ra->size += folio_nr_pages(folio);
+			ra->async_size += folio_nr_pages(folio);
 		}
 	}
 }
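
For reference, the alignment added to force_page_cache_ra() rounds the
start index down to a minimum-folio boundary and widens the read so the
originally requested range is still covered. A worked example with
hypothetical values (folio_order = 2, i.e. 4-page minimum folios):

	unsigned int folio_order = 2;
	unsigned int nr_of_pages = 1 << folio_order;	/* 4 */
	unsigned long index = 7, nr_to_read = 10;

	if (folio_order && (index & (nr_of_pages - 1))) {
		unsigned long old_index = index;	/* 7 */

		index = round_down(index, nr_of_pages);	/* 4 */
		nr_to_read += old_index - index;	/* 10 + 3 = 13 */
	}
	/* Readahead now starts at page 4 and still covers pages 7..16. */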