[v3,21/34] s390: Implement the new page table range API

Message ID 20230228213738.272178-22-willy@infradead.org
State New
Series New page table range API

Commit Message

Matthew Wilcox Feb. 28, 2023, 9:37 p.m. UTC
  Add set_ptes() and update_mmu_cache_range().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: linux-s390@vger.kernel.org
---
 arch/s390/include/asm/pgtable.h | 34 ++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)
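
For context, a minimal caller-side sketch (not part of this patch) of what the range API buys: one set_ptes() call covers nr consecutive pages of a folio where the old API needed nr separate set_pte_at() calls. The names folio, vma, addr, ptep and nr are assumed to come from a surrounding fault handler:

	/*
	 * Illustrative only -- not from this series.  Map nr consecutive
	 * pages of a folio with a single call.  All nr PTEs must lie
	 * within the same folio, PMD and VMA.
	 */
	pte_t entry = mk_pte(&folio->page, vma->vm_page_prot);

	set_ptes(vma->vm_mm, addr, ptep, entry, nr);
	update_mmu_cache_range(vma, addr, ptep, nr);	/* no-op on s390 */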
  

Comments

Gerald Schaefer March 2, 2023, 1:31 p.m. UTC | #1
On Tue, 28 Feb 2023 21:37:24 +0000
"Matthew Wilcox (Oracle)" <willy@infradead.org> wrote:

> Add set_ptes() and update_mmu_cache_range().
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Cc: Heiko Carstens <hca@linux.ibm.com>
> Cc: Vasily Gorbik <gor@linux.ibm.com>
> Cc: Alexander Gordeev <agordeev@linux.ibm.com>
> Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
> Cc: Sven Schnelle <svens@linux.ibm.com>
> Cc: linux-s390@vger.kernel.org
> ---
>  arch/s390/include/asm/pgtable.h | 34 ++++++++++++++++++++++++---------
>  1 file changed, 25 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
> index 2c70b4d1263d..46bf475116f1 100644
> --- a/arch/s390/include/asm/pgtable.h
> +++ b/arch/s390/include/asm/pgtable.h
> @@ -50,6 +50,7 @@ void arch_report_meminfo(struct seq_file *m);
>   * tables contain all the necessary information.
>   */
>  #define update_mmu_cache(vma, address, ptep)     do { } while (0)
> +#define update_mmu_cache_range(vma, addr, ptep, nr)	do { } while (0)
>  #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
>  
>  /*
> @@ -1317,21 +1318,36 @@ pgprot_t pgprot_writecombine(pgprot_t prot);
>  pgprot_t pgprot_writethrough(pgprot_t prot);
>  
>  /*
> - * Certain architectures need to do special things when PTEs
> - * within a page table are directly modified.  Thus, the following
> - * hook is made available.
> + * Set multiple PTEs to consecutive pages with a single call.  All PTEs
> + * are within the same folio, PMD and VMA.
>   */
> -static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
> -			      pte_t *ptep, pte_t entry)
> +static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
> +			      pte_t *ptep, pte_t entry, unsigned int nr)
>  {
>  	if (pte_present(entry))
>  		entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
> -	if (mm_has_pgste(mm))
> -		ptep_set_pte_at(mm, addr, ptep, entry);
> -	else
> -		set_pte(ptep, entry);
> +	if (mm_has_pgste(mm)) {
> +		for (;;) {
> +			ptep_set_pte_at(mm, addr, ptep, entry);

There might be room for additional optimization here, regarding the
preempt_disable/enable() in ptep_set_pte_at(), i.e. move it out of
ptep_set_pte_at() and do it only once in this loop.

We could add that later with an add-on patch, but for this series it
all looks good.

Reviewed-by: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
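
For illustration, a rough sketch of the optimization suggested above: hoist the preempt_disable()/preempt_enable() pair (currently taken per PTE inside ptep_set_pte_at()) out of the loop so it is done once per set_ptes() call. __ptep_set_pte_at() is a hypothetical helper that would do the pgste bookkeeping without the preempt guards; it does not exist in the tree:

	if (mm_has_pgste(mm)) {
		preempt_disable();
		for (;;) {
			/* assumed guard-free variant of ptep_set_pte_at() */
			__ptep_set_pte_at(mm, addr, ptep, entry);
			if (--nr == 0)
				break;
			ptep++;
			entry = __pte(pte_val(entry) + PAGE_SIZE);
			addr += PAGE_SIZE;
		}
		preempt_enable();
	}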
  

Patch

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 2c70b4d1263d..46bf475116f1 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -50,6 +50,7 @@ void arch_report_meminfo(struct seq_file *m);
  * tables contain all the necessary information.
  */
 #define update_mmu_cache(vma, address, ptep)     do { } while (0)
+#define update_mmu_cache_range(vma, addr, ptep, nr)	do { } while (0)
 #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
 
 /*
@@ -1317,21 +1318,36 @@ pgprot_t pgprot_writecombine(pgprot_t prot);
 pgprot_t pgprot_writethrough(pgprot_t prot);
 
 /*
- * Certain architectures need to do special things when PTEs
- * within a page table are directly modified.  Thus, the following
- * hook is made available.
+ * Set multiple PTEs to consecutive pages with a single call.  All PTEs
+ * are within the same folio, PMD and VMA.
  */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t entry)
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t entry, unsigned int nr)
 {
 	if (pte_present(entry))
 		entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
-	if (mm_has_pgste(mm))
-		ptep_set_pte_at(mm, addr, ptep, entry);
-	else
-		set_pte(ptep, entry);
+	if (mm_has_pgste(mm)) {
+		for (;;) {
+			ptep_set_pte_at(mm, addr, ptep, entry);
+			if (--nr == 0)
+				break;
+			ptep++;
+			entry = __pte(pte_val(entry) + PAGE_SIZE);
+			addr += PAGE_SIZE;
+		}
+	} else {
+		for (;;) {
+			set_pte(ptep, entry);
+			if (--nr == 0)
+				break;
+			ptep++;
+			entry = __pte(pte_val(entry) + PAGE_SIZE);
+		}
+	}
 }
 
+#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
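
A note on the loop idiom, for readers unfamiliar with s390: a pte holds the 4K-aligned page-frame address in its upper bits and the protection/status flags in the low 12 bits, so entry = __pte(pte_val(entry) + PAGE_SIZE) advances the entry to the next consecutive physical page while leaving the flag bits intact. A worked micro-example (addresses invented):

	/* pte_val(entry) == 0x80001000 | flags; adding PAGE_SIZE (0x1000)
	 * gives 0x80002000 | flags, i.e. the same flags applied to the
	 * next page. */

The non-pgste branch also skips the addr += PAGE_SIZE update because set_pte() never looks at the address; only ptep_set_pte_at() needs it for the pgste handling.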