[PATCHv6,15/16] x86/mm: Introduce kernel_ident_mapping_free()

Message ID 20240124125557.493675-16-kirill.shutemov@linux.intel.com
State New
Series x86/tdx: Add kexec support

Commit Message

Kirill A. Shutemov Jan. 24, 2024, 12:55 p.m. UTC
  The helper complements kernel_ident_mapping_init(): it frees the
identity mapping that was previously allocated. It will be used in the
error path to free a partially allocated mapping or if the mapping is no
longer needed.

The caller provides a struct x86_mapping_info with the free_pgt_page()
callback hooked up and the pgd_t to free.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 arch/x86/include/asm/init.h |  3 ++
 arch/x86/mm/ident_map.c     | 73 +++++++++++++++++++++++++++++++++++++
 2 files changed, 76 insertions(+)
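
For illustration only (not part of the patch), below is a minimal sketch of the wiring described above: a hypothetical caller backs both callbacks with plain zeroed pages and uses kernel_ident_mapping_free() to tear down a partially built mapping on error. The helper names, the NULL context and the choice of __PAGE_KERNEL_LARGE_EXEC for page_flag are assumptions made for the example, not taken from this series.

#include <linux/gfp.h>
#include <asm/init.h>
#include <asm/pgtable_types.h>

/* Hypothetical callbacks: back every page-table page with a zeroed page. */
static void *ident_alloc_pgt_page(void *context)
{
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static void ident_free_pgt_page(void *pgt, void *context)
{
	free_page((unsigned long)pgt);
}

/* Build an identity mapping of [start, end); return the pgd or NULL. */
static pgd_t *build_ident_mapping(unsigned long start, unsigned long end)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= ident_alloc_pgt_page,
		.free_pgt_page	= ident_free_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,	/* assumed flag */
	};
	pgd_t *pgd;

	pgd = ident_alloc_pgt_page(NULL);
	if (!pgd)
		return NULL;

	if (kernel_ident_mapping_init(&info, pgd, start, end)) {
		/* Free the partially built tree, including the pgd page. */
		kernel_ident_mapping_free(&info, pgd);
		return NULL;
	}

	return pgd;
}

Because kernel_ident_mapping_free() hands the top-level page back through the same ->free_pgt_page() callback, the error path above needs no separate free of the pgd.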
  

Comments

Kai Huang Jan. 26, 2024, 2:10 p.m. UTC | #1
On Wed, 2024-01-24 at 14:55 +0200, Kirill A. Shutemov wrote:
> The helper complements kernel_ident_mapping_init(): it frees the
> identity mapping that was previously allocated. It will be used in the
> error path to free a partially allocated mapping or if the mapping is no
> longer needed.
> 
> The caller provides a struct x86_mapping_info with the free_pgt_page()
> callback hooked up and the pgd_t to free.
> 
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

FWIW:

Acked-by: Kai Huang <kai.huang@intel.com>


Patch

diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index cc9ccf61b6bd..14d72727d7ee 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -6,6 +6,7 @@ 
 
 struct x86_mapping_info {
 	void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
+	void (*free_pgt_page)(void *, void *); /* free buf for page table */
 	void *context;			 /* context for alloc_pgt_page */
 	unsigned long page_flag;	 /* page flag for PMD or PUD entry */
 	unsigned long offset;		 /* ident mapping offset */
@@ -16,4 +17,6 @@ struct x86_mapping_info {
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
 				unsigned long pstart, unsigned long pend);
 
+void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd);
+
 #endif /* _ASM_X86_INIT_H */
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index 968d7005f4a7..3996af7b4abf 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -4,6 +4,79 @@ 
  * included by both the compressed kernel and the regular kernel.
  */
 
+static void free_pte(struct x86_mapping_info *info, pmd_t *pmd)
+{
+	pte_t *pte = pte_offset_kernel(pmd, 0);
+
+	info->free_pgt_page(pte, info->context);
+}
+
+static void free_pmd(struct x86_mapping_info *info, pud_t *pud)
+{
+	pmd_t *pmd = pmd_offset(pud, 0);
+	int i;
+
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		if (!pmd_present(pmd[i]))
+			continue;
+
+		if (pmd_leaf(pmd[i]))
+			continue;
+
+		free_pte(info, &pmd[i]);
+	}
+
+	info->free_pgt_page(pmd, info->context);
+}
+
+static void free_pud(struct x86_mapping_info *info, p4d_t *p4d)
+{
+	pud_t *pud = pud_offset(p4d, 0);
+	int i;
+
+	for (i = 0; i < PTRS_PER_PUD; i++) {
+		if (!pud_present(pud[i]))
+			continue;
+
+		if (pud_leaf(pud[i]))
+			continue;
+
+		free_pmd(info, &pud[i]);
+	}
+
+	info->free_pgt_page(pud, info->context);
+}
+
+static void free_p4d(struct x86_mapping_info *info, pgd_t *pgd)
+{
+	p4d_t *p4d = p4d_offset(pgd, 0);
+	int i;
+
+	for (i = 0; i < PTRS_PER_P4D; i++) {
+		if (!p4d_present(p4d[i]))
+			continue;
+
+		free_pud(info, &p4d[i]);
+	}
+
+	if (pgtable_l5_enabled())
+		info->free_pgt_page(pgd, info->context);
+}
+
+void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd)
+{
+	int i;
+
+	for (i = 0; i < PTRS_PER_PGD; i++) {
+		if (!pgd_present(pgd[i]))
+			continue;
+
+		free_p4d(info, &pgd[i]);
+	}
+
+	info->free_pgt_page(pgd, info->context);
+}
+
 static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
 			   unsigned long addr, unsigned long end)
 {