[v4,14/18] KVM: mmu: Initialize kvm_mmu_memory_cache.gfp_zero to __GFP_ZERO by default

Message ID 20230306224127.1689967-15-vipinsh@google.com
State New
Series: NUMA aware page table allocation

Commit Message

Vipin Sharma March 6, 2023, 10:41 p.m. UTC
  Set gfp_zero to __GFP_ZERO in the default initialization of struct
kvm_mmu_memory_cache{}.

All of the users of the default initialization code of struct
kvm_mmu_memory_cache{} explicitly set gfp_zero to __GFP_ZERO. This can
be moved to the common initialization logic.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/arm64/kvm/arm.c      | 1 -
 arch/arm64/kvm/mmu.c      | 1 -
 arch/riscv/kvm/mmu.c      | 1 -
 arch/riscv/kvm/vcpu.c     | 1 -
 arch/x86/kvm/mmu/mmu.c    | 6 ------
 include/linux/kvm_types.h | 4 +++-
 6 files changed, 3 insertions(+), 11 deletions(-)
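
For readers less familiar with the macro, the snippet below is a minimal,
stand-alone sketch of the designated-initializer pattern this patch relies
on. The gfp_t typedef, the __GFP_ZERO value, and the struct members shown
here are simplified stand-ins for illustration only; the real definitions
live in include/linux/kvm_types.h and the gfp headers.

#include <stdio.h>

/* Simplified stand-ins for the kernel definitions; illustration only. */
typedef unsigned int gfp_t;
#define __GFP_ZERO 0x100u

struct kvm_mmu_memory_cache {
	gfp_t gfp_zero;
	void *kmem_cache;
	int nobjs;
};

/* After this patch, the default initializer sets gfp_zero for everyone. */
#define KVM_MMU_MEMORY_CACHE_INIT() {	\
	.gfp_zero = __GFP_ZERO,		\
}

#define KVM_MMU_MEMORY_CACHE(_name) \
	struct kvm_mmu_memory_cache _name = KVM_MMU_MEMORY_CACHE_INIT()

int main(void)
{
	/* Declares and initializes the cache; every member not named in
	 * the initializer (kmem_cache, nobjs, ...) is implicitly zeroed. */
	KVM_MMU_MEMORY_CACHE(cache);

	printf("gfp_zero == __GFP_ZERO: %d, nobjs: %d\n",
	       cache.gfp_zero == __GFP_ZERO, cache.nobjs);
	return 0;
}

Because a designated initializer zero-initializes every member it does not
name, adding .gfp_zero to the macro is the only change needed for callers
to drop their explicit assignments.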
  

Comments

David Matlack March 23, 2023, 10:28 p.m. UTC | #1
On Mon, Mar 06, 2023 at 02:41:23PM -0800, Vipin Sharma wrote:
> Set gfp_zero to __GFP_ZERO in the default initialization of struct
> kvm_mmu_memory_cache{}.
> 
> All of the users of the default initialization code of struct
> kvm_mmu_memory_cache{} explicitly set gfp_zero to __GFP_ZERO. This can
> be moved to the common initialization logic.

If that were true we could get rid of gfp_zero entirely and hard-code
__GFP_ZERO in the memory allocator! mmu_shadowed_info_cache is the one
that does not set __GFP_ZERO.

> 
> Signed-off-by: Vipin Sharma <vipinsh@google.com>
> ---
>  arch/arm64/kvm/arm.c      | 1 -
>  arch/arm64/kvm/mmu.c      | 1 -
>  arch/riscv/kvm/mmu.c      | 1 -
>  arch/riscv/kvm/vcpu.c     | 1 -
>  arch/x86/kvm/mmu/mmu.c    | 6 ------
>  include/linux/kvm_types.h | 4 +++-
>  6 files changed, 3 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 2b3d88e4ace8..b4243978d962 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -331,7 +331,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
>  	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
>  
>  	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache);
> -	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
>  
>  	/*
>  	 * Default value for the FP state, will be overloaded at load
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 8a56f071ca66..133eba96c41f 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -904,7 +904,6 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
>  	if (is_protected_kvm_enabled())
>  		return -EPERM;
>  
> -	cache.gfp_zero = __GFP_ZERO;
>  	size += offset_in_page(guest_ipa);
>  	guest_ipa &= PAGE_MASK;
>  
> diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
> index bdd8c17958dd..62550fd91c70 100644
> --- a/arch/riscv/kvm/mmu.c
> +++ b/arch/riscv/kvm/mmu.c
> @@ -353,7 +353,6 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
>  	phys_addr_t addr, end;
>  	KVM_MMU_MEMORY_CACHE(pcache);
>  
> -	pcache.gfp_zero = __GFP_ZERO;
>  	if (in_atomic)
>  		pcache.gfp_custom = GFP_ATOMIC | __GFP_ACCOUNT;
>  
> diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
> index bc743e9122d1..f5a96ed1e426 100644
> --- a/arch/riscv/kvm/vcpu.c
> +++ b/arch/riscv/kvm/vcpu.c
> @@ -164,7 +164,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
>  	/* Mark this VCPU never ran */
>  	vcpu->arch.ran_atleast_once = false;
>  	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache);
> -	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
>  	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
>  
>  	/* Setup ISA features available to VCPU */
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index b706087ef74e..d96afc849ee8 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -5963,14 +5963,11 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
>  
>  	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_pte_list_desc_cache);
>  	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
> -	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
>  
>  	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_header_cache);
>  	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
> -	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
>  
>  	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_shadow_page_cache);
> -	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
>  	mutex_init(&vcpu->arch.mmu_shadow_page_cache_lock);
>  
>  	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_shadowed_info_cache);
> @@ -6138,14 +6135,11 @@ int kvm_mmu_init_vm(struct kvm *kvm)
>  
>  	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_page_header_cache);
>  	kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
> -	kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
>  
>  	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_shadow_page_cache);
> -	kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
>  
>  	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_desc_cache);
>  	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
> -	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
>  
>  	return 0;
>  }
> diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
> index 192516eeccac..5da7953532ce 100644
> --- a/include/linux/kvm_types.h
> +++ b/include/linux/kvm_types.h
> @@ -99,7 +99,9 @@ struct kvm_mmu_memory_cache {
>  	void **objects;
>  };
>  
> -#define KVM_MMU_MEMORY_CACHE_INIT() { }
> +#define KVM_MMU_MEMORY_CACHE_INIT() {	\
> +	.gfp_zero = __GFP_ZERO,		\
> +}
>  
>  #define KVM_MMU_MEMORY_CACHE(_name) \
>  		struct kvm_mmu_memory_cache _name = KVM_MMU_MEMORY_CACHE_INIT()
> -- 
> 2.40.0.rc0.216.gc4246ad0f0-goog
>
  
Vipin Sharma March 28, 2023, 5:31 p.m. UTC | #2
On Thu, Mar 23, 2023 at 3:28 PM David Matlack <dmatlack@google.com> wrote:
>
> On Mon, Mar 06, 2023 at 02:41:23PM -0800, Vipin Sharma wrote:
> > Set gfp_zero to __GFP_ZERO in the default initialization of struct
> > kvm_mmu_memory_cache{}.
> >
> > All of the users of the default initialization code of struct
> > kvm_mmu_memory_cache{} explicitly set gfp_zero to __GFP_ZERO. This can
> > be moved to the common initialization logic.
>
> If that were true we could get rid of gfp_zero entirely and hard-code
> __GFP_ZERO in the memory allocator! mmu_shadowed_info_cache is the one
> that does not set __GFP_ZERO.
>

Can we use __GFP_ZERO for mmu_shadowed_info_cache? Also, MIPS doesn't
use __GFP_ZERO; I think it might be an oversight in MIPS rather than
intentional.
  
David Matlack March 28, 2023, 11:13 p.m. UTC | #3
On Tue, Mar 28, 2023 at 10:31 AM Vipin Sharma <vipinsh@google.com> wrote:
>
> On Thu, Mar 23, 2023 at 3:28 PM David Matlack <dmatlack@google.com> wrote:
> >
> > On Mon, Mar 06, 2023 at 02:41:23PM -0800, Vipin Sharma wrote:
> > > Set gfp_zero to __GFP_ZERO in the default initialization of struct
> > > kvm_mmu_memory_cache{}.
> > >
> > > All of the users of the default initialization code of struct
> > > kvm_mmu_memory_cache{} explicitly set gfp_zero to __GFP_ZERO. This can
> > > be moved to the common initialization logic.
> >
> > If that were true we could get rid of gfp_zero entirely and hard-code
> > __GFP_ZERO in the memory allocator! mmu_shadowed_info_cache is the one
> > that does not set __GFP_ZERO.
> >
>
> Can we use __GFP_ZERO for mmu_shadowed_info_cache?

Yes, but doing so would add CPU cost to zero the memory on allocation.
Someone would need to do some performance testing to confirm that the
cost of zeroing is acceptable.

> Also, MIPS doesn't
> use __GFP_ZERO; I think it might be an oversight in MIPS rather than
> intentional.
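
One possible way to reconcile the two points, sketched here purely as an
illustration and not part of the posted patch: keep the new default in the
macro and have mmu_shadowed_info_cache explicitly opt back out until the
zeroing cost has been measured, e.g. in kvm_mmu_create():

	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_shadowed_info_cache);
	/* Keep the pre-series behavior: this cache did not zero its pages. */
	vcpu->arch.mmu_shadowed_info_cache.gfp_zero = 0;

That would preserve the existing behavior while still letting every other
cache drop its explicit gfp_zero assignment.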
  

Patch

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 2b3d88e4ace8..b4243978d962 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -331,7 +331,6 @@  int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
 	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache);
-	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
 
 	/*
 	 * Default value for the FP state, will be overloaded at load
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 8a56f071ca66..133eba96c41f 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -904,7 +904,6 @@  int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 	if (is_protected_kvm_enabled())
 		return -EPERM;
 
-	cache.gfp_zero = __GFP_ZERO;
 	size += offset_in_page(guest_ipa);
 	guest_ipa &= PAGE_MASK;
 
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index bdd8c17958dd..62550fd91c70 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -353,7 +353,6 @@  int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
 	phys_addr_t addr, end;
 	KVM_MMU_MEMORY_CACHE(pcache);
 
-	pcache.gfp_zero = __GFP_ZERO;
 	if (in_atomic)
 		pcache.gfp_custom = GFP_ATOMIC | __GFP_ACCOUNT;
 
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index bc743e9122d1..f5a96ed1e426 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -164,7 +164,6 @@  int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	/* Mark this VCPU never ran */
 	vcpu->arch.ran_atleast_once = false;
 	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache);
-	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
 	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
 
 	/* Setup ISA features available to VCPU */
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index b706087ef74e..d96afc849ee8 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5963,14 +5963,11 @@  int kvm_mmu_create(struct kvm_vcpu *vcpu)
 
 	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_pte_list_desc_cache);
 	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
-	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
 
 	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_header_cache);
 	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
-	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
 
 	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_shadow_page_cache);
-	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
 	mutex_init(&vcpu->arch.mmu_shadow_page_cache_lock);
 
 	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_shadowed_info_cache);
@@ -6138,14 +6135,11 @@  int kvm_mmu_init_vm(struct kvm *kvm)
 
 	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_page_header_cache);
 	kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
-	kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
 
 	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_shadow_page_cache);
-	kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
 
 	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_desc_cache);
 	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
-	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
 
 	return 0;
 }
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 192516eeccac..5da7953532ce 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -99,7 +99,9 @@  struct kvm_mmu_memory_cache {
 	void **objects;
 };
 
-#define KVM_MMU_MEMORY_CACHE_INIT() { }
+#define KVM_MMU_MEMORY_CACHE_INIT() {	\
+	.gfp_zero = __GFP_ZERO,		\
+}
 
 #define KVM_MMU_MEMORY_CACHE(_name) \
 		struct kvm_mmu_memory_cache _name = KVM_MMU_MEMORY_CACHE_INIT()