Commit Message
Isaku Yamahata
Oct. 30, 2022, 6:22 a.m. UTC
From: Sean Christopherson <sean.j.christopherson@intel.com>

TDX requires special handling to support large private pages.  For
simplicity, only support 4K pages for TD guests for now.  Add per-VM
maximum page level support so that TD guests and conventional VMX
guests can have different maximum page sizes.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
 arch/x86/include/asm/kvm_host.h | 1 +
 arch/x86/kvm/mmu/mmu.c          | 1 +
 arch/x86/kvm/mmu/mmu_internal.h | 2 +-
 3 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a1c801ca61d3..2e0b23422d63 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1156,6 +1156,7 @@ struct kvm_arch {
 	unsigned long n_requested_mmu_pages;
 	unsigned long n_max_mmu_pages;
 	unsigned int indirect_shadow_pages;
+	int tdp_max_page_level;
 	u8 mmu_valid_gen;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	struct list_head active_mmu_pages;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 09defac49bf0..0001e921154e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6092,6 +6092,7 @@ int kvm_mmu_init_vm(struct kvm *kvm)
 	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
 	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
 
+	kvm->arch.tdp_max_page_level = KVM_MAX_HUGEPAGE_LEVEL;
 	return 0;
 }
 
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 5cdff5ca546c..57e3ea2b52cc 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -275,7 +275,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 		.nx_huge_page_workaround_enabled =
 			is_nx_huge_page_enabled(vcpu->kvm),
 
-		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
+		.max_level = vcpu->kvm->arch.tdp_max_page_level,
 		.req_level = PG_LEVEL_4K,
 		.goal_level = PG_LEVEL_4K,
 	};
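
For context, not part of this patch: the field is only given its default
here; the intended TDX-side consumer would clamp it at VM creation.  A
minimal sketch, assuming a hypothetical tdx_vm_init() VM-creation hook
(the hook name is an illustration, not code from this series):

	/*
	 * Hypothetical TDX VM-creation hook (illustration only).  Large
	 * private pages for a TD guest need special handling, so clamp
	 * the per-VM limit to 4K mappings; conventional VMX guests keep
	 * the KVM_MAX_HUGEPAGE_LEVEL default from kvm_mmu_init_vm().
	 */
	static int tdx_vm_init(struct kvm *kvm)
	{
		kvm->arch.tdp_max_page_level = PG_LEVEL_4K;
		return 0;
	}

With the limit set this way, every fault on the TD enters
kvm_mmu_do_page_fault() with .max_level = PG_LEVEL_4K, so the MMU never
installs a mapping larger than 4K for that VM, while other VMs are
unaffected.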