[RFC,26/42] KVM: x86/mmu: introduce new op get_default_mt_mask to kvm_x86_ops

Message ID 20231202092825.15041-1-yan.y.zhao@intel.com
State New
Series Sharing KVM TDP to IOMMU

Commit Message

Yan Zhao Dec. 2, 2023, 9:28 a.m. UTC
Introduce a new op get_default_mt_mask to kvm_x86_ops to get the default
memory type when no non-coherent DMA devices are attached.

For VMX, when there are no non-coherent DMA devices, guest MTRRs and the
vCPU's CR0.CD mode are not queried to determine EPT memory types. So,
introduce a new op get_default_mt_mask that does not require a "vcpu"
param to get memory types.

This is a preparation patch for KVM MMU to later export the TDP, because
IO page fault requests arrive in non-vCPU context and have no "vcpu" to
pass to op get_mt_mask for the memory type.

Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
---
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  1 +
 arch/x86/kvm/vmx/vmx.c             | 11 +++++++++++
 3 files changed, 13 insertions(+)
  

Patch

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 26b628d84594b..d751407b1056c 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -92,6 +92,7 @@  KVM_X86_OP_OPTIONAL(sync_pir_to_irr)
 KVM_X86_OP_OPTIONAL_RET0(set_tss_addr)
 KVM_X86_OP_OPTIONAL_RET0(set_identity_map_addr)
 KVM_X86_OP_OPTIONAL_RET0(get_mt_mask)
+KVM_X86_OP_OPTIONAL_RET0(get_default_mt_mask)
 KVM_X86_OP(load_mmu_pgd)
 KVM_X86_OP(has_wbinvd_exit)
 KVM_X86_OP(get_l2_tsc_offset)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 16e01eee34a99..1f6ac04e0f952 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1679,6 +1679,7 @@  struct kvm_x86_ops {
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
 	u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+	u8 (*get_default_mt_mask)(struct kvm *kvm, bool is_mmio);
 
 	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
 			     int root_level);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 1cc717a718e9c..f290dd3094da6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7614,6 +7614,16 @@  static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 	return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
 }
 
+static u8 vmx_get_default_mt_mask(struct kvm *kvm, bool is_mmio)
+{
+	WARN_ON(kvm_arch_has_noncoherent_dma(kvm));
+
+	if (is_mmio)
+		return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
+
+	return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
+}
+
 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl)
 {
 	/*
@@ -8295,6 +8305,7 @@  static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.set_tss_addr = vmx_set_tss_addr,
 	.set_identity_map_addr = vmx_set_identity_map_addr,
 	.get_mt_mask = vmx_get_mt_mask,
+	.get_default_mt_mask = vmx_get_default_mt_mask,
 
 	.get_exit_info = vmx_get_exit_info,
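
For context (not part of this patch): a minimal sketch of how a
non-vCPU-context consumer, e.g. the IO page fault path of an exported TDP,
might use the new op. The helper name kvm_exported_tdp_mt_mask() and the
zero-return fallback are assumptions for illustration; only
get_default_mt_mask, kvm_arch_has_noncoherent_dma() and the
KVM_X86_OP-generated static call come from KVM itself.

#include <linux/kvm_host.h>

/*
 * Illustrative only: compute the EPT memory type bits for an IO page
 * fault on an exported TDP, where no vCPU is available.
 */
static u8 kvm_exported_tdp_mt_mask(struct kvm *kvm, bool is_mmio)
{
	/*
	 * Without a vCPU, guest MTRRs and CR0.CD cannot be consulted.
	 * The vCPU-less op is only meaningful while no non-coherent
	 * DMA device is attached; otherwise the caller would need a
	 * vCPU-based path (policy left open here).
	 */
	if (kvm_arch_has_noncoherent_dma(kvm))
		return 0;

	return static_call(kvm_x86_get_default_mt_mask)(kvm, is_mmio);
}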