From: Zhao Liu <zhao1.liu@intel.com>
HRESET is a feature associated with ITD, which provides an HRESET
instruction to reset the ITD related history accumulated on the current
logical processor it is executing on [1]. The HRESET instruction does
not cause a VM exit and is therefore available to the Guest by default
once the HRESET feature bit is set in the Guest's CPUID.
The HRESET feature also provides a thread-scope MSR,
MSR_IA32_HW_HRESET_ENABLE, which controls whether the HRESET
instruction is allowed to reset the ITD history [2].
Since this MSR controls hardware behavior, emulate it for the Guest so
that the Guest's changes to the hardware remain under the control of
the Host.
Since the HRESET enabling status may differ between Guest and Host,
store both the Host's and the Guest's MSR_IA32_HW_HRESET_ENABLE values
in vcpu_vmx and save/load the respective configurations on Guest/Host
switches.
[1]: SDM, vol. 3B, section 15.6.11 Logical Processor Scope History
[2]: SDM, vol. 2A, chap. CPUID--CPU Identification, CPUID.07H.01H.EAX
[Bit 22], HRESET.
Tested-by: Yanting Jiang <yanting.jiang@intel.com>
Co-developed-by: Zhuocheng Ding <zhuocheng.ding@intel.com>
Signed-off-by: Zhuocheng Ding <zhuocheng.ding@intel.com>
Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
---
arch/x86/kvm/svm/svm.c | 1 +
arch/x86/kvm/vmx/vmx.c | 54 ++++++++++++++++++++++++++++++++++++++++++
arch/x86/kvm/vmx/vmx.h | 2 ++
arch/x86/kvm/x86.c | 1 +
4 files changed, 58 insertions(+)
@@ -4295,6 +4295,7 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
case MSR_IA32_PACKAGE_THERM_STATUS:
case MSR_IA32_HW_FEEDBACK_CONFIG:
case MSR_IA32_HW_FEEDBACK_PTR:
+ case MSR_IA32_HW_HRESET_ENABLE:
return false;
case MSR_IA32_SMBASE:
if (!IS_ENABLED(CONFIG_KVM_SMM))
@@ -1314,6 +1314,35 @@ static void itd_guest_exit(struct vcpu_vmx *vmx)
wrmsrl(MSR_IA32_HW_FEEDBACK_THREAD_CONFIG, vcpu_hfi->host_thread_cfg);
}
+static void hreset_guest_enter(struct vcpu_vmx *vmx)
+{
+ struct vcpu_hfi_desc *vcpu_hfi = &vmx->vcpu_hfi_desc;
+
+ if (!kvm_cpu_cap_has(X86_FEATURE_HRESET) ||
+ !guest_cpuid_has(&vmx->vcpu, X86_FEATURE_HRESET))
+ return;
+
+ rdmsrl(MSR_IA32_HW_HRESET_ENABLE, vcpu_hfi->host_hreset_enable);
+ if (unlikely(vcpu_hfi->host_hreset_enable != vcpu_hfi->guest_hreset_enable))
+ wrmsrl(MSR_IA32_HW_HRESET_ENABLE, vcpu_hfi->guest_hreset_enable);
+}
+
+static void hreset_guest_exit(struct vcpu_vmx *vmx)
+{
+ struct vcpu_hfi_desc *vcpu_hfi = &vmx->vcpu_hfi_desc;
+
+ if (!kvm_cpu_cap_has(X86_FEATURE_HRESET) ||
+ !guest_cpuid_has(&vmx->vcpu, X86_FEATURE_HRESET))
+ return;
+
+ /*
+ * MSR_IA32_HW_HRESET_ENABLE is not passed through to Guest, so there
+ * is no need to read the MSR to save the Guest's value.
+ */
+ if (unlikely(vcpu_hfi->host_hreset_enable != vcpu_hfi->guest_hreset_enable))
+ wrmsrl(MSR_IA32_HW_HRESET_ENABLE, vcpu_hfi->host_hreset_enable);
+}
+
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
unsigned long fs_base, unsigned long gs_base)
{
@@ -2462,6 +2491,12 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
msr_info->data = kvm_vmx->pkg_therm.msr_ia32_hfi_ptr;
break;
+ case MSR_IA32_HW_HRESET_ENABLE:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(&vmx->vcpu, X86_FEATURE_HRESET))
+ return 1;
+ msr_info->data = vmx->vcpu_hfi_desc.guest_hreset_enable;
+ break;
default:
find_uret_msr:
msr = vmx_find_uret_msr(vmx, msr_info->index);
@@ -3091,6 +3126,21 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
ret = vmx_set_hfi_ptr_msr(vcpu, msr_info);
mutex_unlock(&kvm_vmx->pkg_therm.pkg_therm_lock);
break;
+ case MSR_IA32_HW_HRESET_ENABLE: {
+ struct kvm_cpuid_entry2 *entry;
+
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(&vmx->vcpu, X86_FEATURE_HRESET))
+ return 1;
+
+ entry = kvm_find_cpuid_entry_index(&vmx->vcpu, 0x20, 0);
+ /* Reserved bits: generate the exception. */
+ if (!msr_info->host_initiated && data & ~entry->ebx)
+ return 1;
+ /* hreset_guest_enter() will update MSR for Guest. */
+ vmx->vcpu_hfi_desc.guest_hreset_enable = data;
+ break;
+ }
default:
find_uret_msr:
msr = vmx_find_uret_msr(vmx, msr_index);
@@ -5513,6 +5563,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vmx->msr_ia32_therm_status = 0;
vmx->vcpu_hfi_desc.host_thread_cfg = 0;
vmx->vcpu_hfi_desc.guest_thread_cfg = 0;
+ vmx->vcpu_hfi_desc.host_hreset_enable = 0;
+ vmx->vcpu_hfi_desc.guest_hreset_enable = 0;
vmx->hv_deadline_tsc = -1;
kvm_set_cr8(vcpu, 0);
@@ -8006,6 +8058,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
pt_guest_enter(vmx);
itd_guest_enter(vmx);
+ hreset_guest_enter(vmx);
atomic_switch_perf_msrs(vmx);
if (intel_pmu_lbr_is_enabled(vcpu))
@@ -8044,6 +8097,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
loadsegment(es, __USER_DS);
#endif
+ hreset_guest_exit(vmx);
itd_guest_exit(vmx);
pt_guest_exit(vmx);
@@ -74,6 +74,8 @@ struct pt_desc {
struct vcpu_hfi_desc {
u64 host_thread_cfg;
u64 guest_thread_cfg;
+ u64 host_hreset_enable;
+ u64 guest_hreset_enable;
};
union vmx_exit_reason {
@@ -1552,6 +1552,7 @@ static const u32 emulated_msrs_all[] = {
MSR_IA32_PACKAGE_THERM_STATUS,
MSR_IA32_HW_FEEDBACK_CONFIG,
MSR_IA32_HW_FEEDBACK_PTR,
+ MSR_IA32_HW_HRESET_ENABLE,
/*
* KVM always supports the "true" VMX control MSRs, even if the host