[RFC,v5,4/6] KVM: x86: Let userspace re-enable previously disabled exits
Commit Message
From: Sean Christopherson <seanjc@google.com>
Add an OVERRIDE flag to KVM_CAP_X86_DISABLE_EXITS to allow userspace to
re-enable exits and/or override previous settings. There's no real use
case for the per-VM ioctl, but a future per-vCPU variant wants to let
userspace toggle interception while the vCPU is running; add the OVERRIDE
functionality now to provide consistency between the per-VM and
per-vCPU variants.
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
Documentation/virt/kvm/api.rst | 5 +++++
arch/x86/kvm/x86.c | 32 ++++++++++++++++++++++++--------
include/uapi/linux/kvm.h | 4 +++-
3 files changed, 32 insertions(+), 9 deletions(-)
@@ -7095,6 +7095,7 @@ Valid bits in args[0] are::
#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
+ #define KVM_X86_DISABLE_EXITS_OVERRIDE (1ull << 63)
Enabling this capability on a VM provides userspace with a way to no
longer intercept some instructions for improved latency in some
@@ -7103,6 +7104,10 @@ physical CPUs. More bits can be added in the future; userspace can
just pass the KVM_CHECK_EXTENSION result to KVM_ENABLE_CAP to disable
all such vmexits.
+By default, this capability only disables exits. To re-enable an exit, or to
+override previous settings, userspace can set KVM_X86_DISABLE_EXITS_OVERRIDE,
+in which case KVM will enable/disable according to the mask (a '1' == disable).
+
Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
7.14 KVM_CAP_S390_HPAGE_1M
@@ -5484,6 +5484,28 @@ static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu,
return r;
}
+
+/*
+ * Apply a KVM_CAP_X86_DISABLE_EXITS mask to an arch struct (per-VM now,
+ * per-vCPU in a future patch).  Without OVERRIDE, bits only ever disable
+ * exits (legacy behavior); with OVERRIDE, every flag is written from the
+ * mask, i.e. a '0' re-enables the exit.
+ */
+#define kvm_ioctl_disable_exits(a, mask) \
+({ \
+	/* \
+	 * MWAIT interception can't be disabled if the host lacks support; \
+	 * strip only the MWAIT bit so the other requested bits (HLT, \
+	 * PAUSE, CSTATE, OVERRIDE) are still honored. \
+	 */ \
+	if (!kvm_can_mwait_in_guest()) \
+		(mask) &= ~KVM_X86_DISABLE_EXITS_MWAIT; \
+	if ((mask) & KVM_X86_DISABLE_EXITS_OVERRIDE) { \
+		(a).mwait_in_guest = (mask) & KVM_X86_DISABLE_EXITS_MWAIT; \
+		(a).hlt_in_guest = (mask) & KVM_X86_DISABLE_EXITS_HLT; \
+		(a).pause_in_guest = (mask) & KVM_X86_DISABLE_EXITS_PAUSE; \
+		(a).cstate_in_guest = (mask) & KVM_X86_DISABLE_EXITS_CSTATE; \
+	} else { \
+		if ((mask) & KVM_X86_DISABLE_EXITS_MWAIT) \
+			(a).mwait_in_guest = true; \
+		if ((mask) & KVM_X86_DISABLE_EXITS_HLT) \
+			(a).hlt_in_guest = true; \
+		if ((mask) & KVM_X86_DISABLE_EXITS_PAUSE) \
+			(a).pause_in_guest = true; \
+		if ((mask) & KVM_X86_DISABLE_EXITS_CSTATE) \
+			(a).cstate_in_guest = true; \
+	} \
+})
+
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
struct kvm_enable_cap *cap)
{
@@ -6238,14 +6260,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
if (kvm->created_vcpus)
goto disable_exits_unlock;
- if (cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT)
- kvm->arch.mwait_in_guest = true;
- if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
- kvm->arch.hlt_in_guest = true;
- if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
- kvm->arch.pause_in_guest = true;
- if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
- kvm->arch.cstate_in_guest = true;
+ kvm_ioctl_disable_exits(kvm->arch, cap->args[0]);
+
r = 0;
disable_exits_unlock:
mutex_unlock(&kvm->lock);
@@ -823,10 +823,12 @@ struct kvm_ioeventfd {
#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
+#define KVM_X86_DISABLE_EXITS_OVERRIDE (1ull << 63)
#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
KVM_X86_DISABLE_EXITS_HLT | \
KVM_X86_DISABLE_EXITS_PAUSE | \
- KVM_X86_DISABLE_EXITS_CSTATE)
+ KVM_X86_DISABLE_EXITS_CSTATE | \
+ KVM_X86_DISABLE_EXITS_OVERRIDE)
/* for KVM_ENABLE_CAP */
struct kvm_enable_cap {