[RFC,12/33] KVM: x86: hyper-v: Handle VSM hcalls in user-space
Commit Message
Let user-space handle all hypercalls that fall under the AccessVsm
partition privilege flag. That is:
- HVCALL_MODIFY_VTL_PROTECTION_MASK
- HVCALL_ENABLE_PARTITION_VTL
- HVCALL_ENABLE_VP_VTL
- HVCALL_VTL_CALL
- HVCALL_VTL_RETURN
The hypercalls are forwarded to user-space through the KVM_EXIT_HYPERV_HVCALL exit; the user-space side is sketched after the diffstat below.
Additionally, expose the HV_ACCESS_VSM CPUID bit.
Signed-off-by: Nicolas Saenz Julienne <nsaenz@amazon.com>
---
arch/x86/kvm/hyperv.c | 15 +++++++++++++++
include/asm-generic/hyperv-tlfs.h | 7 ++++++-
2 files changed, 21 insertions(+), 1 deletion(-)
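For orientation, the user-space side of the new exit might look roughly like the following. This is a minimal sketch, assuming the hcall.input/hcall.result layout of struct kvm_hyperv_exit that this series builds on; the VTL switching itself (vtl_call()/vtl_return()) is left as hypothetical helpers:

/* Hypothetical VMM-side dispatch for KVM_EXIT_HYPERV_HVCALL. */
static void handle_hyperv_hvcall(struct kvm_run *run)
{
	/* Per the TLFS, bits 15:0 of the hypercall input hold the call code. */
	__u16 code = run->hyperv.u.hcall.input & 0xffff;

	switch (code) {
	case HVCALL_VTL_CALL:
		run->hyperv.u.hcall.result = vtl_call(run);
		break;
	case HVCALL_VTL_RETURN:
		run->hyperv.u.hcall.result = vtl_return(run);
		break;
	default:
		run->hyperv.u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}
	/* The next KVM_RUN resumes in kvm_hv_hypercall_complete_userspace(). */
}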
Comments
On Wed, 2023-11-08 at 11:17 +0000, Nicolas Saenz Julienne wrote:
> Let user-space handle all hypercalls that fall under the AccessVsm
> partition privilege flag. That is:
> - HVCALL_MODIFY_VTL_PROTECTION_MASK:
> - HVCALL_ENABLE_PARTITION_VTL:
> - HVCALL_ENABLE_VP_VTL:
> - HVCALL_VTL_CALL:
> - HVCALL_VTL_RETURN:
> The hypercalls are processed through the KVM_EXIT_HYPERV_HVCALL exit.
> Additionally, expose the cpuid bit.
>
> Signed-off-by: Nicolas Saenz Julienne <nsaenz@amazon.com>
> ---
> arch/x86/kvm/hyperv.c | 15 +++++++++++++++
> include/asm-generic/hyperv-tlfs.h | 7 ++++++-
> 2 files changed, 21 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> index a3970d52eef1..a266c5d393f5 100644
> --- a/arch/x86/kvm/hyperv.c
> +++ b/arch/x86/kvm/hyperv.c
> @@ -2462,6 +2462,11 @@ static bool kvm_hv_is_xmm_output_hcall(u16 code)
> return false;
> }
>
> +static inline bool kvm_hv_is_vtl_call_return(u16 code)
> +{
> + return code == HVCALL_VTL_CALL || code == HVCALL_VTL_RETURN;
> +}
> +
> static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
> {
> bool fast = !!(vcpu->run->hyperv.u.hcall.input & HV_HYPERCALL_FAST_BIT);
> @@ -2471,6 +2476,9 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
> if (kvm_hv_is_xmm_output_hcall(code) && hv_result_success(result) && fast)
> kvm_hv_write_xmm(vcpu->run->hyperv.u.hcall.xmm);
>
> + if (kvm_hv_is_vtl_call_return(code))
> + return kvm_skip_emulated_instruction(vcpu);
Can you add justification for this?
If it is justified, does it make sense to move this code to kvm_hv_hypercall_complete()
(which also calls kvm_skip_emulated_instruction())? A sketch of that alternative follows this hunk.
> +
> return kvm_hv_hypercall_complete(vcpu, result);
> }
>
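If the special case is justified (presumably because HVCALL_VTL_CALL/HVCALL_VTL_RETURN switch the vCPU to another VTL's register context, so writing a status into RAX at this point would clobber state belonging to the target VTL), it could plausibly be folded into kvm_hv_hypercall_complete() along these lines. A rough sketch only: it assumes the hypercall code gets plumbed in as a parameter, and it elides the TLB-flush bookkeeping the real function does:

static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u16 code,
				     u64 result)
{
	trace_kvm_hv_hypercall_done(result);

	/*
	 * VTL call/return transfer the vCPU to a different VTL's
	 * register context; setting a result here would corrupt the
	 * target VTL's RAX, so only skip the instruction.
	 */
	if (!kvm_hv_is_vtl_call_return(code))
		kvm_hv_hypercall_set_result(vcpu, result);

	return kvm_skip_emulated_instruction(vcpu);
}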
> @@ -2525,6 +2533,7 @@ static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
> case HVCALL_SEND_IPI_EX:
> case HVCALL_GET_VP_REGISTERS:
> case HVCALL_SET_VP_REGISTERS:
> + case HVCALL_MODIFY_VTL_PROTECTION_MASK:
> return true;
> }
>
> @@ -2745,6 +2754,11 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
> goto hypercall_userspace_exit;
> case HVCALL_GET_VP_REGISTERS:
> case HVCALL_SET_VP_REGISTERS:
> + case HVCALL_MODIFY_VTL_PROTECTION_MASK:
> + case HVCALL_ENABLE_PARTITION_VTL:
> + case HVCALL_ENABLE_VP_VTL:
> + case HVCALL_VTL_CALL:
> + case HVCALL_VTL_RETURN:
> goto hypercall_userspace_exit;
> default:
These new hypercalls should also be added to hv_check_hypercall_access(); see the sketch after this hunk.
> ret = HV_STATUS_INVALID_HYPERCALL_CODE;
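A sketch of what that could look like, assuming the switch in hv_check_hypercall_access() and its cpuid_cache.features_ebx lookup stay as they are today (the context lines are illustrative):

@@ static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
 	case HVCALL_SIGNAL_EVENT:
 		return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
+	/* Gate the VSM hypercalls on the AccessVsm partition privilege. */
+	case HVCALL_MODIFY_VTL_PROTECTION_MASK:
+	case HVCALL_ENABLE_PARTITION_VTL:
+	case HVCALL_ENABLE_VP_VTL:
+	case HVCALL_VTL_CALL:
+	case HVCALL_VTL_RETURN:
+		return hv_vcpu->cpuid_cache.features_ebx & HV_ACCESS_VSM;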
> @@ -2912,6 +2926,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
> ent->ebx |= HV_SIGNAL_EVENTS;
> ent->ebx |= HV_ENABLE_EXTENDED_HYPERCALLS;
> ent->ebx |= HV_ACCESS_VP_REGISTERS;
> + ent->ebx |= HV_ACCESS_VSM;
>
> ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
> ent->edx |= HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE;
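Note that hv_check_hypercall_access() only has teeth once user-space opts into Hyper-V CPUID enforcement; a sketch of that opt-in, assuming the existing per-vCPU KVM_CAP_HYPERV_ENFORCE_CPUID interface:

	/*
	 * VMM side: have KVM deny Hyper-V hypercalls the guest's CPUID
	 * doesn't grant, including the AccessVsm set gated on HV_ACCESS_VSM.
	 */
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
		.args = { 1 },
	};
	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);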
Best regards,
Maxim Levitsky
> diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
> index 24ea699a3d8e..a8b5c8a84bbc 100644
> --- a/include/asm-generic/hyperv-tlfs.h
> +++ b/include/asm-generic/hyperv-tlfs.h
> @@ -89,6 +89,7 @@
> #define HV_ACCESS_STATS BIT(8)
> #define HV_DEBUGGING BIT(11)
> #define HV_CPU_MANAGEMENT BIT(12)
> +#define HV_ACCESS_VSM BIT(16)
> #define HV_ACCESS_VP_REGISTERS BIT(17)
> #define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20)
> #define HV_ISOLATION BIT(22)
> @@ -147,9 +148,13 @@ union hv_reference_tsc_msr {
> /* Declare the various hypercall operations. */
> #define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
> #define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
> -#define HVCALL_ENABLE_VP_VTL 0x000f
> #define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
> #define HVCALL_SEND_IPI 0x000b
> +#define HVCALL_MODIFY_VTL_PROTECTION_MASK 0x000c
> +#define HVCALL_ENABLE_PARTITION_VTL 0x000d
> +#define HVCALL_ENABLE_VP_VTL 0x000f
> +#define HVCALL_VTL_CALL 0x0011
> +#define HVCALL_VTL_RETURN 0x0012
> #define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
> #define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
> #define HVCALL_SEND_IPI_EX 0x0015