[4/7] KVM: selftests: Test consistency of CPUID with num of Fixed counters

Message ID 20230323072714.82289-5-likexu@tencent.com
State New
Series KVM: selftests: Test the consistency of the PMU's CPUID and its features

Commit Message

Like Xu March 23, 2023, 7:27 a.m. UTC
  From: Jinrong Liang <cloudliang@tencent.com>

Add a test to check whether non-existent fixed counters can be accessed by
the guest after determining the number of Intel fixed-function performance
counters via CPUID. Per the SDM, fixed-function performance counter 'i' is
supported if ECX[i] || (EDX[4:0] > i). KVM doesn't emulate more counters
than it can support.

Co-developed-by: Like Xu <likexu@tencent.com>
Signed-off-by: Like Xu <likexu@tencent.com>
Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
---
 .../selftests/kvm/x86_64/pmu_cpuid_test.c     | 68 +++++++++++++++++++
 1 file changed, 68 insertions(+)
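
For reference, the CPUID.0xA rule the commit message cites boils down to a
check along these lines (an illustrative sketch only; the helper name and
parameters are made up here and are not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Fixed-function counter 'i' is architecturally supported if
	 * CPUID.0xA:ECX[i] is set, or if CPUID.0xA:EDX[4:0] (the number of
	 * fixed counters) is greater than 'i'.
	 */
	static bool fixed_ctr_is_supported(uint32_t cpuid_a_ecx,
					   uint32_t cpuid_a_edx, uint8_t i)
	{
		uint8_t nr_fixed = cpuid_a_edx & 0x1f;	/* EDX[4:0] */

		return (cpuid_a_ecx & (1u << i)) || nr_fixed > i;
	}

The test below builds the same check from the vCPU's CPUID entry and
additionally clamps it to the number of fixed counters KVM itself supports.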
  

Comments

Sean Christopherson May 24, 2023, 10:47 p.m. UTC | #1
On Thu, Mar 23, 2023, Like Xu wrote:
> From: Jinrong Liang <cloudliang@tencent.com>
> 
> Add a test to check whether non-existent fixed counters can be accessed by
> the guest after determining the number of Intel fixed-function performance
> counters via CPUID. Per the SDM, fixed-function performance counter 'i' is
> supported if ECX[i] || (EDX[4:0] > i). KVM doesn't emulate more counters
> than it can support.
> 
> Co-developed-by: Like Xu <likexu@tencent.com>
> Signed-off-by: Like Xu <likexu@tencent.com>
> Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
> ---
>  .../selftests/kvm/x86_64/pmu_cpuid_test.c     | 68 +++++++++++++++++++
>  1 file changed, 68 insertions(+)
> 
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
> index 50902187d2c9..c934144be287 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
> @@ -74,6 +74,22 @@ static uint8_t kvm_gp_ctrs_num(void)
>  	return (kvm_entry->eax & GP_CTR_NUM_MASK) >> GP_CTR_NUM_OFS_BIT;
>  }
>  
> +static uint8_t kvm_fixed_ctrs_num(void)
> +{
> +	const struct kvm_cpuid_entry2 *kvm_entry;
> +
> +	kvm_entry = get_cpuid_entry(kvm_get_supported_cpuid(), 0xa, 0);
> +	return kvm_entry->edx & FIXED_CTR_NUM_MASK;
> +}
> +
> +static uint32_t kvm_fixed_ctrs_bitmask(void)
> +{
> +	const struct kvm_cpuid_entry2 *kvm_entry;
> +
> +	kvm_entry = get_cpuid_entry(kvm_get_supported_cpuid(), 0xa, 0);
> +	return kvm_entry->ecx;
> +}

KVM_X86_CPU_PROPERTY
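
In the selftests that presumably means replacing the open-coded CPUID.0xA
lookups with the kvm_cpu_property() accessors, roughly like the sketch below
(assuming a fixed-counter-count property along the lines of
X86_PROPERTY_PMU_NR_FIXED_COUNTERS is available or added; the ECX bitmask
property is defined here purely for illustration):

	/* Hypothetical property covering all of CPUID.0xA:ECX, for this sketch only. */
	#define X86_PROPERTY_PMU_FIXED_CTRS_BITMASK	KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)

	static uint8_t kvm_fixed_ctrs_num(void)
	{
		return kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	}

	static uint32_t kvm_fixed_ctrs_bitmask(void)
	{
		return kvm_cpu_property(X86_PROPERTY_PMU_FIXED_CTRS_BITMASK);
	}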

>  static struct kvm_vcpu *new_vcpu(void *guest_code)
>  {
>  	struct kvm_vm *vm;
> @@ -230,6 +246,39 @@ static void test_oob_gp_counter_setup(struct kvm_vcpu *vcpu, uint8_t eax_gp_num,
>  	vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
>  }
>  
> +static uint64_t test_oob_fixed_counter_setup(struct kvm_vcpu *vcpu,
> +					     uint8_t edx_fix_num,
> +					     uint32_t fixed_bitmask)
> +{
> +	struct kvm_cpuid_entry2 *entry;
> +	uint32_t ctr_msr = MSR_CORE_PERF_FIXED_CTR0;
> +	uint8_t idx = edx_fix_num;
> +	bool is_supported = true;

No need to initialize "true", it's explicitly set below.

> +	uint64_t ret = 0xffffULL;
> +
> +	entry = vcpu_get_cpuid_entry(vcpu, 0xa);
> +	entry->ecx = fixed_bitmask;
> +	entry->edx = (entry->edx & ~FIXED_CTR_NUM_MASK) | edx_fix_num;
> +	vcpu_set_cpuid(vcpu);
> +
> +	/* Per Intel SDM, FixCtr[i]_is_supported := ECX[i] || (EDX[4:0] > i). */
> +	is_supported = (entry->ecx & BIT_ULL(idx) ||
> +			((entry->edx & FIXED_CTR_NUM_MASK) > idx));
> +
> +	/* KVM doesn't emulate more fixed counters than it can support. */
> +	if (idx >= kvm_fixed_ctrs_num())
> +		is_supported = false;

Why not this?

	is_supported = idx < kvm_fixed_ctrs_num() &&
		       <CPUID entry stuff>;
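
Filled in with the checks from the patch, that suggestion would read roughly
as follows (a sketch only; and per the earlier comment, is_supported would
then need no initializer):

	is_supported = idx < kvm_fixed_ctrs_num() &&
		       ((entry->ecx & BIT_ULL(idx)) ||
			(entry->edx & FIXED_CTR_NUM_MASK) > idx);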
> +
> +	if (!is_supported) {
> +		vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
> +		ret = GP_VECTOR;
> +	}

Same comments as the previous patch(es).
  
Jim Mattson May 24, 2023, 11:08 p.m. UTC | #2
On Thu, Mar 23, 2023 at 12:28 AM Like Xu <like.xu.linux@gmail.com> wrote:
>
> From: Jinrong Liang <cloudliang@tencent.com>
>
> Add a test to check whether non-existent fixed counters can be accessed by
> the guest after determining the number of Intel fixed-function performance
> counters via CPUID. Per the SDM, fixed-function performance counter 'i' is
> supported if ECX[i] || (EDX[4:0] > i). KVM doesn't emulate more counters
> than it can support.
>
> Co-developed-by: Like Xu <likexu@tencent.com>
> Signed-off-by: Like Xu <likexu@tencent.com>
> Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
> ---
>  .../selftests/kvm/x86_64/pmu_cpuid_test.c     | 68 +++++++++++++++++++
>  1 file changed, 68 insertions(+)
>
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
> index 50902187d2c9..c934144be287 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
> @@ -74,6 +74,22 @@ static uint8_t kvm_gp_ctrs_num(void)
>         return (kvm_entry->eax & GP_CTR_NUM_MASK) >> GP_CTR_NUM_OFS_BIT;
>  }
>
> +static uint8_t kvm_fixed_ctrs_num(void)
> +{
> +       const struct kvm_cpuid_entry2 *kvm_entry;
> +
> +       kvm_entry = get_cpuid_entry(kvm_get_supported_cpuid(), 0xa, 0);
> +       return kvm_entry->edx & FIXED_CTR_NUM_MASK;
> +}
> +
> +static uint32_t kvm_fixed_ctrs_bitmask(void)
> +{
> +       const struct kvm_cpuid_entry2 *kvm_entry;
> +
> +       kvm_entry = get_cpuid_entry(kvm_get_supported_cpuid(), 0xa, 0);
> +       return kvm_entry->ecx;
> +}
> +
>  static struct kvm_vcpu *new_vcpu(void *guest_code)
>  {
>         struct kvm_vm *vm;
> @@ -230,6 +246,39 @@ static void test_oob_gp_counter_setup(struct kvm_vcpu *vcpu, uint8_t eax_gp_num,
>         vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
>  }
>
> +static uint64_t test_oob_fixed_counter_setup(struct kvm_vcpu *vcpu,
> +                                            uint8_t edx_fix_num,
> +                                            uint32_t fixed_bitmask)
> +{
> +       struct kvm_cpuid_entry2 *entry;
> +       uint32_t ctr_msr = MSR_CORE_PERF_FIXED_CTR0;
> +       uint8_t idx = edx_fix_num;
> +       bool is_supported = true;
> +       uint64_t ret = 0xffffULL;
> +
> +       entry = vcpu_get_cpuid_entry(vcpu, 0xa);
> +       entry->ecx = fixed_bitmask;
> +       entry->edx = (entry->edx & ~FIXED_CTR_NUM_MASK) | edx_fix_num;
> +       vcpu_set_cpuid(vcpu);
> +
> +       /* Per Intel SDM, FixCtr[i]_is_supported := ECX[i] || (EDX[4:0] > i). */
> +       is_supported = (entry->ecx & BIT_ULL(idx) ||
> +                       ((entry->edx & FIXED_CTR_NUM_MASK) > idx));
> +
> +       /* KVM doesn't emulate more fixed counters than it can support. */
> +       if (idx >= kvm_fixed_ctrs_num())
> +               is_supported = false;
> +
> +       if (!is_supported) {
> +               vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
> +               ret = GP_VECTOR;
> +       }
> +
> +       vcpu_args_set(vcpu, 4, ctr_msr, ret, idx, 1);
> +
> +       return ret;
> +}
> +
>  static void intel_check_arch_event_is_unavl(uint8_t idx)
>  {
>         const char *msg = "Unavailable arch event is counting.";

This test seems bogus to me. The event may not be available because it
is inaccurate. That doesn't imply that the count will always be zero.

> @@ -267,10 +316,23 @@ static void test_oob_gp_counter(uint8_t eax_gp_num, uint64_t perf_cap)
>         free_vcpu(vcpu);
>  }
>
> +static void intel_test_oob_fixed_ctr(uint8_t edx_fix_num, uint32_t fixed_bitmask)
> +{
> +       const char *msg = "At least one unsupported Fixed counter is visible.";

This test seems bogus to me. Unsupported does not imply invisible.

> +       struct kvm_vcpu *vcpu;
> +       uint64_t ret;
> +
> +       vcpu = new_vcpu(guest_wr_and_rd_msrs);
> +       ret = test_oob_fixed_counter_setup(vcpu, edx_fix_num, fixed_bitmask);
> +       run_vcpu(vcpu, msg, first_uc_arg_equals, (void *)ret);
> +       free_vcpu(vcpu);
> +}
> +
>  static void intel_test_counters_num(void)
>  {
>         uint8_t kvm_gp_num = kvm_gp_ctrs_num();
>         unsigned int i;
> +       uint32_t ecx;
>
>         TEST_REQUIRE(kvm_gp_num > 2);
>
> @@ -289,6 +351,12 @@ static void intel_test_counters_num(void)
>                 /* KVM doesn't emulate more counters than it can support. */
>                 test_oob_gp_counter(kvm_gp_num + 1, perf_caps[i]);
>         }
> +
> +       for (ecx = 0; ecx <= kvm_fixed_ctrs_bitmask() + 1; ecx++) {
> +               intel_test_oob_fixed_ctr(0, ecx);
> +               intel_test_oob_fixed_ctr(kvm_fixed_ctrs_num(), ecx);
> +               intel_test_oob_fixed_ctr(kvm_fixed_ctrs_num() + 1, ecx);
> +       }
>  }
>
>  static void intel_test_arch_events(void)
> --
> 2.40.0
>
  

Patch

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
index 50902187d2c9..c934144be287 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
@@ -74,6 +74,22 @@ static uint8_t kvm_gp_ctrs_num(void)
 	return (kvm_entry->eax & GP_CTR_NUM_MASK) >> GP_CTR_NUM_OFS_BIT;
 }
 
+static uint8_t kvm_fixed_ctrs_num(void)
+{
+	const struct kvm_cpuid_entry2 *kvm_entry;
+
+	kvm_entry = get_cpuid_entry(kvm_get_supported_cpuid(), 0xa, 0);
+	return kvm_entry->edx & FIXED_CTR_NUM_MASK;
+}
+
+static uint32_t kvm_fixed_ctrs_bitmask(void)
+{
+	const struct kvm_cpuid_entry2 *kvm_entry;
+
+	kvm_entry = get_cpuid_entry(kvm_get_supported_cpuid(), 0xa, 0);
+	return kvm_entry->ecx;
+}
+
 static struct kvm_vcpu *new_vcpu(void *guest_code)
 {
 	struct kvm_vm *vm;
@@ -230,6 +246,39 @@ static void test_oob_gp_counter_setup(struct kvm_vcpu *vcpu, uint8_t eax_gp_num,
 	vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
 }
 
+static uint64_t test_oob_fixed_counter_setup(struct kvm_vcpu *vcpu,
+					     uint8_t edx_fix_num,
+					     uint32_t fixed_bitmask)
+{
+	struct kvm_cpuid_entry2 *entry;
+	uint32_t ctr_msr = MSR_CORE_PERF_FIXED_CTR0;
+	uint8_t idx = edx_fix_num;
+	bool is_supported = true;
+	uint64_t ret = 0xffffULL;
+
+	entry = vcpu_get_cpuid_entry(vcpu, 0xa);
+	entry->ecx = fixed_bitmask;
+	entry->edx = (entry->edx & ~FIXED_CTR_NUM_MASK) | edx_fix_num;
+	vcpu_set_cpuid(vcpu);
+
+	/* Per Intel SDM, FixCtr[i]_is_supported := ECX[i] || (EDX[4:0] > i). */
+	is_supported = (entry->ecx & BIT_ULL(idx) ||
+			((entry->edx & FIXED_CTR_NUM_MASK) > idx));
+
+	/* KVM doesn't emulate more fixed counters than it can support. */
+	if (idx >= kvm_fixed_ctrs_num())
+		is_supported = false;
+
+	if (!is_supported) {
+		vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
+		ret = GP_VECTOR;
+	}
+
+	vcpu_args_set(vcpu, 4, ctr_msr, ret, idx, 1);
+
+	return ret;
+}
+
 static void intel_check_arch_event_is_unavl(uint8_t idx)
 {
 	const char *msg = "Unavailable arch event is counting.";
@@ -267,10 +316,23 @@ static void test_oob_gp_counter(uint8_t eax_gp_num, uint64_t perf_cap)
 	free_vcpu(vcpu);
 }
 
+static void intel_test_oob_fixed_ctr(uint8_t edx_fix_num, uint32_t fixed_bitmask)
+{
+	const char *msg = "At least one unsupported Fixed counter is visible.";
+	struct kvm_vcpu *vcpu;
+	uint64_t ret;
+
+	vcpu = new_vcpu(guest_wr_and_rd_msrs);
+	ret = test_oob_fixed_counter_setup(vcpu, edx_fix_num, fixed_bitmask);
+	run_vcpu(vcpu, msg, first_uc_arg_equals, (void *)ret);
+	free_vcpu(vcpu);
+}
+
 static void intel_test_counters_num(void)
 {
 	uint8_t kvm_gp_num = kvm_gp_ctrs_num();
 	unsigned int i;
+	uint32_t ecx;
 
 	TEST_REQUIRE(kvm_gp_num > 2);
 
@@ -289,6 +351,12 @@ static void intel_test_counters_num(void)
 		/* KVM doesn't emulate more counters than it can support. */
 		test_oob_gp_counter(kvm_gp_num + 1, perf_caps[i]);
 	}
+
+	for (ecx = 0; ecx <= kvm_fixed_ctrs_bitmask() + 1; ecx++) {
+		intel_test_oob_fixed_ctr(0, ecx);
+		intel_test_oob_fixed_ctr(kvm_fixed_ctrs_num(), ecx);
+		intel_test_oob_fixed_ctr(kvm_fixed_ctrs_num() + 1, ecx);
+	}
 }
 
 static void intel_test_arch_events(void)