[3/7] KVM: selftests: Test consistency of CPUID with num of GP counters

Message ID 20230323072714.82289-4-likexu@tencent.com
State New
Headers
Series KVM: selftests: Test the consistency of the PMU's CPUID and its features |

Commit Message

Like Xu March 23, 2023, 7:27 a.m. UTC
  From: Like Xu <likexu@tencent.com>

Add a test to check whether non-existent counters can be accessed in the
guest after determining the number of Intel generic performance counters
via CPUID. When the number of counters is less than 3, KVM does not
emulate #GP if a counter isn't present, due to compatibility
MSR_P6_PERFCTRx handling. Nor will KVM emulate more counters than it can
support.

Co-developed-by: Jinrong Liang <cloudliang@tencent.com>
Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
Signed-off-by: Like Xu <likexu@tencent.com>
---
 .../selftests/kvm/x86_64/pmu_cpuid_test.c     | 102 ++++++++++++++++++
 1 file changed, 102 insertions(+)
  

Comments

Sean Christopherson May 24, 2023, 10:44 p.m. UTC | #1
On Thu, Mar 23, 2023, Like Xu wrote:
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
> index 75434aa2a0ec..50902187d2c9 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
> @@ -49,11 +49,31 @@ static const uint64_t arch_events[] = {
>  /* Association of Fixed Counters with Architectural Performance Events */
>  static int fixed_events[] = {1, 0, 7};
>  
> +static const uint64_t perf_caps[] = {
> +	0,
> +	PMU_CAP_FW_WRITES,
> +};
> +
> +/*
> + * KVM implements the first two non-existent counters (MSR_P6_PERFCTRx)
> + * via kvm_pr_unimpl_wrmsr() instead of #GP. It is acceptable here to test
> + * the third counter as there are usually more than 3 available gp counters.

Don't hedge, i.e. don't say things like "usually".  And why not test that KVM
drops writes to the first two counters?  Unlike KVM-unit-tests, selftests can
test arbitrary KVM behavior without concern for breaking other use cases.

> +#define MSR_INTEL_ARCH_PMU_GPCTR (MSR_IA32_PERFCTR0 + 2)
> +
>  static uint64_t evt_code_for_fixed_ctr(uint8_t idx)
>  {
>  	return arch_events[fixed_events[idx]];
>  }
>  
> +static uint8_t kvm_gp_ctrs_num(void)
> +{
> +	const struct kvm_cpuid_entry2 *kvm_entry;
> +
> +	kvm_entry = get_cpuid_entry(kvm_get_supported_cpuid(), 0xa, 0);
> +	return (kvm_entry->eax & GP_CTR_NUM_MASK) >> GP_CTR_NUM_OFS_BIT;

This definitely can be defined as a KVM_X86_CPU_PROPERTY().  Ditto for most of
the helpers that are added in future patches.

>  static struct kvm_vcpu *new_vcpu(void *guest_code)
>  {
>  	struct kvm_vm *vm;
> @@ -98,6 +118,30 @@ static bool first_uc_arg_non_zero(struct ucall *uc, void *data)
>  	return uc->args[1];
>  }
>  
> +static bool first_uc_arg_equals(struct ucall *uc, void *data)
> +{
> +	return uc->args[1] == (uint64_t)data;
> +}
> +
> +static void guest_gp_handler(struct ex_regs *regs)
> +{
> +	GUEST_SYNC(GP_VECTOR);
> +	GUEST_DONE();
> +}
> +
> +static void guest_wr_and_rd_msrs(uint32_t base, uint64_t value,
> +				 uint8_t begin, uint8_t offset)
> +{
> +	unsigned int i;
> +
> +	for (i = begin; i < begin + offset; i++) {
> +		wrmsr(base + i, value);
> +		GUEST_SYNC(rdmsr(base + i));

Unless it won't work for something, use rdmsr_safe() and/or wrmsr_safe() instead
of installing a dedicated handler.  And if I'm reading the code correctly, that will
fix a bug in the test where only the first MSR is tested in the #GP case since the
#GP handler goes straight to GUEST_DONE(), i.e. doesn't skip and continue the rest
of the guest code.  Maybe that isn't a bug in practice, e.g. each negative test only
tests a single MSR, but (a) that's not obvious and (b) it's an unnecessary limitation.


> +	}
> +
> +	GUEST_DONE();
> +}
  

Patch

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
index 75434aa2a0ec..50902187d2c9 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
@@ -49,11 +49,31 @@  static const uint64_t arch_events[] = {
 /* Association of Fixed Counters with Architectural Performance Events */
 static int fixed_events[] = {1, 0, 7};
 
+static const uint64_t perf_caps[] = {
+	0,
+	PMU_CAP_FW_WRITES,
+};
+
+/*
+ * KVM implements the first two non-existent counters (MSR_P6_PERFCTRx)
+ * via kvm_pr_unimpl_wrmsr() instead of #GP. It is acceptable here to test
+ * the third counter as there are usually more than 3 available gp counters.
+ */
+#define MSR_INTEL_ARCH_PMU_GPCTR (MSR_IA32_PERFCTR0 + 2)
+
 static uint64_t evt_code_for_fixed_ctr(uint8_t idx)
 {
 	return arch_events[fixed_events[idx]];
 }
 
+static uint8_t kvm_gp_ctrs_num(void)
+{
+	const struct kvm_cpuid_entry2 *kvm_entry;
+
+	kvm_entry = get_cpuid_entry(kvm_get_supported_cpuid(), 0xa, 0);
+	return (kvm_entry->eax & GP_CTR_NUM_MASK) >> GP_CTR_NUM_OFS_BIT;
+}
+
 static struct kvm_vcpu *new_vcpu(void *guest_code)
 {
 	struct kvm_vm *vm;
@@ -98,6 +118,30 @@  static bool first_uc_arg_non_zero(struct ucall *uc, void *data)
 	return uc->args[1];
 }
 
+static bool first_uc_arg_equals(struct ucall *uc, void *data)
+{
+	return uc->args[1] == (uint64_t)data;
+}
+
+static void guest_gp_handler(struct ex_regs *regs)
+{
+	GUEST_SYNC(GP_VECTOR);
+	GUEST_DONE();
+}
+
+static void guest_wr_and_rd_msrs(uint32_t base, uint64_t value,
+				 uint8_t begin, uint8_t offset)
+{
+	unsigned int i;
+
+	for (i = begin; i < begin + offset; i++) {
+		wrmsr(base + i, value);
+		GUEST_SYNC(rdmsr(base + i));
+	}
+
+	GUEST_DONE();
+}
+
 static void intel_guest_run_arch_event(uint8_t version, uint8_t max_gp_num,
 				       uint8_t max_fixed_num, bool supported,
 				       uint32_t ctr_base_msr, uint64_t evt_code)
@@ -165,6 +209,27 @@  static void test_arch_events_setup(struct kvm_vcpu *vcpu, uint8_t evt_vector,
 		      ctr_msr, arch_events[idx]);
 }
 
+static void test_oob_gp_counter_setup(struct kvm_vcpu *vcpu, uint8_t eax_gp_num,
+				      uint64_t perf_cap)
+{
+	struct kvm_cpuid_entry2 *entry;
+	uint32_t ctr_msr = MSR_IA32_PERFCTR0;
+
+	entry = vcpu_get_cpuid_entry(vcpu, 0xa);
+	entry->eax = (entry->eax & ~GP_CTR_NUM_MASK) |
+		(eax_gp_num << GP_CTR_NUM_OFS_BIT);
+	vcpu_set_cpuid(vcpu);
+
+	if (perf_cap & PMU_CAP_FW_WRITES)
+		ctr_msr = MSR_IA32_PMC0;
+
+	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, perf_cap);
+	vcpu_args_set(vcpu, 4, ctr_msr, 0xffff,
+		      min(eax_gp_num, kvm_gp_ctrs_num()), 1);
+
+	vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
+}
+
 static void intel_check_arch_event_is_unavl(uint8_t idx)
 {
 	const char *msg = "Unavailable arch event is counting.";
@@ -190,6 +255,42 @@  static void intel_check_arch_event_is_unavl(uint8_t idx)
 	}
 }
 
+/* Access the first out-of-range counter register to trigger #GP */
+static void test_oob_gp_counter(uint8_t eax_gp_num, uint64_t perf_cap)
+{
+	const char *msg = "At least one unsupported GP counter is visible.";
+	struct kvm_vcpu *vcpu;
+
+	vcpu = new_vcpu(guest_wr_and_rd_msrs);
+	test_oob_gp_counter_setup(vcpu, eax_gp_num, perf_cap);
+	run_vcpu(vcpu, msg, first_uc_arg_equals, (void *)GP_VECTOR);
+	free_vcpu(vcpu);
+}
+
+static void intel_test_counters_num(void)
+{
+	uint8_t kvm_gp_num = kvm_gp_ctrs_num();
+	unsigned int i;
+
+	TEST_REQUIRE(kvm_gp_num > 2);
+
+	for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
+		/*
+		 * For compatibility reasons, KVM does not emulate #GP
+		 * when MSR_P6_PERFCTR[0|1] is not present, but it doesn't
+		 * affect checking the presence of MSR_IA32_PMCx with #GP.
+		 */
+		if (perf_caps[i] & PMU_CAP_FW_WRITES)
+			test_oob_gp_counter(0, perf_caps[i]);
+
+		test_oob_gp_counter(2, perf_caps[i]);
+		test_oob_gp_counter(kvm_gp_num, perf_caps[i]);
+
+		/* KVM doesn't emulate more counters than it can support. */
+		test_oob_gp_counter(kvm_gp_num + 1, perf_caps[i]);
+	}
+}
+
 static void intel_test_arch_events(void)
 {
 	uint8_t idx;
@@ -213,6 +314,7 @@  static void intel_test_arch_events(void)
 static void intel_test_pmu_cpuid(void)
 {
 	intel_test_arch_events();
+	intel_test_counters_num();
 }
 
 int main(int argc, char *argv[])