[v7,13/19] KVM: selftests: Add functional test for Intel's fixed PMU counters
Commit Message
From: Jinrong Liang <cloudliang@tencent.com>
Extend the fixed counters test to verify that supported counters can
actually be enabled in the control MSRs, that unsupported counters cannot,
and that enabled counters actually count.
Co-developed-by: Like Xu <likexu@tencent.com>
Signed-off-by: Like Xu <likexu@tencent.com>
Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
[sean: fold into the rd/wr access test, massage changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
.../selftests/kvm/x86_64/pmu_counters_test.c | 31 ++++++++++++++++++-
1 file changed, 30 insertions(+), 1 deletion(-)
Comments
On 11/8/2023 8:31 AM, Sean Christopherson wrote:
> From: Jinrong Liang <cloudliang@tencent.com>
>
> Extend the fixed counters test to verify that supported counters can
> actually be enabled in the control MSRs, that unsupported counters cannot,
> and that enabled counters actually count.
>
> Co-developed-by: Like Xu <likexu@tencent.com>
> Signed-off-by: Like Xu <likexu@tencent.com>
> Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
> [sean: fold into the rd/wr access test, massage changelog]
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
> .../selftests/kvm/x86_64/pmu_counters_test.c | 31 ++++++++++++++++++-
> 1 file changed, 30 insertions(+), 1 deletion(-)
>
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
> index 8c934e261f2d..b9c073d3ade9 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
> @@ -324,7 +324,6 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters
> vector = wrmsr_safe(msr, 0);
> GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector);
> }
> - GUEST_DONE();
> }
>
> static void guest_test_gp_counters(void)
> @@ -342,6 +341,7 @@ static void guest_test_gp_counters(void)
> base_msr = MSR_IA32_PERFCTR0;
>
> guest_rd_wr_counters(base_msr, MAX_NR_GP_COUNTERS, nr_gp_counters, 0);
> + GUEST_DONE();
> }
>
> static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities,
> @@ -365,6 +365,7 @@ static void guest_test_fixed_counters(void)
> {
> uint64_t supported_bitmask = 0;
> uint8_t nr_fixed_counters = 0;
> + uint8_t i;
>
> /* Fixed counters require Architectural vPMU Version 2+. */
> if (guest_get_pmu_version() >= 2)
> @@ -379,6 +380,34 @@ static void guest_test_fixed_counters(void)
>
> guest_rd_wr_counters(MSR_CORE_PERF_FIXED_CTR0, MAX_NR_FIXED_COUNTERS,
> nr_fixed_counters, supported_bitmask);
> +
> + for (i = 0; i < MAX_NR_FIXED_COUNTERS; i++) {
> + uint8_t vector;
> + uint64_t val;
> +
> + if (i >= nr_fixed_counters && !(supported_bitmask & BIT_ULL(i))) {
> + vector = wrmsr_safe(MSR_CORE_PERF_FIXED_CTR_CTRL,
> + FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
> + __GUEST_ASSERT(vector == GP_VECTOR,
> + "Expected #GP for counter %u in FIXED_CTR_CTRL", i);
> +
> + vector = wrmsr_safe(MSR_CORE_PERF_GLOBAL_CTRL,
> + FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
> + __GUEST_ASSERT(vector == GP_VECTOR,
> + "Expected #GP for counter %u in PERF_GLOBAL_CTRL", i);
> + continue;
> + }
> +
> + wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, 0);
> + wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
> + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
> + __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
This assembly snippet is duplicated in several places; we could wrap it
in a macro or helper function.
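Something like the following minimal sketch could do it, assuming
NUM_BRANCHES stays the loop count (the name GUEST_MEASURE_BRANCHES is
purely illustrative, not from the posted patch):

	/*
	 * Sketch only: hoist the measured branch loop into one macro so
	 * call sites don't repeat the raw asm.  The compound literal hands
	 * "loop" a scratch counter in (R|E)CX to decrement.
	 */
	#define GUEST_MEASURE_BRANCHES()				\
		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}))

Each call site then collapses to GUEST_MEASURE_BRANCHES();.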
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
> + val = rdmsr(MSR_CORE_PERF_FIXED_CTR0 + i);
> +
> + GUEST_ASSERT_NE(val, 0);
> + }
> + GUEST_DONE();
> }
>
> static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities,
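For reference, the FIXED_PMC_* helpers used above presumably follow the
SDM's fixed-counter bit layout, roughly as below (the authoritative
definitions live in the selftests' PMU header, not here):

	/*
	 * Presumed layout per the SDM: fixed counter i is enabled by bit
	 * 32+i of IA32_PERF_GLOBAL_CTRL and by a 4-bit field per counter
	 * in IA32_FIXED_CTR_CTRL, where bit 0 of that field enables
	 * counting at CPL0 (kernel).
	 */
	#define FIXED_PMC_GLOBAL_CTRL_ENABLE(i)	BIT_ULL(32 + (i))
	#define FIXED_PMC_CTRL(i, ctrl)		((uint64_t)(ctrl) << ((i) * 4))
	#define FIXED_PMC_KERNEL		BIT(0)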