[4/6] perf/x86/amd/uncore: Add group exclusivity

Message ID: 92465f174413553410aaa876223817b156808800.1689748843.git.sandipan.das@amd.com
State: New
Series: perf/x86/amd: Add memory controller events

Commit Message

Sandipan Das July 19, 2023, 6:55 a.m. UTC
In some cases, it may be necessary to restrict opening PMU events to a
subset of CPUs. Uncore PMUs that require this restriction can use the
new group attribute in struct amd_uncore to set a valid uncore ID during
initialization. During the starting phase of hotplug, the per-CPU
context is marked as unused if the group ID does not match the uncore ID
of the CPU being onlined.
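To illustrate the intended semantics, here is a minimal userspace model
of the starting-phase check; it is not part of the patch itself, and all
names below are hypothetical. A group of -1 (the default) means no
restriction, while any other value reclaims the contexts of CPUs whose
uncore ID does not match.

  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical model of the relevant struct amd_uncore fields. */
  struct uncore_model {
  	int group;			/* -1: no restriction (default) */
  	int (*id)(unsigned int cpu);	/* per-CPU uncore ID callback */
  };

  /*
   * Mirrors the new check in amd_uncore_cpu_starting(): keep the per-CPU
   * context only if no group is set or the CPU's uncore ID matches it.
   */
  static bool ctx_is_kept(const struct uncore_model *uncore, unsigned int cpu)
  {
  	return uncore->group < 0 || uncore->id(cpu) == uncore->group;
  }

  /* Toy ID callback: CPUs 0-7 sit on socket 0, CPUs 8-15 on socket 1. */
  static int toy_socket_id(unsigned int cpu)
  {
  	return cpu < 8 ? 0 : 1;
  }

  int main(void)
  {
  	struct uncore_model umc0 = { .group = 0, .id = toy_socket_id };
  	unsigned int cpu;

  	for (cpu = 0; cpu < 16; cpu += 4)
  		printf("cpu %2u: context %s\n", cpu,
  		       ctx_is_kept(&umc0, cpu) ? "kept" : "reclaimed");

  	return 0;
  }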

For example, the Zen 4 memory controller (UMC) PMUs are specific to each
active memory channel, and the MSR address space for the PERF_CTL and
PERF_CTR registers is reused on each socket. Thus, PMU events
corresponding to a memory controller should be opened only on CPUs
belonging to the socket associated with that memory controller.
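A later patch in this series is expected to wire this up for the UMC
PMUs. As a hypothetical sketch only (the callback name, the use of
topology_physical_package_id() and the setup helper are assumptions for
illustration, not taken from this series):

  /* Hypothetical: derive the uncore ID from the CPU's socket. */
  static int amd_uncore_umc_id(unsigned int cpu)
  {
  	return topology_physical_package_id(cpu);
  }

  /* Hypothetical fragment of a per-socket UMC setup path. */
  static void example_umc_setup(struct amd_uncore *uncore, int socket)
  {
  	uncore->id = amd_uncore_umc_id;
  	uncore->group = socket;	/* contexts on other sockets get reclaimed */
  }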

Signed-off-by: Sandipan Das <sandipan.das@amd.com>
---
 arch/x86/events/amd/uncore.c | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)
  

Patch

diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index f17df6574ba5..6653e8e164bd 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -50,6 +50,7 @@ struct amd_uncore {
 	int num_counters;
 	int rdpmc_base;
 	u32 msr_base;
+	int group;
 	cpumask_t active_mask;
 	struct pmu pmu;
 	struct amd_uncore_ctx * __percpu *ctx;
@@ -423,6 +424,17 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
 		uncore = &uncores[i];
 		ctx = *per_cpu_ptr(uncore->ctx, cpu);
 		ctx->id = uncore->id(cpu);
+
+		/*
+		 * Reclaim the context if events can only be opened by CPUs
+		 * within the same group
+		 */
+		if (uncore->group >= 0 && ctx->id != uncore->group) {
+			hlist_add_head(&ctx->node, &uncore_unused_list);
+			*per_cpu_ptr(uncore->ctx, cpu) = NULL;
+			continue;
+		}
+
 		ctx = amd_uncore_find_online_sibling(ctx, uncore);
 		*per_cpu_ptr(uncore->ctx, cpu) = ctx;
 	}
@@ -453,7 +465,7 @@ static int amd_uncore_cpu_online(unsigned int cpu)
 	for (i = 0; i < num_uncores; i++) {
 		uncore = &uncores[i];
 		ctx = *per_cpu_ptr(uncore->ctx, cpu);
-		if (cpu == ctx->cpu)
+		if (ctx && cpu == ctx->cpu)
 			cpumask_set_cpu(cpu, &uncore->active_mask);
 	}
 
@@ -469,12 +481,14 @@ static int amd_uncore_cpu_down_prepare(unsigned int cpu)
 	for (i = 0; i < num_uncores; i++) {
 		uncore = &uncores[i];
 		this = *per_cpu_ptr(uncore->ctx, cpu);
+		if (!this)
+			continue;
 
 		/* this cpu is going down, migrate to a shared sibling if possible */
 		for_each_online_cpu(j) {
 			that = *per_cpu_ptr(uncore->ctx, j);
 
-			if (cpu == j)
+			if (!that || cpu == j)
 				continue;
 
 			if (this == that) {
@@ -499,6 +513,9 @@ static int amd_uncore_cpu_dead(unsigned int cpu)
 	for (i = 0; i < num_uncores; i++) {
 		uncore = &uncores[i];
 		ctx = *per_cpu_ptr(uncore->ctx, cpu);
+		if (!ctx)
+			continue;
+
 		if (cpu == ctx->cpu)
 			cpumask_clear_cpu(cpu, &uncore->active_mask);
 
@@ -584,6 +601,7 @@ static int amd_uncore_df_init(void)
 	uncore->msr_base = MSR_F15H_NB_PERF_CTL;
 	uncore->rdpmc_base = RDPMC_BASE_NB;
 	uncore->id = amd_uncore_df_id;
+	uncore->group = -1;
 
 	if (pmu_version >= 2) {
 		*df_attr++ = &format_attr_event14v2.attr;
@@ -693,6 +711,7 @@ static int amd_uncore_l3_init(void)
 	uncore->msr_base = MSR_F16H_L2I_PERF_CTL;
 	uncore->rdpmc_base = RDPMC_BASE_LLC;
 	uncore->id = amd_uncore_l3_id;
+	uncore->group = -1;
 
 	if (boot_cpu_data.x86 >= 0x17) {
 		*l3_attr++ = &format_attr_event8.attr;