[v2,5/7] x86/resctrl: Add package scoped resource

Message ID 20230621174006.42533-6-tony.luck@intel.com
State New
Headers
Series x86/resctrl: Add support for Sub-NUMA cluster (SNC) systems

Commit Message

Luck, Tony June 21, 2023, 5:40 p.m. UTC
  Some Intel features require setting a package-scoped model-specific
register.

Add a new resource that builds domains for each package.

Signed-off-by: Tony Luck <tony.luck@intel.com>
---
 include/linux/resctrl.h                |  1 +
 arch/x86/kernel/cpu/resctrl/internal.h |  6 ++++++
 arch/x86/kernel/cpu/resctrl/core.c     | 23 +++++++++++++++++++----
 3 files changed, 26 insertions(+), 4 deletions(-)
  

Comments

Shaopeng Tan (Fujitsu) June 29, 2023, 7:38 a.m. UTC | #1
Hi Tony,

> Some Intel features require setting a package scoped model specific register.
> 
> Add a new resource that builds domains for each package.
> 
> Signed-off-by: Tony Luck <tony.luck@intel.com>
> ---
>  include/linux/resctrl.h                |  1 +
>  arch/x86/kernel/cpu/resctrl/internal.h |  6 ++++++
>  arch/x86/kernel/cpu/resctrl/core.c     | 23
> +++++++++++++++++++----
>  3 files changed, 26 insertions(+), 4 deletions(-)
> 
> diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index
> 25051daa6655..f504f6263fec 100644
> --- a/include/linux/resctrl.h
> +++ b/include/linux/resctrl.h
> @@ -167,6 +167,7 @@ struct rdt_resource {
>  	int			rid;
>  	bool			alloc_capable;
>  	bool			mon_capable;
> +	bool			pkg_actions;
>  	int			num_rmid;
>  	int			scope;
>  	struct resctrl_cache	cache;
> diff --git a/arch/x86/kernel/cpu/resctrl/internal.h
> b/arch/x86/kernel/cpu/resctrl/internal.h
> index 38bac0062c82..e51a5004be77 100644
> --- a/arch/x86/kernel/cpu/resctrl/internal.h
> +++ b/arch/x86/kernel/cpu/resctrl/internal.h
> @@ -438,6 +438,7 @@ enum resctrl_res_level {
>  	RDT_RESOURCE_MBA,
>  	RDT_RESOURCE_SMBA,
>  	RDT_RESOURCE_NODE,
> +	RDT_RESOURCE_PKG,
> 
>  	/* Must be the last */
>  	RDT_NUM_RESOURCES,
> @@ -447,6 +448,7 @@ enum resctrl_scope {
>  	SCOPE_L2_CACHE = 2,
>  	SCOPE_L3_CACHE = 3,
>  	SCOPE_NODE,
> +	SCOPE_PKG,
>  };
> 
>  static inline int get_mbm_res_level(void) @@ -482,6 +484,10 @@ int
> resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
>  	for_each_rdt_resource(r)					      \
>  		if (r->alloc_capable || r->mon_capable)
> 
> +#define for_each_domain_needed_rdt_resource(r)
> 	      \
> +	for_each_rdt_resource(r)					      \
> +		if (r->alloc_capable || r->mon_capable || r->pkg_actions)
> +
>  #define for_each_alloc_capable_rdt_resource(r)
> \
>  	for_each_rdt_resource(r)					      \
>  		if (r->alloc_capable)
> diff --git a/arch/x86/kernel/cpu/resctrl/core.c
> b/arch/x86/kernel/cpu/resctrl/core.c
> index 6fe9f87d4403..af3be3c2db96 100644
> --- a/arch/x86/kernel/cpu/resctrl/core.c
> +++ b/arch/x86/kernel/cpu/resctrl/core.c
> @@ -127,6 +127,16 @@ struct rdt_hw_resource rdt_resources_all[] = {
>  			.fflags			= 0,
>  		},
>  	},
> +	[RDT_RESOURCE_PKG] =
> +	{
> +		.r_resctrl = {
> +			.rid			= RDT_RESOURCE_PKG,
> +			.name			= "PKG",
> +			.scope			= SCOPE_PKG,
> +			.domains		=
> domain_init(RDT_RESOURCE_PKG),
> +			.fflags			= 0,
> +		},
> +	},
>  };
> 
>  /*
> @@ -504,9 +514,14 @@ static int arch_domain_mbm_alloc(u32 num_rmid,
> struct rdt_hw_domain *hw_dom)
> 
>  static int get_domain_id(int cpu, enum resctrl_scope scope)  {
> -	if (scope == SCOPE_NODE)
> +	switch (scope) {
> +	case SCOPE_NODE:
>  		return cpu_to_node(cpu);
> -	return get_cpu_cacheinfo_id(cpu, scope);
> +	case SCOPE_PKG:
> +		return topology_physical_package_id(cpu);
> +	default:
> +		return get_cpu_cacheinfo_id(cpu, scope);
> +	}
>  }
> 
>  /*
> @@ -630,7 +645,7 @@ static int resctrl_online_cpu(unsigned int cpu)
>  	struct rdt_resource *r;
> 
>  	mutex_lock(&rdtgroup_mutex);
> -	for_each_capable_rdt_resource(r)
> +	for_each_domain_needed_rdt_resource(r)
>  		domain_add_cpu(cpu, r);
>  	/* The cpu is set in default rdtgroup after online. */
>  	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); @@ -657,7
> +672,7 @@ static int resctrl_offline_cpu(unsigned int cpu)
>  	struct rdt_resource *r;
> 
>  	mutex_lock(&rdtgroup_mutex);
> -	for_each_capable_rdt_resource(r)
> +	for_each_domain_needed_rdt_resource(r)

The macro for_each_capable_rdt_resource(r) is no longer used anywhere,
so I think it is better to remove "#define for_each_capable_rdt_resource(r)" as well.

Best regards,
Shaopeng TAN
  

Patch

diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 25051daa6655..f504f6263fec 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -167,6 +167,7 @@  struct rdt_resource {
 	int			rid;
 	bool			alloc_capable;
 	bool			mon_capable;
+	bool			pkg_actions;
 	int			num_rmid;
 	int			scope;
 	struct resctrl_cache	cache;
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 38bac0062c82..e51a5004be77 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -438,6 +438,7 @@  enum resctrl_res_level {
 	RDT_RESOURCE_MBA,
 	RDT_RESOURCE_SMBA,
 	RDT_RESOURCE_NODE,
+	RDT_RESOURCE_PKG,
 
 	/* Must be the last */
 	RDT_NUM_RESOURCES,
@@ -447,6 +448,7 @@  enum resctrl_scope {
 	SCOPE_L2_CACHE = 2,
 	SCOPE_L3_CACHE = 3,
 	SCOPE_NODE,
+	SCOPE_PKG,
 };
 
 static inline int get_mbm_res_level(void)
@@ -482,6 +484,10 @@  int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
 	for_each_rdt_resource(r)					      \
 		if (r->alloc_capable || r->mon_capable)
 
+#define for_each_domain_needed_rdt_resource(r)				      \
+	for_each_rdt_resource(r)					      \
+		if (r->alloc_capable || r->mon_capable || r->pkg_actions)
+
 #define for_each_alloc_capable_rdt_resource(r)				      \
 	for_each_rdt_resource(r)					      \
 		if (r->alloc_capable)
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 6fe9f87d4403..af3be3c2db96 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -127,6 +127,16 @@  struct rdt_hw_resource rdt_resources_all[] = {
 			.fflags			= 0,
 		},
 	},
+	[RDT_RESOURCE_PKG] =
+	{
+		.r_resctrl = {
+			.rid			= RDT_RESOURCE_PKG,
+			.name			= "PKG",
+			.scope			= SCOPE_PKG,
+			.domains		= domain_init(RDT_RESOURCE_PKG),
+			.fflags			= 0,
+		},
+	},
 };
 
 /*
@@ -504,9 +514,14 @@  static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
 
 static int get_domain_id(int cpu, enum resctrl_scope scope)
 {
-	if (scope == SCOPE_NODE)
+	switch (scope) {
+	case SCOPE_NODE:
 		return cpu_to_node(cpu);
-	return get_cpu_cacheinfo_id(cpu, scope);
+	case SCOPE_PKG:
+		return topology_physical_package_id(cpu);
+	default:
+		return get_cpu_cacheinfo_id(cpu, scope);
+	}
 }
 
 /*
@@ -630,7 +645,7 @@  static int resctrl_online_cpu(unsigned int cpu)
 	struct rdt_resource *r;
 
 	mutex_lock(&rdtgroup_mutex);
-	for_each_capable_rdt_resource(r)
+	for_each_domain_needed_rdt_resource(r)
 		domain_add_cpu(cpu, r);
 	/* The cpu is set in default rdtgroup after online. */
 	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
@@ -657,7 +672,7 @@  static int resctrl_offline_cpu(unsigned int cpu)
 	struct rdt_resource *r;
 
 	mutex_lock(&rdtgroup_mutex);
-	for_each_capable_rdt_resource(r)
+	for_each_domain_needed_rdt_resource(r)
 		domain_remove_cpu(cpu, r);
 	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
 		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {