[v12,7/8] x86/resctrl: Sub NUMA Cluster detection and enable

Message ID 20231130003418.89964-8-tony.luck@intel.com
State New
Series: Add support for Sub-NUMA cluster (SNC) systems

Commit Message

Luck, Tony Nov. 30, 2023, 12:34 a.m. UTC
  There isn't a simple hardware bit that indicates whether a CPU is
running in Sub NUMA Cluster (SNC) mode. Infer the state by comparing
the ratio of NUMA nodes to L3 cache instances.
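For example, a two-socket system running with two SNC nodes per socket
reports four CPU NUMA nodes but only two L3 cache instances, a ratio of
two; with SNC disabled the ratio is one.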

When SNC mode is detected, reconfigure the RMID counters by updating
the MSR_RMID_SNC_CONFIG MSR on each socket as CPUs are seen.

Clearing bit zero of the MSR divides the RMIDs and renumbers the ones
on the second SNC node to start from zero.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Peter Newman <peternewman@google.com>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Reviewed-by: Shaopeng Tan <tan.shaopeng@jp.fujitsu.com>
Tested-by: Shaopeng Tan <tan.shaopeng@jp.fujitsu.com>
---
 arch/x86/include/asm/msr-index.h   |  1 +
 arch/x86/kernel/cpu/resctrl/core.c | 96 ++++++++++++++++++++++++++++++
 2 files changed, 97 insertions(+)
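
The counter-read side of this renumbering is handled by __rmid_read()
elsewhere in the series and is not part of this diff. As a rough sketch of
the translation involved (the helper name and the rmids_per_snc_node
parameter are hypothetical; snc_nodes_per_l3_cache and cpu_to_node() are
names actually used by the series and the kernel):

static u32 snc_physical_rmid(u32 rmid, int cpu, u32 rmids_per_snc_node)
{
	int snc_node;

	/* SNC disabled: resctrl RMIDs map 1:1 to hardware RMIDs */
	if (snc_nodes_per_l3_cache == 1)
		return rmid;

	/* Which SNC node within its L3 cache domain this CPU belongs to */
	snc_node = cpu_to_node(cpu) % snc_nodes_per_l3_cache;

	/* Each SNC node owns a zero-based slice of the divided RMID space */
	return rmid + snc_node * rmids_per_snc_node;
}

The RMID written to MSR_IA32_QM_EVTSEL when reading a counter would then be
this per-node value rather than the resctrl-visible one.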
  

Comments

Fam Zheng Nov. 30, 2023, 6:02 p.m. UTC | #1
Hi Tony,

On Wed, Nov 29, 2023 at 04:34:17PM -0800, Tony Luck wrote:
> There isn't a simple hardware bit that indicates whether a CPU is
> running in Sub NUMA Cluster (SNC) mode. Infer the state by comparing
> the ratio of NUMA nodes to L3 cache instances.
> 
> When SNC mode is detected, reconfigure the RMID counters by updating
> the MSR_RMID_SNC_CONFIG MSR on each socket as CPUs are seen.
> 
> Clearing bit zero of the MSR divides the RMIDs and renumbers the ones
> on the second SNC node to start from zero.
> 
> Signed-off-by: Tony Luck <tony.luck@intel.com>
> Reviewed-by: Peter Newman <peternewman@google.com>
> Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
> Reviewed-by: Shaopeng Tan <tan.shaopeng@jp.fujitsu.com>
> Tested-by: Shaopeng Tan <tan.shaopeng@jp.fujitsu.com>
> ---
>  arch/x86/include/asm/msr-index.h   |  1 +
>  arch/x86/kernel/cpu/resctrl/core.c | 96 ++++++++++++++++++++++++++++++
>  2 files changed, 97 insertions(+)
> 
> diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
> index 1d51e1850ed0..94d29d81e6db 100644
> --- a/arch/x86/include/asm/msr-index.h
> +++ b/arch/x86/include/asm/msr-index.h
> @@ -1111,6 +1111,7 @@
>  #define MSR_IA32_QM_CTR			0xc8e
>  #define MSR_IA32_PQR_ASSOC		0xc8f
>  #define MSR_IA32_L3_CBM_BASE		0xc90
> +#define MSR_RMID_SNC_CONFIG		0xca0
>  #define MSR_IA32_L2_CBM_BASE		0xd10
>  #define MSR_IA32_MBA_THRTL_BASE		0xd50
>  
> diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
> index cf5aba8a74bf..3293ab4c58b0 100644
> --- a/arch/x86/kernel/cpu/resctrl/core.c
> +++ b/arch/x86/kernel/cpu/resctrl/core.c
> @@ -16,11 +16,14 @@
>  
>  #define pr_fmt(fmt)	"resctrl: " fmt
>  
> +#include <linux/cpu.h>
>  #include <linux/slab.h>
>  #include <linux/err.h>
>  #include <linux/cacheinfo.h>
>  #include <linux/cpuhotplug.h>
> +#include <linux/mod_devicetable.h>
>  
> +#include <asm/cpu_device_id.h>
>  #include <asm/intel-family.h>
>  #include <asm/resctrl.h>
>  #include "internal.h"
> @@ -740,11 +743,42 @@ static void clear_closid_rmid(int cpu)
>  	wrmsr(MSR_IA32_PQR_ASSOC, 0, 0);
>  }
>  
> +/*
> + * The power-on reset value of MSR_RMID_SNC_CONFIG is 0x1
> + * which indicates that RMIDs are configured in legacy mode.
> + * This mode is incompatible with Linux resctrl semantics
> + * as RMIDs are partitioned between SNC nodes, which requires
> + * a user to know which RMID is allocated to a task.
> + * Clearing bit 0 reconfigures the RMID counters for use
> + * in Sub NUMA Cluster mode. This mode is better for Linux.
> + * The RMID space is divided between all SNC nodes with the
> + * RMIDs renumbered to start from zero in each node when
> + * counting operations from tasks. Code to read the counters
> + * must adjust RMID counter numbers based on SNC node. See
> + * __rmid_read() for code that does this.
> + */
> +static void snc_remap_rmids(int cpu)
> +{
> +	u64 val;
> +
> +	/* Only need to enable once per package. */
> +	if (cpumask_first(topology_core_cpumask(cpu)) != cpu)
> +		return;
> +
> +	rdmsrl(MSR_RMID_SNC_CONFIG, val);
> +	val &= ~BIT_ULL(0);
> +	wrmsrl(MSR_RMID_SNC_CONFIG, val);
> +}
> +
>  static int resctrl_online_cpu(unsigned int cpu)
>  {
>  	struct rdt_resource *r;
>  
>  	mutex_lock(&rdtgroup_mutex);
> +
> +	if (snc_nodes_per_l3_cache > 1)
> +		snc_remap_rmids(cpu);
> +
>  	for_each_capable_rdt_resource(r)
>  		domain_add_cpu(cpu, r);
>  	/* The cpu is set in default rdtgroup after online. */
> @@ -999,11 +1033,73 @@ static __init bool get_rdt_resources(void)
>  	return (rdt_mon_capable || rdt_alloc_capable);
>  }
>  
> +/* CPU models that support MSR_RMID_SNC_CONFIG */
> +static const struct x86_cpu_id snc_cpu_ids[] __initconst = {
> +	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0),
> +	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 0),
> +	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, 0),
> +	X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, 0),
> +	{}
> +};
> +
> +/*
> + * There isn't a simple hardware bit that indicates whether a CPU is running
> + * in Sub NUMA Cluster (SNC) mode. Infer the state by comparing the
> + * ratio of NUMA nodes to L3 cache instances.
> + * It is not possible to accurately determine SNC state if the system is
> + * booted with a maxcpus=N parameter. That distorts the ratio of SNC nodes
> + * to L3 caches. It will be OK if the system is booted with hyperthreading
> + * disabled (since this doesn't affect the ratio).
> + */
> +static __init int snc_get_config(void)
> +{
> +	unsigned long *node_caches;
> +	int mem_only_nodes = 0;
> +	int cpu, node, ret;
> +	int num_l3_caches;
> +
> +	if (!x86_match_cpu(snc_cpu_ids))
> +		return 1;
> +
> +	node_caches = bitmap_zalloc(nr_node_ids, GFP_KERNEL);
> +	if (!node_caches)
> +		return 1;
> +
> +	cpus_read_lock();
> +
> +	if (num_online_cpus() != num_present_cpus())
> +		pr_warn("Some CPUs offline, SNC detection may be incorrect\n");
> +
> +	for_each_node(node) {
> +		cpu = cpumask_first(cpumask_of_node(node));
> +		if (cpu < nr_cpu_ids)
> +			set_bit(get_cpu_cacheinfo_id(cpu, 3), node_caches);

Are we sure get_cpu_cacheinfo_id() is a valid index here? Looking at
the function it could be -1 or larger than nr_node_ids.

Fam

> +		else
> +			mem_only_nodes++;
> +	}
> +	cpus_read_unlock();
> +
> +	num_l3_caches = bitmap_weight(node_caches, nr_node_ids);
> +	kfree(node_caches);
> +
> +	if (!num_l3_caches)
> +		return 1;
> +
> +	ret = (nr_node_ids - mem_only_nodes) / num_l3_caches;
> +
> +	if (ret > 1)
> +		rdt_resources_all[RDT_RESOURCE_L3].r_resctrl.mon_scope = RESCTRL_NODE;
> +
> +	return ret;
> +}
> +
>  static __init void rdt_init_res_defs_intel(void)
>  {
>  	struct rdt_hw_resource *hw_res;
>  	struct rdt_resource *r;
>  
> +	snc_nodes_per_l3_cache = snc_get_config();
> +
>  	for_each_rdt_resource(r) {
>  		hw_res = resctrl_to_arch_res(r);
>  
> -- 
> 2.41.
  
Luck, Tony Nov. 30, 2023, 8:57 p.m. UTC | #2
On Thu, Nov 30, 2023 at 06:02:42PM +0000, Fam Zheng wrote:
> > +static __init int snc_get_config(void)
> > +{
> > +	unsigned long *node_caches;
> > +	int mem_only_nodes = 0;
> > +	int cpu, node, ret;
> > +	int num_l3_caches;
> > +
> > +	if (!x86_match_cpu(snc_cpu_ids))
> > +		return 1;
> > +
> > +	node_caches = bitmap_zalloc(nr_node_ids, GFP_KERNEL);
> > +	if (!node_caches)
> > +		return 1;
> > +
> > +	cpus_read_lock();
> > +
> > +	if (num_online_cpus() != num_present_cpus())
> > +		pr_warn("Some CPUs offline, SNC detection may be incorrect\n");
> > +
> > +	for_each_node(node) {
> > +		cpu = cpumask_first(cpumask_of_node(node));
> > +		if (cpu < nr_cpu_ids)
> > +			set_bit(get_cpu_cacheinfo_id(cpu, 3), node_caches);
> 
> Are we sure get_cpu_cacheinfo_id() is a valid index here? Looking at
> the function it could be -1 or larger than nr_node_ids.

Fam,

Return -1 is possible (in the case where first CPU on a node doesn't
have an L3 cache). Larger than nr_node_ids seems a bit more speculative.
It would mean a system with multiple L3 cache instances per node. I
suppose that's theoretically possible. In the limit case every CPU may
have its own personal L3 cache, but still have multiple CPUs grouped
together on a node.

Patch below (to be folded into part7 of next version). Increases the
size of the bitmap. Checks for get_cpu_cacheinfo_id() returning -1.
Patch just ignores the node in this case. I'm never quite sure how much
code to add for "Can't happen" scenarios.

-Tony


diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 3293ab4c58b0..85f8a1b3feaf 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -1056,12 +1056,13 @@ static __init int snc_get_config(void)
 	unsigned long *node_caches;
 	int mem_only_nodes = 0;
 	int cpu, node, ret;
+	int cache_id;
 	int num_l3_caches;
 
 	if (!x86_match_cpu(snc_cpu_ids))
 		return 1;
 
-	node_caches = bitmap_zalloc(nr_node_ids, GFP_KERNEL);
+	node_caches = bitmap_zalloc(num_online_cpus(), GFP_KERNEL);
 	if (!node_caches)
 		return 1;
 
@@ -1072,10 +1073,13 @@ static __init int snc_get_config(void)
 
 	for_each_node(node) {
 		cpu = cpumask_first(cpumask_of_node(node));
-		if (cpu < nr_cpu_ids)
-			set_bit(get_cpu_cacheinfo_id(cpu, 3), node_caches);
-		else
+		if (cpu < nr_cpu_ids) {
+			cache_id = get_cpu_cacheinfo_id(cpu, 3);
+			if (cache_id != -1)
+				set_bit(cache_id, node_caches);
+		} else {
 			mem_only_nodes++;
+		}
 	}
 	cpus_read_unlock();
  
Reinette Chatre Nov. 30, 2023, 9:47 p.m. UTC | #3
Hi Tony,

On 11/30/2023 12:57 PM, Tony Luck wrote:
> On Thu, Nov 30, 2023 at 06:02:42PM +0000, Fam Zheng wrote:
>>> +static __init int snc_get_config(void)
>>> +{
>>> +	unsigned long *node_caches;
>>> +	int mem_only_nodes = 0;
>>> +	int cpu, node, ret;
>>> +	int num_l3_caches;
>>> +
>>> +	if (!x86_match_cpu(snc_cpu_ids))
>>> +		return 1;
>>> +
>>> +	node_caches = bitmap_zalloc(nr_node_ids, GFP_KERNEL);
>>> +	if (!node_caches)
>>> +		return 1;
>>> +
>>> +	cpus_read_lock();
>>> +
>>> +	if (num_online_cpus() != num_present_cpus())
>>> +		pr_warn("Some CPUs offline, SNC detection may be incorrect\n");
>>> +
>>> +	for_each_node(node) {
>>> +		cpu = cpumask_first(cpumask_of_node(node));
>>> +		if (cpu < nr_cpu_ids)
>>> +			set_bit(get_cpu_cacheinfo_id(cpu, 3), node_caches);
>>
>> Are we sure get_cpu_cacheinfo_id() is a valid index here? Looking at
>> the function it could be -1 or larger than nr_node_ids.
> 
> Fam,
> 
> Return -1 is possible (in the case where first CPU on a node doesn't
> have an L3 cache). Larger than nr_node_ids seems a bit more speculative.
> It would mean a system with multiple L3 cache instances per node. I
> suppose that's theoretically possible. In the limit case every CPU may
> have its own personal L3 cache, but still have multiple CPUs grouped
> together on a node.
> 
> Patch below (to be folded into part7 of next version). Increases the
> size of the bitmap. Checks for get_cpu_cacheinfo_id() returning -1.
> Patch just ignores the node in this case. I'm never quite sure how much
> code to add for "Can't happen" scenarios.
> 

Thank you.

> -Tony
> 
> 
> diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
> index 3293ab4c58b0..85f8a1b3feaf 100644
> --- a/arch/x86/kernel/cpu/resctrl/core.c
> +++ b/arch/x86/kernel/cpu/resctrl/core.c
> @@ -1056,12 +1056,13 @@ static __init int snc_get_config(void)
>  	unsigned long *node_caches;
>  	int mem_only_nodes = 0;
>  	int cpu, node, ret;
> +	int cache_id;
>  	int num_l3_caches;

Please do maintain reverse fir order.

>  
>  	if (!x86_match_cpu(snc_cpu_ids))
>  		return 1;

I understand and welcome this change as motivated by robustness. Apart
from that, with this being a model specific feature for this particular
group of systems, it is not clear to me in which scenarios this could
run on a system where a present CPU does not have access to L3 cache.

>  
> -	node_caches = bitmap_zalloc(nr_node_ids, GFP_KERNEL);
> +	node_caches = bitmap_zalloc(num_online_cpus(), GFP_KERNEL);

Please do take care to take new bitmap size into account in all
places. From what I can tell there is a later bitmap_weight() call that
still uses nr_node_ids as size.

>  	if (!node_caches)
>  		return 1;
>  
> @@ -1072,10 +1073,13 @@ static __init int snc_get_config(void)
>  
>  	for_each_node(node) {
>  		cpu = cpumask_first(cpumask_of_node(node));
> -		if (cpu < nr_cpu_ids)
> -			set_bit(get_cpu_cacheinfo_id(cpu, 3), node_caches);
> -		else
> +		if (cpu < nr_cpu_ids) {
> +			cache_id = get_cpu_cacheinfo_id(cpu, 3);
> +			if (cache_id != -1)
> +				set_bit(cache_id, node_caches);
> +		} else {
>  			mem_only_nodes++;
> +		}
>  	}
>  	cpus_read_unlock();
>  

Could this code be made even more robust by checking the computed
snc_nodes_per_l3_cache against the limited actually possible values?
Forcing it to 1 if something went wrong?

Reinette
  
Luck, Tony Nov. 30, 2023, 10:43 p.m. UTC | #4
On Thu, Nov 30, 2023 at 01:47:10PM -0800, Reinette Chatre wrote:
> Hi Tony,
> > diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
> > index 3293ab4c58b0..85f8a1b3feaf 100644
> > --- a/arch/x86/kernel/cpu/resctrl/core.c
> > +++ b/arch/x86/kernel/cpu/resctrl/core.c
> > @@ -1056,12 +1056,13 @@ static __init int snc_get_config(void)
> >  	unsigned long *node_caches;
> >  	int mem_only_nodes = 0;
> >  	int cpu, node, ret;
> > +	int cache_id;
> >  	int num_l3_caches;
> 
> Please do maintain reverse fir order.

Fixed.

> 
> >  
> >  	if (!x86_match_cpu(snc_cpu_ids))
> >  		return 1;
> 
> I understand and welcome this change as motivated by robustness. Apart
> from that, with this being a model specific feature for this particular
> group of systems, it is not clear to me in which scenarios this could
> run on a system where a present CPU does not have access to L3 cache.

Agreed that on these systems there should always be an L3 cache. Should
I drop the check for "-1"?

> >  
> > -	node_caches = bitmap_zalloc(nr_node_ids, GFP_KERNEL);
> > +	node_caches = bitmap_zalloc(num_online_cpus(), GFP_KERNEL);
> 
> Please do take care to take new bitmap size into account in all
> places. From what I can tell there is a later bitmap_weight() call that
> still uses nr_node_ids as size.

Oops. I was also using num_online_cpus() before cpus_read_lock(), so
things could theoretically change before the bitmap_weight() call.
I switched to using num_present_cpus() in both places.

> >  	if (!node_caches)
> >  		return 1;
> >  
> > @@ -1072,10 +1073,13 @@ static __init int snc_get_config(void)
> >  
> >  	for_each_node(node) {
> >  		cpu = cpumask_first(cpumask_of_node(node));
> > -		if (cpu < nr_cpu_ids)
> > -			set_bit(get_cpu_cacheinfo_id(cpu, 3), node_caches);
> > -		else
> > +		if (cpu < nr_cpu_ids) {
> > +			cache_id = get_cpu_cacheinfo_id(cpu, 3);
> > +			if (cache_id != -1)
> > +				set_bit(cache_id, node_caches);
> > +		} else {
> >  			mem_only_nodes++;
> > +		}
> >  	}
> >  	cpus_read_unlock();
> >  
> 
> Could this code be made even more robust by checking the computed
> snc_nodes_per_l3_cache against the limited actually possible values?
> Forcing it to 1 if something went wrong?

Added a couple of extra sanity checks. See updated incremental patch
below.

-Tony


diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 3293ab4c58b0..3684c6bf8224 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -1057,11 +1057,12 @@ static __init int snc_get_config(void)
 	int mem_only_nodes = 0;
 	int cpu, node, ret;
 	int num_l3_caches;
+	int cache_id;
 
 	if (!x86_match_cpu(snc_cpu_ids))
 		return 1;
 
-	node_caches = bitmap_zalloc(nr_node_ids, GFP_KERNEL);
+	node_caches = bitmap_zalloc(num_present_cpus(), GFP_KERNEL);
 	if (!node_caches)
 		return 1;
 
@@ -1072,23 +1073,39 @@ static __init int snc_get_config(void)
 
 	for_each_node(node) {
 		cpu = cpumask_first(cpumask_of_node(node));
-		if (cpu < nr_cpu_ids)
-			set_bit(get_cpu_cacheinfo_id(cpu, 3), node_caches);
-		else
+		if (cpu < nr_cpu_ids) {
+			cache_id = get_cpu_cacheinfo_id(cpu, 3);
+			if (cache_id != -1)
+				set_bit(cache_id, node_caches);
+		} else {
 			mem_only_nodes++;
+		}
 	}
 	cpus_read_unlock();
 
-	num_l3_caches = bitmap_weight(node_caches, nr_node_ids);
+	num_l3_caches = bitmap_weight(node_caches, num_present_cpus());
 	kfree(node_caches);
 
 	if (!num_l3_caches)
 		return 1;
 
+	/* sanity check #1: Number of CPU nodes must be a multiple of num_l3_caches */
+	if ((nr_node_ids - mem_only_nodes) % num_l3_caches)
+		return 1;
+
 	ret = (nr_node_ids - mem_only_nodes) / num_l3_caches;
 
-	if (ret > 1)
+	/* sanity check #2: Only valid results are 1, 2, 4 */
+	switch (ret) {
+	case 1:
+		break;
+	case 2:
+	case 4:
 		rdt_resources_all[RDT_RESOURCE_L3].r_resctrl.mon_scope = RESCTRL_NODE;
+		break;
+	default:
+		return 1;
+	}
 
 	return ret;
 }
  
Reinette Chatre Nov. 30, 2023, 11:40 p.m. UTC | #5
Hi Tony,

On 11/30/2023 2:43 PM, Tony Luck wrote:
> On Thu, Nov 30, 2023 at 01:47:10PM -0800, Reinette Chatre wrote:

...

>>>  	if (!x86_match_cpu(snc_cpu_ids))
>>>  		return 1;
>>
>> I understand and welcome this change as motivated by robustness. Apart
>> from that, with this being a model specific feature for this particular
>> group of systems, it it not clear to me in which scenarios this could
>> run on a system where a present CPU does not have access to L3 cache.
> 
> Agreed that on these systems there should always be an L3 cache. Should
> I drop the check for "-1"?

Please do keep it. I welcome the additional robustness. The static checker I
tried did not complain about this but I expect that it is something that
could trigger checks.

> 
>>>  
>>> -	node_caches = bitmap_zalloc(nr_node_ids, GFP_KERNEL);
>>> +	node_caches = bitmap_zalloc(num_online_cpus(), GFP_KERNEL);
>>
>> Please do take care to take new bitmap size into account in all
>> places. From what I can tell there is a later bitmap_weight() call that
>> still uses nr_node_ids as size.
> 
> Oops. I was also using num_online_cpus() before cpus_read_lock(), so
> things could theoretically change before the bitmap_weight() call.
> I switched to using num_present_cpus() in both places.

Thanks for catching this. I am not sure if num_present_cpus() is the right
choice. I found its comment to say "If HOTPLUG is enabled, then cpu_present_mask
varies dynamically ...". num_possible_cpus() seems more appropriate when looking
for something that does not change while not holding the hotplug lock. Reading its
description more closely also makes me wonder if the later
	num_online_cpus() != num_present_cpus()
should also maybe be 
	num_online_cpus() != num_possible_cpus() ?
It seems to more closely match the intention.

>>>  	if (!node_caches)
>>>  		return 1;
>>>  
>>> @@ -1072,10 +1073,13 @@ static __init int snc_get_config(void)
>>>  
>>>  	for_each_node(node) {
>>>  		cpu = cpumask_first(cpumask_of_node(node));
>>> -		if (cpu < nr_cpu_ids)
>>> -			set_bit(get_cpu_cacheinfo_id(cpu, 3), node_caches);
>>> -		else
>>> +		if (cpu < nr_cpu_ids) {
>>> +			cache_id = get_cpu_cacheinfo_id(cpu, 3);
>>> +			if (cache_id != -1)
>>> +				set_bit(cache_id, node_caches);
>>> +		} else {
>>>  			mem_only_nodes++;
>>> +		}
>>>  	}
>>>  	cpus_read_unlock();
>>>  
>>
>> Could this code be made even more robust by checking the computed
>> snc_nodes_per_l3_cache against the limited actually possible values?
>> Forcing it to 1 if something went wrong?
> 
> Added a couple of extra sanity checks. See updated incremental patch
> below.

Thank you very much. The additional checks look good to me.

Reinette
  
Luck, Tony Dec. 1, 2023, 12:37 a.m. UTC | #6
On Thu, Nov 30, 2023 at 03:40:52PM -0800, Reinette Chatre wrote:
> Hi Tony,
> 
> On 11/30/2023 2:43 PM, Tony Luck wrote:
> > On Thu, Nov 30, 2023 at 01:47:10PM -0800, Reinette Chatre wrote:
> 
> ...
> 
> >>>  	if (!x86_match_cpu(snc_cpu_ids))
> >>>  		return 1;
> >>
> >> I understand and welcome this change as motivated by robustness. Apart
> >> from that, with this being a model specific feature for this particular
> >> group of systems, it is not clear to me in which scenarios this could
> >> run on a system where a present CPU does not have access to L3 cache.
> > 
> > Agreed that on these systems there should always be an L3 cache. Should
> > I drop the check for "-1"?
> 
> Please do keep it. I welcome the additional robustness. The static checker I
> tried did not complain about this but I expect that it is something that
> could trigger checks.
> 
> > 
> >>>  
> >>> -	node_caches = bitmap_zalloc(nr_node_ids, GFP_KERNEL);
> >>> +	node_caches = bitmap_zalloc(num_online_cpus(), GFP_KERNEL);
> >>
> >> Please do take care to take new bitmap size into account in all
> >> places. From what I can tell there is a later bitmap_weight() call that
> >> still uses nr_node_ids as size.
> > 
> > Oops. I was also using num_online_cpus() before cpus_read_lock(), so
> > things could theoretically change before the bitmap_weight() call.
> > I switched to using num_present_cpus() in both places.
> 
> Thanks for catching this. I am not sure if num_present_cpus() is the right
> choice. I found its comment to say "If HOTPLUG is enabled, then cpu_present_mask
> varies dynamically ...". num_possible_cpus() seems more appropriate when looking

I can size the bitmask based on num_possible_cpus().

> for something that does not change while not holding the hotplug lock. Reading its
> description more closely also makes me wonder if the later
> 	num_online_cpus() != num_present_cpus()
> should also maybe be 
> 	num_online_cpus() != num_possible_cpus() ?
> It seems to more closely match the intention.

This seems problematic. On a system that does support physical CPU
hotplug, num_possible_cpus() may be some very large number, reserving
space for CPUs that can be added later. None of those CPUs can be
online (obviously!), so this test would fail on such a system.
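
To make the distinction concrete: the kernel guarantees that the online
mask is a subset of the present mask, which is a subset of the possible
mask, so a check along these lines (purely illustrative, not part of the
patch) only fires when installed CPUs are genuinely missing, e.g.
soft-offlined or hidden by maxcpus=N, and stays quiet on machines that
merely reserve possible-CPU slots for future physical hotplug:

/*
 * Illustrative only: num_online_cpus() <= num_present_cpus() <=
 * num_possible_cpus() always holds, but only the online vs. present
 * comparison reflects CPUs that are installed yet not running, which
 * is what can distort the node/L3-cache ratio.
 */
static bool snc_detection_reliable(void)
{
	return num_online_cpus() == num_present_cpus();
}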

> >>>  	if (!node_caches)
> >>>  		return 1;
> >>>  
> >>> @@ -1072,10 +1073,13 @@ static __init int snc_get_config(void)
> >>>  
> >>>  	for_each_node(node) {
> >>>  		cpu = cpumask_first(cpumask_of_node(node));
> >>> -		if (cpu < nr_cpu_ids)
> >>> -			set_bit(get_cpu_cacheinfo_id(cpu, 3), node_caches);
> >>> -		else
> >>> +		if (cpu < nr_cpu_ids) {
> >>> +			cache_id = get_cpu_cacheinfo_id(cpu, 3);
> >>> +			if (cache_id != -1)
> >>> +				set_bit(cache_id, node_caches);
> >>> +		} else {
> >>>  			mem_only_nodes++;
> >>> +		}
> >>>  	}
> >>>  	cpus_read_unlock();
> >>>  
> >>
> >> Could this code be made even more robust by checking the computed
> >> snc_nodes_per_l3_cache against the limited actually possible values?
> >> Forcing it to 1 if something went wrong?
> > 
> > Added a couple of extra sanity checks. See updated incremental patch
> > below.
> 
> Thank you very much. The additional checks look good to me.
> 
> Reinette

Thanks for looking at this. I'm applying changes to my local tree. I'll
give folks a little more time to find additional issues in v12 and post
v13 next week.

-Tony
  
Reinette Chatre Dec. 1, 2023, 2:08 a.m. UTC | #7
Hi Tony,

On 11/30/2023 4:37 PM, Tony Luck wrote:
> On Thu, Nov 30, 2023 at 03:40:52PM -0800, Reinette Chatre wrote:
>> Hi Tony,
>>
>> On 11/30/2023 2:43 PM, Tony Luck wrote:
>>> On Thu, Nov 30, 2023 at 01:47:10PM -0800, Reinette Chatre wrote:

>> for something that does not change while not holding the hotplug lock. Reading its
>> description more closely also makes me wonder if the later
>> 	num_online_cpus() != num_present_cpus()
>> should also maybe be 
>> 	num_online_cpus() != num_possible_cpus() ?
>> It seems to more closely match the intention.
> 
> This seems problematic. On a system that does support physical CPU
> hotplug, num_possible_cpus() may be some very large number, reserving
> space for CPUs that can be added later. None of those CPUs can be
> online (obviously!), so this test would fail on such a system.

Thank you for clarifying. It was not obvious to me how these bitmaps are
different with physical hotplug. 

Reinette
  

Patch

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1d51e1850ed0..94d29d81e6db 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -1111,6 +1111,7 @@ 
 #define MSR_IA32_QM_CTR			0xc8e
 #define MSR_IA32_PQR_ASSOC		0xc8f
 #define MSR_IA32_L3_CBM_BASE		0xc90
+#define MSR_RMID_SNC_CONFIG		0xca0
 #define MSR_IA32_L2_CBM_BASE		0xd10
 #define MSR_IA32_MBA_THRTL_BASE		0xd50
 
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index cf5aba8a74bf..3293ab4c58b0 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -16,11 +16,14 @@ 
 
 #define pr_fmt(fmt)	"resctrl: " fmt
 
+#include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/cacheinfo.h>
 #include <linux/cpuhotplug.h>
+#include <linux/mod_devicetable.h>
 
+#include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
 #include <asm/resctrl.h>
 #include "internal.h"
@@ -740,11 +743,42 @@  static void clear_closid_rmid(int cpu)
 	wrmsr(MSR_IA32_PQR_ASSOC, 0, 0);
 }
 
+/*
+ * The power-on reset value of MSR_RMID_SNC_CONFIG is 0x1
+ * which indicates that RMIDs are configured in legacy mode.
+ * This mode is incompatible with Linux resctrl semantics
+ * as RMIDs are partitioned between SNC nodes, which requires
+ * a user to know which RMID is allocated to a task.
+ * Clearing bit 0 reconfigures the RMID counters for use
+ * in Sub NUMA Cluster mode. This mode is better for Linux.
+ * The RMID space is divided between all SNC nodes with the
+ * RMIDs renumbered to start from zero in each node when
+ * counting operations from tasks. Code to read the counters
+ * must adjust RMID counter numbers based on SNC node. See
+ * __rmid_read() for code that does this.
+ */
+static void snc_remap_rmids(int cpu)
+{
+	u64 val;
+
+	/* Only need to enable once per package. */
+	if (cpumask_first(topology_core_cpumask(cpu)) != cpu)
+		return;
+
+	rdmsrl(MSR_RMID_SNC_CONFIG, val);
+	val &= ~BIT_ULL(0);
+	wrmsrl(MSR_RMID_SNC_CONFIG, val);
+}
+
 static int resctrl_online_cpu(unsigned int cpu)
 {
 	struct rdt_resource *r;
 
 	mutex_lock(&rdtgroup_mutex);
+
+	if (snc_nodes_per_l3_cache > 1)
+		snc_remap_rmids(cpu);
+
 	for_each_capable_rdt_resource(r)
 		domain_add_cpu(cpu, r);
 	/* The cpu is set in default rdtgroup after online. */
@@ -999,11 +1033,73 @@  static __init bool get_rdt_resources(void)
 	return (rdt_mon_capable || rdt_alloc_capable);
 }
 
+/* CPU models that support MSR_RMID_SNC_CONFIG */
+static const struct x86_cpu_id snc_cpu_ids[] __initconst = {
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0),
+	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 0),
+	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, 0),
+	X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, 0),
+	{}
+};
+
+/*
+ * There isn't a simple hardware bit that indicates whether a CPU is running
+ * in Sub NUMA Cluster (SNC) mode. Infer the state by comparing the
+ * ratio of NUMA nodes to L3 cache instances.
+ * It is not possible to accurately determine SNC state if the system is
+ * booted with a maxcpus=N parameter. That distorts the ratio of SNC nodes
+ * to L3 caches. It will be OK if the system is booted with hyperthreading
+ * disabled (since this doesn't affect the ratio).
+ */
+static __init int snc_get_config(void)
+{
+	unsigned long *node_caches;
+	int mem_only_nodes = 0;
+	int cpu, node, ret;
+	int num_l3_caches;
+
+	if (!x86_match_cpu(snc_cpu_ids))
+		return 1;
+
+	node_caches = bitmap_zalloc(nr_node_ids, GFP_KERNEL);
+	if (!node_caches)
+		return 1;
+
+	cpus_read_lock();
+
+	if (num_online_cpus() != num_present_cpus())
+		pr_warn("Some CPUs offline, SNC detection may be incorrect\n");
+
+	for_each_node(node) {
+		cpu = cpumask_first(cpumask_of_node(node));
+		if (cpu < nr_cpu_ids)
+			set_bit(get_cpu_cacheinfo_id(cpu, 3), node_caches);
+		else
+			mem_only_nodes++;
+	}
+	cpus_read_unlock();
+
+	num_l3_caches = bitmap_weight(node_caches, nr_node_ids);
+	kfree(node_caches);
+
+	if (!num_l3_caches)
+		return 1;
+
+	ret = (nr_node_ids - mem_only_nodes) / num_l3_caches;
+
+	if (ret > 1)
+		rdt_resources_all[RDT_RESOURCE_L3].r_resctrl.mon_scope = RESCTRL_NODE;
+
+	return ret;
+}
+
 static __init void rdt_init_res_defs_intel(void)
 {
 	struct rdt_hw_resource *hw_res;
 	struct rdt_resource *r;
 
+	snc_nodes_per_l3_cache = snc_get_config();
+
 	for_each_rdt_resource(r) {
 		hw_res = resctrl_to_arch_res(r);