linux-next: manual merge of the kvm-arm tree with the arm64 tree

Message ID 20240219135805.1c4138a3@canb.auug.org.au
State New
Series linux-next: manual merge of the kvm-arm tree with the arm64 tree

Commit Message

Stephen Rothwell Feb. 19, 2024, 2:58 a.m. UTC
  Hi all,

Today's linux-next merge of the kvm-arm tree got a conflict in:

  arch/arm64/kernel/cpufeature.c

between commits:

  9cce9c6c2c3b ("arm64: mm: Handle LVA support as a CPU feature")
  352b0395b505 ("arm64: Enable 52-bit virtual addressing for 4k and 16k granule configs")

from the arm64 tree and commit:

  da9af5071b25 ("arm64: cpufeature: Detect HCR_EL2.NV1 being RES0")

from the kvm-arm tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.
  

Comments

Catalin Marinas Feb. 19, 2024, 12:14 p.m. UTC | #1
On Mon, Feb 19, 2024 at 01:58:05PM +1100, Stephen Rothwell wrote:
> diff --cc arch/arm64/kernel/cpufeature.c
> index 0be9296e9253,f309fd542c20..000000000000
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@@ -721,13 -754,12 +756,14 @@@ static const struct __ftr_reg_entry 
>   			       &id_aa64isar2_override),
>   
>   	/* Op1 = 0, CRn = 0, CRm = 7 */
>  -	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
>  +	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0,
>  +			       &id_aa64mmfr0_override),
>   	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
>   			       &id_aa64mmfr1_override),
>  -	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
>  +	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2,
>  +			       &id_aa64mmfr2_override),
>   	ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
> + 	ARM64_FTR_REG(SYS_ID_AA64MMFR4_EL1, ftr_id_aa64mmfr4),
>   
>   	/* Op1 = 1, CRn = 0, CRm = 0 */
>   	ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
> @@@ -2701,33 -2817,13 +2779,40 @@@ static const struct arm64_cpu_capabilit
>   		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
>   		.matches = has_lpa2,
>   	},
>  +#ifdef CONFIG_ARM64_VA_BITS_52
>  +	{
>  +		.capability = ARM64_HAS_VA52,
>  +		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
>  +		.matches = has_cpuid_feature,
>  +		.field_width = 4,
>  +#ifdef CONFIG_ARM64_64K_PAGES
>  +		.desc = "52-bit Virtual Addressing (LVA)",
>  +		.sign = FTR_SIGNED,
>  +		.sys_reg = SYS_ID_AA64MMFR2_EL1,
>  +		.field_pos = ID_AA64MMFR2_EL1_VARange_SHIFT,
>  +		.min_field_value = ID_AA64MMFR2_EL1_VARange_52,
>  +#else
>  +		.desc = "52-bit Virtual Addressing (LPA2)",
>  +		.sys_reg = SYS_ID_AA64MMFR0_EL1,
>  +#ifdef CONFIG_ARM64_4K_PAGES
>  +		.sign = FTR_SIGNED,
>  +		.field_pos = ID_AA64MMFR0_EL1_TGRAN4_SHIFT,
>  +		.min_field_value = ID_AA64MMFR0_EL1_TGRAN4_52_BIT,
>  +#else
>  +		.sign = FTR_UNSIGNED,
>  +		.field_pos = ID_AA64MMFR0_EL1_TGRAN16_SHIFT,
>  +		.min_field_value = ID_AA64MMFR0_EL1_TGRAN16_52_BIT,
>  +#endif
>  +#endif
>  +	},
>  +#endif
> + 	{
> + 		.desc = "NV1",
> + 		.capability = ARM64_HAS_HCR_NV1,
> + 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
> + 		.matches = has_nv1,
> + 		ARM64_CPUID_FIELDS_NEG(ID_AA64MMFR4_EL1, E2H0, NI_NV1)
> + 	},
>   	{},
>   };

Thanks Stephen. It looks fine.
  
Marc Zyngier Feb. 19, 2024, 3:22 p.m. UTC | #2
On Mon, 19 Feb 2024 12:14:18 +0000,
Catalin Marinas <catalin.marinas@arm.com> wrote:
> 
> On Mon, Feb 19, 2024 at 01:58:05PM +1100, Stephen Rothwell wrote:
> > diff --cc arch/arm64/kernel/cpufeature.c
> > index 0be9296e9253,f309fd542c20..000000000000
> > --- a/arch/arm64/kernel/cpufeature.c
> > +++ b/arch/arm64/kernel/cpufeature.c
> > @@@ -721,13 -754,12 +756,14 @@@ static const struct __ftr_reg_entry 
> >   			       &id_aa64isar2_override),
> >   
> >   	/* Op1 = 0, CRn = 0, CRm = 7 */
> >  -	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
> >  +	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0,
> >  +			       &id_aa64mmfr0_override),
> >   	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
> >   			       &id_aa64mmfr1_override),
> >  -	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
> >  +	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2,
> >  +			       &id_aa64mmfr2_override),
> >   	ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
> > + 	ARM64_FTR_REG(SYS_ID_AA64MMFR4_EL1, ftr_id_aa64mmfr4),
> >   
> >   	/* Op1 = 1, CRn = 0, CRm = 0 */
> >   	ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
> > @@@ -2701,33 -2817,13 +2779,40 @@@ static const struct arm64_cpu_capabilit
> >   		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
> >   		.matches = has_lpa2,
> >   	},
> >  +#ifdef CONFIG_ARM64_VA_BITS_52
> >  +	{
> >  +		.capability = ARM64_HAS_VA52,
> >  +		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
> >  +		.matches = has_cpuid_feature,
> >  +		.field_width = 4,
> >  +#ifdef CONFIG_ARM64_64K_PAGES
> >  +		.desc = "52-bit Virtual Addressing (LVA)",
> >  +		.sign = FTR_SIGNED,
> >  +		.sys_reg = SYS_ID_AA64MMFR2_EL1,
> >  +		.field_pos = ID_AA64MMFR2_EL1_VARange_SHIFT,
> >  +		.min_field_value = ID_AA64MMFR2_EL1_VARange_52,
> >  +#else
> >  +		.desc = "52-bit Virtual Addressing (LPA2)",
> >  +		.sys_reg = SYS_ID_AA64MMFR0_EL1,
> >  +#ifdef CONFIG_ARM64_4K_PAGES
> >  +		.sign = FTR_SIGNED,
> >  +		.field_pos = ID_AA64MMFR0_EL1_TGRAN4_SHIFT,
> >  +		.min_field_value = ID_AA64MMFR0_EL1_TGRAN4_52_BIT,
> >  +#else
> >  +		.sign = FTR_UNSIGNED,
> >  +		.field_pos = ID_AA64MMFR0_EL1_TGRAN16_SHIFT,
> >  +		.min_field_value = ID_AA64MMFR0_EL1_TGRAN16_52_BIT,
> >  +#endif
> >  +#endif
> >  +	},
> >  +#endif
> > + 	{
> > + 		.desc = "NV1",
> > + 		.capability = ARM64_HAS_HCR_NV1,
> > + 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
> > + 		.matches = has_nv1,
> > + 		ARM64_CPUID_FIELDS_NEG(ID_AA64MMFR4_EL1, E2H0, NI_NV1)
> > + 	},
> >   	{},
> >   };
> 
> Thanks Stephen. It looks fine.

Actually, it breaks 52-bit support in a "subtle" way (multiple reports
on the list and IRC, all pointing to failures on QEMU). The KVM tree
adds support for feature ranges, which this code is totally unaware
of: it only provides the min value and not the max. Things go wrong
from there.
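
To make the failure mode concrete for anyone not following the kvm-arm
series, here is a deliberately simplified sketch (plain C, not the real
arm64_cpu_capabilities handling or the exact range check) of why an
entry that only fills in the minimum stops matching once an upper bound
is also consulted:

#include <stdbool.h>

/* Simplified illustration only -- just the shape of the bug. */
struct cap {
	int min_field_value;	/* the open-coded entries set only this     */
	int max_field_value;	/* new upper bound; stays 0 in those entries */
};

static bool matches(const struct cap *c, int val)
{
	/* with feature ranges, both bounds must hold */
	return val >= c->min_field_value && val <= c->max_field_value;
}

/*
 * With min_field_value set to the 52-bit value and max_field_value
 * still 0, matches() can never return true, so the 52-bit VA
 * capability is silently never detected.
 */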

I propose to fix it like below, which makes it robust against the KVM
changes (patch applies to arm64/for-next/core). I have tested it in
combination with kvmarm/next, with 4kB and 16kB (LVA2), as well as
64kB (LVA).

Thanks,

	M.

From f24638a5f41424faf47f3d9035e6dcbd3800fcb6 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <maz@kernel.org>
Date: Mon, 19 Feb 2024 15:13:22 +0000
Subject: [PATCH] arm64: Use Signed/Unsigned enums for TGRAN{4,16,64} and
 VARange

Open-coding the feature matching parameters for LVA/LVA2 leads to
issues with upcoming changes to the cpufeature code.

By making TGRAN{4,16,64} and VARange signed/unsigned as per the
architecture, we can use the existing macros, making the feature
match robust against those changes.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kernel/cpufeature.c | 15 +++------------
 arch/arm64/tools/sysreg        |  8 ++++----
 2 files changed, 7 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 8f9665e8774b..2119e9dd0c4e 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2791,24 +2791,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.capability = ARM64_HAS_VA52,
 		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
 		.matches = has_cpuid_feature,
-		.field_width = 4,
 #ifdef CONFIG_ARM64_64K_PAGES
 		.desc = "52-bit Virtual Addressing (LVA)",
-		.sign = FTR_SIGNED,
-		.sys_reg = SYS_ID_AA64MMFR2_EL1,
-		.field_pos = ID_AA64MMFR2_EL1_VARange_SHIFT,
-		.min_field_value = ID_AA64MMFR2_EL1_VARange_52,
+		ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, VARange, 52)
 #else
 		.desc = "52-bit Virtual Addressing (LPA2)",
-		.sys_reg = SYS_ID_AA64MMFR0_EL1,
 #ifdef CONFIG_ARM64_4K_PAGES
-		.sign = FTR_SIGNED,
-		.field_pos = ID_AA64MMFR0_EL1_TGRAN4_SHIFT,
-		.min_field_value = ID_AA64MMFR0_EL1_TGRAN4_52_BIT,
+		ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, TGRAN4, 52_BIT)
 #else
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64MMFR0_EL1_TGRAN16_SHIFT,
-		.min_field_value = ID_AA64MMFR0_EL1_TGRAN16_52_BIT,
+		ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, TGRAN16, 52_BIT)
 #endif
 #endif
 	},
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index fa3fe0856880..670a33fca3bc 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -1540,16 +1540,16 @@ Enum	35:32	TGRAN16_2
 	0b0010	IMP
 	0b0011	52_BIT
 EndEnum
-Enum	31:28	TGRAN4
+SignedEnum	31:28	TGRAN4
 	0b0000	IMP
 	0b0001	52_BIT
 	0b1111	NI
 EndEnum
-Enum	27:24	TGRAN64
+SignedEnum	27:24	TGRAN64
 	0b0000	IMP
 	0b1111	NI
 EndEnum
-Enum	23:20	TGRAN16
+UnsignedEnum	23:20	TGRAN16
 	0b0000	NI
 	0b0001	IMP
 	0b0010	52_BIT
@@ -1697,7 +1697,7 @@ Enum	23:20	CCIDX
 	0b0000	32
 	0b0001	64
 EndEnum
-Enum	19:16	VARange
+UnsignedEnum	19:16	VARange
 	0b0000	48
 	0b0001	52
 EndEnum
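
For completeness, the reason the sysreg hunks are needed at all:
ARM64_CPUID_FIELDS() token-pastes a <reg>_<field>_SIGNED name into the
.sign initialiser, and as far as the generated headers are concerned
only SignedEnum/UnsignedEnum fields get that #define, not plain Enum
ones. A rough sketch of the expansion this patch relies on, using the
VARange line above (the real macro lives in
arch/arm64/include/asm/cpufeature.h, and the kvm-arm changes extend it
to fill in the new maximum as well):

	/* rough expansion sketch of ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, VARange, 52) */
	.sys_reg         = SYS_ID_AA64MMFR2_EL1,
	.field_pos       = ID_AA64MMFR2_EL1_VARange_SHIFT,
	.field_width     = ID_AA64MMFR2_EL1_VARange_WIDTH,
	.sign            = ID_AA64MMFR2_EL1_VARange_SIGNED,
	.min_field_value = ID_AA64MMFR2_EL1_VARange_52,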
  
Mark Rutland Feb. 19, 2024, 3:35 p.m. UTC | #3
On Mon, Feb 19, 2024 at 03:22:14PM +0000, Marc Zyngier wrote:
> From f24638a5f41424faf47f3d9035e6dcbd3800fcb6 Mon Sep 17 00:00:00 2001
> From: Marc Zyngier <maz@kernel.org>
> Date: Mon, 19 Feb 2024 15:13:22 +0000
> Subject: [PATCH] arm64: Use Signed/Unsigned enums for TGRAN{4,16,64} and
>  VARange
> 
> Open-coding the feature matching parameters for LVA/LVA2 leads to
> issues with upcoming changes to the cpufeature code.
> 
> By making TGRAN{4,16,64} and VARange signed/unsigned as per the
> architecture, we can use the existing macros, making the feature
> match robust against those changes.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>

I think this is the right thing to do; the patch itself looks good to me, so
FWIW:

Acked-by: Mark Rutland <mark.rutland@arm.com>

Mark.

> ---
>  arch/arm64/kernel/cpufeature.c | 15 +++------------
>  arch/arm64/tools/sysreg        |  8 ++++----
>  2 files changed, 7 insertions(+), 16 deletions(-)
> 
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index 8f9665e8774b..2119e9dd0c4e 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -2791,24 +2791,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>  		.capability = ARM64_HAS_VA52,
>  		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
>  		.matches = has_cpuid_feature,
> -		.field_width = 4,
>  #ifdef CONFIG_ARM64_64K_PAGES
>  		.desc = "52-bit Virtual Addressing (LVA)",
> -		.sign = FTR_SIGNED,
> -		.sys_reg = SYS_ID_AA64MMFR2_EL1,
> -		.field_pos = ID_AA64MMFR2_EL1_VARange_SHIFT,
> -		.min_field_value = ID_AA64MMFR2_EL1_VARange_52,
> +		ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, VARange, 52)
>  #else
>  		.desc = "52-bit Virtual Addressing (LPA2)",
> -		.sys_reg = SYS_ID_AA64MMFR0_EL1,
>  #ifdef CONFIG_ARM64_4K_PAGES
> -		.sign = FTR_SIGNED,
> -		.field_pos = ID_AA64MMFR0_EL1_TGRAN4_SHIFT,
> -		.min_field_value = ID_AA64MMFR0_EL1_TGRAN4_52_BIT,
> +		ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, TGRAN4, 52_BIT)
>  #else
> -		.sign = FTR_UNSIGNED,
> -		.field_pos = ID_AA64MMFR0_EL1_TGRAN16_SHIFT,
> -		.min_field_value = ID_AA64MMFR0_EL1_TGRAN16_52_BIT,
> +		ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, TGRAN16, 52_BIT)
>  #endif
>  #endif
>  	},
> diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
> index fa3fe0856880..670a33fca3bc 100644
> --- a/arch/arm64/tools/sysreg
> +++ b/arch/arm64/tools/sysreg
> @@ -1540,16 +1540,16 @@ Enum	35:32	TGRAN16_2
>  	0b0010	IMP
>  	0b0011	52_BIT
>  EndEnum
> -Enum	31:28	TGRAN4
> +SignedEnum	31:28	TGRAN4
>  	0b0000	IMP
>  	0b0001	52_BIT
>  	0b1111	NI
>  EndEnum
> -Enum	27:24	TGRAN64
> +SignedEnum	27:24	TGRAN64
>  	0b0000	IMP
>  	0b1111	NI
>  EndEnum
> -Enum	23:20	TGRAN16
> +UnsignedEnum	23:20	TGRAN16
>  	0b0000	NI
>  	0b0001	IMP
>  	0b0010	52_BIT
> @@ -1697,7 +1697,7 @@ Enum	23:20	CCIDX
>  	0b0000	32
>  	0b0001	64
>  EndEnum
> -Enum	19:16	VARange
> +UnsignedEnum	19:16	VARange
>  	0b0000	48
>  	0b0001	52
>  EndEnum
> -- 
> 2.39.2
> 
> 
> -- 
> Without deviation from the norm, progress is not possible.
  
Ard Biesheuvel Feb. 19, 2024, 3:43 p.m. UTC | #4
On Mon, 19 Feb 2024 at 16:22, Marc Zyngier <maz@kernel.org> wrote:
>
> On Mon, 19 Feb 2024 12:14:18 +0000,
> Catalin Marinas <catalin.marinas@arm.com> wrote:
> >
> > On Mon, Feb 19, 2024 at 01:58:05PM +1100, Stephen Rothwell wrote:
> > > diff --cc arch/arm64/kernel/cpufeature.c
> > > index 0be9296e9253,f309fd542c20..000000000000
> > > --- a/arch/arm64/kernel/cpufeature.c
> > > +++ b/arch/arm64/kernel/cpufeature.c
> > > @@@ -721,13 -754,12 +756,14 @@@ static const struct __ftr_reg_entry
> > >                            &id_aa64isar2_override),
> > >
> > >     /* Op1 = 0, CRn = 0, CRm = 7 */
> > >  -  ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
> > >  +  ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0,
> > >  +                         &id_aa64mmfr0_override),
> > >     ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
> > >                            &id_aa64mmfr1_override),
> > >  -  ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
> > >  +  ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2,
> > >  +                         &id_aa64mmfr2_override),
> > >     ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
> > > +   ARM64_FTR_REG(SYS_ID_AA64MMFR4_EL1, ftr_id_aa64mmfr4),
> > >
> > >     /* Op1 = 1, CRn = 0, CRm = 0 */
> > >     ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
> > > @@@ -2701,33 -2817,13 +2779,40 @@@ static const struct arm64_cpu_capabilit
> > >             .type = ARM64_CPUCAP_SYSTEM_FEATURE,
> > >             .matches = has_lpa2,
> > >     },
> > >  +#ifdef CONFIG_ARM64_VA_BITS_52
> > >  +  {
> > >  +          .capability = ARM64_HAS_VA52,
> > >  +          .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
> > >  +          .matches = has_cpuid_feature,
> > >  +          .field_width = 4,
> > >  +#ifdef CONFIG_ARM64_64K_PAGES
> > >  +          .desc = "52-bit Virtual Addressing (LVA)",
> > >  +          .sign = FTR_SIGNED,
> > >  +          .sys_reg = SYS_ID_AA64MMFR2_EL1,
> > >  +          .field_pos = ID_AA64MMFR2_EL1_VARange_SHIFT,
> > >  +          .min_field_value = ID_AA64MMFR2_EL1_VARange_52,
> > >  +#else
> > >  +          .desc = "52-bit Virtual Addressing (LPA2)",
> > >  +          .sys_reg = SYS_ID_AA64MMFR0_EL1,
> > >  +#ifdef CONFIG_ARM64_4K_PAGES
> > >  +          .sign = FTR_SIGNED,
> > >  +          .field_pos = ID_AA64MMFR0_EL1_TGRAN4_SHIFT,
> > >  +          .min_field_value = ID_AA64MMFR0_EL1_TGRAN4_52_BIT,
> > >  +#else
> > >  +          .sign = FTR_UNSIGNED,
> > >  +          .field_pos = ID_AA64MMFR0_EL1_TGRAN16_SHIFT,
> > >  +          .min_field_value = ID_AA64MMFR0_EL1_TGRAN16_52_BIT,
> > >  +#endif
> > >  +#endif
> > >  +  },
> > >  +#endif
> > > +   {
> > > +           .desc = "NV1",
> > > +           .capability = ARM64_HAS_HCR_NV1,
> > > +           .type = ARM64_CPUCAP_SYSTEM_FEATURE,
> > > +           .matches = has_nv1,
> > > +           ARM64_CPUID_FIELDS_NEG(ID_AA64MMFR4_EL1, E2H0, NI_NV1)
> > > +   },
> > >     {},
> > >   };
> >
> > Thanks Stephen. It looks fine.
>
> Actually, it breaks 52-bit support in a "subtle" way (multiple reports
> on the list and IRC, all pointing to failures on QEMU). The KVM tree
> adds support for feature ranges, which this code is totally unaware
> of: it only provides the min value and not the max. Things go wrong
> from there.
>
> I propose to fix it like below, which makes it robust against the KVM
> changes (patch applies to arm64/for-next/core). I have tested it in
> combination with kvmarm/next, with 4kB and 16kB (LVA2), as well as
> 64kB (LVA).
>
> Thanks,
>
>         M.
>
> From f24638a5f41424faf47f3d9035e6dcbd3800fcb6 Mon Sep 17 00:00:00 2001
> From: Marc Zyngier <maz@kernel.org>
> Date: Mon, 19 Feb 2024 15:13:22 +0000
> Subject: [PATCH] arm64: Use Signed/Unsigned enums for TGRAN{4,16,64} and
>  VARange
>
> Open-coding the feature matching parameters for LVA/LVA2 leads to
> issues with upcoming changes to the cpufeature code.
>
> By making TGRAN{4,16,64} and VARange signed/unsigned as per the
> architecture, we can use the existing macros, making the feature
> match robust against those changes.
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>

Thanks for the fix.

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Ard Biesheuvel <ardb@kernel.org>
  
Catalin Marinas Feb. 19, 2024, 4:49 p.m. UTC | #5
On Mon, Feb 19, 2024 at 03:22:14PM +0000, Marc Zyngier wrote:
> From f24638a5f41424faf47f3d9035e6dcbd3800fcb6 Mon Sep 17 00:00:00 2001
> From: Marc Zyngier <maz@kernel.org>
> Date: Mon, 19 Feb 2024 15:13:22 +0000
> Subject: [PATCH] arm64: Use Signed/Unsigned enums for TGRAN{4,16,64} and
>  VARange
> 
> Open-coding the feature matching parameters for LVA/LVA2 leads to
> issues with upcoming changes to the cpufeature code.
> 
> By making TGRAN{4,16,64} and VARange signed/unsigned as per the
> architecture, we can use the existing macros, making the feature
> match robust against those changes.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>

Thanks Marc. I applied it on top of the arm64 for-next/stage1-lpa2
branch.
  
