[1/6] aarch64: Fix move-after-intrinsic function-body tests

Message ID 20230509064831.1651327-2-richard.sandiford@arm.com
State Repeat Merge
Headers
Series aarch64: Avoid hard-coding specific register allocations

Checks

Context Check Description
snail/gcc-patch-check warning Git am fail log

Commit Message

Richard Sandiford May 9, 2023, 6:48 a.m. UTC
  Some of the SVE ACLE asm tests tried to be agnostic about the
instruction order, but only one of the alternatives was exercised
in practice.  This patch fixes latent typos in the other versions.

gcc/testsuite/
	* gcc.target/aarch64/sve2/acle/asm/aesd_u8.c: Fix expected register
	allocation in the case where a move occurs after the intrinsic
	instruction.
	* gcc.target/aarch64/sve2/acle/asm/aese_u8.c: Likewise.
	* gcc.target/aarch64/sve2/acle/asm/aesimc_u8.c: Likewise.
	* gcc.target/aarch64/sve2/acle/asm/aesmc_u8.c: Likewise.
	* gcc.target/aarch64/sve2/acle/asm/sm4e_u32.c: Likewise.
---
 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesd_u8.c   | 4 ++--
 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aese_u8.c   | 4 ++--
 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesimc_u8.c | 2 +-
 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesmc_u8.c  | 2 +-
 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4e_u32.c  | 2 +-
 5 files changed, 7 insertions(+), 7 deletions(-)
  

Patch

diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesd_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesd_u8.c
index 622f5cf4609..384b6ffc9aa 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesd_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesd_u8.c
@@ -28,13 +28,13 @@  TEST_UNIFORM_Z (aesd_u8_tied2, svuint8_t,
 **	mov	z0\.d, z1\.d
 **	aesd	z0\.b, z0\.b, z2\.b
 ** |
-**	aesd	z1\.b, z0\.b, z2\.b
+**	aesd	z1\.b, z1\.b, z2\.b
 **	mov	z0\.d, z1\.d
 ** |
 **	mov	z0\.d, z2\.d
 **	aesd	z0\.b, z0\.b, z1\.b
 ** |
-**	aesd	z2\.b, z0\.b, z1\.b
+**	aesd	z2\.b, z2\.b, z1\.b
 **	mov	z0\.d, z2\.d
 ** )
 **	ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aese_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aese_u8.c
index 6555bbb1de7..6381bce1661 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aese_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aese_u8.c
@@ -28,13 +28,13 @@  TEST_UNIFORM_Z (aese_u8_tied2, svuint8_t,
 **	mov	z0\.d, z1\.d
 **	aese	z0\.b, z0\.b, z2\.b
 ** |
-**	aese	z1\.b, z0\.b, z2\.b
+**	aese	z1\.b, z1\.b, z2\.b
 **	mov	z0\.d, z1\.d
 ** |
 **	mov	z0\.d, z2\.d
 **	aese	z0\.b, z0\.b, z1\.b
 ** |
-**	aese	z2\.b, z0\.b, z1\.b
+**	aese	z2\.b, z2\.b, z1\.b
 **	mov	z0\.d, z2\.d
 ** )
 **	ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesimc_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesimc_u8.c
index 4630595ff20..76259326467 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesimc_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesimc_u8.c
@@ -19,7 +19,7 @@  TEST_UNIFORM_Z (aesimc_u8_tied1, svuint8_t,
 **	mov	z0\.d, z1\.d
 **	aesimc	z0\.b, z0\.b
 ** |
-**	aesimc	z1\.b, z0\.b
+**	aesimc	z1\.b, z1\.b
 **	mov	z0\.d, z1\.d
 ** )
 **	ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesmc_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesmc_u8.c
index 6e8acf48f2a..30e83d381dc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesmc_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesmc_u8.c
@@ -19,7 +19,7 @@  TEST_UNIFORM_Z (aesmc_u8_tied1, svuint8_t,
 **	mov	z0\.d, z1\.d
 **	aesmc	z0\.b, z0\.b
 ** |
-**	aesmc	z1\.b, z0\.b
+**	aesmc	z1\.b, z1\.b
 **	mov	z0\.d, z1\.d
 ** )
 **	ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4e_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4e_u32.c
index 0ff5746d814..cf6a2a95235 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4e_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4e_u32.c
@@ -24,7 +24,7 @@  TEST_UNIFORM_Z (sm4e_u32_tied2, svuint32_t,
 **	mov	z0\.d, z1\.d
 **	sm4e	z0\.s, z0\.s, z2\.s
 ** |
-**	sm4e	z1\.s, z0\.s, z2\.s
+**	sm4e	z1\.s, z1\.s, z2\.s
 **	mov	z0\.d, z1\.d
 ** )
 **	ret