RISC-V: Fix incorrect use of vcompress in permutation auto-vectorization
Commit Message
This patch fixes the following FAILs on a zvl512b RV32 system:
FAIL: gcc.target/riscv/rvv/autovec/struct/struct_vect_run-12.c execution test
FAIL: gcc.target/riscv/rvv/autovec/struct/struct_vect_run-9.c execution test
The root cause is that the vcompress optimization is applied to the permutation indices = {0,3,7,0},
which is incorrect (see the sketch after the diffstat below). Fix the vcompress optimization bug.
PR target/112598
gcc/ChangeLog:
* config/riscv/riscv-v.cc (shuffle_compress_patterns): Fix vcompress bug.
gcc/testsuite/ChangeLog:
* gcc.target/riscv/rvv/autovec/pr112598-3.c: New test.
---
gcc/config/riscv/riscv-v.cc | 15 ++++++-------
.../gcc.target/riscv/rvv/autovec/pr112598-3.c | 21 +++++++++++++++++++
2 files changed, 29 insertions(+), 7 deletions(-)
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112598-3.c
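For context, here is an editorial sketch (not part of the patch or the testsuite) of the kind of permutation involved. The type and function names below are hypothetical; the point is only that a vec_perm whose indices repeat an already-selected element, such as {0,3,7,0}, cannot be formed by a single vcompress.vv, which packs a subset of source elements in strictly increasing index order.

/* Illustration only -- hypothetical reproducer, not the reduced test for
   PR target/112598.  Indices 0-3 select from A, indices 4-7 from B; the
   trailing 0 re-selects an element that already appeared before the
   compress point.  */
typedef unsigned long long v4di __attribute__ ((vector_size (32)));

v4di
perm_0_3_7_0 (v4di a, v4di b)
{
  return __builtin_shufflevector (a, b, 0, 3, 7, 0);
}

Whether this particular function reaches shuffle_compress_patterns depends on the target options; it is meant only to make the shape of the offending index vector concrete.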
Comments
Committed as it is an obvious bug fix.
juzhe.zhong@rivai.ai
From: Juzhe-Zhong
Date: 2023-11-22 18:53
To: gcc-patches
CC: kito.cheng; kito.cheng; jeffreyalaw; rdapp.gcc; Juzhe-Zhong
Subject: [PATCH] RISC-V: Fix incorrect use of vcompress in permutation auto-vectorization
This patch fixes the following FAILs on a zvl512b RV32 system:
FAIL: gcc.target/riscv/rvv/autovec/struct/struct_vect_run-12.c execution test
FAIL: gcc.target/riscv/rvv/autovec/struct/struct_vect_run-9.c execution test
The root cause is that the vcompress optimization is applied to the permutation indices = {0,3,7,0},
which is incorrect. Fix the vcompress optimization bug.
PR target/112598
gcc/ChangeLog:
* config/riscv/riscv-v.cc (shuffle_compress_patterns): Fix vcompress bug.
gcc/testsuite/ChangeLog:
* gcc.target/riscv/rvv/autovec/pr112598-3.c: New test.
---
gcc/config/riscv/riscv-v.cc | 15 ++++++-------
.../gcc.target/riscv/rvv/autovec/pr112598-3.c | 21 +++++++++++++++++++
2 files changed, 29 insertions(+), 7 deletions(-)
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112598-3.c
diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index 7d6d0821d87..7d3e8038dab 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -3005,14 +3005,15 @@ shuffle_compress_patterns (struct expand_vec_perm_d *d)
if (compress_point < 0)
return false;
- /* It must be series increasing from compress point. */
- if (!d->perm.series_p (compress_point, 1, d->perm[compress_point], 1))
- return false;
-
/* We can only apply compress approach when all index values from 0 to
compress point are increasing. */
for (int i = 1; i < compress_point; i++)
- if (known_le (d->perm[i], d->perm[i - 1]))
+ if (maybe_le (d->perm[i], d->perm[i - 1]))
+ return false;
+
+ /* It must be series increasing from compress point. */
+ for (int i = 1 + compress_point; i < vlen; i++)
+ if (maybe_ne (d->perm[i], d->perm[i - 1] + 1))
return false;
/* Success! */
@@ -3080,10 +3081,10 @@ shuffle_compress_patterns (struct expand_vec_perm_d *d)
if (need_slideup_p)
{
int slideup_cnt = vlen - (d->perm[vlen - 1].to_constant () % vlen) - 1;
- rtx ops[] = {d->target, d->op1, gen_int_mode (slideup_cnt, Pmode)};
+ merge = gen_reg_rtx (vmode);
+ rtx ops[] = {merge, d->op1, gen_int_mode (slideup_cnt, Pmode)};
insn_code icode = code_for_pred_slide (UNSPEC_VSLIDEUP, vmode);
emit_vlmax_insn (icode, BINARY_OP, ops);
- merge = d->target;
}
insn_code icode = code_for_pred_compress (vmode);
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112598-3.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112598-3.c
new file mode 100644
index 00000000000..231a068c680
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112598-3.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv_zvfh_zfh_zvl512b -mabi=ilp32d -O3 -ftree-vectorize -std=c99 -fno-vect-cost-model" } */
+
+#include <stdint-gcc.h>
+#define TYPE uint64_t
+#define ITYPE int64_t
+
+void __attribute__ ((noinline, noclone))
+foo (TYPE *__restrict a, TYPE *__restrict b, TYPE *__restrict c,
+ TYPE *__restrict d, ITYPE n)
+{
+ for (ITYPE i = 0; i < n; ++i)
+ {
+ d[i * 3] = a[i];
+ d[i * 3 + 1] = b[i];
+ d[i * 3 + 2] = c[i];
+ }
+}
+
+/* We don't want vcompress.vv. */
+/* { dg-final { scan-assembler-not {vcompress\.vv} } } */
--
2.36.3
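Editorial note (not from the thread): a scalar model of vcompress makes the strengthened checks easier to read. Assuming the RVV vcompress.vm semantics -- active elements of the source are packed, in source order, into the low positions of the destination, and the remaining positions take the merge value -- a sketch looks like this; the helper name is made up for illustration.

#include <stdint.h>

/* Hypothetical scalar sketch of vcompress.vm with a merge operand:
   elements of SRC whose MASK bit is set are packed, in increasing index
   order, into the low elements of DEST; the rest of DEST keeps MERGE.  */
void
vcompress_model (uint64_t *dest, const uint64_t *merge,
                 const uint64_t *src, const unsigned char *mask, int vl)
{
  int k = 0;
  for (int i = 0; i < vl; i++)
    if (mask[i])
      dest[k++] = src[i];   /* Source indices only ever increase.  */
  for (; k < vl; k++)
    dest[k] = merge[k];     /* Tail elements come from the merge value.  */
}

In this model the source indices written to DEST are strictly increasing and never repeat, so an output such as {op0[0], op0[3], op1[3], op0[0]} (the {0,3,7,0} permutation) is out of reach for one vcompress plus a slid-up merge operand; that is exactly what the new per-element checks in shuffle_compress_patterns reject.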