RISC-V: Legitimise the const0_rtx for RVV indexed load/store
Commit Message
From: Pan Li <pan2.li@intel.com>
This patch tries to legitimise const0_rtx (aka the zero register)
as the base register for the RVV indexed load/store instructions
by allowing a const as the operand of the indexed RTL pattern.
The underlying combine pass will then try to perform the const
propagation.
For example:
vint32m1_t
test_vluxei32_v_i32m1_shortcut (vuint32m1_t bindex, size_t vl)
{
return __riscv_vluxei32_v_i32m1 ((int32_t *)0, bindex, vl);
}
Before this patch:
li a5,0 <- can be eliminated.
vl1re32.v v1,0(a1)
vsetvli zero,a2,e32,m1,ta,ma
vluxei32.v v1,(a5),v1 <- can propagate the const 0 to a5 here.
vs1r.v v1,0(a0)
ret
After this patch:
test_vluxei32_v_i32m1_shortcut:
vl1re32.v v1,0(a1)
vsetvli zero,a2,e32,m1,ta,ma
vluxei32.v v1,(0),v1
vs1r.v v1,0(a0)
ret
As shown above, this patch allows the combine pass to propagate the
const 0 (aka zero register) into the base register of the RVV indexed
load. This may also benefit the underlying RVV auto-vectorization.
Signed-off-by: Pan Li <pan2.li@intel.com>
Co-authored-by: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
gcc/ChangeLog:
* config/riscv/vector.md: Allow const as the operand of RVV
indexed load/store.
gcc/testsuite/ChangeLog:
* gcc.target/riscv/rvv/base/zero_base_load_store_optimization.c:
Adjust indexed load/store check condition.
---
gcc/config/riscv/vector.md | 32 +++++++++----------
.../base/zero_base_load_store_optimization.c | 3 +-
2 files changed, 18 insertions(+), 17 deletions(-)
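For context, the predicate and constraint that the diff below switches to come
from the RISC-V backend. A rough sketch of their definitions (paraphrased from
riscv/predicates.md and riscv/constraints.md; consult the sources for the exact
forms):

(define_special_predicate "pmode_reg_or_0_operand"
  (ior (match_operand 0 "const_0_operand")
       (match_operand 0 "pmode_register_operand")))

(define_constraint "J"
  "Integer zero."
  (and (match_code "const_int")
       (match_test "ival == 0")))

So the "rJ" constraint string accepts either a general-purpose register or the
constant zero, which is what lets combine substitute const0_rtx for the base
address operand.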
Comments
vluxei32.v v1,(0),v1 is not correct assembly.
Instead, it should be vluxei32.v v1,(zero),v1
You should change the assembly print: (%1) --> (%z1)
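In other words, the output templates of the affected patterns need the z
operand modifier so that a constant-zero base prints as the zero register.
Taking the store template visible in the diff below as an example, the change
suggested here would look roughly like this (a sketch of the review suggestion,
not the committed v2):

-  "vs<order>xei<VNX64_QHI:sew>.v\t%3,(%1),%2%p0"
+  "vs<order>xei<VNX64_QHI:sew>.v\t%3,(%z1),%2%p0"

With that, the example above would emit vluxei32.v v1,(zero),v1 instead of
vluxei32.v v1,(0),v1.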
juzhe.zhong@rivai.ai
From: pan2.li
Date: 2023-05-04 16:35
To: gcc-patches
CC: juzhe.zhong; kito.cheng; pan2.li; yanzhang.wang
Subject: [PATCH] RISC-V: Legitimise the const0_rtx for RVV indexed load/store
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index 92115e3935f..c3210eacd47 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -1511,7 +1511,7 @@ (define_insn "@pred_indexed_<order>load<mode>_same_eew"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:V
- [(match_operand 3 "pmode_register_operand" " r, r, r, r")
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ,rJ, rJ")
(mem:BLK (scratch))
(match_operand:<VINDEX> 4 "register_operand" " vr, vr,vr, vr")] ORDER)
(match_operand:V 2 "vector_merge_operand" " vu, vu, 0, 0")))]
@@ -1533,7 +1533,7 @@ (define_insn "@pred_indexed_<order>load<mode>_x2_greater_eew"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWEXT2
- [(match_operand 3 "pmode_register_operand" " r, r")
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
(mem:BLK (scratch))
(match_operand:<VINDEX_DOUBLE_TRUNC> 4 "register_operand" " vr, vr")] ORDER)
(match_operand:VEEWEXT2 2 "vector_merge_operand" " vu, 0")))]
@@ -1554,7 +1554,7 @@ (define_insn "@pred_indexed_<order>load<mode>_x4_greater_eew"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWEXT4
- [(match_operand 3 "pmode_register_operand" " r, r")
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
(mem:BLK (scratch))
(match_operand:<VINDEX_QUAD_TRUNC> 4 "register_operand" " vr, vr")] ORDER)
(match_operand:VEEWEXT4 2 "vector_merge_operand" " vu, 0")))]
@@ -1575,7 +1575,7 @@ (define_insn "@pred_indexed_<order>load<mode>_x8_greater_eew"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWEXT8
- [(match_operand 3 "pmode_register_operand" " r, r")
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
(mem:BLK (scratch))
(match_operand:<VINDEX_OCT_TRUNC> 4 "register_operand" " vr, vr")] ORDER)
(match_operand:VEEWEXT8 2 "vector_merge_operand" " vu, 0")))]
@@ -1597,7 +1597,7 @@ (define_insn "@pred_indexed_<order>load<mode>_x2_smaller_eew"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWTRUNC2
- [(match_operand 3 "pmode_register_operand" " r, r, r, r, r, r")
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ")
(mem:BLK (scratch))
(match_operand:<VINDEX_DOUBLE_EXT> 4 "register_operand" " 0, 0, 0, 0, vr, vr")] ORDER)
(match_operand:VEEWTRUNC2 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0")))]
@@ -1618,7 +1618,7 @@ (define_insn "@pred_indexed_<order>load<mode>_x4_smaller_eew"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWTRUNC4
- [(match_operand 3 "pmode_register_operand" " r, r, r, r, r, r")
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ")
(mem:BLK (scratch))
(match_operand:<VINDEX_QUAD_EXT> 4 "register_operand" " 0, 0, 0, 0, vr, vr")] ORDER)
(match_operand:VEEWTRUNC4 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0")))]
@@ -1639,7 +1639,7 @@ (define_insn "@pred_indexed_<order>load<mode>_x8_smaller_eew"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWTRUNC8
- [(match_operand 3 "pmode_register_operand" " r, r, r, r, r, r")
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ")
(mem:BLK (scratch))
(match_operand:<VINDEX_OCT_EXT> 4 "register_operand" " 0, 0, 0, 0, vr, vr")] ORDER)
(match_operand:VEEWTRUNC8 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0")))]
@@ -1657,7 +1657,7 @@ (define_insn "@pred_indexed_<order>store<VNX1_QHSD:mode><VNX1_QHSDI:mode>"
(match_operand 5 "const_int_operand" " i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
- (match_operand 1 "pmode_register_operand" " r")
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
(match_operand:VNX1_QHSDI 2 "register_operand" " vr")
(match_operand:VNX1_QHSD 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
@@ -1674,7 +1674,7 @@ (define_insn "@pred_indexed_<order>store<VNX2_QHSD:mode><VNX2_QHSDI:mode>"
(match_operand 5 "const_int_operand" " i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
- (match_operand 1 "pmode_register_operand" " r")
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
(match_operand:VNX2_QHSDI 2 "register_operand" " vr")
(match_operand:VNX2_QHSD 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
@@ -1691,7 +1691,7 @@ (define_insn "@pred_indexed_<order>store<VNX4_QHSD:mode><VNX4_QHSDI:mode>"
(match_operand 5 "const_int_operand" " i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
- (match_operand 1 "pmode_register_operand" " r")
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
(match_operand:VNX4_QHSDI 2 "register_operand" " vr")
(match_operand:VNX4_QHSD 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
@@ -1708,7 +1708,7 @@ (define_insn "@pred_indexed_<order>store<VNX8_QHSD:mode><VNX8_QHSDI:mode>"
(match_operand 5 "const_int_operand" " i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
- (match_operand 1 "pmode_register_operand" " r")
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
(match_operand:VNX8_QHSDI 2 "register_operand" " vr")
(match_operand:VNX8_QHSD 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
@@ -1725,7 +1725,7 @@ (define_insn "@pred_indexed_<order>store<VNX16_QHS:mode><VNX16_QHSI:mode>"
(match_operand 5 "const_int_operand" " i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
- (match_operand 1 "pmode_register_operand" " r")
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
(match_operand:VNX16_QHSI 2 "register_operand" " vr")
(match_operand:VNX16_QHS 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
@@ -1742,7 +1742,7 @@ (define_insn "@pred_indexed_<order>store<VNX32_QHS:mode><VNX32_QHSI:mode>"
(match_operand 5 "const_int_operand" " i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
- (match_operand 1 "pmode_register_operand" " r")
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
(match_operand:VNX32_QHSI 2 "register_operand" " vr")
(match_operand:VNX32_QHS 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
@@ -1759,8 +1759,8 @@ (define_insn "@pred_indexed_<order>store<VNX64_QH:mode><VNX64_QHI:mode>"
(match_operand 5 "const_int_operand" " i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
- (match_operand 1 "pmode_register_operand" " r")
- (match_operand:VNX64_QHI 2 "register_operand" " vr")
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
+ (match_operand:VNX64_QHI 2 "register_operand" " vr")
(match_operand:VNX64_QH 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
"vs<order>xei<VNX64_QHI:sew>.v\t%3,(%1),%2%p0"
@@ -1776,7 +1776,7 @@ (define_insn "@pred_indexed_<order>store<VNX128_Q:mode><VNX128_Q:mode>"
(match_operand 5 "const_int_operand" " i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
- (match_operand 1 "pmode_register_operand" " r")
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
(match_operand:VNX128_Q 2 "register_operand" " vr")
(match_operand:VNX128_Q 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/zero_base_load_store_optimization.c b/gcc/testsuite/gcc.target/riscv/rvv/base/zero_base_load_store_optimization.c
index 9f323b0ba9c..38f39647687 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/zero_base_load_store_optimization.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/zero_base_load_store_optimization.c
@@ -131,4 +131,5 @@ void test_vsoxei32_v_f32m1_shortcut (vuint32m1_t bindex, vfloat32m1_t val, size_
/* { dg-final { scan-assembler-times {v[ls]e[0-9]+\.v\s+v[0-9]+,\s*0\(zero\)} 6 } } */
/* { dg-final { scan-assembler-times {v[ls]se[0-9]+\.v\s+v[0-9]+,\s*0\(zero\),\s*[ax][0-9]+} 6 } } */
-/* { dg-final { scan-assembler-times {li\s+[a-x][0-9]+,\s*0} 12 } } */
+/* { dg-final { scan-assembler-times {v[ls][uo]xei[0-9]+\.v\s+v[0-9]+,\s*\(0\),\s*v[0-9]+} 12 } } */
+/* { dg-final { scan-assembler-not {li\s+[a-x][0-9]+,\s*0} } } */
--
2.34.1
Thanks Juzhe, makes sense, let me update it soon.
Pan
Updated in PATCH v2; x86 bootstrap and regression tests passed.
https://gcc.gnu.org/pipermail/gcc-patches/2023-May/617449.html
Pan
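Assuming v2 adopts the %z print fix, the indexed load/store scan pattern added
in the test above would presumably match the zero register rather than a
literal 0, e.g. something like (an illustration, not taken from the v2 patch):

/* { dg-final { scan-assembler-times {v[ls][uo]xei[0-9]+\.v\s+v[0-9]+,\s*\(zero\),\s*v[0-9]+} 12 } } */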