RISC-V: Fix constraint bug for binary operation
Checks
Commit Message
From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
Current constraint configuration will generate:
vadd.vv v0,v24,v25,v0.t
vsll.vx v0,v24,a5,v0.t
They are incorrect according to the RVV ISA.
This patch fixes this obvious issue.
gcc/ChangeLog:
* config/riscv/vector-iterators.md (sll.vi): Fix constraint bug.
(sll.vv): Ditto.
(%3,%4): Ditto.
(%3,%v4): Ditto.
* config/riscv/vector.md: Ditto.
gcc/testsuite/ChangeLog:
* gcc.target/riscv/rvv/base/binop_vv_constraint-1.c:
* gcc.target/riscv/rvv/base/shift_vx_constraint-1.c:
---
gcc/config/riscv/vector-iterators.md | 86 +++++++++----------
gcc/config/riscv/vector.md | 41 +++++----
.../riscv/rvv/base/binop_vv_constraint-1.c | 8 +-
.../riscv/rvv/base/shift_vx_constraint-1.c | 9 +-
4 files changed, 75 insertions(+), 69 deletions(-)
Comments
committed, thanks!
On Wed, Feb 1, 2023 at 9:48 AM <juzhe.zhong@rivai.ai> wrote:
>
> From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
>
> Current constraint configuration will generate:
> vadd.vv v0,v24,v25,v0.t
> vsll.vx v0,v24,a5,v0.t
>
> They are incorrect according to RVV ISA.
> This patch fix this obvious issue.
>
> gcc/ChangeLog:
>
> * config/riscv/vector-iterators.md (sll.vi): Fix constraint bug.
> (sll.vv): Ditto.
> (%3,%4): Ditto.
> (%3,%v4): Ditto.
> * config/riscv/vector.md: Ditto.
>
> gcc/testsuite/ChangeLog:
>
> * gcc.target/riscv/rvv/base/binop_vv_constraint-1.c:
> * gcc.target/riscv/rvv/base/shift_vx_constraint-1.c:
>
> ---
> gcc/config/riscv/vector-iterators.md | 86 +++++++++----------
> gcc/config/riscv/vector.md | 41 +++++----
> .../riscv/rvv/base/binop_vv_constraint-1.c | 8 +-
> .../riscv/rvv/base/shift_vx_constraint-1.c | 9 +-
> 4 files changed, 75 insertions(+), 69 deletions(-)
>
> diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
> index a2f192d6ba0..4f9799ade05 100644
> --- a/gcc/config/riscv/vector-iterators.md
> +++ b/gcc/config/riscv/vector-iterators.md
> @@ -229,42 +229,42 @@
> (umod "register_operand")])
>
> (define_code_attr binop_rhs1_constraint [
> - (plus "vr,vr,vr")
> - (minus "vr,vr,vi")
> - (ior "vr,vr,vr")
> - (xor "vr,vr,vr")
> - (and "vr,vr,vr")
> - (ashift "vr,vr,vr")
> - (ashiftrt "vr,vr,vr")
> - (lshiftrt "vr,vr,vr")
> - (smin "vr,vr,vr")
> - (smax "vr,vr,vr")
> - (umin "vr,vr,vr")
> - (umax "vr,vr,vr")
> - (mult "vr,vr,vr")
> - (div "vr,vr,vr")
> - (mod "vr,vr,vr")
> - (udiv "vr,vr,vr")
> - (umod "vr,vr,vr")])
> + (plus "vr,vr,vr,vr,vr,vr")
> + (minus "vr,vr,vr,vr,vi,vi")
> + (ior "vr,vr,vr,vr,vr,vr")
> + (xor "vr,vr,vr,vr,vr,vr")
> + (and "vr,vr,vr,vr,vr,vr")
> + (ashift "vr,vr,vr,vr,vr,vr")
> + (ashiftrt "vr,vr,vr,vr,vr,vr")
> + (lshiftrt "vr,vr,vr,vr,vr,vr")
> + (smin "vr,vr,vr,vr,vr,vr")
> + (smax "vr,vr,vr,vr,vr,vr")
> + (umin "vr,vr,vr,vr,vr,vr")
> + (umax "vr,vr,vr,vr,vr,vr")
> + (mult "vr,vr,vr,vr,vr,vr")
> + (div "vr,vr,vr,vr,vr,vr")
> + (mod "vr,vr,vr,vr,vr,vr")
> + (udiv "vr,vr,vr,vr,vr,vr")
> + (umod "vr,vr,vr,vr,vr,vr")])
>
> (define_code_attr binop_rhs2_constraint [
> - (plus "vr,vi,vr")
> - (minus "vr,vj,vr")
> - (ior "vr,vi,vr")
> - (xor "vr,vi,vr")
> - (and "vr,vi,vr")
> - (ashift "vr,vk,vr")
> - (ashiftrt "vr,vk,vr")
> - (lshiftrt "vr,vk,vr")
> - (smin "vr,vr,vr")
> - (smax "vr,vr,vr")
> - (umin "vr,vr,vr")
> - (umax "vr,vr,vr")
> - (mult "vr,vr,vr")
> - (div "vr,vr,vr")
> - (mod "vr,vr,vr")
> - (udiv "vr,vr,vr")
> - (umod "vr,vr,vr")])
> + (plus "vr,vr,vi,vi,vr,vr")
> + (minus "vr,vr,vj,vj,vr,vr")
> + (ior "vr,vr,vi,vi,vr,vr")
> + (xor "vr,vr,vi,vi,vr,vr")
> + (and "vr,vr,vi,vi,vr,vr")
> + (ashift "vr,vr,vk,vk,vr,vr")
> + (ashiftrt "vr,vr,vk,vk,vr,vr")
> + (lshiftrt "vr,vr,vk,vk,vr,vr")
> + (smin "vr,vr,vr,vr,vr,vr")
> + (smax "vr,vr,vr,vr,vr,vr")
> + (umin "vr,vr,vr,vr,vr,vr")
> + (umax "vr,vr,vr,vr,vr,vr")
> + (mult "vr,vr,vr,vr,vr,vr")
> + (div "vr,vr,vr,vr,vr,vr")
> + (mod "vr,vr,vr,vr,vr,vr")
> + (udiv "vr,vr,vr,vr,vr,vr")
> + (umod "vr,vr,vr,vr,vr,vr")])
>
> (define_code_attr int_binop_insn_type [
> (plus "vialu")
> @@ -285,9 +285,9 @@
> (udiv "vidiv")
> (umod "vidiv")])
>
> -;; <binop_alt1_insn> expands to the insn name of binop matching constraint alternative = 1.
> +;; <binop_imm_rhs1_insn> expands to the insn name of binop matching constraint rhs1 is immediate.
> ;; minus is negated as vadd and ss_minus is negated as vsadd, others remain <insn>.
> -(define_code_attr binop_alt1_insn [(ashift "sll.vi")
> +(define_code_attr binop_imm_rhs1_insn [(ashift "sll.vi")
> (ashiftrt "sra.vi")
> (lshiftrt "srl.vi")
> (div "div.vv")
> @@ -305,9 +305,9 @@
> (umax "maxu.vv")
> (mult "mul.vv")])
>
> -;; <binop_alt2_insn> expands to the insn name of binop matching constraint alternative = 2.
> +;; <binop_imm_rhs2_insn> expands to the insn name of binop matching constraint rhs2 is immediate.
> ;; minus is reversed as vrsub, others remain <insn>.
> -(define_code_attr binop_alt2_insn [(ashift "sll.vv")
> +(define_code_attr binop_imm_rhs2_insn [(ashift "sll.vv")
> (ashiftrt "sra.vv")
> (lshiftrt "srl.vv")
> (div "div.vv")
> @@ -325,9 +325,9 @@
> (umax "maxu.vv")
> (mult "mul.vv")])
>
> -(define_code_attr binop_alt1_op [(ashift "%3,%4")
> - (ashiftrt "%3,%4")
> - (lshiftrt "%3,%4")
> +(define_code_attr binop_imm_rhs1_op [(ashift "%3,%v4")
> + (ashiftrt "%3,%v4")
> + (lshiftrt "%3,%v4")
> (div "%3,%4")
> (mod "%3,%4")
> (udiv "%3,%4")
> @@ -335,7 +335,7 @@
> (ior "%3,%4")
> (xor "%3,%4")
> (and "%3,%4")
> - (plus "%3,%4")
> + (plus "%3,%v4")
> (minus "%3,%V4")
> (smin "%3,%4")
> (smax "%3,%4")
> @@ -343,7 +343,7 @@
> (umax "%3,%4")
> (mult "%3,%4")])
>
> -(define_code_attr binop_alt2_op [(ashift "%3,%4")
> +(define_code_attr binop_imm_rhs2_op [(ashift "%3,%4")
> (ashiftrt "%3,%4")
> (lshiftrt "%3,%4")
> (div "%3,%4")
> diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
> index 36b0e07728c..5267417465a 100644
> --- a/gcc/config/riscv/vector.md
> +++ b/gcc/config/riscv/vector.md
> @@ -1127,25 +1127,28 @@
> ;; -------------------------------------------------------------------------------
>
> (define_insn "@pred_<optab><mode>"
> - [(set (match_operand:VI 0 "register_operand" "=vr, vr, vr")
> + [(set (match_operand:VI 0 "register_operand" "=vd, vr, vd, vr, vd, vr")
> (if_then_else:VI
> (unspec:<VM>
> - [(match_operand:<VM> 1 "vector_mask_operand" " vmWc1,vmWc1,vmWc1")
> - (match_operand 5 "vector_length_operand" " rK, rK, rK")
> - (match_operand 6 "const_int_operand" " i, i, i")
> - (match_operand 7 "const_int_operand" " i, i, i")
> - (match_operand 8 "const_int_operand" " i, i, i")
> + [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1")
> + (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK")
> + (match_operand 6 "const_int_operand" " i, i, i, i, i, i")
> + (match_operand 7 "const_int_operand" " i, i, i, i, i, i")
> + (match_operand 8 "const_int_operand" " i, i, i, i, i, i")
> (reg:SI VL_REGNUM)
> (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
> (any_int_binop:VI
> (match_operand:VI 3 "<binop_rhs1_predicate>" "<binop_rhs1_constraint>")
> (match_operand:VI 4 "<binop_rhs2_predicate>" "<binop_rhs2_constraint>"))
> - (match_operand:VI 2 "vector_merge_operand" " 0vu, 0vu, 0vu")))]
> + (match_operand:VI 2 "vector_merge_operand" "0vu,0vu,0vu,0vu,0vu,0vu")))]
> "TARGET_VECTOR"
> "@
> v<insn>.vv\t%0,%3,%4%p1
> - v<binop_alt1_insn>\t%0,<binop_alt1_op>%p1
> - v<binop_alt2_insn>\t%0,<binop_alt2_op>%p1"
> + v<insn>.vv\t%0,%3,%4%p1
> + v<binop_imm_rhs1_insn>\t%0,<binop_imm_rhs1_op>%p1
> + v<binop_imm_rhs1_insn>\t%0,<binop_imm_rhs1_op>%p1
> + v<binop_imm_rhs2_insn>\t%0,<binop_imm_rhs2_op>%p1
> + v<binop_imm_rhs2_insn>\t%0,<binop_imm_rhs2_op>%p1"
> [(set_attr "type" "<int_binop_insn_type>")
> (set_attr "mode" "<MODE>")])
>
> @@ -1154,23 +1157,25 @@
> ;; For vsll.vx/vsra.vx/vsrl.vx the scalar mode should be Pmode wheras the
> ;; scalar mode is inner mode of the RVV mode for other vx patterns.
> (define_insn "@pred_<optab><mode>_scalar"
> - [(set (match_operand:VI 0 "register_operand" "=vr, vr")
> + [(set (match_operand:VI 0 "register_operand" "=vd, vr, vd, vr")
> (if_then_else:VI
> (unspec:<VM>
> - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
> - (match_operand 5 "vector_length_operand" " rK, rK")
> - (match_operand 6 "const_int_operand" " i, i")
> - (match_operand 7 "const_int_operand" " i, i")
> - (match_operand 8 "const_int_operand" " i, i")
> + [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1")
> + (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
> + (match_operand 6 "const_int_operand" " i, i, i, i")
> + (match_operand 7 "const_int_operand" " i, i, i, i")
> + (match_operand 8 "const_int_operand" " i, i, i, i")
> (reg:SI VL_REGNUM)
> (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
> (any_shift:VI
> - (match_operand:VI 3 "register_operand" " vr, vr")
> - (match_operand 4 "pmode_reg_or_uimm5_operand" " r, K"))
> - (match_operand:VI 2 "vector_merge_operand" "0vu, 0vu")))]
> + (match_operand:VI 3 "register_operand" " vr, vr, vr, vr")
> + (match_operand 4 "pmode_reg_or_uimm5_operand" " r, r, K, K"))
> + (match_operand:VI 2 "vector_merge_operand" "0vu,0vu,0vu,0vu")))]
> "TARGET_VECTOR"
> "@
> v<insn>.vx\t%0,%3,%4%p1
> + v<insn>.vx\t%0,%3,%4%p1
> + v<insn>.vi\t%0,%3,%4%p1
> v<insn>.vi\t%0,%3,%4%p1"
> [(set_attr "type" "vshift")
> (set_attr "mode" "<MODE>")])
> diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vv_constraint-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vv_constraint-1.c
> index 3ab1ccee035..3372ec1d230 100644
> --- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vv_constraint-1.c
> +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vv_constraint-1.c
> @@ -29,7 +29,7 @@ void f1 (void * in, void *out)
> ** vsetivli\tzero,4,e32,m1,ta,ma
> ** vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
> ** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
> -** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
> +** vadd\.vv\tv[1-9][0-9]?,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
> ** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
> ** ret
> */
> @@ -52,7 +52,7 @@ void f2 (void * in, void *out)
> ** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
> ** vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
> ** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
> -** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
> +** vadd\.vv\tv[1-9][0-9]?,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
> ** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
> ** ret
> */
> @@ -93,7 +93,7 @@ void f4 (void * in, void *out)
> ** vsetivli\tzero,4,e8,mf8,ta,ma
> ** vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
> ** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
> -** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
> +** vadd\.vv\tv[1-9][0-9]?,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
> ** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
> ** ret
> */
> @@ -116,7 +116,7 @@ void f5 (void * in, void *out)
> ** vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
> ** vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
> ** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
> -** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
> +** vadd\.vv\tv[1-9][0-9]?,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
> ** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
> ** ret
> */
> diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/shift_vx_constraint-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/shift_vx_constraint-1.c
> index ae3883c5af9..e40e193220c 100644
> --- a/gcc/testsuite/gcc.target/riscv/rvv/base/shift_vx_constraint-1.c
> +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/shift_vx_constraint-1.c
> @@ -28,9 +28,10 @@ void f1 (void * in, void *out)
> ** vlm.v\tv[0-9]+,0\([a-x0-9]+\)
> ** ...
> ** vsetivli\tzero,4,e32,m1,ta,ma
> +** ...
> ** vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
> ** vsll\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
> -** vsll\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
> +** vsll\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
> ** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
> ** ret
> */
> @@ -53,7 +54,7 @@ void f2 (void * in, void *out)
> ** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
> ** vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
> ** vsll\.vi\tv[0-9]+,\s*v[0-9]+,\s*17
> -** vsll\.vi\tv[0-9]+,\s*v[0-9]+,\s*17,\s*v0.t
> +** vsll\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*17,\s*v0.t
> ** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
> ** ret
> */
> @@ -94,7 +95,7 @@ void f4 (void * in, void *out, size_t x)
> ** vsetivli\tzero,4,e8,mf8,ta,ma
> ** vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
> ** vsll\.vi\tv[0-9]+,\s*v[0-9]+,\s*5
> -** vsll\.vi\tv[0-9]+,\s*v[0-9]+,\s*5,\s*v0.t
> +** vsll\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*5,\s*v0.t
> ** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
> ** ret
> */
> @@ -117,7 +118,7 @@ void f5 (void * in, void *out)
> ** vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
> ** vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
> ** vsll\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
> -** vsll\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
> +** vsll\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
> ** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
> ** ret
> */
> --
> 2.36.3
>
@@ -229,42 +229,42 @@
(umod "register_operand")])
(define_code_attr binop_rhs1_constraint [
- (plus "vr,vr,vr")
- (minus "vr,vr,vi")
- (ior "vr,vr,vr")
- (xor "vr,vr,vr")
- (and "vr,vr,vr")
- (ashift "vr,vr,vr")
- (ashiftrt "vr,vr,vr")
- (lshiftrt "vr,vr,vr")
- (smin "vr,vr,vr")
- (smax "vr,vr,vr")
- (umin "vr,vr,vr")
- (umax "vr,vr,vr")
- (mult "vr,vr,vr")
- (div "vr,vr,vr")
- (mod "vr,vr,vr")
- (udiv "vr,vr,vr")
- (umod "vr,vr,vr")])
+ (plus "vr,vr,vr,vr,vr,vr")
+ (minus "vr,vr,vr,vr,vi,vi")
+ (ior "vr,vr,vr,vr,vr,vr")
+ (xor "vr,vr,vr,vr,vr,vr")
+ (and "vr,vr,vr,vr,vr,vr")
+ (ashift "vr,vr,vr,vr,vr,vr")
+ (ashiftrt "vr,vr,vr,vr,vr,vr")
+ (lshiftrt "vr,vr,vr,vr,vr,vr")
+ (smin "vr,vr,vr,vr,vr,vr")
+ (smax "vr,vr,vr,vr,vr,vr")
+ (umin "vr,vr,vr,vr,vr,vr")
+ (umax "vr,vr,vr,vr,vr,vr")
+ (mult "vr,vr,vr,vr,vr,vr")
+ (div "vr,vr,vr,vr,vr,vr")
+ (mod "vr,vr,vr,vr,vr,vr")
+ (udiv "vr,vr,vr,vr,vr,vr")
+ (umod "vr,vr,vr,vr,vr,vr")])
(define_code_attr binop_rhs2_constraint [
- (plus "vr,vi,vr")
- (minus "vr,vj,vr")
- (ior "vr,vi,vr")
- (xor "vr,vi,vr")
- (and "vr,vi,vr")
- (ashift "vr,vk,vr")
- (ashiftrt "vr,vk,vr")
- (lshiftrt "vr,vk,vr")
- (smin "vr,vr,vr")
- (smax "vr,vr,vr")
- (umin "vr,vr,vr")
- (umax "vr,vr,vr")
- (mult "vr,vr,vr")
- (div "vr,vr,vr")
- (mod "vr,vr,vr")
- (udiv "vr,vr,vr")
- (umod "vr,vr,vr")])
+ (plus "vr,vr,vi,vi,vr,vr")
+ (minus "vr,vr,vj,vj,vr,vr")
+ (ior "vr,vr,vi,vi,vr,vr")
+ (xor "vr,vr,vi,vi,vr,vr")
+ (and "vr,vr,vi,vi,vr,vr")
+ (ashift "vr,vr,vk,vk,vr,vr")
+ (ashiftrt "vr,vr,vk,vk,vr,vr")
+ (lshiftrt "vr,vr,vk,vk,vr,vr")
+ (smin "vr,vr,vr,vr,vr,vr")
+ (smax "vr,vr,vr,vr,vr,vr")
+ (umin "vr,vr,vr,vr,vr,vr")
+ (umax "vr,vr,vr,vr,vr,vr")
+ (mult "vr,vr,vr,vr,vr,vr")
+ (div "vr,vr,vr,vr,vr,vr")
+ (mod "vr,vr,vr,vr,vr,vr")
+ (udiv "vr,vr,vr,vr,vr,vr")
+ (umod "vr,vr,vr,vr,vr,vr")])
(define_code_attr int_binop_insn_type [
(plus "vialu")
@@ -285,9 +285,9 @@
(udiv "vidiv")
(umod "vidiv")])
-;; <binop_alt1_insn> expands to the insn name of binop matching constraint alternative = 1.
+;; <binop_imm_rhs1_insn> expands to the insn name of binop matching constraint rhs1 is immediate.
;; minus is negated as vadd and ss_minus is negated as vsadd, others remain <insn>.
-(define_code_attr binop_alt1_insn [(ashift "sll.vi")
+(define_code_attr binop_imm_rhs1_insn [(ashift "sll.vi")
(ashiftrt "sra.vi")
(lshiftrt "srl.vi")
(div "div.vv")
@@ -305,9 +305,9 @@
(umax "maxu.vv")
(mult "mul.vv")])
-;; <binop_alt2_insn> expands to the insn name of binop matching constraint alternative = 2.
+;; <binop_imm_rhs2_insn> expands to the insn name of binop matching constraint rhs2 is immediate.
;; minus is reversed as vrsub, others remain <insn>.
-(define_code_attr binop_alt2_insn [(ashift "sll.vv")
+(define_code_attr binop_imm_rhs2_insn [(ashift "sll.vv")
(ashiftrt "sra.vv")
(lshiftrt "srl.vv")
(div "div.vv")
@@ -325,9 +325,9 @@
(umax "maxu.vv")
(mult "mul.vv")])
-(define_code_attr binop_alt1_op [(ashift "%3,%4")
- (ashiftrt "%3,%4")
- (lshiftrt "%3,%4")
+(define_code_attr binop_imm_rhs1_op [(ashift "%3,%v4")
+ (ashiftrt "%3,%v4")
+ (lshiftrt "%3,%v4")
(div "%3,%4")
(mod "%3,%4")
(udiv "%3,%4")
@@ -335,7 +335,7 @@
(ior "%3,%4")
(xor "%3,%4")
(and "%3,%4")
- (plus "%3,%4")
+ (plus "%3,%v4")
(minus "%3,%V4")
(smin "%3,%4")
(smax "%3,%4")
@@ -343,7 +343,7 @@
(umax "%3,%4")
(mult "%3,%4")])
-(define_code_attr binop_alt2_op [(ashift "%3,%4")
+(define_code_attr binop_imm_rhs2_op [(ashift "%3,%4")
(ashiftrt "%3,%4")
(lshiftrt "%3,%4")
(div "%3,%4")
@@ -1127,25 +1127,28 @@
;; -------------------------------------------------------------------------------
(define_insn "@pred_<optab><mode>"
- [(set (match_operand:VI 0 "register_operand" "=vr, vr, vr")
+ [(set (match_operand:VI 0 "register_operand" "=vd, vr, vd, vr, vd, vr")
(if_then_else:VI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vmWc1,vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(any_int_binop:VI
(match_operand:VI 3 "<binop_rhs1_predicate>" "<binop_rhs1_constraint>")
(match_operand:VI 4 "<binop_rhs2_predicate>" "<binop_rhs2_constraint>"))
- (match_operand:VI 2 "vector_merge_operand" " 0vu, 0vu, 0vu")))]
+ (match_operand:VI 2 "vector_merge_operand" "0vu,0vu,0vu,0vu,0vu,0vu")))]
"TARGET_VECTOR"
"@
v<insn>.vv\t%0,%3,%4%p1
- v<binop_alt1_insn>\t%0,<binop_alt1_op>%p1
- v<binop_alt2_insn>\t%0,<binop_alt2_op>%p1"
+ v<insn>.vv\t%0,%3,%4%p1
+ v<binop_imm_rhs1_insn>\t%0,<binop_imm_rhs1_op>%p1
+ v<binop_imm_rhs1_insn>\t%0,<binop_imm_rhs1_op>%p1
+ v<binop_imm_rhs2_insn>\t%0,<binop_imm_rhs2_op>%p1
+ v<binop_imm_rhs2_insn>\t%0,<binop_imm_rhs2_op>%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -1154,23 +1157,25 @@
;; For vsll.vx/vsra.vx/vsrl.vx the scalar mode should be Pmode wheras the
;; scalar mode is inner mode of the RVV mode for other vx patterns.
(define_insn "@pred_<optab><mode>_scalar"
- [(set (match_operand:VI 0 "register_operand" "=vr, vr")
+ [(set (match_operand:VI 0 "register_operand" "=vd, vr, vd, vr")
(if_then_else:VI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(any_shift:VI
- (match_operand:VI 3 "register_operand" " vr, vr")
- (match_operand 4 "pmode_reg_or_uimm5_operand" " r, K"))
- (match_operand:VI 2 "vector_merge_operand" "0vu, 0vu")))]
+ (match_operand:VI 3 "register_operand" " vr, vr, vr, vr")
+ (match_operand 4 "pmode_reg_or_uimm5_operand" " r, r, K, K"))
+ (match_operand:VI 2 "vector_merge_operand" "0vu,0vu,0vu,0vu")))]
"TARGET_VECTOR"
"@
v<insn>.vx\t%0,%3,%4%p1
+ v<insn>.vx\t%0,%3,%4%p1
+ v<insn>.vi\t%0,%3,%4%p1
v<insn>.vi\t%0,%3,%4%p1"
[(set_attr "type" "vshift")
(set_attr "mode" "<MODE>")])
@@ -29,7 +29,7 @@ void f1 (void * in, void *out)
** vsetivli\tzero,4,e32,m1,ta,ma
** vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
-** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** vadd\.vv\tv[1-9][0-9]?,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
** ret
*/
@@ -52,7 +52,7 @@ void f2 (void * in, void *out)
** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
** vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
-** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** vadd\.vv\tv[1-9][0-9]?,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
** ret
*/
@@ -93,7 +93,7 @@ void f4 (void * in, void *out)
** vsetivli\tzero,4,e8,mf8,ta,ma
** vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
-** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** vadd\.vv\tv[1-9][0-9]?,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
** ret
*/
@@ -116,7 +116,7 @@ void f5 (void * in, void *out)
** vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
** vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
-** vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** vadd\.vv\tv[1-9][0-9]?,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
** ret
*/
@@ -28,9 +28,10 @@ void f1 (void * in, void *out)
** vlm.v\tv[0-9]+,0\([a-x0-9]+\)
** ...
** vsetivli\tzero,4,e32,m1,ta,ma
+** ...
** vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
** vsll\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
-** vsll\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+** vsll\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
** ret
*/
@@ -53,7 +54,7 @@ void f2 (void * in, void *out)
** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
** vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
** vsll\.vi\tv[0-9]+,\s*v[0-9]+,\s*17
-** vsll\.vi\tv[0-9]+,\s*v[0-9]+,\s*17,\s*v0.t
+** vsll\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*17,\s*v0.t
** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
** ret
*/
@@ -94,7 +95,7 @@ void f4 (void * in, void *out, size_t x)
** vsetivli\tzero,4,e8,mf8,ta,ma
** vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
** vsll\.vi\tv[0-9]+,\s*v[0-9]+,\s*5
-** vsll\.vi\tv[0-9]+,\s*v[0-9]+,\s*5,\s*v0.t
+** vsll\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*5,\s*v0.t
** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
** ret
*/
@@ -117,7 +118,7 @@ void f5 (void * in, void *out)
** vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
** vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
** vsll\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
-** vsll\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+** vsll\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
** ret
*/