@@ -3768,27 +3768,28 @@
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
(define_insn "@pred_dual_widen_<any_widen_binop:optab><any_extend:su><mode>_scalar"
- [(set (match_operand:VWEXTI 0 "register_operand" "=&vr,&vr")
+ [(set (match_operand:VWEXTI 0 "register_operand" "=vr, vr, vr, vr, vr, vr, ?&vr, ?&vr")
(if_then_else:VWEXTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(any_widen_binop:VWEXTI
(any_extend:VWEXTI
- (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, vr"))
+ (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " W21, W21, W42, W42, W84, W84, vr, vr"))
(any_extend:VWEXTI
(vec_duplicate:<V_DOUBLE_TRUNC>
- (match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ"))))
- (match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ"))))
+ (match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"vw<any_widen_binop:insn><any_extend:u>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vi<widen_binop_insn_type>")
- (set_attr "mode" "<V_DOUBLE_TRUNC>")])
+ (set_attr "mode" "<V_DOUBLE_TRUNC>")
+ (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none,none")])
(define_insn "@pred_single_widen_sub<any_extend:su><mode>"
[(set (match_operand:VWEXTI 0 "register_operand" "=&vr,&vr")
@@ -3877,27 +3878,28 @@
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
(define_insn "@pred_widen_mulsu<mode>_scalar"
- [(set (match_operand:VWEXTI 0 "register_operand" "=&vr,&vr")
+ [(set (match_operand:VWEXTI 0 "register_operand" "=vr, vr, vr, vr, vr, vr, ?&vr, ?&vr")
(if_then_else:VWEXTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(mult:VWEXTI
(sign_extend:VWEXTI
- (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, vr"))
+ (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " W21, W21, W42, W42, W84, W84, vr, vr"))
(zero_extend:VWEXTI
(vec_duplicate:<V_DOUBLE_TRUNC>
- (match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ"))))
- (match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ"))))
+ (match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"vwmulsu.vx\t%0,%3,%z4%p1"
[(set_attr "type" "viwmul")
- (set_attr "mode" "<V_DOUBLE_TRUNC>")])
+ (set_attr "mode" "<V_DOUBLE_TRUNC>")
+ (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none,none")])
;; vwcvt<u>.x.x.v
(define_insn "@pred_<optab><mode>"
@@ -7037,31 +7039,32 @@
(symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
(define_insn "@pred_dual_widen_<optab><mode>_scalar"
- [(set (match_operand:VWEXTF 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VWEXTF 0 "register_operand" "=vr, vr, vr, vr, vr, vr, ?&vr, ?&vr")
(if_then_else:VWEXTF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
- (match_operand 9 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)
(reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE)
(any_widen_binop:VWEXTF
(float_extend:VWEXTF
- (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, vr"))
+ (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " W21, W21, W42, W42, W84, W84, vr, vr"))
(float_extend:VWEXTF
(vec_duplicate:<V_DOUBLE_TRUNC>
- (match_operand:<VSUBEL> 4 "register_operand" " f, f"))))
- (match_operand:VWEXTF 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VSUBEL> 4 "register_operand" " f, f, f, f, f, f, f, f"))))
+ (match_operand:VWEXTF 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"vfw<insn>.vf\t%0,%3,%4%p1"
[(set_attr "type" "vf<widen_binop_insn_type>")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
- (symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
+ (symbol_ref "riscv_vector::get_frm_mode (operands[9])"))
+ (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none,none")])
(define_insn "@pred_single_widen_add<mode>"
[(set (match_operand:VWEXTF 0 "register_operand" "=&vr, &vr")
new file mode 100644
@@ -0,0 +1,188 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
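+/* W21 overlap: e8m1 sources are widened into e16m2 destinations by
+   vwadd.vx (foo) and vwmulsu.vx (foo2).  Since the destination may overlap
+   the source register group, no whole-register moves (vmv<n>r.v) and no
+   csrr-based spill code should be emitted.  */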
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+summation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7, size_t sum8, size_t sum9,
+ size_t sum10, size_t sum11, size_t sum12, size_t sum13, size_t sum14,
+ size_t sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m1_t v0 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v1 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v2 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v3 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v4 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v5 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v6 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v7 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v8 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v9 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v10 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v11 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v12 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v13 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v14 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v15 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m2_t vw0 = __riscv_vwadd_vx_i16m2 (v0, 33, vl);
+ vint16m2_t vw1 = __riscv_vwadd_vx_i16m2 (v1, 33, vl);
+ vint16m2_t vw2 = __riscv_vwadd_vx_i16m2 (v2, 33, vl);
+ vint16m2_t vw3 = __riscv_vwadd_vx_i16m2 (v3, 33, vl);
+ vint16m2_t vw4 = __riscv_vwadd_vx_i16m2 (v4, 33, vl);
+ vint16m2_t vw5 = __riscv_vwadd_vx_i16m2 (v5, 33, vl);
+ vint16m2_t vw6 = __riscv_vwadd_vx_i16m2 (v6, 33, vl);
+ vint16m2_t vw7 = __riscv_vwadd_vx_i16m2 (v7, 33, vl);
+ vint16m2_t vw8 = __riscv_vwadd_vx_i16m2 (v8, 33, vl);
+ vint16m2_t vw9 = __riscv_vwadd_vx_i16m2 (v9, 33, vl);
+ vint16m2_t vw10 = __riscv_vwadd_vx_i16m2 (v10, 33, vl);
+ vint16m2_t vw11 = __riscv_vwadd_vx_i16m2 (v11, 33, vl);
+ vint16m2_t vw12 = __riscv_vwadd_vx_i16m2 (v12, 33, vl);
+ vint16m2_t vw13 = __riscv_vwadd_vx_i16m2 (v13, 33, vl);
+ vint16m2_t vw14 = __riscv_vwadd_vx_i16m2 (v14, 33, vl);
+ vint16m2_t vw15 = __riscv_vwadd_vx_i16m2 (v15, 33, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m2_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m2_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m2_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m2_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m2_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m2_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m2_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m2_i16 (vw7);
+ size_t sum8 = __riscv_vmv_x_s_i16m2_i16 (vw8);
+ size_t sum9 = __riscv_vmv_x_s_i16m2_i16 (vw9);
+ size_t sum10 = __riscv_vmv_x_s_i16m2_i16 (vw10);
+ size_t sum11 = __riscv_vmv_x_s_i16m2_i16 (vw11);
+ size_t sum12 = __riscv_vmv_x_s_i16m2_i16 (vw12);
+ size_t sum13 = __riscv_vmv_x_s_i16m2_i16 (vw13);
+ size_t sum14 = __riscv_vmv_x_s_i16m2_i16 (vw14);
+ size_t sum15 = __riscv_vmv_x_s_i16m2_i16 (vw15);
+
+      sum += summation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+size_t
+foo2 (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m1_t v0 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v1 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v2 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v3 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v4 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v5 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v6 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v7 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v8 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v9 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v10 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v11 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v12 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v13 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v14 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v15 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m2_t vw0 = __riscv_vwmulsu_vx_i16m2 (v0, 33, vl);
+ vint16m2_t vw1 = __riscv_vwmulsu_vx_i16m2 (v1, 33, vl);
+ vint16m2_t vw2 = __riscv_vwmulsu_vx_i16m2 (v2, 33, vl);
+ vint16m2_t vw3 = __riscv_vwmulsu_vx_i16m2 (v3, 33, vl);
+ vint16m2_t vw4 = __riscv_vwmulsu_vx_i16m2 (v4, 33, vl);
+ vint16m2_t vw5 = __riscv_vwmulsu_vx_i16m2 (v5, 33, vl);
+ vint16m2_t vw6 = __riscv_vwmulsu_vx_i16m2 (v6, 33, vl);
+ vint16m2_t vw7 = __riscv_vwmulsu_vx_i16m2 (v7, 33, vl);
+ vint16m2_t vw8 = __riscv_vwmulsu_vx_i16m2 (v8, 33, vl);
+ vint16m2_t vw9 = __riscv_vwmulsu_vx_i16m2 (v9, 33, vl);
+ vint16m2_t vw10 = __riscv_vwmulsu_vx_i16m2 (v10, 33, vl);
+ vint16m2_t vw11 = __riscv_vwmulsu_vx_i16m2 (v11, 33, vl);
+ vint16m2_t vw12 = __riscv_vwmulsu_vx_i16m2 (v12, 33, vl);
+ vint16m2_t vw13 = __riscv_vwmulsu_vx_i16m2 (v13, 33, vl);
+ vint16m2_t vw14 = __riscv_vwmulsu_vx_i16m2 (v14, 33, vl);
+ vint16m2_t vw15 = __riscv_vwmulsu_vx_i16m2 (v15, 33, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m2_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m2_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m2_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m2_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m2_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m2_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m2_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m2_i16 (vw7);
+ size_t sum8 = __riscv_vmv_x_s_i16m2_i16 (vw8);
+ size_t sum9 = __riscv_vmv_x_s_i16m2_i16 (vw9);
+ size_t sum10 = __riscv_vmv_x_s_i16m2_i16 (vw10);
+ size_t sum11 = __riscv_vmv_x_s_i16m2_i16 (vw11);
+ size_t sum12 = __riscv_vmv_x_s_i16m2_i16 (vw12);
+ size_t sum13 = __riscv_vmv_x_s_i16m2_i16 (vw13);
+ size_t sum14 = __riscv_vmv_x_s_i16m2_i16 (vw14);
+ size_t sum15 = __riscv_vmv_x_s_i16m2_i16 (vw15);
+
+      sum += summation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
new file mode 100644
@@ -0,0 +1,119 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
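+/* W42 overlap: e8m2 sources are widened into e16m4 destinations by
+   vwadd.vx (foo) and vwmulsu.vx (foo2).  Since the destination may overlap
+   the source register group, no whole-register moves (vmv<n>r.v) and no
+   csrr-based spill code should be emitted.  */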
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+summation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m2_t v0 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v1 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v2 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v3 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v4 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v5 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v6 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v7 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m4_t vw0 = __riscv_vwadd_vx_i16m4 (v0, 55, vl);
+ vint16m4_t vw1 = __riscv_vwadd_vx_i16m4 (v1, 55, vl);
+ vint16m4_t vw2 = __riscv_vwadd_vx_i16m4 (v2, 55, vl);
+ vint16m4_t vw3 = __riscv_vwadd_vx_i16m4 (v3, 55, vl);
+ vint16m4_t vw4 = __riscv_vwadd_vx_i16m4 (v4, 55, vl);
+ vint16m4_t vw5 = __riscv_vwadd_vx_i16m4 (v5, 55, vl);
+ vint16m4_t vw6 = __riscv_vwadd_vx_i16m4 (v6, 55, vl);
+ vint16m4_t vw7 = __riscv_vwadd_vx_i16m4 (v7, 55, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m4_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m4_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m4_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m4_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m4_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m4_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m4_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m4_i16 (vw7);
+
+      sum += summation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+size_t
+foo2 (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m2_t v0 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v1 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v2 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v3 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v4 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v5 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v6 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v7 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m4_t vw0 = __riscv_vwmulsu_vx_i16m4 (v0, 55, vl);
+ vint16m4_t vw1 = __riscv_vwmulsu_vx_i16m4 (v1, 55, vl);
+ vint16m4_t vw2 = __riscv_vwmulsu_vx_i16m4 (v2, 55, vl);
+ vint16m4_t vw3 = __riscv_vwmulsu_vx_i16m4 (v3, 55, vl);
+ vint16m4_t vw4 = __riscv_vwmulsu_vx_i16m4 (v4, 55, vl);
+ vint16m4_t vw5 = __riscv_vwmulsu_vx_i16m4 (v5, 55, vl);
+ vint16m4_t vw6 = __riscv_vwmulsu_vx_i16m4 (v6, 55, vl);
+ vint16m4_t vw7 = __riscv_vwmulsu_vx_i16m4 (v7, 55, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m4_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m4_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m4_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m4_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m4_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m4_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m4_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m4_i16 (vw7);
+
+      sum += summation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
new file mode 100644
@@ -0,0 +1,86 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
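+/* W84 overlap: e8m4 sources are widened into e16m8 destinations by
+   vwadd.vx (foo) and vwmulsu.vx (foo2).  Since the destination may overlap
+   the source register group, no whole-register moves (vmv<n>r.v) and no
+   csrr-based spill code should be emitted.  */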
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+summation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m4_t v0 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v1 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v2 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v3 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m8_t vw0 = __riscv_vwadd_vx_i16m8 (v0, 66, vl);
+ vint16m8_t vw1 = __riscv_vwadd_vx_i16m8 (v1, 66, vl);
+ vint16m8_t vw2 = __riscv_vwadd_vx_i16m8 (v2, 66, vl);
+ vint16m8_t vw3 = __riscv_vwadd_vx_i16m8 (v3, 66, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m8_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m8_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m8_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m8_i16 (vw3);
+
+      sum += summation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+size_t
+foo2 (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m4_t v0 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v1 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v2 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v3 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m8_t vw0 = __riscv_vwmulsu_vx_i16m8 (v0, 66, vl);
+ vint16m8_t vw1 = __riscv_vwmulsu_vx_i16m8 (v1, 66, vl);
+ vint16m8_t vw2 = __riscv_vwmulsu_vx_i16m8 (v2, 66, vl);
+ vint16m8_t vw3 = __riscv_vwmulsu_vx_i16m8 (v3, 66, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m8_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m8_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m8_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m8_i16 (vw3);
+
+      sum += summation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
new file mode 100644
@@ -0,0 +1,104 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
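+/* W21 overlap, floating-point variant: f32m1 sources are widened into
+   f64m2 destinations by vfwadd.vf.  Since the destination may overlap the
+   source register group, no whole-register moves (vmv<n>r.v) and no
+   csrr-based spill code should be emitted.  */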
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+summation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7, size_t sum8, size_t sum9,
+ size_t sum10, size_t sum11, size_t sum12, size_t sum13, size_t sum14,
+ size_t sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m2_t vw0 = __riscv_vfwadd_vf_f64m2 (v0, 33, vl);
+ vfloat64m2_t vw1 = __riscv_vfwadd_vf_f64m2 (v1, 33, vl);
+ vfloat64m2_t vw2 = __riscv_vfwadd_vf_f64m2 (v2, 33, vl);
+ vfloat64m2_t vw3 = __riscv_vfwadd_vf_f64m2 (v3, 33, vl);
+ vfloat64m2_t vw4 = __riscv_vfwadd_vf_f64m2 (v4, 33, vl);
+ vfloat64m2_t vw5 = __riscv_vfwadd_vf_f64m2 (v5, 33, vl);
+ vfloat64m2_t vw6 = __riscv_vfwadd_vf_f64m2 (v6, 33, vl);
+ vfloat64m2_t vw7 = __riscv_vfwadd_vf_f64m2 (v7, 33, vl);
+ vfloat64m2_t vw8 = __riscv_vfwadd_vf_f64m2 (v8, 33, vl);
+ vfloat64m2_t vw9 = __riscv_vfwadd_vf_f64m2 (v9, 33, vl);
+ vfloat64m2_t vw10 = __riscv_vfwadd_vf_f64m2 (v10, 33, vl);
+ vfloat64m2_t vw11 = __riscv_vfwadd_vf_f64m2 (v11, 33, vl);
+ vfloat64m2_t vw12 = __riscv_vfwadd_vf_f64m2 (v12, 33, vl);
+ vfloat64m2_t vw13 = __riscv_vfwadd_vf_f64m2 (v13, 33, vl);
+ vfloat64m2_t vw14 = __riscv_vfwadd_vf_f64m2 (v14, 33, vl);
+ vfloat64m2_t vw15 = __riscv_vfwadd_vf_f64m2 (v15, 33, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vfmv_f_s_f64m2_f64 (vw0);
+ size_t sum1 = __riscv_vfmv_f_s_f64m2_f64 (vw1);
+ size_t sum2 = __riscv_vfmv_f_s_f64m2_f64 (vw2);
+ size_t sum3 = __riscv_vfmv_f_s_f64m2_f64 (vw3);
+ size_t sum4 = __riscv_vfmv_f_s_f64m2_f64 (vw4);
+ size_t sum5 = __riscv_vfmv_f_s_f64m2_f64 (vw5);
+ size_t sum6 = __riscv_vfmv_f_s_f64m2_f64 (vw6);
+ size_t sum7 = __riscv_vfmv_f_s_f64m2_f64 (vw7);
+ size_t sum8 = __riscv_vfmv_f_s_f64m2_f64 (vw8);
+ size_t sum9 = __riscv_vfmv_f_s_f64m2_f64 (vw9);
+ size_t sum10 = __riscv_vfmv_f_s_f64m2_f64 (vw10);
+ size_t sum11 = __riscv_vfmv_f_s_f64m2_f64 (vw11);
+ size_t sum12 = __riscv_vfmv_f_s_f64m2_f64 (vw12);
+ size_t sum13 = __riscv_vfmv_f_s_f64m2_f64 (vw13);
+ size_t sum14 = __riscv_vfmv_f_s_f64m2_f64 (vw14);
+ size_t sum15 = __riscv_vfmv_f_s_f64m2_f64 (vw15);
+
+      sum += summation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
new file mode 100644
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
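+/* W42 overlap, floating-point variant: f32m2 sources are widened into
+   f64m4 destinations by vfwadd.vf.  Since the destination may overlap the
+   source register group, no whole-register moves (vmv<n>r.v) and no
+   csrr-based spill code should be emitted.  */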
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+summation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m2_t v0 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v1 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v2 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v3 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v4 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v5 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v6 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v7 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m4_t vw0 = __riscv_vfwadd_vf_f64m4 (v0, 33, vl);
+ vfloat64m4_t vw1 = __riscv_vfwadd_vf_f64m4 (v1, 33, vl);
+ vfloat64m4_t vw2 = __riscv_vfwadd_vf_f64m4 (v2, 33, vl);
+ vfloat64m4_t vw3 = __riscv_vfwadd_vf_f64m4 (v3, 33, vl);
+ vfloat64m4_t vw4 = __riscv_vfwadd_vf_f64m4 (v4, 33, vl);
+ vfloat64m4_t vw5 = __riscv_vfwadd_vf_f64m4 (v5, 33, vl);
+ vfloat64m4_t vw6 = __riscv_vfwadd_vf_f64m4 (v6, 33, vl);
+ vfloat64m4_t vw7 = __riscv_vfwadd_vf_f64m4 (v7, 33, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vfmv_f_s_f64m4_f64 (vw0);
+ size_t sum1 = __riscv_vfmv_f_s_f64m4_f64 (vw1);
+ size_t sum2 = __riscv_vfmv_f_s_f64m4_f64 (vw2);
+ size_t sum3 = __riscv_vfmv_f_s_f64m4_f64 (vw3);
+ size_t sum4 = __riscv_vfmv_f_s_f64m4_f64 (vw4);
+ size_t sum5 = __riscv_vfmv_f_s_f64m4_f64 (vw5);
+ size_t sum6 = __riscv_vfmv_f_s_f64m4_f64 (vw6);
+ size_t sum7 = __riscv_vfmv_f_s_f64m4_f64 (vw7);
+
+      sum += summation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
new file mode 100644
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
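+/* W84 overlap, floating-point variant: f32m4 sources are widened into
+   f64m8 destinations by vfwadd.vf.  Since the destination may overlap the
+   source register group, no whole-register moves (vmv<n>r.v) and no
+   csrr-based spill code should be emitted.  */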
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+summation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m4_t v0 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+ vfloat32m4_t v1 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+ vfloat32m4_t v2 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+ vfloat32m4_t v3 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m8_t vw0 = __riscv_vfwadd_vf_f64m8 (v0, 33, vl);
+ vfloat64m8_t vw1 = __riscv_vfwadd_vf_f64m8 (v1, 33, vl);
+ vfloat64m8_t vw2 = __riscv_vfwadd_vf_f64m8 (v2, 33, vl);
+ vfloat64m8_t vw3 = __riscv_vfwadd_vf_f64m8 (v3, 33, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vfmv_f_s_f64m8_f64 (vw0);
+ size_t sum1 = __riscv_vfmv_f_s_f64m8_f64 (vw1);
+ size_t sum2 = __riscv_vfmv_f_s_f64m8_f64 (vw2);
+ size_t sum3 = __riscv_vfmv_f_s_f64m8_f64 (vw3);
+
+      sum += summation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */