@@ -2223,67 +2223,79 @@
;; DEST eew is greater than SOURCE eew.
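+;; Per the RVV spec, a source may only overlap a wider destination
+;; register group in the highest-numbered part of that group.  The
+;; W21/W42/W84 (x2), W43/W86 (x4) and W87 (x8) constraints below are
+;; assumed to accept exactly the index registers that sit in that
+;; highest-numbered part, so those alternatives can drop the
+;; earlyclobber; the plain "vr" alternatives keep it via "?&vr".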
(define_insn "@pred_indexed_<order>load<mode>_x2_greater_eew"
- [(set (match_operand:VEEWEXT2 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VEEWEXT2 0 "register_operand" "=vr, vr, vr, vr, vr, vr, ?&vr, ?&vr")
(if_then_else:VEEWEXT2
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWEXT2
- [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ")
(mem:BLK (scratch))
- (match_operand:<VINDEX_DOUBLE_TRUNC> 4 "register_operand" " vr, vr")] ORDER)
- (match_operand:VEEWEXT2 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VINDEX_DOUBLE_TRUNC> 4 "register_operand" " W21, W21, W42, W42, W84, W84, vr, vr")] ORDER)
+ (match_operand:VEEWEXT2 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"vl<order>xei<double_trunc_sew>.v\t%0,(%z3),%4%p1"
[(set_attr "type" "vld<order>x")
- (set_attr "mode" "<MODE>")])
+ (set_attr "mode" "<MODE>")
+ (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none,none")])
(define_insn "@pred_indexed_<order>load<mode>_x4_greater_eew"
- [(set (match_operand:VEEWEXT4 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VEEWEXT4 0 "register_operand" "=vr, vr, vr, vr, ?&vr, ?&vr")
(if_then_else:VEEWEXT4
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWEXT4
- [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ")
(mem:BLK (scratch))
- (match_operand:<VINDEX_QUAD_TRUNC> 4 "register_operand" " vr, vr")] ORDER)
- (match_operand:VEEWEXT4 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VINDEX_QUAD_TRUNC> 4 "register_operand" " W43, W43, W86, W86, vr, vr")] ORDER)
+ (match_operand:VEEWEXT4 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"vl<order>xei<quad_trunc_sew>.v\t%0,(%z3),%4%p1"
[(set_attr "type" "vld<order>x")
- (set_attr "mode" "<MODE>")])
+ (set_attr "mode" "<MODE>")
+ (set_attr "group_overlap" "W43,W43,W86,W86,none,none")])
(define_insn "@pred_indexed_<order>load<mode>_x8_greater_eew"
- [(set (match_operand:VEEWEXT8 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VEEWEXT8 0 "register_operand" "=vr, vr, ?&vr, ?&vr")
(if_then_else:VEEWEXT8
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWEXT8
- [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ, rJ, rJ")
(mem:BLK (scratch))
- (match_operand:<VINDEX_OCT_TRUNC> 4 "register_operand" " vr, vr")] ORDER)
- (match_operand:VEEWEXT8 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VINDEX_OCT_TRUNC> 4 "register_operand" " W87, W87, vr, vr")] ORDER)
+ (match_operand:VEEWEXT8 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
"vl<order>xei<oct_trunc_sew>.v\t%0,(%z3),%4%p1"
[(set_attr "type" "vld<order>x")
- (set_attr "mode" "<MODE>")])
+ (set_attr "mode" "<MODE>")
+ (set_attr "group_overlap" "W87,W87,none,none")])
;; DEST eew is smaller than SOURCE eew.
(define_insn "@pred_indexed_<order>load<mode>_x2_smaller_eew"
new file mode 100644
@@ -0,0 +1,106 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
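+/* EEW-16/m2 data indexed by EEW-8/m1 offsets; the highpart-overlap
+   alternative (W21) should leave no vmv<n>r moves or csrr spill code.  */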
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7, size_t sum8, size_t sum9,
+ size_t sum10, size_t sum11, size_t sum12, size_t sum13, size_t sum14,
+ size_t sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vuint8m1_t v0 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v1 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v2 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v3 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v4 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v5 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v6 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v7 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v8 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v9 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v10 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v11 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v12 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v13 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v14 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v15 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m2_t vw0 = __riscv_vluxei8_v_i16m2 ((void *) it, v0, vl);
+ vint16m2_t vw1 = __riscv_vluxei8_v_i16m2 ((void *) it, v1, vl);
+ vint16m2_t vw2 = __riscv_vluxei8_v_i16m2 ((void *) it, v2, vl);
+ vint16m2_t vw3 = __riscv_vluxei8_v_i16m2 ((void *) it, v3, vl);
+ vint16m2_t vw4 = __riscv_vluxei8_v_i16m2 ((void *) it, v4, vl);
+ vint16m2_t vw5 = __riscv_vluxei8_v_i16m2 ((void *) it, v5, vl);
+ vint16m2_t vw6 = __riscv_vluxei8_v_i16m2 ((void *) it, v6, vl);
+ vint16m2_t vw7 = __riscv_vluxei8_v_i16m2 ((void *) it, v7, vl);
+ vint16m2_t vw8 = __riscv_vluxei8_v_i16m2 ((void *) it, v8, vl);
+ vint16m2_t vw9 = __riscv_vluxei8_v_i16m2 ((void *) it, v9, vl);
+ vint16m2_t vw10 = __riscv_vluxei8_v_i16m2 ((void *) it, v10, vl);
+ vint16m2_t vw11 = __riscv_vluxei8_v_i16m2 ((void *) it, v11, vl);
+ vint16m2_t vw12 = __riscv_vluxei8_v_i16m2 ((void *) it, v12, vl);
+ vint16m2_t vw13 = __riscv_vluxei8_v_i16m2 ((void *) it, v13, vl);
+ vint16m2_t vw14 = __riscv_vluxei8_v_i16m2 ((void *) it, v14, vl);
+ vint16m2_t vw15 = __riscv_vluxei8_v_i16m2 ((void *) it, v15, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m2_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m2_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m2_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m2_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m2_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m2_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m2_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m2_i16 (vw7);
+ size_t sum8 = __riscv_vmv_x_s_i16m2_i16 (vw8);
+ size_t sum9 = __riscv_vmv_x_s_i16m2_i16 (vw9);
+ size_t sum10 = __riscv_vmv_x_s_i16m2_i16 (vw10);
+ size_t sum11 = __riscv_vmv_x_s_i16m2_i16 (vw11);
+ size_t sum12 = __riscv_vmv_x_s_i16m2_i16 (vw12);
+ size_t sum13 = __riscv_vmv_x_s_i16m2_i16 (vw13);
+ size_t sum14 = __riscv_vmv_x_s_i16m2_i16 (vw14);
+ size_t sum15 = __riscv_vmv_x_s_i16m2_i16 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
new file mode 100644
@@ -0,0 +1,70 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
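+/* EEW-16/m4 data indexed by EEW-8/m2 offsets; the highpart-overlap
+   alternative (W42) should leave no vmv<n>r moves or csrr spill code.  */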
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vuint8m2_t v0 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v1 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v2 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v3 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v4 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v5 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v6 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v7 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m4_t vw0 = __riscv_vluxei8_v_i16m4 ((void *) it, v0, vl);
+ vint16m4_t vw1 = __riscv_vluxei8_v_i16m4 ((void *) it, v1, vl);
+ vint16m4_t vw2 = __riscv_vluxei8_v_i16m4 ((void *) it, v2, vl);
+ vint16m4_t vw3 = __riscv_vluxei8_v_i16m4 ((void *) it, v3, vl);
+ vint16m4_t vw4 = __riscv_vluxei8_v_i16m4 ((void *) it, v4, vl);
+ vint16m4_t vw5 = __riscv_vluxei8_v_i16m4 ((void *) it, v5, vl);
+ vint16m4_t vw6 = __riscv_vluxei8_v_i16m4 ((void *) it, v6, vl);
+ vint16m4_t vw7 = __riscv_vluxei8_v_i16m4 ((void *) it, v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m4_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m4_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m4_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m4_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m4_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m4_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m4_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m4_i16 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
new file mode 100644
@@ -0,0 +1,53 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
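+/* EEW-16/m8 data indexed by EEW-8/m4 offsets; the highpart-overlap
+   alternative (W84) should leave no vmv<n>r moves or csrr spill code.  */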
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vuint8m4_t v0 = __riscv_vle8_v_u8m4 ((void *) it, vl);
+ it += vl;
+ vuint8m4_t v1 = __riscv_vle8_v_u8m4 ((void *) it, vl);
+ it += vl;
+ vuint8m4_t v2 = __riscv_vle8_v_u8m4 ((void *) it, vl);
+ it += vl;
+ vuint8m4_t v3 = __riscv_vle8_v_u8m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m8_t vw0 = __riscv_vluxei8_v_i16m8 ((void *) it, v0, vl);
+ vint16m8_t vw1 = __riscv_vluxei8_v_i16m8 ((void *) it, v1, vl);
+ vint16m8_t vw2 = __riscv_vluxei8_v_i16m8 ((void *) it, v2, vl);
+ vint16m8_t vw3 = __riscv_vluxei8_v_i16m8 ((void *) it, v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m8_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m8_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m8_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m8_i16 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
new file mode 100644
@@ -0,0 +1,70 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
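+/* EEW-32/m4 data indexed by EEW-8/m1 offsets; the highpart-overlap
+   alternative (W43) should leave no vmv<n>r moves or csrr spill code.  */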
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vuint8m1_t v0 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v1 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v2 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v3 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v4 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v5 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v6 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v7 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint32m4_t vw0 = __riscv_vluxei8_v_i32m4 ((void *) it, v0, vl);
+ vint32m4_t vw1 = __riscv_vluxei8_v_i32m4 ((void *) it, v1, vl);
+ vint32m4_t vw2 = __riscv_vluxei8_v_i32m4 ((void *) it, v2, vl);
+ vint32m4_t vw3 = __riscv_vluxei8_v_i32m4 ((void *) it, v3, vl);
+ vint32m4_t vw4 = __riscv_vluxei8_v_i32m4 ((void *) it, v4, vl);
+ vint32m4_t vw5 = __riscv_vluxei8_v_i32m4 ((void *) it, v5, vl);
+ vint32m4_t vw6 = __riscv_vluxei8_v_i32m4 ((void *) it, v6, vl);
+ vint32m4_t vw7 = __riscv_vluxei8_v_i32m4 ((void *) it, v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i32m4_i32 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i32m4_i32 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i32m4_i32 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i32m4_i32 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i32m4_i32 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i32m4_i32 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i32m4_i32 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i32m4_i32 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
new file mode 100644
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
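+/* EEW-32/m8 data indexed by EEW-8/m2 offsets; the highpart-overlap
+   alternative (W86) should leave no vmv<n>r moves or csrr spill code.  */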
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vuint8m2_t v0 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v1 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v2 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v3 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint32m8_t vw0 = __riscv_vluxei8_v_i32m8 ((void *) it, v0, vl);
+ vint32m8_t vw1 = __riscv_vluxei8_v_i32m8 ((void *) it, v1, vl);
+ vint32m8_t vw2 = __riscv_vluxei8_v_i32m8 ((void *) it, v2, vl);
+ vint32m8_t vw3 = __riscv_vluxei8_v_i32m8 ((void *) it, v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i32m8_i32 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i32m8_i32 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i32m8_i32 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i32m8_i32 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
new file mode 100644
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
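+/* EEW-64/m8 data indexed by EEW-8/m1 offsets; the highpart-overlap
+   alternative (W87) should leave no vmv<n>r moves or csrr spill code.  */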
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vuint8m1_t v0 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v1 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v2 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v3 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m8_t vw0 = __riscv_vluxei8_v_i64m8 ((void *) it, v0, vl);
+ vint64m8_t vw1 = __riscv_vluxei8_v_i64m8 ((void *) it, v1, vl);
+ vint64m8_t vw2 = __riscv_vluxei8_v_i64m8 ((void *) it, v2, vl);
+ vint64m8_t vw3 = __riscv_vluxei8_v_i64m8 ((void *) it, v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i64m8_i64 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i64m8_i64 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i64m8_i64 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i64m8_i64 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */