@@ -205,6 +205,7 @@ enum vlen_enum
};
bool slide1_sew64_helper (int, machine_mode, machine_mode,
machine_mode, rtx *);
+rtx gen_avl_for_scalar_move (rtx);
}
/* We classify builtin types into two classes:
@@ -701,4 +701,22 @@ slide1_sew64_helper (int unspec, machine_mode mode, machine_mode demote_mode,
return true;
}
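+/* Normalize the AVL of a scalar move (vmv.s.x/vfmv.s.f): a constant AVL
+   folds to 0 or 1, while a register AVL is masked with 1, mirroring the
+   "andi vl" + vlse.v fallback sequence used in vector.md.  */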
+rtx
+gen_avl_for_scalar_move (rtx avl)
+{
+ if (CONST_INT_P (avl))
+ {
+ if (rtx_equal_p (avl, const0_rtx))
+ return const0_rtx;
+ else
+ return const1_rtx;
+ }
+ else
+ {
+ rtx tmp = gen_reg_rtx (Pmode);
+ emit_insn (gen_rtx_SET (tmp, gen_rtx_AND (Pmode, avl, const1_rtx)));
+ return tmp;
+ }
+}
+
} // namespace riscv_vector
@@ -1229,9 +1229,7 @@
else if (GET_MODE_BITSIZE (<VEL>mode) > GET_MODE_BITSIZE (Pmode))
{
// Case 2: vmv.s.x (TU) ==> andi vl + vlse.v (TU) in RV32 system.
- rtx tmp = gen_reg_rtx (Pmode);
- emit_insn (gen_rtx_SET (tmp, gen_rtx_AND (Pmode, operands[4], const1_rtx)));
- operands[4] = tmp;
+ operands[4] = riscv_vector::gen_avl_for_scalar_move (operands[4]);
operands[1] = CONSTM1_RTX (<VM>mode);
}
else
@@ -1292,9 +1290,7 @@
vlse64.v */
if (satisfies_constraint_Wb1 (operands[1]))
{
- rtx tmp = gen_reg_rtx (Pmode);
- emit_insn (gen_rtx_SET (tmp, gen_rtx_AND (Pmode, operands[4], const1_rtx)));
- operands[4] = tmp;
+ operands[4] = riscv_vector::gen_avl_for_scalar_move (operands[4]);
operands[1] = CONSTM1_RTX (<VM>mode);
}
}
@@ -4421,25 +4417,26 @@
})
(define_insn "*pred_madd<mode>"
- [(set (match_operand:VI 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VI 0 "register_operand" "=vd,?&vd, vr,?&vr")
(if_then_else:VI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus:VI
(mult:VI
- (match_operand:VI 2 "register_operand" " 0, 0, vr")
- (match_operand:VI 3 "register_operand" " vr, vr, vr"))
- (match_operand:VI 4 "register_operand" " vr, vr, vr"))
+ (match_operand:VI 2 "register_operand" " 0, vr, 0, vr")
+ (match_operand:VI 3 "register_operand" " vr, vr, vr, vr"))
+ (match_operand:VI 4 "register_operand" " vr, vr, vr, vr"))
(match_dup 2)))]
"TARGET_VECTOR"
"@
vmadd.vv\t%0,%3,%4%p1
+ vmv.v.v\t%0,%2\;vmadd.vv\t%0,%3,%4%p1
vmadd.vv\t%0,%3,%4%p1
vmv.v.v\t%0,%2\;vmadd.vv\t%0,%3,%4%p1"
[(set_attr "type" "vimuladd")
@@ -4451,25 +4448,26 @@
(set (attr "avl_type") (symbol_ref "INTVAL (operands[8])"))])
(define_insn "*pred_macc<mode>"
- [(set (match_operand:VI 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VI 0 "register_operand" "=vd,?&vd, vr,?&vr")
(if_then_else:VI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus:VI
(mult:VI
- (match_operand:VI 2 "register_operand" " vr, vr, vr")
- (match_operand:VI 3 "register_operand" " vr, vr, vr"))
- (match_operand:VI 4 "register_operand" " 0, 0, vr"))
+ (match_operand:VI 2 "register_operand" " vr, vr, vr, vr")
+ (match_operand:VI 3 "register_operand" " vr, vr, vr, vr"))
+ (match_operand:VI 4 "register_operand" " 0, vr, 0, vr"))
(match_dup 4)))]
"TARGET_VECTOR"
"@
vmacc.vv\t%0,%2,%3%p1
+ vmv.v.v\t%0,%4\;vmacc.vv\t%0,%2,%3%p1
vmacc.vv\t%0,%2,%3%p1
vmv.v.v\t%0,%4\;vmacc.vv\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
@@ -4555,26 +4553,27 @@
})
(define_insn "*pred_madd<mode>_scalar"
- [(set (match_operand:VI 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VI 0 "register_operand" "=vd,?&vd, vr,?&vr")
(if_then_else:VI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus:VI
(mult:VI
(vec_duplicate:VI
- (match_operand:<VEL> 2 "register_operand" " r, r, vr"))
- (match_operand:VI 3 "register_operand" " 0, 0, vr"))
- (match_operand:VI 4 "register_operand" " vr, vr, vr"))
+ (match_operand:<VEL> 2 "register_operand" " r, r, r, r"))
+ (match_operand:VI 3 "register_operand" " 0, vr, 0, vr"))
+ (match_operand:VI 4 "register_operand" " vr, vr, vr, vr"))
(match_dup 3)))]
"TARGET_VECTOR"
"@
vmadd.vx\t%0,%2,%4%p1
+ vmv.v.v\t%0,%2\;vmadd.vx\t%0,%2,%4%p1
vmadd.vx\t%0,%2,%4%p1
vmv.v.v\t%0,%2\;vmadd.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
@@ -4586,26 +4585,27 @@
(set (attr "avl_type") (symbol_ref "INTVAL (operands[8])"))])
(define_insn "*pred_macc<mode>_scalar"
- [(set (match_operand:VI 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VI 0 "register_operand" "=vd,?&vd, vr,?&vr")
(if_then_else:VI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus:VI
(mult:VI
(vec_duplicate:VI
- (match_operand:<VEL> 2 "register_operand" " r, r, vr"))
- (match_operand:VI 3 "register_operand" " vr, vr, vr"))
- (match_operand:VI 4 "register_operand" " 0, 0, vr"))
+ (match_operand:<VEL> 2 "register_operand" " r, r, r, r"))
+ (match_operand:VI 3 "register_operand" " vr, vr, vr, vr"))
+ (match_operand:VI 4 "register_operand" " 0, vr, 0, vr"))
(match_dup 4)))]
"TARGET_VECTOR"
"@
vmacc.vx\t%0,%2,%3%p1
+ vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
vmacc.vx\t%0,%2,%3%p1
vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
@@ -4699,27 +4699,28 @@
})
(define_insn "*pred_madd<mode>_extended_scalar"
- [(set (match_operand:VI_D 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VI_D 0 "register_operand" "=vd,?&vd, vr,?&vr")
(if_then_else:VI_D
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus:VI_D
(mult:VI_D
(vec_duplicate:VI_D
(sign_extend:<VEL>
- (match_operand:<VSUBEL> 2 "register_operand" " r, r, vr")))
- (match_operand:VI_D 3 "register_operand" " 0, 0, vr"))
- (match_operand:VI_D 4 "register_operand" " vr, vr, vr"))
+ (match_operand:<VSUBEL> 2 "register_operand" " r, r, r, r")))
+ (match_operand:VI_D 3 "register_operand" " 0, vr, 0, vr"))
+ (match_operand:VI_D 4 "register_operand" " vr, vr, vr, vr"))
(match_dup 3)))]
"TARGET_VECTOR"
"@
vmadd.vx\t%0,%2,%4%p1
+ vmv.v.v\t%0,%2\;vmadd.vx\t%0,%2,%4%p1
vmadd.vx\t%0,%2,%4%p1
vmv.v.v\t%0,%2\;vmadd.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
@@ -4731,27 +4732,28 @@
(set (attr "avl_type") (symbol_ref "INTVAL (operands[8])"))])
(define_insn "*pred_macc<mode>_extended_scalar"
- [(set (match_operand:VI_D 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VI_D 0 "register_operand" "=vd,?&vd, vr,?&vr")
(if_then_else:VI_D
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus:VI_D
(mult:VI_D
(vec_duplicate:VI_D
(sign_extend:<VEL>
- (match_operand:<VSUBEL> 2 "register_operand" " r, r, vr")))
- (match_operand:VI_D 3 "register_operand" " vr, vr, vr"))
- (match_operand:VI_D 4 "register_operand" " 0, 0, vr"))
+ (match_operand:<VSUBEL> 2 "register_operand" " r, r, r, r")))
+ (match_operand:VI_D 3 "register_operand" " vr, vr, vr, vr"))
+ (match_operand:VI_D 4 "register_operand" " 0, vr, 0, vr"))
(match_dup 4)))]
"TARGET_VECTOR"
"@
vmacc.vx\t%0,%2,%3%p1
+ vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
vmacc.vx\t%0,%2,%3%p1
vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
@@ -4836,25 +4838,26 @@
})
(define_insn "*pred_nmsub<mode>"
- [(set (match_operand:VI 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VI 0 "register_operand" "=vd,?&vd, vr,?&vr")
(if_then_else:VI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(minus:VI
- (match_operand:VI 4 "register_operand" " vr, vr, vr")
+ (match_operand:VI 4 "register_operand" " vr, vr, vr, vr")
(mult:VI
- (match_operand:VI 2 "register_operand" " 0, 0, vr")
- (match_operand:VI 3 "register_operand" " vr, vr, vr")))
+ (match_operand:VI 2 "register_operand" " 0, vr, 0, vr")
+ (match_operand:VI 3 "register_operand" " vr, vr, vr, vr")))
(match_dup 2)))]
"TARGET_VECTOR"
"@
vnmsub.vv\t%0,%3,%4%p1
+ vmv.v.v\t%0,%2\;vnmsub.vv\t%0,%3,%4%p1
vnmsub.vv\t%0,%3,%4%p1
vmv.v.v\t%0,%2\;vnmsub.vv\t%0,%3,%4%p1"
[(set_attr "type" "vimuladd")
@@ -4866,25 +4869,26 @@
(set (attr "avl_type") (symbol_ref "INTVAL (operands[8])"))])
(define_insn "*pred_nmsac<mode>"
- [(set (match_operand:VI 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VI 0 "register_operand" "=vd,?&vd, vr,?&vr")
(if_then_else:VI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(minus:VI
- (match_operand:VI 4 "register_operand" " 0, 0, vr")
+ (match_operand:VI 4 "register_operand" " 0, vr, 0, vr")
(mult:VI
- (match_operand:VI 2 "register_operand" " vr, vr, vr")
- (match_operand:VI 3 "register_operand" " vr, vr, vr")))
+ (match_operand:VI 2 "register_operand" " vr, vr, vr, vr")
+ (match_operand:VI 3 "register_operand" " vr, vr, vr, vr")))
(match_dup 4)))]
"TARGET_VECTOR"
"@
vnmsac.vv\t%0,%2,%3%p1
+ vmv.v.v\t%0,%4\;vnmsac.vv\t%0,%2,%3%p1
vnmsac.vv\t%0,%2,%3%p1
vmv.v.v\t%0,%4\;vnmsac.vv\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
@@ -4970,26 +4974,27 @@
})
(define_insn "*pred_nmsub<mode>_scalar"
- [(set (match_operand:VI 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VI 0 "register_operand" "=vd,?&vd, vr,?&vr")
(if_then_else:VI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(minus:VI
- (match_operand:VI 4 "register_operand" " vr, vr, vr")
+ (match_operand:VI 4 "register_operand" " vr, vr, vr, vr")
(mult:VI
(vec_duplicate:VI
- (match_operand:<VEL> 2 "register_operand" " r, r, vr"))
- (match_operand:VI 3 "register_operand" " 0, 0, vr")))
+ (match_operand:<VEL> 2 "register_operand" " r, r, r, r"))
+ (match_operand:VI 3 "register_operand" " 0, vr, 0, vr")))
(match_dup 3)))]
"TARGET_VECTOR"
"@
vnmsub.vx\t%0,%2,%4%p1
+ vmv.v.v\t%0,%2\;vnmsub.vx\t%0,%2,%4%p1
vnmsub.vx\t%0,%2,%4%p1
vmv.v.v\t%0,%2\;vnmsub.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
@@ -5001,26 +5006,27 @@
(set (attr "avl_type") (symbol_ref "INTVAL (operands[8])"))])
(define_insn "*pred_nmsac<mode>_scalar"
- [(set (match_operand:VI 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VI 0 "register_operand" "=vd,?&vd, vr,?&vr")
(if_then_else:VI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(minus:VI
- (match_operand:VI 4 "register_operand" " 0, 0, vr")
+ (match_operand:VI 4 "register_operand" " 0, vr, 0, vr")
(mult:VI
(vec_duplicate:VI
- (match_operand:<VEL> 2 "register_operand" " r, r, vr"))
- (match_operand:VI 3 "register_operand" " vr, vr, vr")))
+ (match_operand:<VEL> 2 "register_operand" " r, r, r, r"))
+ (match_operand:VI 3 "register_operand" " vr, vr, vr, vr")))
(match_dup 4)))]
"TARGET_VECTOR"
"@
vnmsac.vx\t%0,%2,%3%p1
+ vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
vnmsac.vx\t%0,%2,%3%p1
vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
@@ -5114,27 +5120,28 @@
})
(define_insn "*pred_nmsub<mode>_extended_scalar"
- [(set (match_operand:VI_D 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VI_D 0 "register_operand" "=vd,?&vd, vr,?&vr")
(if_then_else:VI_D
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(minus:VI_D
- (match_operand:VI_D 4 "register_operand" " vr, vr, vr")
+ (match_operand:VI_D 4 "register_operand" " vr, vr, vr, vr")
(mult:VI_D
(vec_duplicate:VI_D
(sign_extend:<VEL>
- (match_operand:<VSUBEL> 2 "register_operand" " r, r, vr")))
- (match_operand:VI_D 3 "register_operand" " 0, 0, vr")))
+ (match_operand:<VSUBEL> 2 "register_operand" " r, r, r, r")))
+ (match_operand:VI_D 3 "register_operand" " 0, vr, 0, vr")))
(match_dup 3)))]
"TARGET_VECTOR"
"@
vnmsub.vx\t%0,%2,%4%p1
+ vmv.v.v\t%0,%2\;vnmsub.vx\t%0,%2,%4%p1
vnmsub.vx\t%0,%2,%4%p1
vmv.v.v\t%0,%2\;vnmsub.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
@@ -5146,27 +5153,28 @@
(set (attr "avl_type") (symbol_ref "INTVAL (operands[8])"))])
(define_insn "*pred_nmsac<mode>_extended_scalar"
- [(set (match_operand:VI_D 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VI_D 0 "register_operand" "=vd,?&vd, vr,?&vr")
(if_then_else:VI_D
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(minus:VI_D
- (match_operand:VI_D 4 "register_operand" " 0, 0, vr")
+ (match_operand:VI_D 4 "register_operand" " 0, vr, 0, vr")
(mult:VI_D
(vec_duplicate:VI_D
(sign_extend:<VEL>
- (match_operand:<VSUBEL> 2 "register_operand" " r, r, vr")))
- (match_operand:VI_D 3 "register_operand" " vr, vr, vr")))
+ (match_operand:<VSUBEL> 2 "register_operand" " r, r, r, r")))
+ (match_operand:VI_D 3 "register_operand" " vr, vr, vr, vr")))
(match_dup 4)))]
"TARGET_VECTOR"
"@
vnmsac.vx\t%0,%2,%3%p1
+ vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
vnmsac.vx\t%0,%2,%3%p1
vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
@@ -5709,25 +5717,26 @@
})
(define_insn "*pred_<madd_msub><mode>"
- [(set (match_operand:VF 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VF 0 "register_operand" "=vd, ?&vd, vr, ?&vr")
(if_then_else:VF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus_minus:VF
(mult:VF
- (match_operand:VF 2 "register_operand" " 0, 0, vr")
- (match_operand:VF 3 "register_operand" " vr, vr, vr"))
- (match_operand:VF 4 "register_operand" " vr, vr, vr"))
+ (match_operand:VF 2 "register_operand" " 0, vr, 0, vr")
+ (match_operand:VF 3 "register_operand" " vr, vr, vr, vr"))
+ (match_operand:VF 4 "register_operand" " vr, vr, vr, vr"))
(match_dup 2)))]
"TARGET_VECTOR"
"@
vf<madd_msub>.vv\t%0,%3,%4%p1
+ vmv.v.v\t%0,%2\;vf<madd_msub>.vv\t%0,%3,%4%p1
vf<madd_msub>.vv\t%0,%3,%4%p1
vmv.v.v\t%0,%2\;vf<madd_msub>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vfmuladd")
@@ -5739,25 +5748,26 @@
(set (attr "avl_type") (symbol_ref "INTVAL (operands[8])"))])
(define_insn "*pred_<macc_msac><mode>"
- [(set (match_operand:VF 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VF 0 "register_operand" "=vd, ?&vd, vr, ?&vr")
(if_then_else:VF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus_minus:VF
(mult:VF
- (match_operand:VF 2 "register_operand" " vr, vr, vr")
- (match_operand:VF 3 "register_operand" " vr, vr, vr"))
- (match_operand:VF 4 "register_operand" " 0, 0, vr"))
+ (match_operand:VF 2 "register_operand" " vr, vr, vr, vr")
+ (match_operand:VF 3 "register_operand" " vr, vr, vr, vr"))
+ (match_operand:VF 4 "register_operand" " 0, vr, 0, vr"))
(match_dup 4)))]
"TARGET_VECTOR"
"@
vf<macc_msac>.vv\t%0,%2,%3%p1
+ vmv.v.v\t%0,%4\;vf<macc_msac>.vv\t%0,%2,%3%p1
vf<macc_msac>.vv\t%0,%2,%3%p1
vmv.v.v\t%0,%4\;vf<macc_msac>.vv\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
@@ -5825,26 +5835,27 @@
{})
(define_insn "*pred_<madd_msub><mode>_scalar"
- [(set (match_operand:VF 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VF 0 "register_operand" "=vd, ?&vd, vr, ?&vr")
(if_then_else:VF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus_minus:VF
(mult:VF
(vec_duplicate:VF
- (match_operand:<VEL> 2 "register_operand" " f, f, vr"))
- (match_operand:VF 3 "register_operand" " 0, 0, vr"))
- (match_operand:VF 4 "register_operand" " vr, vr, vr"))
+ (match_operand:<VEL> 2 "register_operand" " f, f, f, f"))
+ (match_operand:VF 3 "register_operand" " 0, vr, 0, vr"))
+ (match_operand:VF 4 "register_operand" " vr, vr, vr, vr"))
(match_dup 3)))]
"TARGET_VECTOR"
"@
vf<madd_msub>.vf\t%0,%2,%4%p1
+ vmv.v.v\t%0,%2\;vf<madd_msub>.vf\t%0,%2,%4%p1
vf<madd_msub>.vf\t%0,%2,%4%p1
vmv.v.v\t%0,%2\;vf<madd_msub>.vf\t%0,%2,%4%p1"
[(set_attr "type" "vfmuladd")
@@ -5856,26 +5867,27 @@
(set (attr "avl_type") (symbol_ref "INTVAL (operands[8])"))])
(define_insn "*pred_<macc_msac><mode>_scalar"
- [(set (match_operand:VF 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VF 0 "register_operand" "=vd, ?&vd, vr, ?&vr")
(if_then_else:VF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus_minus:VF
(mult:VF
(vec_duplicate:VF
- (match_operand:<VEL> 2 "register_operand" " f, f, vr"))
- (match_operand:VF 3 "register_operand" " vr, vr, vr"))
- (match_operand:VF 4 "register_operand" " 0, 0, vr"))
+ (match_operand:<VEL> 2 "register_operand" " f, f, f, f"))
+ (match_operand:VF 3 "register_operand" " vr, vr, vr, vr"))
+ (match_operand:VF 4 "register_operand" " 0, vr, 0, vr"))
(match_dup 4)))]
"TARGET_VECTOR"
"@
vf<macc_msac>.vf\t%0,%2,%3%p1
+ vmv.v.v\t%0,%4\;vf<macc_msac>.vf\t%0,%2,%3%p1
vf<macc_msac>.vf\t%0,%2,%3%p1
vmv.v.v\t%0,%4\;vf<macc_msac>.vf\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
@@ -5948,26 +5960,27 @@
})
(define_insn "*pred_<nmsub_nmadd><mode>"
- [(set (match_operand:VF 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VF 0 "register_operand" "=vd, ?&vd, vr, ?&vr")
(if_then_else:VF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus_minus:VF
(neg:VF
(mult:VF
- (match_operand:VF 2 "register_operand" " 0, 0, vr")
- (match_operand:VF 3 "register_operand" " vr, vr, vr")))
- (match_operand:VF 4 "register_operand" " vr, vr, vr"))
+ (match_operand:VF 2 "register_operand" " 0, vr, 0, vr")
+ (match_operand:VF 3 "register_operand" " vr, vr, vr, vr")))
+ (match_operand:VF 4 "register_operand" " vr, vr, vr, vr"))
(match_dup 2)))]
"TARGET_VECTOR"
"@
vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
+ vmv.v.v\t%0,%2\;vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
vmv.v.v\t%0,%2\;vf<nmsub_nmadd>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vfmuladd")
@@ -5979,26 +5992,27 @@
(set (attr "avl_type") (symbol_ref "INTVAL (operands[8])"))])
(define_insn "*pred_<nmsac_nmacc><mode>"
- [(set (match_operand:VF 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VF 0 "register_operand" "=vd, ?&vd, vr, ?&vr")
(if_then_else:VF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus_minus:VF
(neg:VF
(mult:VF
- (match_operand:VF 2 "register_operand" " vr, vr, vr")
- (match_operand:VF 3 "register_operand" " vr, vr, vr")))
- (match_operand:VF 4 "register_operand" " 0, 0, vr"))
+ (match_operand:VF 2 "register_operand" " vr, vr, vr, vr")
+ (match_operand:VF 3 "register_operand" " vr, vr, vr, vr")))
+ (match_operand:VF 4 "register_operand" " 0, vr, 0, vr"))
(match_dup 4)))]
"TARGET_VECTOR"
"@
vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
+ vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vv\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
@@ -6068,27 +6082,28 @@
{})
(define_insn "*pred_<nmsub_nmadd><mode>_scalar"
- [(set (match_operand:VF 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VF 0 "register_operand" "=vd, ?&vd, vr, ?&vr")
(if_then_else:VF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus_minus:VF
(neg:VF
(mult:VF
(vec_duplicate:VF
- (match_operand:<VEL> 2 "register_operand" " f, f, vr"))
- (match_operand:VF 3 "register_operand" " 0, 0, vr")))
- (match_operand:VF 4 "register_operand" " vr, vr, vr"))
+ (match_operand:<VEL> 2 "register_operand" " f, f, f, f"))
+ (match_operand:VF 3 "register_operand" " 0, vr, 0, vr")))
+ (match_operand:VF 4 "register_operand" " vr, vr, vr, vr"))
(match_dup 3)))]
"TARGET_VECTOR"
"@
vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
+ vmv.v.v\t%0,%2\;vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
vmv.v.v\t%0,%2\;vf<nmsub_nmadd>.vf\t%0,%2,%4%p1"
[(set_attr "type" "vfmuladd")
@@ -6100,27 +6115,28 @@
(set (attr "avl_type") (symbol_ref "INTVAL (operands[8])"))])
(define_insn "*pred_<nmsac_nmacc><mode>_scalar"
- [(set (match_operand:VF 0 "register_operand" "=vd, vr, ?&vr")
+ [(set (match_operand:VF 0 "register_operand" "=vd, ?&vd, vr, ?&vr")
(if_then_else:VF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK, rK")
- (match_operand 6 "const_int_operand" " i, i, i")
- (match_operand 7 "const_int_operand" " i, i, i")
- (match_operand 8 "const_int_operand" " i, i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1, Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus_minus:VF
(neg:VF
(mult:VF
(vec_duplicate:VF
- (match_operand:<VEL> 2 "register_operand" " f, f, vr"))
- (match_operand:VF 3 "register_operand" " vr, vr, vr")))
- (match_operand:VF 4 "register_operand" " 0, 0, vr"))
+ (match_operand:<VEL> 2 "register_operand" " f, f, f, f"))
+ (match_operand:VF 3 "register_operand" " vr, vr, vr, vr")))
+ (match_operand:VF 4 "register_operand" " 0, vr, 0, vr"))
(match_dup 4)))]
"TARGET_VECTOR"
"@
vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
+ vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vf\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
new file mode 100644
@@ -0,0 +1,146 @@
+/* { dg-do compile { target { riscv_vector } } } */
+
+#include<cmath>
+#include<iomanip>
+#include<iostream>
+#include"riscv_vector.h"
+ using std::cerr;
+ using std::endl;
+ using float32_t = float;
+ template<class T, class T2> constexpr T uint_to_float(T2 val) noexcept {
+ return *reinterpret_cast<T*>(&val);
+ }
+ constexpr const auto &f32(uint_to_float<float32_t, uint32_t>);
+ template<class T> struct To_uint {
+ };
+ template<class T> struct To_float {
+ };
+ template <class T> bool __attribute__((noinline)) check(const T *a, const T *b, size_t size) {
+ bool rv = true;
+ return rv;
+ }
+ int main() {
+ int return_value = 0;
+ size_t var_130 = 16u;
+ int64_t var_129 [] = {6407112605923540992, 7272647525046157312};
+ size_t var_124 = 8u;
+ uint64_t var_123 [] = {9906865658538243999u};
+ uint8_t var_115 [] = {110u};
+ uint8_t var_114 [] = {250u};
+ uint8_t var_113 [] = {85u};
+ uint64_t var_112 [] = {10471272766982500600u};
+ int16_t var_111 [] = {-2006};
+ int16_t var_110 [] = {-4378};
+ int32_t var_109 [] = {1647339684};
+ int32_t var_108 [] = {-45182706};
+ float32_t var_107 [] = {f32(3074604095u)};
+ float32_t var_106 [] = {f32(2164875881u)};
+ uint64_t var_105 [] = {9667821316341385629u, 8665853697871192080u, 2296727757870876501u, 9319690956375735270u};
+ uint64_t var_104 [] = {10229777523837770056u, 5058114244293053252u, 4016718152548898966u, 2756762132515514864u, 12979562465336598027u, 9351327142878884765u, 10169532824221885571u, 16684712895996566663u, 17729467819489264236u, 13842535705175483943u, 16071673669189979410u, 4743566331245526950u, 18376697588466642016u};
+ uint64_t var_103 [] = {2157995689846407707u, 13051063102191975696u, 667546917742091778u, 10978954860465027859u, 7512865698603064303u, 11412209018099318119u, 17230490434926376223u, 5511085494400459768u, 12021950472614880189u, 6439010322831869859u, 595725076462813420u, 16606087987356197472u, 17145752785601127360u};
+ uint64_t var_102 [] = {9301718537522199913u, 10819086860314557774u, 10656584420239303945u, 18162891878976053019u, 17582299882264864087u, 3506350464953941474u, 16501932931556013929u, 9988547110636159311u, 9718034997511172333u, 1092366368244583945u, 5109601230835787529u, 13831286862231834279u, 12761625194863065558u};
+ uint8_t var_101 [] = {143u, 239u, 79u, 62u, 119u, 129u, 138u, 31u, 52u, 39u, 14u, 196u, 78u};
+ uint8_t var_100 [] = {103u, 104u, 206u, 161u, 60u, 69u, 226u, 88u, 239u, 42u, 186u, 112u, 125u};
+ uint32_t var_99 [] = {3495075296u};
+ uint64_t var_98 [] = {9906865658538243999u};
+ uint64_t var_97 [] = {7694632992515967866u};
+ uint32_t var_96 [] = {4293619354u, 3967328371u, 2785433908u, 331120895u, 2375449208u, 1425357628u, 1153751515u, 2186320801u, 1880089354u, 2076227242u, 4237196230u, 2836455933u, 3097740105u};
+ int64_t var_95 [] = {-7108892398040540931, -2902837743657368167};
+ int64_t var_94 [] = {7911386467741268052, -8388898381530297241};
+ int64_t var_93 [] = {931229248179863779, -1624244528645905701};
+ uint64_t var_92 [] = {17340829577349396086u};
+ uint64_t var_91 [] = {18346545924178849585u};
+ int16_t var_90 [] = {-25624};
+ int16_t var_89 [] = {15974};
+ uint64_t var_88 [] = {11014950311344851928u, 5197895686424734690u, 9619221639658229002u, 6423828962551482482u, 3156046659732662734u, 1472617878308541895u, 11035320440097309916u, 12393707216814410666u, 8788430720741366090u, 217587670795736361u, 1034088785663124616u, 18335835928116953720u, 1219757139178322839u};
+ uint64_t var_87 [] = {11559190678994979473u, 6013183923535003445u, 9971558496525733899u, 2787678958601411480u, 5739078113805693938u, 2694895444893152828u, 7994463311770754650u, 13650164825622177945u, 579220147540012074u, 6498634454620420429u, 7041816877517723765u, 13345465999689538704u, 11472405609307956315u};
+ uint64_t var_86 [] = {7377984487966841588u, 1244521538013068582u, 5522888345332612590u, 8649552073822691714u, 5910835830871302969u, 6710091485639431753u, 205677334027990159u, 17448746121181070296u, 15084775211187351267u, 11608730292389478881u, 14526352387614949924u, 10741971681343338318u, 8417563342868988484u};
+ uint64_t var_85 [] = {1310761773208763493u};
+ uint64_t var_84 [] = {10122258205224262931u};
+ float32_t var_83 [] = {f32(2944350916u)};
+ uint64_t var_82 [] = {13530952249788556412u};
+ uint64_t var_81 [] = {2527479261213652452u};
+ uint64_t var_80 [] = {15804309544005635250u, 3788976741168456722u, 3002061457029235545u, 14726833968381313595u};
+ uint64_t var_79 [] = {13549762868574804401u, 13204174387864329136u, 18406577593008423761u, 4969511752337926994u, 15413644009768277692u, 14859789808630283526u, 13189951294894585154u, 14360526058145790575u, 2845974406889251674u, 8099633476915063883u, 16479120087725370936u, 7570588426265803252u, 8263600270781094814u};
+ uint64_t var_78 [] = {2382766674713339559u, 15396784267988302988u, 576799166167298943u, 10026375368356785750u, 14673786198559896308u, 17161962445212270786u, 14603586340041125952u, 17114221995192876065u, 16940195199015099700u, 9221412721401043987u, 11043806608938831424u, 11849880432889525171u, 13895008912398050346u};
+ uint64_t var_77 [] = {8588415007675966875u, 4269152311949827424u, 10552325710590486768u, 9144378016933111899u, 6425268388282000562u, 7844939888788970509u, 305876072217132961u, 2947493040686772403u, 7848958813978332337u, 14545434747904560508u, 14623044888673756144u, 16921902780147279842u, 12883762735999567891u};
+ uint64_t var_76 [] = {17585252785599735318u, 1289346800026352327u, 16392037194040934852u, 7130446253967916526u, 16384795498745159942u, 2768738621788241130u, 9379954951994933230u, 3811151817887162513u, 8078830653024888383u, 11400832315509777915u, 7900078021449525711u, 17739319150095105139u, 17865296949477776703u};
+ int64_t var_75 [] = {2136421353293283350, 3414837021317309556};
+ int64_t var_74 [] = {-2584452322456818966, 8445727102755491570};
+ uint64_t var_73 [] = {7421362057176036412u, 11834710639121801155u, 1879590864914770322u, 11518748137954439635u, 14445209116677294173u, 9714585140727960024u, 11636376935140013809u, 10242413799601869450u, 8639431125744255140u, 598337578025024214u, 5343291454742957618u, 8705203272278062278u, 17020841285497899380u};
+ int64_t var_72 = -7454502432211094080;
+ uint32_t var_71 = 611347429u;
+ uint64_t var_70 = 1089802931060859193u;
+ size_t var_69 = 0u;
+ float32_t var_68 = f32(829412060u);
+ vuint8mf8_t var_20 = __riscv_vle8_v_u8mf8(var_115, 1);
+ vuint8mf8_t var_21 = __riscv_vle8_v_u8mf8(var_114, 1);
+ vuint64m1_t var_25 = __riscv_vle64_v_u64m1(var_112, 1);
+ vint16mf4_t var_26 = __riscv_vle16_v_i16mf4(var_111, 1);
+ vint16mf4_t var_27 = __riscv_vle16_v_i16mf4(var_110, 1);
+ vint32mf2_t var_28 = __riscv_vle32_v_i32mf2(var_109, 1);
+ vint32mf2_t var_29 = __riscv_vle32_v_i32mf2(var_108, 1);
+ vfloat32m8_t var_30 = __riscv_vle32_v_f32m8(var_107, 1);
+ vfloat32m8_t var_31 = __riscv_vle32_v_f32m8(var_106, 1);
+ vuint64m2_t var_32 = __riscv_vle64_v_u64m2(var_105, 4);
+ vuint64m8_t var_34 = __riscv_vle64_v_u64m8(var_104, 13);
+ vuint64m8_t var_35 = __riscv_vle64_v_u64m8(var_103, 13);
+ vuint8m1_t var_37 = __riscv_vle8_v_u8m1(var_101, 13);
+ vuint8m1_t var_38 = __riscv_vle8_v_u8m1(var_100, 13);
+ vuint32m8_t var_39 = __riscv_vle32_v_u32m8(var_99, 1);
+ vuint64m1_t var_40 = __riscv_vle64_v_u64m1(var_98, 1);
+ vuint32m4_t var_42 = __riscv_vle32_v_u32m4(var_96, 13);
+ vint64m8_t var_43 = __riscv_vle64_v_i64m8(var_95, 2);
+ vint64m8_t var_44 = __riscv_vle64_v_i64m8(var_94, 2);
+ vint64m8_t var_45 = __riscv_vle64_v_i64m8(var_93, 2);
+ vuint64m4_t var_47 = __riscv_vle64_v_u64m4(var_92, 1);
+ vuint64m1_t var_48 = __riscv_vle64_v_u64m1(var_91, 1);
+ vint16m1_t var_49 = __riscv_vle16_v_i16m1(var_90, 1);
+ vint16m1_t var_50 = __riscv_vle16_v_i16m1(var_89, 1);
+ vuint64m8_t var_51 = __riscv_vle64_v_u64m8(var_88, 13);
+ vuint64m8_t var_52 = __riscv_vle64_v_u64m8(var_87, 13);
+ vuint64m1_t var_55 = __riscv_vle64_v_u64m1(var_85, 1);
+ vuint64m1_t var_56 = __riscv_vle64_v_u64m1(var_84, 1);
+ vfloat32mf2_t var_57 = __riscv_vle32_v_f32mf2(var_83, 1);
+ vuint64m1_t var_58 = __riscv_vle64_v_u64m1(var_82, 1);
+ vuint64m1_t var_59 = __riscv_vle64_v_u64m1(var_81, 1);
+ vuint64m2_t var_60 = __riscv_vle64_v_u64m2(var_80, 4);
+ vuint64m8_t var_61 = __riscv_vle64_v_u64m8(var_79, 13);
+ vuint64m8_t var_62 = __riscv_vle64_v_u64m8(var_78, 13);
+ vuint64m8_t var_63 = __riscv_vle64_v_u64m8(var_77, 13);
+ vuint64m8_t var_64 = __riscv_vle64_v_u64m8(var_76, 13);
+ vint64m8_t var_65 = __riscv_vle64_v_i64m8(var_75, 2);
+ vuint64m8_t var_67 = __riscv_vle64_v_u64m8(var_73, 13);
+ vbool64_t var_24 = __riscv_vmsbc_vv_i16mf4_b64(var_26, var_27, 1);
+ vbool64_t var_23 = __riscv_vmseq_vv_i32mf2_b64(var_28, var_29, 1);
+ vbool4_t var_17 = __riscv_vmfeq_vv_f32m8_b4(var_30, var_31, 1);
+ vuint64m2_t var_16 = __riscv_vmv_s_x_u64m2_tu(var_32, var_70, 4);
+ vbool8_t var_33 = __riscv_vmadc_vv_u8m1_b8(var_37, var_38, 13);
+ vbool8_t var_13 = __riscv_vmadc_vx_u32m4_b8(var_42, var_71, 13);
+ vint64m8_t var_12 = __riscv_vnmsub_vv_i64m8(var_43, var_44, var_45, 2);
+ vbool16_t var_46 = __riscv_vmsle_vv_i16m1_b16(var_49, var_50, 1);
+ vbool64_t var_54 = __riscv_vmfle_vf_f32mf2_b64(var_57, var_68, 1);
+ uint64_t var_7 = __riscv_vmv_x_s_u64m2_u64(var_60);
+ uint64_t var_2 = __riscv_vmv_x_s_u64m2_u64(var_16);
+ vuint64m8_t var_5 = __riscv_vmaxu_vv_u64m8_mu(var_13, var_62, var_63, var_64, 13);
+ vint64m8_t var_4 = __riscv_vmacc_vx_i64m8(var_12, var_72, var_65, 2);
+ vuint64m8_t var_1 = __riscv_vmulhu_vx_u64m8_mu(var_13, var_5, var_67, var_2, 13);
+ vuint64m8_t var_0 = __riscv_vrsub_vx_u64m8(var_1, var_7, 13);
+ vint64m8_t var_3 = __riscv_vsll_vv_i64m8_mu(var_13, var_4, var_4, var_0, 2);
+ vuint64m8_t var_6 = __riscv_vsrl_vv_u64m8(var_0, var_61, 13);
+ vuint64m1_t var_8 = __riscv_vredand_vs_u64m8_u64m1_tum(var_13, var_58, var_0, var_59, 1);
+ __riscv_vse64_v_i64m8(var_74, var_3, 2);
+ vuint64m8_t var_10 = __riscv_vmadd_vv_u64m8_mu(var_13, var_6, var_51, var_52, 13);
+ vuint64m8_t var_15 = __riscv_vssubu_vv_u64m8_mu(var_33, var_6, var_34, var_35, 13);
+ vuint64m1_t var_9 = __riscv_vadd_vv_u64m1_mu(var_54, var_8, var_55, var_56, 1);
+ vuint64m1_t var_11 = __riscv_vredxor_vs_u64m4_u64m1_tum(var_46, var_8, var_47, var_48, 1);
+ if(!check(var_74, var_129, var_130)) {cerr << "check 128 fails" << endl; return_value = 1;}
+ __riscv_vse64_v_u64m8(var_86, var_10, 13);
+ __riscv_vse64_v_u64m8(var_102, var_15, 13);
+ vbool64_t var_18 = __riscv_vmsgeu_vv_u64m1_b64_mu(var_23, var_24, var_9, var_25, 1);
+ vuint64m1_t var_14 = __riscv_vwredsumu_vs_u32m8_u64m1_tum(var_17, var_11, var_39, var_40, 1);
+ vuint8mf8_t var_19 = __riscv_vslideup_vx_u8mf8_mu(var_18, var_20, var_21, var_69, 1);
+ __riscv_vse64_v_u64m1(var_97, var_14, 1);
+ __riscv_vse8_v_u8mf8(var_113, var_19, 1);
+ if(!check(var_97, var_123, var_124)) {cerr << "check 122 fails" << endl; return_value = 1;}
+ }
new file mode 100644
@@ -0,0 +1,146 @@
+/* { dg-do compile { target { riscv_vector } } } */
+
+#include<cmath>
+#include<iomanip>
+#include<iostream>
+#include"riscv_vector.h"
+ using std::cerr;
+ using std::endl;
+ using float32_t = float;
+ template<class T, class T2> constexpr T uint_to_float(T2 val) noexcept {
+ return *reinterpret_cast<T*>(&val);
+ }
+ constexpr const auto &f32(uint_to_float<float32_t, uint32_t>);
+ template<class T> struct To_uint {
+ };
+ template<class T> struct To_float {
+ };
+ template <class T> bool __attribute__((noinline)) check(const T *a, const T *b, size_t size) {
+ bool rv = true;
+ return rv;
+ }
+ int main() {
+ int return_value = 0;
+ size_t var_130 = 16u;
+ int64_t var_129 [] = {6407112605923540992, 7272647525046157312};
+ size_t var_124 = 8u;
+ uint64_t var_123 [] = {9906865658538243999u};
+ uint8_t var_115 [] = {110u};
+ uint8_t var_114 [] = {250u};
+ uint8_t var_113 [] = {85u};
+ uint64_t var_112 [] = {10471272766982500600u};
+ int16_t var_111 [] = {-2006};
+ int16_t var_110 [] = {-4378};
+ int32_t var_109 [] = {1647339684};
+ int32_t var_108 [] = {-45182706};
+ float32_t var_107 [] = {f32(3074604095u)};
+ float32_t var_106 [] = {f32(2164875881u)};
+ uint64_t var_105 [] = {9667821316341385629u, 8665853697871192080u, 2296727757870876501u, 9319690956375735270u};
+ uint64_t var_104 [] = {10229777523837770056u, 5058114244293053252u, 4016718152548898966u, 2756762132515514864u, 12979562465336598027u, 9351327142878884765u, 10169532824221885571u, 16684712895996566663u, 17729467819489264236u, 13842535705175483943u, 16071673669189979410u, 4743566331245526950u, 18376697588466642016u};
+ uint64_t var_103 [] = {2157995689846407707u, 13051063102191975696u, 667546917742091778u, 10978954860465027859u, 7512865698603064303u, 11412209018099318119u, 17230490434926376223u, 5511085494400459768u, 12021950472614880189u, 6439010322831869859u, 595725076462813420u, 16606087987356197472u, 17145752785601127360u};
+ uint64_t var_102 [] = {9301718537522199913u, 10819086860314557774u, 10656584420239303945u, 18162891878976053019u, 17582299882264864087u, 3506350464953941474u, 16501932931556013929u, 9988547110636159311u, 9718034997511172333u, 1092366368244583945u, 5109601230835787529u, 13831286862231834279u, 12761625194863065558u};
+ uint8_t var_101 [] = {143u, 239u, 79u, 62u, 119u, 129u, 138u, 31u, 52u, 39u, 14u, 196u, 78u};
+ uint8_t var_100 [] = {103u, 104u, 206u, 161u, 60u, 69u, 226u, 88u, 239u, 42u, 186u, 112u, 125u};
+ uint32_t var_99 [] = {3495075296u};
+ uint64_t var_98 [] = {9906865658538243999u};
+ uint64_t var_97 [] = {7694632992515967866u};
+ uint32_t var_96 [] = {4293619354u, 3967328371u, 2785433908u, 331120895u, 2375449208u, 1425357628u, 1153751515u, 2186320801u, 1880089354u, 2076227242u, 4237196230u, 2836455933u, 3097740105u};
+ int64_t var_95 [] = {-7108892398040540931, -2902837743657368167};
+ int64_t var_94 [] = {7911386467741268052, -8388898381530297241};
+ int64_t var_93 [] = {931229248179863779, -1624244528645905701};
+ uint64_t var_92 [] = {17340829577349396086u};
+ uint64_t var_91 [] = {18346545924178849585u};
+ int16_t var_90 [] = {-25624};
+ int16_t var_89 [] = {15974};
+ uint64_t var_88 [] = {11014950311344851928u, 5197895686424734690u, 9619221639658229002u, 6423828962551482482u, 3156046659732662734u, 1472617878308541895u, 11035320440097309916u, 12393707216814410666u, 8788430720741366090u, 217587670795736361u, 1034088785663124616u, 18335835928116953720u, 1219757139178322839u};
+ uint64_t var_87 [] = {11559190678994979473u, 6013183923535003445u, 9971558496525733899u, 2787678958601411480u, 5739078113805693938u, 2694895444893152828u, 7994463311770754650u, 13650164825622177945u, 579220147540012074u, 6498634454620420429u, 7041816877517723765u, 13345465999689538704u, 11472405609307956315u};
+ uint64_t var_86 [] = {7377984487966841588u, 1244521538013068582u, 5522888345332612590u, 8649552073822691714u, 5910835830871302969u, 6710091485639431753u, 205677334027990159u, 17448746121181070296u, 15084775211187351267u, 11608730292389478881u, 14526352387614949924u, 10741971681343338318u, 8417563342868988484u};
+ uint64_t var_85 [] = {1310761773208763493u};
+ uint64_t var_84 [] = {10122258205224262931u};
+ float32_t var_83 [] = {f32(2944350916u)};
+ uint64_t var_82 [] = {13530952249788556412u};
+ uint64_t var_81 [] = {2527479261213652452u};
+ uint64_t var_80 [] = {15804309544005635250u, 3788976741168456722u, 3002061457029235545u, 14726833968381313595u};
+ uint64_t var_79 [] = {13549762868574804401u, 13204174387864329136u, 18406577593008423761u, 4969511752337926994u, 15413644009768277692u, 14859789808630283526u, 13189951294894585154u, 14360526058145790575u, 2845974406889251674u, 8099633476915063883u, 16479120087725370936u, 7570588426265803252u, 8263600270781094814u};
+ uint64_t var_78 [] = {2382766674713339559u, 15396784267988302988u, 576799166167298943u, 10026375368356785750u, 14673786198559896308u, 17161962445212270786u, 14603586340041125952u, 17114221995192876065u, 16940195199015099700u, 9221412721401043987u, 11043806608938831424u, 11849880432889525171u, 13895008912398050346u};
+ uint64_t var_77 [] = {8588415007675966875u, 4269152311949827424u, 10552325710590486768u, 9144378016933111899u, 6425268388282000562u, 7844939888788970509u, 305876072217132961u, 2947493040686772403u, 7848958813978332337u, 14545434747904560508u, 14623044888673756144u, 16921902780147279842u, 12883762735999567891u};
+ uint64_t var_76 [] = {17585252785599735318u, 1289346800026352327u, 16392037194040934852u, 7130446253967916526u, 16384795498745159942u, 2768738621788241130u, 9379954951994933230u, 3811151817887162513u, 8078830653024888383u, 11400832315509777915u, 7900078021449525711u, 17739319150095105139u, 17865296949477776703u};
+ int64_t var_75 [] = {2136421353293283350, 3414837021317309556};
+ int64_t var_74 [] = {-2584452322456818966, 8445727102755491570};
+ uint64_t var_73 [] = {7421362057176036412u, 11834710639121801155u, 1879590864914770322u, 11518748137954439635u, 14445209116677294173u, 9714585140727960024u, 11636376935140013809u, 10242413799601869450u, 8639431125744255140u, 598337578025024214u, 5343291454742957618u, 8705203272278062278u, 17020841285497899380u};
+ int64_t var_72 = -7454502432211094080;
+ uint32_t var_71 = 611347429u;
+ uint64_t var_70 = 1089802931060859193u;
+ size_t var_69 = 0u;
+ float32_t var_68 = f32(829412060u);
+ vuint8mf8_t var_20 = __riscv_vle8_v_u8mf8(var_115, 1);
+ vuint8mf8_t var_21 = __riscv_vle8_v_u8mf8(var_114, 1);
+ vuint64m1_t var_25 = __riscv_vle64_v_u64m1(var_112, 1);
+ vint16mf4_t var_26 = __riscv_vle16_v_i16mf4(var_111, 1);
+ vint16mf4_t var_27 = __riscv_vle16_v_i16mf4(var_110, 1);
+ vint32mf2_t var_28 = __riscv_vle32_v_i32mf2(var_109, 1);
+ vint32mf2_t var_29 = __riscv_vle32_v_i32mf2(var_108, 1);
+ vfloat32m8_t var_30 = __riscv_vle32_v_f32m8(var_107, 1);
+ vfloat32m8_t var_31 = __riscv_vle32_v_f32m8(var_106, 1);
+ vuint64m2_t var_32 = __riscv_vle64_v_u64m2(var_105, 4);
+ vuint64m8_t var_34 = __riscv_vle64_v_u64m8(var_104, 13);
+ vuint64m8_t var_35 = __riscv_vle64_v_u64m8(var_103, 13);
+ vuint8m1_t var_37 = __riscv_vle8_v_u8m1(var_101, 13);
+ vuint8m1_t var_38 = __riscv_vle8_v_u8m1(var_100, 13);
+ vuint32m8_t var_39 = __riscv_vle32_v_u32m8(var_99, 1);
+ vuint64m1_t var_40 = __riscv_vle64_v_u64m1(var_98, 1);
+ vuint32m4_t var_42 = __riscv_vle32_v_u32m4(var_96, 13);
+ vint64m8_t var_43 = __riscv_vle64_v_i64m8(var_95, 2);
+ vint64m8_t var_44 = __riscv_vle64_v_i64m8(var_94, 2);
+ vint64m8_t var_45 = __riscv_vle64_v_i64m8(var_93, 2);
+ vuint64m4_t var_47 = __riscv_vle64_v_u64m4(var_92, 1);
+ vuint64m1_t var_48 = __riscv_vle64_v_u64m1(var_91, 1);
+ vint16m1_t var_49 = __riscv_vle16_v_i16m1(var_90, 1);
+ vint16m1_t var_50 = __riscv_vle16_v_i16m1(var_89, 1);
+ vuint64m8_t var_51 = __riscv_vle64_v_u64m8(var_88, 13);
+ vuint64m8_t var_52 = __riscv_vle64_v_u64m8(var_87, 13);
+ vuint64m1_t var_55 = __riscv_vle64_v_u64m1(var_85, 1);
+ vuint64m1_t var_56 = __riscv_vle64_v_u64m1(var_84, 1);
+ vfloat32mf2_t var_57 = __riscv_vle32_v_f32mf2(var_83, 1);
+ vuint64m1_t var_58 = __riscv_vle64_v_u64m1(var_82, 1);
+ vuint64m1_t var_59 = __riscv_vle64_v_u64m1(var_81, 1);
+ vuint64m2_t var_60 = __riscv_vle64_v_u64m2(var_80, 4);
+ vuint64m8_t var_61 = __riscv_vle64_v_u64m8(var_79, 13);
+ vuint64m8_t var_62 = __riscv_vle64_v_u64m8(var_78, 13);
+ vuint64m8_t var_63 = __riscv_vle64_v_u64m8(var_77, 13);
+ vuint64m8_t var_64 = __riscv_vle64_v_u64m8(var_76, 13);
+ vint64m8_t var_65 = __riscv_vle64_v_i64m8(var_75, 2);
+ vuint64m8_t var_67 = __riscv_vle64_v_u64m8(var_73, 13);
+ vbool64_t var_24 = __riscv_vmsbc_vv_i16mf4_b64(var_26, var_27, 1);
+ vbool64_t var_23 = __riscv_vmseq_vv_i32mf2_b64(var_28, var_29, 1);
+ vbool4_t var_17 = __riscv_vmfeq_vv_f32m8_b4(var_30, var_31, 1);
+ vuint64m2_t var_16 = __riscv_vmv_s_x_u64m2_tu(var_32, var_70, 4);
+ vbool8_t var_33 = __riscv_vmadc_vv_u8m1_b8(var_37, var_38, 13);
+ vbool8_t var_13 = __riscv_vmadc_vx_u32m4_b8(var_42, var_71, 13);
+ vint64m8_t var_12 = __riscv_vnmsub_vv_i64m8(var_43, var_44, var_45, 2);
+ vbool16_t var_46 = __riscv_vmsle_vv_i16m1_b16(var_49, var_50, 1);
+ vbool64_t var_54 = __riscv_vmfle_vf_f32mf2_b64(var_57, var_68, 1);
+ uint64_t var_7 = __riscv_vmv_x_s_u64m2_u64(var_60);
+ uint64_t var_2 = __riscv_vmv_x_s_u64m2_u64(var_16);
+ vuint64m8_t var_5 = __riscv_vmaxu_vv_u64m8_mu(var_13, var_62, var_63, var_64, 13);
+ vint64m8_t var_4 = __riscv_vmacc_vx_i64m8(var_12, var_72, var_65, 2);
+ vuint64m8_t var_1 = __riscv_vmulhu_vx_u64m8_mu(var_13, var_5, var_67, var_2, 13);
+ vuint64m8_t var_0 = __riscv_vrsub_vx_u64m8(var_1, var_7, 13);
+ vint64m8_t var_3 = __riscv_vsll_vv_i64m8_mu(var_13, var_4, var_4, var_0, 2);
+ vuint64m8_t var_6 = __riscv_vsrl_vv_u64m8(var_0, var_61, 13);
+ vuint64m1_t var_8 = __riscv_vredand_vs_u64m8_u64m1_tum(var_13, var_58, var_0, var_59, 1);
+ __riscv_vse64_v_i64m8(var_74, var_3, 2);
+ vuint64m8_t var_10 = __riscv_vmacc_vv_u64m8_mu(var_13, var_6, var_51, var_52, 13);
+ vuint64m8_t var_15 = __riscv_vssubu_vv_u64m8_mu(var_33, var_6, var_34, var_35, 13);
+ vuint64m1_t var_9 = __riscv_vadd_vv_u64m1_mu(var_54, var_8, var_55, var_56, 1);
+ vuint64m1_t var_11 = __riscv_vredxor_vs_u64m4_u64m1_tum(var_46, var_8, var_47, var_48, 1);
+ if(!check(var_74, var_129, var_130)) {cerr << "check 128 fails" << endl; return_value = 1;}
+ __riscv_vse64_v_u64m8(var_86, var_10, 13);
+ __riscv_vse64_v_u64m8(var_102, var_15, 13);
+ vbool64_t var_18 = __riscv_vmsgeu_vv_u64m1_b64_mu(var_23, var_24, var_9, var_25, 1);
+ vuint64m1_t var_14 = __riscv_vwredsumu_vs_u32m8_u64m1_tum(var_17, var_11, var_39, var_40, 1);
+ vuint8mf8_t var_19 = __riscv_vslideup_vx_u8mf8_mu(var_18, var_20, var_21, var_69, 1);
+ __riscv_vse64_v_u64m1(var_97, var_14, 1);
+ __riscv_vse8_v_u8mf8(var_113, var_19, 1);
+ if(!check(var_97, var_123, var_124)) {cerr << "check 122 fails" << endl; return_value = 1;}
+ }
new file mode 100644
@@ -0,0 +1,146 @@
+/* { dg-do compile { target { riscv_vector } } } */
+
+#include<cmath>
+#include<iomanip>
+#include<iostream>
+#include"riscv_vector.h"
+ using std::cerr;
+ using std::endl;
+ using float32_t = float;
+ template<class T, class T2> constexpr T uint_to_float(T2 val) noexcept {
+ return *reinterpret_cast<T*>(&val);
+ }
+ constexpr const auto &f32(uint_to_float<float32_t, uint32_t>);
+ template<class T> struct To_uint {
+ };
+ template<class T> struct To_float {
+ };
+ template <class T> bool __attribute__((noinline)) check(const T *a, const T *b, size_t size) {
+ bool rv = true;
+ return rv;
+ }
+ int main() {
+ int return_value = 0;
+ size_t var_130 = 16u;
+ int64_t var_129 [] = {6407112605923540992, 7272647525046157312};
+ size_t var_124 = 8u;
+ uint64_t var_123 [] = {9906865658538243999u};
+ uint8_t var_115 [] = {110u};
+ uint8_t var_114 [] = {250u};
+ uint8_t var_113 [] = {85u};
+ uint64_t var_112 [] = {10471272766982500600u};
+ int16_t var_111 [] = {-2006};
+ int16_t var_110 [] = {-4378};
+ int32_t var_109 [] = {1647339684};
+ int32_t var_108 [] = {-45182706};
+ float32_t var_107 [] = {f32(3074604095u)};
+ float32_t var_106 [] = {f32(2164875881u)};
+ uint64_t var_105 [] = {9667821316341385629u, 8665853697871192080u, 2296727757870876501u, 9319690956375735270u};
+ uint64_t var_104 [] = {10229777523837770056u, 5058114244293053252u, 4016718152548898966u, 2756762132515514864u, 12979562465336598027u, 9351327142878884765u, 10169532824221885571u, 16684712895996566663u, 17729467819489264236u, 13842535705175483943u, 16071673669189979410u, 4743566331245526950u, 18376697588466642016u};
+ uint64_t var_103 [] = {2157995689846407707u, 13051063102191975696u, 667546917742091778u, 10978954860465027859u, 7512865698603064303u, 11412209018099318119u, 17230490434926376223u, 5511085494400459768u, 12021950472614880189u, 6439010322831869859u, 595725076462813420u, 16606087987356197472u, 17145752785601127360u};
+ uint64_t var_102 [] = {9301718537522199913u, 10819086860314557774u, 10656584420239303945u, 18162891878976053019u, 17582299882264864087u, 3506350464953941474u, 16501932931556013929u, 9988547110636159311u, 9718034997511172333u, 1092366368244583945u, 5109601230835787529u, 13831286862231834279u, 12761625194863065558u};
+ uint8_t var_101 [] = {143u, 239u, 79u, 62u, 119u, 129u, 138u, 31u, 52u, 39u, 14u, 196u, 78u};
+ uint8_t var_100 [] = {103u, 104u, 206u, 161u, 60u, 69u, 226u, 88u, 239u, 42u, 186u, 112u, 125u};
+ uint32_t var_99 [] = {3495075296u};
+ uint64_t var_98 [] = {9906865658538243999u};
+ uint64_t var_97 [] = {7694632992515967866u};
+ uint32_t var_96 [] = {4293619354u, 3967328371u, 2785433908u, 331120895u, 2375449208u, 1425357628u, 1153751515u, 2186320801u, 1880089354u, 2076227242u, 4237196230u, 2836455933u, 3097740105u};
+ int64_t var_95 [] = {-7108892398040540931, -2902837743657368167};
+ int64_t var_94 [] = {7911386467741268052, -8388898381530297241};
+ int64_t var_93 [] = {931229248179863779, -1624244528645905701};
+ uint64_t var_92 [] = {17340829577349396086u};
+ uint64_t var_91 [] = {18346545924178849585u};
+ int16_t var_90 [] = {-25624};
+ int16_t var_89 [] = {15974};
+ uint64_t var_88 [] = {11014950311344851928u, 5197895686424734690u, 9619221639658229002u, 6423828962551482482u, 3156046659732662734u, 1472617878308541895u, 11035320440097309916u, 12393707216814410666u, 8788430720741366090u, 217587670795736361u, 1034088785663124616u, 18335835928116953720u, 1219757139178322839u};
+ uint64_t var_87 [] = {11559190678994979473u, 6013183923535003445u, 9971558496525733899u, 2787678958601411480u, 5739078113805693938u, 2694895444893152828u, 7994463311770754650u, 13650164825622177945u, 579220147540012074u, 6498634454620420429u, 7041816877517723765u, 13345465999689538704u, 11472405609307956315u};
+ uint64_t var_86 [] = {7377984487966841588u, 1244521538013068582u, 5522888345332612590u, 8649552073822691714u, 5910835830871302969u, 6710091485639431753u, 205677334027990159u, 17448746121181070296u, 15084775211187351267u, 11608730292389478881u, 14526352387614949924u, 10741971681343338318u, 8417563342868988484u};
+ uint64_t var_85 [] = {1310761773208763493u};
+ uint64_t var_84 [] = {10122258205224262931u};
+ float32_t var_83 [] = {f32(2944350916u)};
+ uint64_t var_82 [] = {13530952249788556412u};
+ uint64_t var_81 [] = {2527479261213652452u};
+ uint64_t var_80 [] = {15804309544005635250u, 3788976741168456722u, 3002061457029235545u, 14726833968381313595u};
+ uint64_t var_79 [] = {13549762868574804401u, 13204174387864329136u, 18406577593008423761u, 4969511752337926994u, 15413644009768277692u, 14859789808630283526u, 13189951294894585154u, 14360526058145790575u, 2845974406889251674u, 8099633476915063883u, 16479120087725370936u, 7570588426265803252u, 8263600270781094814u};
+ uint64_t var_78 [] = {2382766674713339559u, 15396784267988302988u, 576799166167298943u, 10026375368356785750u, 14673786198559896308u, 17161962445212270786u, 14603586340041125952u, 17114221995192876065u, 16940195199015099700u, 9221412721401043987u, 11043806608938831424u, 11849880432889525171u, 13895008912398050346u};
+ uint64_t var_77 [] = {8588415007675966875u, 4269152311949827424u, 10552325710590486768u, 9144378016933111899u, 6425268388282000562u, 7844939888788970509u, 305876072217132961u, 2947493040686772403u, 7848958813978332337u, 14545434747904560508u, 14623044888673756144u, 16921902780147279842u, 12883762735999567891u};
+ uint64_t var_76 [] = {17585252785599735318u, 1289346800026352327u, 16392037194040934852u, 7130446253967916526u, 16384795498745159942u, 2768738621788241130u, 9379954951994933230u, 3811151817887162513u, 8078830653024888383u, 11400832315509777915u, 7900078021449525711u, 17739319150095105139u, 17865296949477776703u};
+ int64_t var_75 [] = {2136421353293283350, 3414837021317309556};
+ int64_t var_74 [] = {-2584452322456818966, 8445727102755491570};
+ uint64_t var_73 [] = {7421362057176036412u, 11834710639121801155u, 1879590864914770322u, 11518748137954439635u, 14445209116677294173u, 9714585140727960024u, 11636376935140013809u, 10242413799601869450u, 8639431125744255140u, 598337578025024214u, 5343291454742957618u, 8705203272278062278u, 17020841285497899380u};
+ int64_t var_72 = -7454502432211094080;
+ uint32_t var_71 = 611347429u;
+ uint64_t var_70 = 1089802931060859193u;
+ size_t var_69 = 0u;
+ float32_t var_68 = f32(829412060u);
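+ /* Load the scalar test inputs above into vector operands with unit-stride loads.  */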
+ vuint8mf8_t var_20 = __riscv_vle8_v_u8mf8(var_115, 1);
+ vuint8mf8_t var_21 = __riscv_vle8_v_u8mf8(var_114, 1);
+ vuint64m1_t var_25 = __riscv_vle64_v_u64m1(var_112, 1);
+ vint16mf4_t var_26 = __riscv_vle16_v_i16mf4(var_111, 1);
+ vint16mf4_t var_27 = __riscv_vle16_v_i16mf4(var_110, 1);
+ vint32mf2_t var_28 = __riscv_vle32_v_i32mf2(var_109, 1);
+ vint32mf2_t var_29 = __riscv_vle32_v_i32mf2(var_108, 1);
+ vfloat32m8_t var_30 = __riscv_vle32_v_f32m8(var_107, 1);
+ vfloat32m8_t var_31 = __riscv_vle32_v_f32m8(var_106, 1);
+ vuint64m2_t var_32 = __riscv_vle64_v_u64m2(var_105, 4);
+ vuint64m8_t var_34 = __riscv_vle64_v_u64m8(var_104, 13);
+ vuint64m8_t var_35 = __riscv_vle64_v_u64m8(var_103, 13);
+ vuint8m1_t var_37 = __riscv_vle8_v_u8m1(var_101, 13);
+ vuint8m1_t var_38 = __riscv_vle8_v_u8m1(var_100, 13);
+ vuint32m8_t var_39 = __riscv_vle32_v_u32m8(var_99, 1);
+ vuint64m1_t var_40 = __riscv_vle64_v_u64m1(var_98, 1);
+ vuint32m4_t var_42 = __riscv_vle32_v_u32m4(var_96, 13);
+ vint64m8_t var_43 = __riscv_vle64_v_i64m8(var_95, 2);
+ vint64m8_t var_44 = __riscv_vle64_v_i64m8(var_94, 2);
+ vint64m8_t var_45 = __riscv_vle64_v_i64m8(var_93, 2);
+ vuint64m4_t var_47 = __riscv_vle64_v_u64m4(var_92, 1);
+ vuint64m1_t var_48 = __riscv_vle64_v_u64m1(var_91, 1);
+ vint16m1_t var_49 = __riscv_vle16_v_i16m1(var_90, 1);
+ vint16m1_t var_50 = __riscv_vle16_v_i16m1(var_89, 1);
+ vuint64m8_t var_51 = __riscv_vle64_v_u64m8(var_88, 13);
+ vuint64m8_t var_52 = __riscv_vle64_v_u64m8(var_87, 13);
+ vuint64m1_t var_55 = __riscv_vle64_v_u64m1(var_85, 1);
+ vuint64m1_t var_56 = __riscv_vle64_v_u64m1(var_84, 1);
+ vfloat32mf2_t var_57 = __riscv_vle32_v_f32mf2(var_83, 1);
+ vuint64m1_t var_58 = __riscv_vle64_v_u64m1(var_82, 1);
+ vuint64m1_t var_59 = __riscv_vle64_v_u64m1(var_81, 1);
+ vuint64m2_t var_60 = __riscv_vle64_v_u64m2(var_80, 4);
+ vuint64m8_t var_61 = __riscv_vle64_v_u64m8(var_79, 13);
+ vuint64m8_t var_62 = __riscv_vle64_v_u64m8(var_78, 13);
+ vuint64m8_t var_63 = __riscv_vle64_v_u64m8(var_77, 13);
+ vuint64m8_t var_64 = __riscv_vle64_v_u64m8(var_76, 13);
+ vint64m8_t var_65 = __riscv_vle64_v_i64m8(var_75, 2);
+ vuint64m8_t var_67 = __riscv_vle64_v_u64m8(var_73, 13);
+ vbool64_t var_24 = __riscv_vmsbc_vv_i16mf4_b64(var_26, var_27, 1);
+ vbool64_t var_23 = __riscv_vmseq_vv_i32mf2_b64(var_28, var_29, 1);
+ vbool4_t var_17 = __riscv_vmfeq_vv_f32m8_b4(var_30, var_31, 1);
+ vuint64m2_t var_16 = __riscv_vmv_s_x_u64m2_tu(var_32, var_70, 4);
+ vbool8_t var_33 = __riscv_vmadc_vv_u8m1_b8(var_37, var_38, 13);
+ vbool8_t var_13 = __riscv_vmadc_vx_u32m4_b8(var_42, var_71, 13);
+ vint64m8_t var_12 = __riscv_vnmsub_vv_i64m8(var_43, var_44, var_45, 2);
+ vbool16_t var_46 = __riscv_vmsle_vv_i16m1_b16(var_49, var_50, 1);
+ vbool64_t var_54 = __riscv_vmfle_vf_f32mf2_b64(var_57, var_68, 1);
+ uint64_t var_7 = __riscv_vmv_x_s_u64m2_u64(var_60);
+ uint64_t var_2 = __riscv_vmv_x_s_u64m2_u64(var_16);
+ vuint64m8_t var_5 = __riscv_vmaxu_vv_u64m8_mu(var_13, var_62, var_63, var_64, 13);
+ vint64m8_t var_4 = __riscv_vmacc_vx_i64m8(var_12, var_72, var_65, 2);
+ vuint64m8_t var_1 = __riscv_vmulhu_vx_u64m8_mu(var_13, var_5, var_67, var_2, 13);
+ vuint64m8_t var_0 = __riscv_vrsub_vx_u64m8(var_1, var_7, 13);
+ vint64m8_t var_3 = __riscv_vsll_vv_i64m8_mu(var_13, var_4, var_4, var_0, 2);
+ vuint64m8_t var_6 = __riscv_vsrl_vv_u64m8(var_0, var_61, 13);
+ vuint64m1_t var_8 = __riscv_vredand_vs_u64m8_u64m1_tum(var_13, var_58, var_0, var_59, 1);
+ __riscv_vse64_v_i64m8(var_74, var_3, 2);
+ vuint64m8_t var_10 = __riscv_vnmsub_vv_u64m8_mu(var_13, var_6, var_51, var_52, 13);
+ vuint64m8_t var_15 = __riscv_vssubu_vv_u64m8_mu(var_33, var_6, var_34, var_35, 13);
+ vuint64m1_t var_9 = __riscv_vadd_vv_u64m1_mu(var_54, var_8, var_55, var_56, 1);
+ vuint64m1_t var_11 = __riscv_vredxor_vs_u64m4_u64m1_tum(var_46, var_8, var_47, var_48, 1);
+ if(!check(var_74, var_129, var_130)) {cerr << "check 128 fails" << endl; return_value = 1;}
+ __riscv_vse64_v_u64m8(var_86, var_10, 13);
+ __riscv_vse64_v_u64m8(var_102, var_15, 13);
+ vbool64_t var_18 = __riscv_vmsgeu_vv_u64m1_b64_mu(var_23, var_24, var_9, var_25, 1);
+ vuint64m1_t var_14 = __riscv_vwredsumu_vs_u32m8_u64m1_tum(var_17, var_11, var_39, var_40, 1);
+ vuint8mf8_t var_19 = __riscv_vslideup_vx_u8mf8_mu(var_18, var_20, var_21, var_69, 1);
+ __riscv_vse64_v_u64m1(var_97, var_14, 1);
+ __riscv_vse8_v_u8mf8(var_113, var_19, 1);
+ if(!check(var_97, var_123, var_124)) {cerr << "check 122 fails" << endl; return_value = 1;}
+ return return_value;
+ }
new file mode 100644
@@ -0,0 +1,146 @@
+/* { dg-do compile { target { riscv_vector } } } */
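+/* Mixes vmv.s.x (tail-undisturbed) with vmacc/vnmsub/vnmsac and masked vector arithmetic on u64/i64 data.  */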
+
+#include<cmath>
+#include<iomanip>
+#include<iostream>
+#include"riscv_vector.h"
+ using std::cerr;
+ using std::endl;
+ using float32_t = float;
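+ /* f32(x) reinterprets a 32-bit unsigned integer as an IEEE-754 binary32 value, so the arrays below can encode exact float bit patterns.  */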
+ template<class T, class T2> constexpr T uint_to_float(T2 val) noexcept {
+ return *reinterpret_cast<T*>(&val);
+ }
+ constexpr const auto &f32(uint_to_float<float32_t, uint32_t>);
+ template<class T> struct To_uint {
+ };
+ template<class T> struct To_float {
+ };
+ /* Compare size bytes of computed results against the expected values.  */
+ template <class T> bool __attribute__((noinline)) check(const T *a, const T *b, size_t size) {
+ bool rv = true;
+ for (size_t i = 0; i < size / sizeof(T); ++i)
+ if (a[i] != b[i]) rv = false;
+ return rv;
+ }
+ int main() {
+ int return_value = 0;
+ size_t var_130 = 16u;
+ int64_t var_129 [] = {6407112605923540992, 7272647525046157312};
+ size_t var_124 = 8u;
+ uint64_t var_123 [] = {9906865658538243999u};
+ uint8_t var_115 [] = {110u};
+ uint8_t var_114 [] = {250u};
+ uint8_t var_113 [] = {85u};
+ uint64_t var_112 [] = {10471272766982500600u};
+ int16_t var_111 [] = {-2006};
+ int16_t var_110 [] = {-4378};
+ int32_t var_109 [] = {1647339684};
+ int32_t var_108 [] = {-45182706};
+ float32_t var_107 [] = {f32(3074604095u)};
+ float32_t var_106 [] = {f32(2164875881u)};
+ uint64_t var_105 [] = {9667821316341385629u, 8665853697871192080u, 2296727757870876501u, 9319690956375735270u};
+ uint64_t var_104 [] = {10229777523837770056u, 5058114244293053252u, 4016718152548898966u, 2756762132515514864u, 12979562465336598027u, 9351327142878884765u, 10169532824221885571u, 16684712895996566663u, 17729467819489264236u, 13842535705175483943u, 16071673669189979410u, 4743566331245526950u, 18376697588466642016u};
+ uint64_t var_103 [] = {2157995689846407707u, 13051063102191975696u, 667546917742091778u, 10978954860465027859u, 7512865698603064303u, 11412209018099318119u, 17230490434926376223u, 5511085494400459768u, 12021950472614880189u, 6439010322831869859u, 595725076462813420u, 16606087987356197472u, 17145752785601127360u};
+ uint64_t var_102 [] = {9301718537522199913u, 10819086860314557774u, 10656584420239303945u, 18162891878976053019u, 17582299882264864087u, 3506350464953941474u, 16501932931556013929u, 9988547110636159311u, 9718034997511172333u, 1092366368244583945u, 5109601230835787529u, 13831286862231834279u, 12761625194863065558u};
+ uint8_t var_101 [] = {143u, 239u, 79u, 62u, 119u, 129u, 138u, 31u, 52u, 39u, 14u, 196u, 78u};
+ uint8_t var_100 [] = {103u, 104u, 206u, 161u, 60u, 69u, 226u, 88u, 239u, 42u, 186u, 112u, 125u};
+ uint32_t var_99 [] = {3495075296u};
+ uint64_t var_98 [] = {9906865658538243999u};
+ uint64_t var_97 [] = {7694632992515967866u};
+ uint32_t var_96 [] = {4293619354u, 3967328371u, 2785433908u, 331120895u, 2375449208u, 1425357628u, 1153751515u, 2186320801u, 1880089354u, 2076227242u, 4237196230u, 2836455933u, 3097740105u};
+ int64_t var_95 [] = {-7108892398040540931, -2902837743657368167};
+ int64_t var_94 [] = {7911386467741268052, -8388898381530297241};
+ int64_t var_93 [] = {931229248179863779, -1624244528645905701};
+ uint64_t var_92 [] = {17340829577349396086u};
+ uint64_t var_91 [] = {18346545924178849585u};
+ int16_t var_90 [] = {-25624};
+ int16_t var_89 [] = {15974};
+ uint64_t var_88 [] = {11014950311344851928u, 5197895686424734690u, 9619221639658229002u, 6423828962551482482u, 3156046659732662734u, 1472617878308541895u, 11035320440097309916u, 12393707216814410666u, 8788430720741366090u, 217587670795736361u, 1034088785663124616u, 18335835928116953720u, 1219757139178322839u};
+ uint64_t var_87 [] = {11559190678994979473u, 6013183923535003445u, 9971558496525733899u, 2787678958601411480u, 5739078113805693938u, 2694895444893152828u, 7994463311770754650u, 13650164825622177945u, 579220147540012074u, 6498634454620420429u, 7041816877517723765u, 13345465999689538704u, 11472405609307956315u};
+ uint64_t var_86 [] = {7377984487966841588u, 1244521538013068582u, 5522888345332612590u, 8649552073822691714u, 5910835830871302969u, 6710091485639431753u, 205677334027990159u, 17448746121181070296u, 15084775211187351267u, 11608730292389478881u, 14526352387614949924u, 10741971681343338318u, 8417563342868988484u};
+ uint64_t var_85 [] = {1310761773208763493u};
+ uint64_t var_84 [] = {10122258205224262931u};
+ float32_t var_83 [] = {f32(2944350916u)};
+ uint64_t var_82 [] = {13530952249788556412u};
+ uint64_t var_81 [] = {2527479261213652452u};
+ uint64_t var_80 [] = {15804309544005635250u, 3788976741168456722u, 3002061457029235545u, 14726833968381313595u};
+ uint64_t var_79 [] = {13549762868574804401u, 13204174387864329136u, 18406577593008423761u, 4969511752337926994u, 15413644009768277692u, 14859789808630283526u, 13189951294894585154u, 14360526058145790575u, 2845974406889251674u, 8099633476915063883u, 16479120087725370936u, 7570588426265803252u, 8263600270781094814u};
+ uint64_t var_78 [] = {2382766674713339559u, 15396784267988302988u, 576799166167298943u, 10026375368356785750u, 14673786198559896308u, 17161962445212270786u, 14603586340041125952u, 17114221995192876065u, 16940195199015099700u, 9221412721401043987u, 11043806608938831424u, 11849880432889525171u, 13895008912398050346u};
+ uint64_t var_77 [] = {8588415007675966875u, 4269152311949827424u, 10552325710590486768u, 9144378016933111899u, 6425268388282000562u, 7844939888788970509u, 305876072217132961u, 2947493040686772403u, 7848958813978332337u, 14545434747904560508u, 14623044888673756144u, 16921902780147279842u, 12883762735999567891u};
+ uint64_t var_76 [] = {17585252785599735318u, 1289346800026352327u, 16392037194040934852u, 7130446253967916526u, 16384795498745159942u, 2768738621788241130u, 9379954951994933230u, 3811151817887162513u, 8078830653024888383u, 11400832315509777915u, 7900078021449525711u, 17739319150095105139u, 17865296949477776703u};
+ int64_t var_75 [] = {2136421353293283350, 3414837021317309556};
+ int64_t var_74 [] = {-2584452322456818966, 8445727102755491570};
+ uint64_t var_73 [] = {7421362057176036412u, 11834710639121801155u, 1879590864914770322u, 11518748137954439635u, 14445209116677294173u, 9714585140727960024u, 11636376935140013809u, 10242413799601869450u, 8639431125744255140u, 598337578025024214u, 5343291454742957618u, 8705203272278062278u, 17020841285497899380u};
+ int64_t var_72 = -7454502432211094080;
+ uint32_t var_71 = 611347429u;
+ uint64_t var_70 = 1089802931060859193u;
+ size_t var_69 = 0u;
+ float32_t var_68 = f32(829412060u);
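+ /* Unit-stride loads of the input arrays into vector operands.  */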
+ vuint8mf8_t var_20 = __riscv_vle8_v_u8mf8(var_115, 1);
+ vuint8mf8_t var_21 = __riscv_vle8_v_u8mf8(var_114, 1);
+ vuint64m1_t var_25 = __riscv_vle64_v_u64m1(var_112, 1);
+ vint16mf4_t var_26 = __riscv_vle16_v_i16mf4(var_111, 1);
+ vint16mf4_t var_27 = __riscv_vle16_v_i16mf4(var_110, 1);
+ vint32mf2_t var_28 = __riscv_vle32_v_i32mf2(var_109, 1);
+ vint32mf2_t var_29 = __riscv_vle32_v_i32mf2(var_108, 1);
+ vfloat32m8_t var_30 = __riscv_vle32_v_f32m8(var_107, 1);
+ vfloat32m8_t var_31 = __riscv_vle32_v_f32m8(var_106, 1);
+ vuint64m2_t var_32 = __riscv_vle64_v_u64m2(var_105, 4);
+ vuint64m8_t var_34 = __riscv_vle64_v_u64m8(var_104, 13);
+ vuint64m8_t var_35 = __riscv_vle64_v_u64m8(var_103, 13);
+ vuint8m1_t var_37 = __riscv_vle8_v_u8m1(var_101, 13);
+ vuint8m1_t var_38 = __riscv_vle8_v_u8m1(var_100, 13);
+ vuint32m8_t var_39 = __riscv_vle32_v_u32m8(var_99, 1);
+ vuint64m1_t var_40 = __riscv_vle64_v_u64m1(var_98, 1);
+ vuint32m4_t var_42 = __riscv_vle32_v_u32m4(var_96, 13);
+ vint64m8_t var_43 = __riscv_vle64_v_i64m8(var_95, 2);
+ vint64m8_t var_44 = __riscv_vle64_v_i64m8(var_94, 2);
+ vint64m8_t var_45 = __riscv_vle64_v_i64m8(var_93, 2);
+ vuint64m4_t var_47 = __riscv_vle64_v_u64m4(var_92, 1);
+ vuint64m1_t var_48 = __riscv_vle64_v_u64m1(var_91, 1);
+ vint16m1_t var_49 = __riscv_vle16_v_i16m1(var_90, 1);
+ vint16m1_t var_50 = __riscv_vle16_v_i16m1(var_89, 1);
+ vuint64m8_t var_51 = __riscv_vle64_v_u64m8(var_88, 13);
+ vuint64m8_t var_52 = __riscv_vle64_v_u64m8(var_87, 13);
+ vuint64m1_t var_55 = __riscv_vle64_v_u64m1(var_85, 1);
+ vuint64m1_t var_56 = __riscv_vle64_v_u64m1(var_84, 1);
+ vfloat32mf2_t var_57 = __riscv_vle32_v_f32mf2(var_83, 1);
+ vuint64m1_t var_58 = __riscv_vle64_v_u64m1(var_82, 1);
+ vuint64m1_t var_59 = __riscv_vle64_v_u64m1(var_81, 1);
+ vuint64m2_t var_60 = __riscv_vle64_v_u64m2(var_80, 4);
+ vuint64m8_t var_61 = __riscv_vle64_v_u64m8(var_79, 13);
+ vuint64m8_t var_62 = __riscv_vle64_v_u64m8(var_78, 13);
+ vuint64m8_t var_63 = __riscv_vle64_v_u64m8(var_77, 13);
+ vuint64m8_t var_64 = __riscv_vle64_v_u64m8(var_76, 13);
+ vint64m8_t var_65 = __riscv_vle64_v_i64m8(var_75, 2);
+ vuint64m8_t var_67 = __riscv_vle64_v_u64m8(var_73, 13);
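+ /* Mask, arithmetic, and reduction chain; the checked results are stored to var_74 and var_97 and compared against the expected arrays.  */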
+ vbool64_t var_24 = __riscv_vmsbc_vv_i16mf4_b64(var_26, var_27, 1);
+ vbool64_t var_23 = __riscv_vmseq_vv_i32mf2_b64(var_28, var_29, 1);
+ vbool4_t var_17 = __riscv_vmfeq_vv_f32m8_b4(var_30, var_31, 1);
+ vuint64m2_t var_16 = __riscv_vmv_s_x_u64m2_tu(var_32, var_70, 4);
+ vbool8_t var_33 = __riscv_vmadc_vv_u8m1_b8(var_37, var_38, 13);
+ vbool8_t var_13 = __riscv_vmadc_vx_u32m4_b8(var_42, var_71, 13);
+ vint64m8_t var_12 = __riscv_vnmsub_vv_i64m8(var_43, var_44, var_45, 2);
+ vbool16_t var_46 = __riscv_vmsle_vv_i16m1_b16(var_49, var_50, 1);
+ vbool64_t var_54 = __riscv_vmfle_vf_f32mf2_b64(var_57, var_68, 1);
+ uint64_t var_7 = __riscv_vmv_x_s_u64m2_u64(var_60);
+ uint64_t var_2 = __riscv_vmv_x_s_u64m2_u64(var_16);
+ vuint64m8_t var_5 = __riscv_vmaxu_vv_u64m8_mu(var_13, var_62, var_63, var_64, 13);
+ vint64m8_t var_4 = __riscv_vmacc_vx_i64m8(var_12, var_72, var_65, 2);
+ vuint64m8_t var_1 = __riscv_vmulhu_vx_u64m8_mu(var_13, var_5, var_67, var_2, 13);
+ vuint64m8_t var_0 = __riscv_vrsub_vx_u64m8(var_1, var_7, 13);
+ vint64m8_t var_3 = __riscv_vsll_vv_i64m8_mu(var_13, var_4, var_4, var_0, 2);
+ vuint64m8_t var_6 = __riscv_vsrl_vv_u64m8(var_0, var_61, 13);
+ vuint64m1_t var_8 = __riscv_vredand_vs_u64m8_u64m1_tum(var_13, var_58, var_0, var_59, 1);
+ __riscv_vse64_v_i64m8(var_74, var_3, 2);
+ vuint64m8_t var_10 = __riscv_vnmsac_vv_u64m8_mu(var_13, var_6, var_51, var_52, 13);
+ vuint64m8_t var_15 = __riscv_vssubu_vv_u64m8_mu(var_33, var_6, var_34, var_35, 13);
+ vuint64m1_t var_9 = __riscv_vadd_vv_u64m1_mu(var_54, var_8, var_55, var_56, 1);
+ vuint64m1_t var_11 = __riscv_vredxor_vs_u64m4_u64m1_tum(var_46, var_8, var_47, var_48, 1);
+ if(!check(var_74, var_129, var_130)) {cerr << "check 128 fails" << endl; return_value = 1;}
+ __riscv_vse64_v_u64m8(var_86, var_10, 13);
+ __riscv_vse64_v_u64m8(var_102, var_15, 13);
+ vbool64_t var_18 = __riscv_vmsgeu_vv_u64m1_b64_mu(var_23, var_24, var_9, var_25, 1);
+ vuint64m1_t var_14 = __riscv_vwredsumu_vs_u32m8_u64m1_tum(var_17, var_11, var_39, var_40, 1);
+ vuint8mf8_t var_19 = __riscv_vslideup_vx_u8mf8_mu(var_18, var_20, var_21, var_69, 1);
+ __riscv_vse64_v_u64m1(var_97, var_14, 1);
+ __riscv_vse8_v_u8mf8(var_113, var_19, 1);
+ if(!check(var_97, var_123, var_124)) {cerr << "check 122 fails" << endl; return_value = 1;}
+ return return_value;
+ }