@@ -464,6 +464,47 @@ public:
}
};
+/* Implements vaadd/vasub/vsmul/vssra/vssrl. */
+template<int UNSPEC>
+class sat_op : public function_base
+{
+public:
+ rtx expand (function_expander &e) const override
+ {
+ switch (e.op_info->op)
+ {
+ case OP_TYPE_vx:
+ return e.use_exact_insn (
+ code_for_pred_scalar (UNSPEC, e.vector_mode ()));
+ case OP_TYPE_vv:
+ return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
+ default:
+ gcc_unreachable ();
+ }
+ }
+};
+
+/* Implements vnclip/vnclipu. */
+template<int UNSPEC>
+class vnclip : public function_base
+{
+public:
+ rtx expand (function_expander &e) const override
+ {
+ switch (e.op_info->op)
+ {
+ case OP_TYPE_wx:
+ return e.use_exact_insn (
+ code_for_pred_narrow_clip_scalar (UNSPEC, e.vector_mode ()));
+ case OP_TYPE_wv:
+ return e.use_exact_insn (
+ code_for_pred_narrow_clip (UNSPEC, e.vector_mode ()));
+ default:
+ gcc_unreachable ();
+ }
+ }
+};
+
static CONSTEXPR const vsetvl<false> vsetvl_obj;
static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
@@ -535,6 +576,15 @@ static CONSTEXPR const binop<SS_PLUS> vsadd_obj;
static CONSTEXPR const binop<SS_MINUS> vssub_obj;
static CONSTEXPR const binop<US_PLUS> vsaddu_obj;
static CONSTEXPR const binop<US_MINUS> vssubu_obj;
+static CONSTEXPR const sat_op<UNSPEC_VAADDU> vaaddu_obj;
+static CONSTEXPR const sat_op<UNSPEC_VAADD> vaadd_obj;
+static CONSTEXPR const sat_op<UNSPEC_VASUBU> vasubu_obj;
+static CONSTEXPR const sat_op<UNSPEC_VASUB> vasub_obj;
+static CONSTEXPR const sat_op<UNSPEC_VSMUL> vsmul_obj;
+static CONSTEXPR const sat_op<UNSPEC_VSSRL> vssrl_obj;
+static CONSTEXPR const sat_op<UNSPEC_VSSRA> vssra_obj;
+static CONSTEXPR const vnclip<UNSPEC_VNCLIP> vnclip_obj;
+static CONSTEXPR const vnclip<UNSPEC_VNCLIPU> vnclipu_obj;
/* Declare the function base NAME, pointing it to an instance
of class <NAME>_obj. */
@@ -612,5 +662,14 @@ BASE (vsadd)
BASE (vssub)
BASE (vsaddu)
BASE (vssubu)
+BASE (vaadd)
+BASE (vasub)
+BASE (vaaddu)
+BASE (vasubu)
+BASE (vsmul)
+BASE (vssra)
+BASE (vssrl)
+BASE (vnclip)
+BASE (vnclipu)
} // end namespace riscv_vector
@@ -95,7 +95,15 @@ extern const function_base *const vsadd;
extern const function_base *const vssub;
extern const function_base *const vsaddu;
extern const function_base *const vssubu;
-
+extern const function_base *const vaadd;
+extern const function_base *const vasub;
+extern const function_base *const vaaddu;
+extern const function_base *const vasubu;
+extern const function_base *const vsmul;
+extern const function_base *const vssra;
+extern const function_base *const vssrl;
+extern const function_base *const vnclip;
+extern const function_base *const vnclipu;
}
} // end namespace riscv_vector
@@ -166,5 +166,23 @@ DEF_RVV_FUNCTION (vsadd, alu, full_preds, i_vvx_ops)
DEF_RVV_FUNCTION (vssub, alu, full_preds, i_vvx_ops)
DEF_RVV_FUNCTION (vsaddu, alu, full_preds, u_vvx_ops)
DEF_RVV_FUNCTION (vssubu, alu, full_preds, u_vvx_ops)
+DEF_RVV_FUNCTION (vaadd, alu, full_preds, i_vvv_ops)
+DEF_RVV_FUNCTION (vasub, alu, full_preds, i_vvv_ops)
+DEF_RVV_FUNCTION (vaaddu, alu, full_preds, u_vvv_ops)
+DEF_RVV_FUNCTION (vasubu, alu, full_preds, u_vvv_ops)
+DEF_RVV_FUNCTION (vsmul, alu, full_preds, i_vvv_ops)
+DEF_RVV_FUNCTION (vssra, alu, full_preds, i_shift_vvv_ops)
+DEF_RVV_FUNCTION (vssrl, alu, full_preds, u_shift_vvv_ops)
+DEF_RVV_FUNCTION (vaadd, alu, full_preds, i_vvx_ops)
+DEF_RVV_FUNCTION (vasub, alu, full_preds, i_vvx_ops)
+DEF_RVV_FUNCTION (vaaddu, alu, full_preds, u_vvx_ops)
+DEF_RVV_FUNCTION (vasubu, alu, full_preds, u_vvx_ops)
+DEF_RVV_FUNCTION (vsmul, alu, full_preds, i_vvx_ops)
+DEF_RVV_FUNCTION (vssra, alu, full_preds, i_shift_vvx_ops)
+DEF_RVV_FUNCTION (vssrl, alu, full_preds, u_shift_vvx_ops)
+DEF_RVV_FUNCTION (vnclipu, narrow_alu, full_preds, u_narrow_shift_vwv_ops)
+DEF_RVV_FUNCTION (vnclip, narrow_alu, full_preds, i_narrow_shift_vwv_ops)
+DEF_RVV_FUNCTION (vnclipu, narrow_alu, full_preds, u_narrow_shift_vwx_ops)
+DEF_RVV_FUNCTION (vnclip, narrow_alu, full_preds, i_narrow_shift_vwx_ops)
#undef DEF_RVV_FUNCTION
@@ -39,6 +39,16 @@
UNSPEC_VMADC
UNSPEC_VMSBC
UNSPEC_OVERFLOW
+
+ UNSPEC_VNCLIP
+ UNSPEC_VNCLIPU
+ UNSPEC_VSSRL
+ UNSPEC_VSSRA
+ UNSPEC_VAADDU
+ UNSPEC_VAADD
+ UNSPEC_VASUBU
+ UNSPEC_VASUB
+ UNSPEC_VSMUL
])
(define_mode_iterator V [
@@ -264,11 +274,31 @@
(define_int_iterator VMULH [UNSPEC_VMULHS UNSPEC_VMULHU UNSPEC_VMULHSU])
+(define_int_iterator VNCLIP [UNSPEC_VNCLIP UNSPEC_VNCLIPU])
+
+(define_int_iterator VSAT_OP [UNSPEC_VAADDU UNSPEC_VAADD
+ UNSPEC_VASUBU UNSPEC_VASUB UNSPEC_VSMUL
+ UNSPEC_VSSRL UNSPEC_VSSRA])
+
+(define_int_iterator VSAT_ARITH_OP [UNSPEC_VAADDU UNSPEC_VAADD
+ UNSPEC_VASUBU UNSPEC_VASUB UNSPEC_VSMUL])
+(define_int_iterator VSAT_SHIFT_OP [UNSPEC_VSSRL UNSPEC_VSSRA])
+
(define_int_attr order [
(UNSPEC_ORDERED "o") (UNSPEC_UNORDERED "u")
])
-(define_int_attr v_su [(UNSPEC_VMULHS "") (UNSPEC_VMULHU "u") (UNSPEC_VMULHSU "su")])
+(define_int_attr v_su [(UNSPEC_VMULHS "") (UNSPEC_VMULHU "u") (UNSPEC_VMULHSU "su")
+ (UNSPEC_VNCLIP "") (UNSPEC_VNCLIPU "u")])
+(define_int_attr sat_op [(UNSPEC_VAADDU "aaddu") (UNSPEC_VAADD "aadd")
+ (UNSPEC_VASUBU "asubu") (UNSPEC_VASUB "asub")
+ (UNSPEC_VSMUL "smul") (UNSPEC_VSSRL "ssrl")
+ (UNSPEC_VSSRA "ssra")])
+(define_int_attr sat_insn_type [(UNSPEC_VAADDU "vaalu") (UNSPEC_VAADD "vaalu")
+ (UNSPEC_VASUBU "vaalu") (UNSPEC_VASUB "vaalu")
+ (UNSPEC_VSMUL "vsmul") (UNSPEC_VSSRL "vsshift")
+ (UNSPEC_VSSRA "vsshift") (UNSPEC_VNCLIP "vnclip")
+ (UNSPEC_VNCLIPU "vnclip")])
(define_code_iterator any_int_binop [plus minus and ior xor ashift ashiftrt lshiftrt
smax umax smin umin mult div udiv mod umod
@@ -145,7 +145,8 @@
(cond [(eq_attr "type" "vimov,vfmov,vldux,vldox,vstux,vstox,\
vialu,vshift,vicmp,vimul,vidiv,vsalu,\
vext,viwalu,viwmul,vicalu,vnshift,\
- vimuladd,vimerge")
+ vimuladd,vimerge,vaalu,vsmul,vsshift,\
+ vnclip")
(const_int INVALID_ATTRIBUTE)
(eq_attr "mode" "VNx1QI,VNx1BI")
(symbol_ref "riscv_vector::get_ratio(E_VNx1QImode)")
@@ -197,7 +198,7 @@
(define_attr "merge_op_idx" ""
(cond [(eq_attr "type" "vlde,vimov,vfmov,vldm,vlds,vmalu,vldux,vldox,\
vialu,vshift,vicmp,vimul,vidiv,vsalu,vext,viwalu,\
- viwmul,vnshift,vimuladd")
+ viwmul,vnshift,vimuladd,vaalu,vsmul,vsshift,vnclip")
(const_int 2)
(eq_attr "type" "vimerge")
@@ -218,7 +219,8 @@
(const_int 4))
(eq_attr "type" "vldux,vldox,vialu,vshift,vicmp,vimul,vidiv,vsalu,\
- viwalu,viwmul,vnshift,vimuladd,vimerge")
+ viwalu,viwmul,vnshift,vimuladd,vimerge,vaalu,vsmul,\
+ vsshift,vnclip")
(const_int 5)]
(const_int INVALID_ATTRIBUTE)))
@@ -235,7 +237,8 @@
(symbol_ref "riscv_vector::get_ta(operands[5])"))
(eq_attr "type" "vldux,vldox,vialu,vshift,vicmp,vimul,vidiv,vsalu,\
- viwalu,viwmul,vnshift,vimuladd,vimerge")
+ viwalu,viwmul,vnshift,vimuladd,vimerge,vaalu,vsmul,\
+ vsshift,vnclip")
(symbol_ref "riscv_vector::get_ta(operands[6])")]
(const_int INVALID_ATTRIBUTE)))
@@ -252,7 +255,8 @@
(symbol_ref "riscv_vector::get_ma(operands[6])"))
(eq_attr "type" "vldux,vldox,vialu,vshift,vicmp,vimul,vidiv,vsalu,\
- viwalu,viwmul,vnshift,vimuladd")
+ viwalu,viwmul,vnshift,vimuladd,vaalu,vsmul,vsshift,\
+ vnclip")
(symbol_ref "riscv_vector::get_ma(operands[7])")]
(const_int INVALID_ATTRIBUTE)))
@@ -271,7 +275,8 @@
(symbol_ref "INTVAL (operands[7])"))
(eq_attr "type" "vldux,vldox,vialu,vshift,vicmp,vimul,vidiv,vsalu,\
- viwalu,viwmul,vnshift,vimuladd")
+ viwalu,viwmul,vnshift,vimuladd,vaalu,vsmul,vsshift,\
+ vnclip")
(symbol_ref "INTVAL (operands[8])")
(eq_attr "type" "vstux,vstox")
(symbol_ref "INTVAL (operands[5])")]
@@ -860,7 +865,7 @@
(match_operand:VI_D 1 "vector_merge_operand")))]
"TARGET_VECTOR"
{
- if (riscv_vector::neg_simm5_p (operands[3]))
+ if (riscv_vector::simm5_p (operands[3]))
operands[3] = force_reg (<VEL>mode, operands[3]);
else if (!TARGET_64BIT)
{
@@ -1476,7 +1481,10 @@
}
}
else
- operands[4] = force_reg (<VEL>mode, operands[4]);
+ {
+ if (!rtx_equal_p (operands[4], const0_rtx))
+ operands[4] = force_reg (<VEL>mode, operands[4]);
+ }
})
(define_insn "*pred_<optab><mode>_scalar"
@@ -1566,7 +1574,10 @@
}
}
else
- operands[4] = force_reg (<VEL>mode, operands[4]);
+ {
+ if (!rtx_equal_p (operands[4], const0_rtx))
+ operands[4] = force_reg (<VEL>mode, operands[4]);
+ }
})
(define_insn "*pred_<optab><mode>_scalar"
@@ -2025,7 +2036,10 @@
}
}
else
- operands[4] = force_reg (<VEL>mode, operands[4]);
+ {
+ if (!rtx_equal_p (operands[4], const0_rtx))
+ operands[4] = force_reg (<VEL>mode, operands[4]);
+ }
})
(define_insn "*pred_mulh<v_su><mode>_scalar"
@@ -2041,7 +2055,7 @@
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VFULLI_D
[(vec_duplicate:VFULLI_D
- (match_operand:<VEL> 4 "register_operand" " rJ, rJ"))
+ (match_operand:<VEL> 4 "reg_or_0_operand" " rJ, rJ"))
(match_operand:VFULLI_D 3 "register_operand" " vr, vr")] VMULH)
(match_operand:VFULLI_D 2 "vector_merge_operand" "0vu,0vu")))]
"TARGET_VECTOR"
@@ -3183,3 +3197,205 @@
(set (attr "ta") (symbol_ref "riscv_vector::get_ta(operands[5])"))
(set (attr "ma") (symbol_ref "riscv_vector::get_ma(operands[6])"))
(set (attr "avl_type") (symbol_ref "INTVAL (operands[7])"))])
+
+;; -------------------------------------------------------------------------------
+;; ---- Predicated fixed-point operations
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 12.2 Vector Single-Width Averaging Add and Subtract
+;; - 12.3 Vector Single-Width Fractional Multiply with Rounding and Saturation
+;; - 12.5 Vector Single-Width Scaling Shift Instructions
+;; - 12.6 Vector Narrowing Fixed-Point Clip Instructions
+;; -------------------------------------------------------------------------------
+
+(define_insn "@pred_<sat_op><mode>"
+ [(set (match_operand:VI 0 "register_operand" "=vd, vr")
+ (if_then_else:VI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VI
+ [(match_operand:VI 3 "register_operand" " vr, vr")
+ (match_operand:VI 4 "register_operand" " vr, vr")] VSAT_OP)
+ (match_operand:VI 2 "vector_merge_operand" "0vu,0vu")))]
+ "TARGET_VECTOR"
+ "v<sat_op>.vv\t%0,%3,%4%p1"
+ [(set_attr "type" "<sat_insn_type>")
+ (set_attr "mode" "<MODE>")])
+
+;; Handle GET_MODE_INNER (mode) = QImode, HImode, SImode.
+(define_insn "@pred_<sat_op><mode>_scalar"
+ [(set (match_operand:VI_QHS 0 "register_operand" "=vd, vr")
+ (if_then_else:VI_QHS
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VI_QHS
+ [(match_operand:VI_QHS 3 "register_operand" " vr, vr")
+ (match_operand:<VEL> 4 "reg_or_0_operand" " rJ, rJ")] VSAT_ARITH_OP)
+ (match_operand:VI_QHS 2 "vector_merge_operand" "0vu,0vu")))]
+ "TARGET_VECTOR"
+ "v<sat_op>.vx\t%0,%3,%z4%p1"
+ [(set_attr "type" "<sat_insn_type>")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_<sat_op><mode>_scalar"
+ [(set (match_operand:VI 0 "register_operand" "=vd, vr")
+ (if_then_else:VI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VI
+ [(match_operand:VI 3 "register_operand" " vr, vr")
+ (match_operand 4 "pmode_reg_or_uimm5_operand" " rK, rK")] VSAT_SHIFT_OP)
+ (match_operand:VI 2 "vector_merge_operand" "0vu,0vu")))]
+ "TARGET_VECTOR"
+ "v<sat_op>.v%o4\t%0,%3,%4%p1"
+ [(set_attr "type" "<sat_insn_type>")
+ (set_attr "mode" "<MODE>")])
+
+;; Handle GET_MODE_INNER (mode) = DImode. We need to split them since
+;; we need to deal with SEW = 64 in RV32 system.
+(define_expand "@pred_<sat_op><mode>_scalar"
+ [(set (match_operand:VI_D 0 "register_operand")
+ (if_then_else:VI_D
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand")
+ (match_operand 5 "vector_length_operand")
+ (match_operand 6 "const_int_operand")
+ (match_operand 7 "const_int_operand")
+ (match_operand 8 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VI_D
+ [(match_operand:VI_D 3 "register_operand")
+ (match_operand:<VEL> 4 "reg_or_int_operand")] VSAT_ARITH_OP)
+ (match_operand:VI_D 2 "vector_merge_operand")))]
+ "TARGET_VECTOR"
+ {
+ if (!TARGET_64BIT)
+ {
+ rtx v = gen_reg_rtx (<MODE>mode);
+
+ if (riscv_vector::simm32_p (operands[4]))
+ {
+ if (!rtx_equal_p (operands[4], const0_rtx))
+ operands[4] = force_reg (Pmode, operands[4]);
+ operands[4] = gen_rtx_SIGN_EXTEND (<VEL>mode, operands[4]);
+ }
+ else
+ {
+ if (CONST_INT_P (operands[4]))
+ operands[4] = force_reg (<VEL>mode, operands[4]);
+
+ riscv_vector::emit_nonvlmax_op (code_for_pred_broadcast (<MODE>mode),
+ v, operands[4], operands[5], <VM>mode);
+ emit_insn (gen_pred_<sat_op><mode> (operands[0], operands[1],
+ operands[2], operands[3], v, operands[5],
+ operands[6], operands[7], operands[8]));
+ DONE;
+ }
+ }
+ else
+ {
+ if (!rtx_equal_p (operands[4], const0_rtx))
+ operands[4] = force_reg (<VEL>mode, operands[4]);
+ }
+ })
+
+(define_insn "*pred_<sat_op><mode>_scalar"
+ [(set (match_operand:VI_D 0 "register_operand" "=vd, vr")
+ (if_then_else:VI_D
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VI_D
+ [(match_operand:VI_D 3 "register_operand" " vr, vr")
+ (match_operand:<VEL> 4 "reg_or_0_operand" " rJ, rJ")] VSAT_ARITH_OP)
+ (match_operand:VI_D 2 "vector_merge_operand" "0vu,0vu")))]
+ "TARGET_VECTOR"
+ "v<sat_op>.vx\t%0,%3,%z4%p1"
+ [(set_attr "type" "<sat_insn_type>")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*pred_<sat_op><mode>_extended_scalar"
+ [(set (match_operand:VI_D 0 "register_operand" "=vd, vr")
+ (if_then_else:VI_D
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VI_D
+ [(match_operand:VI_D 3 "register_operand" " vr, vr")
+ (sign_extend:<VEL>
+ (match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ"))] VSAT_ARITH_OP)
+ (match_operand:VI_D 2 "vector_merge_operand" "0vu,0vu")))]
+ "TARGET_VECTOR"
+ "v<sat_op>.vx\t%0,%3,%z4%p1"
+ [(set_attr "type" "<sat_insn_type>")
+ (set_attr "mode" "<MODE>")])
+
+;; CLIP
+(define_insn "@pred_narrow_clip<v_su><mode>"
+ [(set (match_operand:<V_DOUBLE_TRUNC> 0 "register_operand" "=vd, vr, &vr, vd, vr, &vr")
+ (if_then_else:<V_DOUBLE_TRUNC>
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1, vm,Wc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:<V_DOUBLE_TRUNC>
+ [(match_operand:VWEXTI 3 "register_operand" " 0, 0, vr, 0, 0, vr")
+ (match_operand:<V_DOUBLE_TRUNC> 4 "vector_shift_operand" " vr, vr, vr, vk, vk, vk")] VNCLIP)
+ (match_operand:<V_DOUBLE_TRUNC> 2 "vector_merge_operand" "0vu,0vu, 0vu,0vu,0vu, 0vu")))]
+ "TARGET_VECTOR"
+ "vnclip<v_su>.w%o4\t%0,%3,%v4%p1"
+ [(set_attr "type" "vnclip")
+ (set_attr "mode" "<V_DOUBLE_TRUNC>")])
+
+(define_insn "@pred_narrow_clip<v_su><mode>_scalar"
+ [(set (match_operand:<V_DOUBLE_TRUNC> 0 "register_operand" "=vd, vr, &vr, vd, vr, &vr")
+ (if_then_else:<V_DOUBLE_TRUNC>
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vmWc1, vm,Wc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:<V_DOUBLE_TRUNC>
+ [(match_operand:VWEXTI 3 "register_operand" " 0, 0, vr, 0, 0, vr")
+ (match_operand 4 "pmode_reg_or_uimm5_operand" " r, r, r, K, K, K")] VNCLIP)
+ (match_operand:<V_DOUBLE_TRUNC> 2 "vector_merge_operand" "0vu,0vu, 0vu,0vu,0vu, 0vu")))]
+ "TARGET_VECTOR"
+ "vnclip<v_su>.w%o4\t%0,%3,%4%p1"
+ [(set_attr "type" "vnclip")
+ (set_attr "mode" "<V_DOUBLE_TRUNC>")])