@@ -128,6 +128,7 @@ static const riscv_implied_info_t riscv_implied_info[] =
{"zvknha", "v"},
{"zvknhb", "v"},
{"zvksed", "v"},
+ {"zvksh", "v"},
{"zfh", "zfhmin"},
{"zfhmin", "f"},
@@ -2302,8 +2302,9 @@ public:
}
};
-/* Implements vaeskf2. */
-class vaeskf2 : public function_base
+/* Implements vaeskf2/vsm3c. */
+template<int UNSPEC>
+class vaeskf2_vsm3c : public function_base
{
public:
bool apply_mask_policy_p () const override { return false; }
@@ -2312,7 +2313,20 @@ public:
rtx expand (function_expander &e) const override
{
- return e.use_exact_insn (code_for_pred_vaeskf2_scalar (e.vector_mode ()));
+ return e.use_exact_insn (code_for_pred_vi_nomaskedoff_scalar (UNSPEC, e.vector_mode ()));
+ }
+};
+
+/* Implements vsm3me. */
+class vsm3me : public function_base
+{
+public:
+ bool apply_mask_policy_p () const override { return false; }
+ bool use_mask_predication_p () const override { return false; }
+
+ rtx expand (function_expander &e) const override
+ {
+ return e.use_exact_insn (code_for_pred_vsm3me (e.vector_mode ()));
}
};
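
A note on the split above: vaeskf2 and vsm3c share the templated
vaeskf2_vsm3c expander because both are destructive .vi forms differing
only in the UNSPEC, while vsm3me gets its own class since it is a plain
.vv form with a merge operand.  A minimal user-level sketch in C of the
two new intrinsics this wires up (names and signatures as exercised by
the tests further down):

    #include <riscv_vector.h>

    /* One SM3 step: expand the message schedule with vsm3me.vv, then run
       one round group of the compression function with vsm3c.vi (the
       immediate selects the round group).  */
    vuint32m1_t
    sm3_step (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl)
    {
      vuint32m1_t w = __riscv_vsm3me_vv_u32m1 (vs2, vs1, vl);
      return __riscv_vsm3c_vi_u32m1 (vd, w, 0, vl);
    }
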
@@ -2593,12 +2607,14 @@ static CONSTEXPR const crypto_vv<UNSPEC_VAESDF> vaesdf_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESDM> vaesdm_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESZ> vaesz_obj;
static CONSTEXPR const crypto_vi<UNSPEC_VAESKF1> vaeskf1_obj;
-static CONSTEXPR const vaeskf2 vaeskf2_obj;
+static CONSTEXPR const vaeskf2_vsm3c<UNSPEC_VAESKF2> vaeskf2_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VSHA2MS> vsha2ms_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VSHA2CH> vsha2ch_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VSHA2CL> vsha2cl_obj;
static CONSTEXPR const crypto_vi<UNSPEC_VSM4K> vsm4k_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VSM4R> vsm4r_obj;
+static CONSTEXPR const vsm3me vsm3me_obj;
+static CONSTEXPR const vaeskf2_vsm3c<UNSPEC_VSM3C> vsm3c_obj;
/* Declare the function base NAME, pointing it to an instance
of class <NAME>_obj. */
@@ -2887,4 +2903,6 @@ BASE (vsha2ch)
BASE (vsha2cl)
BASE (vsm4k)
BASE (vsm4r)
+BASE (vsm3me)
+BASE (vsm3c)
} // end namespace riscv_vector
@@ -306,6 +306,8 @@ extern const function_base *const vsha2ch;
extern const function_base *const vsha2cl;
extern const function_base *const vsm4k;
extern const function_base *const vsm4r;
+extern const function_base *const vsm3me;
+extern const function_base *const vsm3c;
}
} // end namespace riscv_vector
@@ -1009,7 +1009,7 @@ struct zvbb_zvbc_def : public build_base
}
};
-/* vghsh/vgmul/vaes* class. */
+/* vghsh/vgmul/vsha2ms/vsha2ch/vsha2cl/vsm3me/vaes* class. */
struct crypto_vv_def : public build_base
{
char *get_name (function_builder &b, const function_instance &instance,
@@ -1019,13 +1019,15 @@ struct crypto_vv_def : public build_base
if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
return nullptr;
b.append_base_name (instance.base_name);
- /* There is no op_type name in vghsh/vgmul/vaesz/vsha2ms/vsha2ch/vsha2cl overloaded intrinsic */
+  /* There is no op_type name in vghsh/vgmul/vaesz/vsha2ms/vsha2ch/vsha2cl/
+     vsm3me overloaded intrinsics */
if (!((strcmp (instance.base_name, "vghsh") == 0
|| strcmp (instance.base_name, "vgmul") == 0
|| strcmp (instance.base_name, "vaesz") == 0
|| strcmp (instance.base_name, "vsha2ms") == 0
|| strcmp (instance.base_name, "vsha2ch") == 0
- || strcmp (instance.base_name, "vsha2cl") == 0)
+ || strcmp (instance.base_name, "vsha2cl") == 0
+ || strcmp (instance.base_name, "vsm3me") == 0)
&& overloaded_p))
b.append_name (operand_suffixes[instance.op_info->op]);
if (!overloaded_p)
@@ -1050,7 +1052,7 @@ struct crypto_vv_def : public build_base
}
};
-/* vaeskf1/vaeskf2/vsm4k class. */
+/* vaeskf1/vaeskf2/vsm4k/vsm3c class. */
struct crypto_vi_def : public build_base
{
char *get_name (function_builder &b, const function_instance &instance,
@@ -20,5 +20,6 @@ AVAIL (zvkned, TARGET_ZVKNED)
AVAIL (zvknha_or_zvknhb, TARGET_ZVKNHA || TARGET_ZVKNHB)
AVAIL (zvknhb, TARGET_ZVKNHB)
AVAIL (zvksed, TARGET_ZVKSED)
+AVAIL (zvksh, TARGET_ZVKSH)
}
#endif
@@ -72,4 +72,7 @@ DEF_VECTOR_CRYPTO_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew
DEF_VECTOR_CRYPTO_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x2_ops, zvksed)
DEF_VECTOR_CRYPTO_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x4_ops, zvksed)
DEF_VECTOR_CRYPTO_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x8_ops, zvksed)
-DEF_VECTOR_CRYPTO_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops, zvksed)
\ No newline at end of file
+DEF_VECTOR_CRYPTO_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops, zvksed)
+// Zvksh
+DEF_VECTOR_CRYPTO_FUNCTION (vsm3me, crypto_vv, none_tu_preds, u_vvv_crypto_sew32_ops, zvksh)
+DEF_VECTOR_CRYPTO_FUNCTION (vsm3c, crypto_vi, none_tu_preds, u_vvv_size_crypto_sew32_ops, zvksh)
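
The two new DEF_VECTOR_CRYPTO_FUNCTION rows register vsm3me through the
crypto_vv shape (two vector sources) and vsm3c through crypto_vi (vector
source plus an immediate, hence the _size_ op_info).  A hedged sketch of
the non-overloaded prototypes this produces, with parameter types written
as the tests below use them:

    vuint32m1_t __riscv_vsm3me_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1,
                                         size_t vl);
    vuint32m1_t __riscv_vsm3c_vi_u32m1 (vuint32m1_t vd, vuint32m1_t vs2,
                                        size_t uimm, size_t vl);
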
@@ -454,6 +454,8 @@
;; vsha2cl crypto vector SHA-2 two rounds of compression instructions
;; vsm4k crypto vector SM4 KeyExpansion instructions
;; vsm4r crypto vector SM4 Rounds instructions
+;; vsm3me crypto vector SM3 Message Expansion instructions
+;; vsm3c crypto vector SM3 Compression instructions
(define_attr "type"
"unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore,
mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
@@ -475,7 +477,7 @@
vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,
vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vrol,vror,vwsll,
vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,vaeskf1,vaeskf2,vaesz,
- vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r"
+ vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c"
(cond [(eq_attr "got" "load") (const_string "load")
;; If a doubleword move uses these expensive instructions,
@@ -37,6 +37,8 @@
UNSPEC_VSM4R
UNSPEC_VSM4RVV
UNSPEC_VSM4RVS
+ UNSPEC_VSM3ME
+ UNSPEC_VSM3C
])
(define_int_attr ror_rol [(UNSPEC_VROL "rol") (UNSPEC_VROR "ror")])
@@ -59,6 +61,8 @@
(define_int_attr vi_ins_name [(UNSPEC_VAESKF1 "aeskf1") (UNSPEC_VSM4K "sm4k")])
+(define_int_attr vi_ins1_name [(UNSPEC_VAESKF2 "aeskf2") (UNSPEC_VSM3C "sm3c")])
+
(define_int_attr ins_type [(UNSPEC_VGMUL "vv") (UNSPEC_VAESEFVV "vv")
(UNSPEC_VAESEMVV "vv") (UNSPEC_VAESDFVV "vv")
(UNSPEC_VAESDMVV "vv") (UNSPEC_VAESEFVS "vs")
@@ -83,6 +87,8 @@
(define_int_iterator UNSPEC_CRYPTO_VI [UNSPEC_VAESKF1 UNSPEC_VSM4K])
+(define_int_iterator UNSPEC_CRYPTO_VI1 [UNSPEC_VAESKF2 UNSPEC_VSM3C])
+
;; zvbb instructions patterns.
;; vandn.vv vandn.vx vrol.vv vrol.vx
;; vror.vv vror.vx vror.vi
@@ -462,11 +468,11 @@
[(set_attr "type" "v<vi_ins_name>")
(set_attr "mode" "<MODE>")])
-;; vaeskf2.vi
-(define_insn "@pred_vaeskf2<mode>_scalar"
+;; vaeskf2.vi vsm3c.vi
+(define_insn "@pred_vi<vi_ins1_name><mode>_nomaskedoff_scalar"
[(set (match_operand:VSI 0 "register_operand" "=vd")
(if_then_else:VSI
- (unspec:<VM>
+ (unspec:<VSI:VM>
[(match_operand 4 "vector_length_operand" "rK")
(match_operand 5 "const_int_operand" " i")
(match_operand 6 "const_int_operand" " i")
@@ -475,9 +481,30 @@
(unspec:VSI
[(match_operand:VSI 1 "register_operand" "0")
(match_operand:VSI 2 "register_operand" "vr")
- (match_operand:<VEL> 3 "const_int_operand" " i")] UNSPEC_VAESKF2)
+ (match_operand:<VEL> 3 "const_int_operand" " i")] UNSPEC_CRYPTO_VI1)
(match_dup 1)))]
- "TARGET_ZVKNED"
- "vaeskf2.vi\t%0,%2,%3"
- [(set_attr "type" "vaeskf2")
- (set_attr "mode" "<MODE>")])
\ No newline at end of file
+ "TARGET_ZVKNED || TARGET_ZVKSH"
+ "v<vi_ins1_name>.vi\t%0,%2,%3"
+ [(set_attr "type" "v<vi_ins1_name>")
+ (set_attr "mode" "<MODE>")])
+
+;; zvksh instructions patterns.
+;; vsm3me.vv
+
+(define_insn "@pred_vsm3me<mode>"
+ [(set (match_operand:VSI 0 "register_operand" "=vd, vd")
+ (if_then_else:VSI
+ (unspec:<VM>
+ [(match_operand 4 "vector_length_operand" "rK, rK")
+ (match_operand 5 "const_int_operand" " i, i")
+ (match_operand 6 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VSI
+ [(match_operand:VSI 2 "register_operand" "vr, vr")
+ (match_operand:VSI 3 "register_operand" "vr, vr")] UNSPEC_VSM3ME)
+ (match_operand:VSI 1 "vector_merge_operand" "vu, 0")))]
+ "TARGET_ZVKSH"
+ "vsm3me.vv\t%0,%2,%3"
+ [(set_attr "type" "vsm3me")
+ (set_attr "mode" "<VSI:MODE>")])
\ No newline at end of file
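
The two alternatives on the merge operand of the vsm3me pattern ("vu, 0")
correspond to the tail-agnostic and tail-undisturbed intrinsic forms: with
_tu, the maskedoff argument becomes operand 1 and is tied to the
destination.  In C, as the tests below exercise it:

    #include <riscv_vector.h>

    /* TA form: no merge input, matches the "vu" alternative.  */
    vuint32m1_t ta (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl)
    { return __riscv_vsm3me_vv_u32m1 (vs2, vs1, vl); }

    /* TU form: maskedoff feeds operand 1, matches the "0" alternative.  */
    vuint32m1_t tu (vuint32m1_t maskedoff, vuint32m1_t vs2,
                    vuint32m1_t vs1, size_t vl)
    { return __riscv_vsm3me_vv_u32m1_tu (maskedoff, vs2, vs1, vl); }
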
@@ -54,7 +54,7 @@
vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
vssegtux,vssegtox,vlsegdff,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vrol,\
vror,vwsll,vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,\
- vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r")
+ vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c")
(const_string "true")]
(const_string "false")))
@@ -78,7 +78,7 @@
vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
vssegtux,vssegtox,vlsegdff,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vrol,\
vror,vwsll,vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,\
- vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r")
+ vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c")
(const_string "true")]
(const_string "false")))
@@ -707,7 +707,7 @@
(const_int 2)
(eq_attr "type" "vimerge,vfmerge,vcompress,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,\
- vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r")
+ vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c")
(const_int 1)
(eq_attr "type" "vimuladd,vfmuladd")
@@ -747,7 +747,7 @@
vfcvtftoi,vfwcvtitof,vfwcvtftoi,vfwcvtftof,vfncvtitof,\
vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,vcompress,\
vlsegde,vssegts,vssegtux,vssegtox,vlsegdff,vbrev,vbrev8,vrev8,\
- vghsh,vaeskf1,vaeskf2,vsha2ms,vsha2ch,vsha2cl,vsm4k")
+ vghsh,vaeskf1,vaeskf2,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm3me,vsm3c")
(const_int 4)
;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
@@ -780,7 +780,7 @@
vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,vfwcvtftof,\
vfncvtitof,vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,\
vcompress,vldff,vlsegde,vlsegdff,vbrev,vbrev8,vrev8,vghsh,\
- vaeskf1,vaeskf2,vsha2ms,vsha2ch,vsha2cl,vsm4k")
+ vaeskf1,vaeskf2,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm3me,vsm3c")
(symbol_ref "riscv_vector::get_ta(operands[5])")
;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
@@ -869,7 +869,7 @@
(const_int 9)
(eq_attr "type" "vmsfs,vmidx,vcompress,vghsh,vaeskf1,vaeskf2,vsha2ms,vsha2ch,vsha2cl,\
- vsm4k")
+ vsm4k,vsm3me,vsm3c")
(const_int 6)
(eq_attr "type" "vmpop,vmffs,vssegte,vclz,vctz")
@@ -48,5 +48,7 @@ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvknhb/*.\[cS\]]] \
"" $DEFAULT_CFLAGS
dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvksed/*.\[cS\]]] \
"" $DEFAULT_CFLAGS
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvksh/*.\[cS\]]] \
+ "" $DEFAULT_CFLAGS
# All done.
dg-finish
new file mode 100644
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvksh -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+
+vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vsm3c_vi_u32m1(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vsm3c_vi_u32m2(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vsm3c_vi_u32m4(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vsm3c_vi_u32m8(vd, vs2, 0, vl);
+}
+
+vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vsm3c_vi_u32mf2_tu(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vsm3c_vi_u32m1_tu(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vsm3c_vi_u32m2_tu(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vsm3c_vi_u32m4_tu(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vsm3c_vi_u32m8_tu(vd, vs2, 0, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsm3c\.vi\s+v[0-9]+,\s*v[0-9]+,0} 10 } } */
new file mode 100644
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvksh -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+
+vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vsm3c_tu(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vsm3c_tu(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vsm3c_tu(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vsm3c_tu(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vsm3c_tu(vd, vs2, 0, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsm3c\.vi\s+v[0-9]+,\s*v[0-9]+,0} 10 } } */
new file mode 100644
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvksh -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+
+vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+ return __riscv_vsm3me_vv_u32mf2(vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+ return __riscv_vsm3me_vv_u32m1(vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+ return __riscv_vsm3me_vv_u32m2(vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+ return __riscv_vsm3me_vv_u32m4(vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+ return __riscv_vsm3me_vv_u32m8(vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+ return __riscv_vsm3me_vv_u32mf2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+ return __riscv_vsm3me_vv_u32m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+ return __riscv_vsm3me_vv_u32m2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+ return __riscv_vsm3me_vv_u32m4_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+ return __riscv_vsm3me_vv_u32m8_tu(maskedoff, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsm3me\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 10 } } */
new file mode 100644
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvksh -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+
+vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+ return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+ return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+ return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+ return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+ return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+ return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+ return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+ return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+ return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+ return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsm3me\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 10 } } */