[2/7] RISC-V: Add intrinsic functions for crypto vector Zvbc extension

Message ID 20231204025709.3783-2-wangfeng@eswincomputing.com
State Unresolved
Series None

Checks

Context Check Description
snail/gcc-patch-check warning Git am fail log

Commit Message

Feng Wang Dec. 4, 2023, 2:57 a.m. UTC
  This patch adds the intrinsic functions (according to https://github.com/
riscv-non-isa/rvv-intrinsic-doc/blob/eopc/vector-crypto/auto-generated/
vector-crypto/intrinsic_funcs.md) for the crypto vector Zvbc extension.
Test cases covering all of the new APIs are added as well.
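
For reference, a minimal usage sketch of the new intrinsics (the wrapper
function names below are only illustrative; the intrinsic and type names are
those from the document above), built with a Zvbc-enabled ISA string such as
rv64gc_zvbc:

    #include <stddef.h>
    #include <riscv_vector.h>

    /* Low and high 64-bit halves of the 128-bit carry-less products.  */
    vuint64m1_t clmul_lo (vuint64m1_t a, vuint64m1_t b, size_t vl)
    {
      return __riscv_vclmul_vv_u64m1 (a, b, vl);
    }

    vuint64m1_t clmul_hi (vuint64m1_t a, vuint64m1_t b, size_t vl)
    {
      return __riscv_vclmulh_vv_u64m1 (a, b, vl);
    }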

Co-authored-by: Songhe Zhu <zhusonghe@eswincomputing.com>

gcc/ChangeLog:

        * common/config/riscv/riscv-common.cc (riscv_implied_info): Add Zvbc.
        * config/riscv/riscv-vector-builtins-bases.cc (class clmul): Add new function_base for Zvbc.
        (BASE): Add Zvbc BASE declaration.
        * config/riscv/riscv-vector-builtins-bases.h: Ditto.
        * config/riscv/riscv-vector-builtins-shapes.cc (struct zvbb_def): Rename to zvbb_zvbc_def.
        (struct zvbb_zvbc_def): Combined function_builder for Zvbb and Zvbc.
        (SHAPE): Rename the zvbb SHAPE declaration to zvbb_zvbc.
        * config/riscv/riscv-vector-builtins-shapes.h: Ditto.
        * config/riscv/riscv-vector-builtins.cc (DEF_RVV_CRYPTO_SEW32_OPS): Define new type lists for Zvbc.
        (DEF_RVV_CRYPTO_SEW64_OPS): Ditto.
        (u_vvv_crypto_sew64_ops): New SEW=64 operand information.
        (u_vvx_crypto_sew64_ops): Ditto.
        * config/riscv/riscv-vector-crypto-builtins-avail.h (AVAIL): Add enable condition.
        * config/riscv/riscv-vector-crypto-builtins-functions.def (vandn): Use the renamed zvbb_zvbc shape.
        (vbrev): Ditto.
        (vbrev8): Ditto.
        (vrev8): Ditto.
        (vclz): Ditto.
        (vctz): Ditto.
        (vrol): Ditto.
        (vror): Ditto.
        (vwsll): Ditto.
        (vclmul): Add new intrinsic definitions.
        (vclmulh): Ditto.
        * config/riscv/riscv.md: Add the vclmul and vclmulh instruction type names.
        * config/riscv/vector-crypto.md (h): New int attribute for the Zvbc patterns.
        (@pred_vclmul<h><mode>): New pattern.
        (@pred_vclmul<h><mode>_scalar): Ditto.
        * config/riscv/vector-iterators.md: Add new iterators for Zvbc.
        * config/riscv/vector.md: Add the corresponding attribute for Zvbc.
        * config/riscv/riscv-vector-crypto-builtins-types.def: New file.

gcc/testsuite/ChangeLog:

        * gcc.target/riscv/zvk/zvk.exp: Add the zvbc test directory.
        * gcc.target/riscv/zvk/zvbc/vclmul.c: New test.
        * gcc.target/riscv/zvk/zvbc/vclmul_overloaded.c: New test.
        * gcc.target/riscv/zvk/zvbc/vclmulh.c: New test.
        * gcc.target/riscv/zvk/zvbc/vclmulh_overloaded.c: New test.
---
 gcc/common/config/riscv/riscv-common.cc       |   1 +
 .../riscv/riscv-vector-builtins-bases.cc      |  22 ++
 .../riscv/riscv-vector-builtins-bases.h       |   2 +
 .../riscv/riscv-vector-builtins-shapes.cc     |   6 +-
 .../riscv/riscv-vector-builtins-shapes.h      |   2 +-
 gcc/config/riscv/riscv-vector-builtins.cc     |  29 +++
 .../riscv-vector-crypto-builtins-avail.h      |   1 +
 ...riscv-vector-crypto-builtins-functions.def |  31 +--
 .../riscv-vector-crypto-builtins-types.def    |  21 ++
 gcc/config/riscv/riscv.md                     |   5 +-
 gcc/config/riscv/vector-crypto.md             |  50 +++++
 gcc/config/riscv/vector-iterators.md          |   5 +
 gcc/config/riscv/vector.md                    |  14 +-
 .../gcc.target/riscv/zvk/zvbc/vclmul.c        | 208 ++++++++++++++++++
 .../riscv/zvk/zvbc/vclmul_overloaded.c        | 208 ++++++++++++++++++
 .../gcc.target/riscv/zvk/zvbc/vclmulh.c       | 208 ++++++++++++++++++
 .../riscv/zvk/zvbc/vclmulh_overloaded.c       | 208 ++++++++++++++++++
 gcc/testsuite/gcc.target/riscv/zvk/zvk.exp    |   2 +
 18 files changed, 998 insertions(+), 25 deletions(-)
 create mode 100755 gcc/config/riscv/riscv-vector-crypto-builtins-types.def
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh_overloaded.c
  

Patch

diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc
index a5fb492c690..296500e15df 100644
--- a/gcc/common/config/riscv/riscv-common.cc
+++ b/gcc/common/config/riscv/riscv-common.cc
@@ -121,6 +121,7 @@  static const riscv_implied_info_t riscv_implied_info[] =
   {"zvksg", "zvks"},
   {"zvksg", "zvkg"},
   {"zvbb",  "zvkb"},
+  {"zvbc",     "v"},
   {"zvkb",     "v"},
 
   {"zfh", "zfhmin"},
diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc
index e41343b4a1a..45b1e563ff4 100644
--- a/gcc/config/riscv/riscv-vector-builtins-bases.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc
@@ -2209,6 +2209,24 @@  public:
   }
 };
 
+template<int UNSPEC>
+class clmul : public function_base
+{
+public:
+  rtx expand (function_expander &e) const override
+  {
+    switch (e.op_info->op)
+      {
+      case OP_TYPE_vv:
+        return e.use_exact_insn (code_for_pred_vclmul (UNSPEC, e.vector_mode ()));
+      case OP_TYPE_vx:
+        return e.use_exact_insn (code_for_pred_vclmul_scalar (UNSPEC, e.vector_mode ()));
+      default:
+        gcc_unreachable ();
+      }
+  }
+};
+
 static CONSTEXPR const vsetvl<false> vsetvl_obj;
 static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
 static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
@@ -2476,6 +2494,8 @@  static CONSTEXPR const b_reverse<UNSPEC_VREV8>   vrev8_obj;
 static CONSTEXPR const vcltz<UNSPEC_VCLZ>        vclz_obj;
 static CONSTEXPR const vcltz<UNSPEC_VCTZ>        vctz_obj;
 static CONSTEXPR const vwsll vwsll_obj;
+static CONSTEXPR const clmul<UNSPEC_VCLMUL>      vclmul_obj;
+static CONSTEXPR const clmul<UNSPEC_VCLMULH>     vclmulh_obj;
 
 /* Declare the function base NAME, pointing it to an instance
    of class <NAME>_obj.  */
@@ -2748,4 +2768,6 @@  BASE (vctz)
 BASE (vrol)
 BASE (vror)
 BASE (vwsll)
+BASE (vclmul)
+BASE (vclmulh)
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.h b/gcc/config/riscv/riscv-vector-builtins-bases.h
index 2f46974bd27..7d2c86f9162 100644
--- a/gcc/config/riscv/riscv-vector-builtins-bases.h
+++ b/gcc/config/riscv/riscv-vector-builtins-bases.h
@@ -290,6 +290,8 @@  extern const function_base *const vctz;
 extern const function_base *const vrol;
 extern const function_base *const vror;
 extern const function_base *const vwsll;
+extern const function_base *const vclmul;
+extern const function_base *const vclmulh;
 }
 
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.cc b/gcc/config/riscv/riscv-vector-builtins-shapes.cc
index a98c2389fbc..f21c459e6a2 100644
--- a/gcc/config/riscv/riscv-vector-builtins-shapes.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-shapes.cc
@@ -984,8 +984,8 @@  struct seg_fault_load_def : public build_base
   }
 };
 
-/* vandn/vbrev/vbrev8/vrev8/vclz/vctz/vror[l]/vwsll class.  */
-struct zvbb_def : public build_base
+/* vandn/vbrev/vbrev8/vrev8/vclz/vctz/vror[l]/vwsll/vclmul/vclmulh class.  */
+struct zvbb_zvbc_def : public build_base
 {
   char *get_name (function_builder &b, const function_instance &instance,
                   bool overloaded_p) const override
@@ -1037,5 +1037,5 @@  SHAPE(vlenb, vlenb)
 SHAPE(seg_loadstore, seg_loadstore)
 SHAPE(seg_indexed_loadstore, seg_indexed_loadstore)
 SHAPE(seg_fault_load, seg_fault_load)
-SHAPE(zvbb, zvbb)
+SHAPE(zvbb_zvbc, zvbb_zvbc)
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.h b/gcc/config/riscv/riscv-vector-builtins-shapes.h
index e8959a2f277..a217eae33f0 100644
--- a/gcc/config/riscv/riscv-vector-builtins-shapes.h
+++ b/gcc/config/riscv/riscv-vector-builtins-shapes.h
@@ -53,7 +53,7 @@  extern const function_shape *const seg_loadstore;
 extern const function_shape *const seg_indexed_loadstore;
 extern const function_shape *const seg_fault_load;
 /* Below function_shape are Vectro Crypto*/
-extern const function_shape *const zvbb;
+extern const function_shape *const zvbb_zvbc;
 }
 
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc
index 7a1da5c4539..ffd30c1a806 100644
--- a/gcc/config/riscv/riscv-vector-builtins.cc
+++ b/gcc/config/riscv/riscv-vector-builtins.cc
@@ -522,6 +522,19 @@  static const rvv_type_info tuple_ops[] = {
 #include "riscv-vector-builtins-types.def"
   {NUM_VECTOR_TYPES, 0}};
 
+/* Below types will be registered for vector-crypto intrinsic functions.  */
+/* A list of sew32 types will be registered for vector-crypto intrinsic functions.  */
+static const rvv_type_info crypto_sew32_ops[] = {
+#define DEF_RVV_CRYPTO_SEW32_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-crypto-builtins-types.def"
+  {NUM_VECTOR_TYPES, 0}};
+
+/* A list of sew64 types will be registered for vector-crypto intrinsic functions.  */
+static const rvv_type_info crypto_sew64_ops[] = {
+#define DEF_RVV_CRYPTO_SEW64_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-crypto-builtins-types.def"
+  {NUM_VECTOR_TYPES, 0}};
+
 static CONSTEXPR const rvv_arg_type_info rvv_arg_type_info_end
   = rvv_arg_type_info (NUM_BASE_TYPES);
 
@@ -2626,6 +2639,22 @@  static CONSTEXPR const rvv_op_info all_v_vcreate_lmul4_x2_ops
      rvv_arg_type_info (RVV_BASE_vlmul_ext_x2), /* Return type */
      ext_vcreate_args /* Args */};
 
+/* A static operand information for vector_type func (vector_type, vector_type)
+   function registration.  Some instructions only support SEW=64, such as the
+   crypto vector Zvbc extension instructions vclmul.vv, vclmul.vx,
+   vclmulh.vv and vclmulh.vx.  */
+static CONSTEXPR const rvv_op_info u_vvv_crypto_sew64_ops
+  = {crypto_sew64_ops,			   /* Types */
+     OP_TYPE_vv,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     vv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info u_vvx_crypto_sew64_ops
+  = {crypto_sew64_ops,			   /* Types */
+     OP_TYPE_vx,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     vx_args /* Args */};
+
 /* A list of all RVV base function types.  */
 static CONSTEXPR const function_type_info function_types[] = {
 #define DEF_RVV_TYPE_INDEX(                                                    \
diff --git a/gcc/config/riscv/riscv-vector-crypto-builtins-avail.h b/gcc/config/riscv/riscv-vector-crypto-builtins-avail.h
index 2719027a7da..a63dea6a27b 100755
--- a/gcc/config/riscv/riscv-vector-crypto-builtins-avail.h
+++ b/gcc/config/riscv/riscv-vector-crypto-builtins-avail.h
@@ -13,6 +13,7 @@  namespace riscv_vector {
  }
 
 AVAIL (zvbb, TARGET_ZVBB)
+AVAIL (zvbc, TARGET_ZVBC)
 AVAIL (zvkb_or_zvbb, TARGET_ZVKB || TARGET_ZVBB)
 }
 #endif
diff --git a/gcc/config/riscv/riscv-vector-crypto-builtins-functions.def b/gcc/config/riscv/riscv-vector-crypto-builtins-functions.def
index f3371f28a42..d8c74dec4f6 100755
--- a/gcc/config/riscv/riscv-vector-crypto-builtins-functions.def
+++ b/gcc/config/riscv/riscv-vector-crypto-builtins-functions.def
@@ -4,16 +4,21 @@ 
 
 
 // ZVBB
-DEF_VECTOR_CRYPTO_FUNCTION (vandn, zvbb, full_preds, u_vvv_ops, zvkb_or_zvbb)
-DEF_VECTOR_CRYPTO_FUNCTION (vandn, zvbb, full_preds, u_vvx_ops, zvkb_or_zvbb)
-DEF_VECTOR_CRYPTO_FUNCTION (vbrev, zvbb, full_preds, u_vv_ops, zvbb)
-DEF_VECTOR_CRYPTO_FUNCTION (vbrev8, zvbb, full_preds, u_vv_ops, zvkb_or_zvbb)
-DEF_VECTOR_CRYPTO_FUNCTION (vrev8, zvbb, full_preds, u_vv_ops, zvkb_or_zvbb)
-DEF_VECTOR_CRYPTO_FUNCTION (vclz, zvbb, none_m_preds, u_vv_ops, zvbb)
-DEF_VECTOR_CRYPTO_FUNCTION (vctz, zvbb, none_m_preds, u_vv_ops, zvbb)
-DEF_VECTOR_CRYPTO_FUNCTION (vrol, zvbb, full_preds, u_vvv_ops, zvkb_or_zvbb)
-DEF_VECTOR_CRYPTO_FUNCTION (vrol, zvbb, full_preds, u_shift_vvx_ops, zvkb_or_zvbb)
-DEF_VECTOR_CRYPTO_FUNCTION (vror, zvbb, full_preds, u_vvv_ops, zvkb_or_zvbb)
-DEF_VECTOR_CRYPTO_FUNCTION (vror, zvbb, full_preds, u_shift_vvx_ops, zvkb_or_zvbb)
-DEF_VECTOR_CRYPTO_FUNCTION (vwsll, zvbb, full_preds, u_wvv_ops, zvbb)
-DEF_VECTOR_CRYPTO_FUNCTION (vwsll, zvbb, full_preds, u_shift_wvx_ops, zvbb)
+DEF_VECTOR_CRYPTO_FUNCTION (vandn,  zvbb_zvbc, full_preds, u_vvv_ops, zvkb_or_zvbb)
+DEF_VECTOR_CRYPTO_FUNCTION (vandn,  zvbb_zvbc, full_preds, u_vvx_ops, zvkb_or_zvbb)
+DEF_VECTOR_CRYPTO_FUNCTION (vbrev,  zvbb_zvbc, full_preds, u_vv_ops,  zvbb)
+DEF_VECTOR_CRYPTO_FUNCTION (vbrev8, zvbb_zvbc, full_preds, u_vv_ops,  zvkb_or_zvbb)
+DEF_VECTOR_CRYPTO_FUNCTION (vrev8,  zvbb_zvbc, full_preds, u_vv_ops,  zvkb_or_zvbb)
+DEF_VECTOR_CRYPTO_FUNCTION (vclz,   zvbb_zvbc, none_m_preds, u_vv_ops, zvbb)
+DEF_VECTOR_CRYPTO_FUNCTION (vctz,   zvbb_zvbc, none_m_preds, u_vv_ops, zvbb)
+DEF_VECTOR_CRYPTO_FUNCTION (vrol,   zvbb_zvbc, full_preds, u_vvv_ops, zvkb_or_zvbb)
+DEF_VECTOR_CRYPTO_FUNCTION (vrol,   zvbb_zvbc, full_preds, u_shift_vvx_ops, zvkb_or_zvbb)
+DEF_VECTOR_CRYPTO_FUNCTION (vror,   zvbb_zvbc, full_preds, u_vvv_ops, zvkb_or_zvbb)
+DEF_VECTOR_CRYPTO_FUNCTION (vror,   zvbb_zvbc, full_preds, u_shift_vvx_ops, zvkb_or_zvbb)
+DEF_VECTOR_CRYPTO_FUNCTION (vwsll,  zvbb_zvbc, full_preds, u_wvv_ops, zvbb)
+DEF_VECTOR_CRYPTO_FUNCTION (vwsll,  zvbb_zvbc, full_preds, u_shift_wvx_ops, zvbb)
+// ZVBC
+DEF_VECTOR_CRYPTO_FUNCTION (vclmul,  zvbb_zvbc, full_preds, u_vvv_crypto_sew64_ops, zvbc)
+DEF_VECTOR_CRYPTO_FUNCTION (vclmul,  zvbb_zvbc, full_preds, u_vvx_crypto_sew64_ops, zvbc)
+DEF_VECTOR_CRYPTO_FUNCTION (vclmulh, zvbb_zvbc, full_preds, u_vvv_crypto_sew64_ops, zvbc)
+DEF_VECTOR_CRYPTO_FUNCTION (vclmulh, zvbb_zvbc, full_preds, u_vvx_crypto_sew64_ops, zvbc)
diff --git a/gcc/config/riscv/riscv-vector-crypto-builtins-types.def b/gcc/config/riscv/riscv-vector-crypto-builtins-types.def
new file mode 100755
index 00000000000..f40367ae2c3
--- /dev/null
+++ b/gcc/config/riscv/riscv-vector-crypto-builtins-types.def
@@ -0,0 +1,21 @@ 
+#ifndef DEF_RVV_CRYPTO_SEW32_OPS
+#define DEF_RVV_CRYPTO_SEW32_OPS(TYPE, REQUIRE)
+#endif
+
+#ifndef DEF_RVV_CRYPTO_SEW64_OPS
+#define DEF_RVV_CRYPTO_SEW64_OPS(TYPE, REQUIRE)
+#endif
+
+DEF_RVV_CRYPTO_SEW32_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_CRYPTO_SEW32_OPS (vuint32m1_t, 0)
+DEF_RVV_CRYPTO_SEW32_OPS (vuint32m2_t, 0)
+DEF_RVV_CRYPTO_SEW32_OPS (vuint32m4_t, 0)
+DEF_RVV_CRYPTO_SEW32_OPS (vuint32m8_t, 0)
+
+DEF_RVV_CRYPTO_SEW64_OPS (vuint64m1_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_CRYPTO_SEW64_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_CRYPTO_SEW64_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_CRYPTO_SEW64_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
+
+#undef DEF_RVV_CRYPTO_SEW32_OPS
+#undef DEF_RVV_CRYPTO_SEW64_OPS
\ No newline at end of file
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index 2a3777e168c..4a853d8238f 100644
--- a/gcc/config/riscv/riscv.md
+++ b/gcc/config/riscv/riscv.md
@@ -437,6 +437,8 @@ 
 ;; vrol         crypto vector rotate left instructions
 ;; vror         crypto vector rotate right instructions
 ;; vwsll        crypto vector widening shift left logical instructions
+;; vclmul       crypto vector carry-less multiply (low half) instructions
+;; vclmulh      crypto vector carry-less multiply (high half) instructions
 (define_attr "type"
   "unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore,
    mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
@@ -456,7 +458,8 @@ 
    vired,viwred,vfredu,vfredo,vfwredu,vfwredo,
    vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,vfmovvf,vfmovfv,
    vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,
-   vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vrol,vror,vwsll"
+   vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vrol,vror,vwsll,
+   vclmul,vclmulh"
   (cond [(eq_attr "got" "load") (const_string "load")
 
 	 ;; If a doubleword move uses these expensive instructions,
diff --git a/gcc/config/riscv/vector-crypto.md b/gcc/config/riscv/vector-crypto.md
index 0373cf6f48a..f3034ba122a 100755
--- a/gcc/config/riscv/vector-crypto.md
+++ b/gcc/config/riscv/vector-crypto.md
@@ -9,6 +9,8 @@ 
     UNSPEC_VROL
     UNSPEC_VROR
     UNSPEC_VWSLL
+    UNSPEC_VCLMUL
+    UNSPEC_VCLMULH
 ])
 
 (define_int_attr ror_rol [(UNSPEC_VROL "rol") (UNSPEC_VROR "ror")])
@@ -17,12 +19,16 @@ 
 
 (define_int_attr rev  [(UNSPEC_VBREV "brev") (UNSPEC_VBREV8 "brev8") (UNSPEC_VREV8 "rev8")])
 
+(define_int_attr h [(UNSPEC_VCLMUL "") (UNSPEC_VCLMULH "h")])
+
 (define_int_iterator UNSPEC_VRORL [UNSPEC_VROL UNSPEC_VROR])
 
 (define_int_iterator UNSPEC_VCLTZ [UNSPEC_VCLZ UNSPEC_VCTZ])
 
 (define_int_iterator UNSPEC_VRBB8 [UNSPEC_VBREV UNSPEC_VBREV8 UNSPEC_VREV8])
 
+(define_int_iterator UNSPEC_CLMUL [UNSPEC_VCLMUL UNSPEC_VCLMULH])
+
 ;; zvbb instructions patterns.
 ;; vandn.vv vandn.vx vrol.vv vrol.vx
 ;; vror.vv vror.vx vror.vi
@@ -205,3 +211,47 @@ 
   "vc<lt>.v\t%0,%2%p1"
   [(set_attr "type" "vc<lt>")
    (set_attr "mode" "<MODE>")])
+
+;; zvbc instructions patterns.
+;; vclmul.vv vclmul.vx
+;; vclmulh.vv vclmulh.vx
+
+(define_insn "@pred_vclmul<h><mode>"
+  [(set (match_operand:VDI 0  "register_operand" "=vd,vd")
+        (if_then_else:VDI
+	          (unspec:<VM>
+	            [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+	              (match_operand 5 "vector_length_operand"    "rK,rK")
+	              (match_operand 6 "const_int_operand"        "i, i")
+	              (match_operand 7 "const_int_operand"        "i, i")
+	              (match_operand 8 "const_int_operand"        "i, i")
+	              (reg:SI VL_REGNUM)
+	              (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	          (unspec:VDI
+	            [(match_operand:VDI 3 "register_operand"  "vr,vr")
+	             (match_operand:VDI 4 "register_operand"  "vr,vr")]UNSPEC_CLMUL)
+	          (match_operand:VDI 2 "vector_merge_operand" "vu, 0")))]
+  "TARGET_ZVBC && TARGET_64BIT"
+  "vclmul<h>.vv\t%0,%3,%4%p1"
+  [(set_attr "type" "vclmul<h>")
+   (set_attr "mode" "<VDI:MODE>")])
+
+(define_insn "@pred_vclmul<h><mode>_scalar"
+  [(set (match_operand:VDI 0 "register_operand"       "=vd,vd")
+    (if_then_else:VDI
+      (unspec:<VM>
+        [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+        (match_operand 5 "vector_length_operand"    "rK,rK")
+        (match_operand 6 "const_int_operand"        "i, i")
+        (match_operand 7 "const_int_operand"        "i, i")
+        (match_operand 8 "const_int_operand"        "i, i")
+        (reg:SI VL_REGNUM)
+        (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+      (unspec:VDI
+        [(match_operand:VDI 3 "register_operand"  "vr,vr")
+        (match_operand:<VDI:VEL> 4 "register_operand"  "r,r")]UNSPEC_CLMUL)
+      (match_operand:VDI 2 "vector_merge_operand" "vu, 0")))]
+  "TARGET_ZVBC && TARGET_64BIT"
+  "vclmul<h>.vx\t%0,%3,%4%p1"
+  [(set_attr "type" "vclmul<h>")
+   (set_attr "mode" "<VDI:MODE>")])
\ No newline at end of file
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index 56080ed1f5f..e52709493f6 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -3916,3 +3916,8 @@ 
   (V1024BI "riscv_vector::vls_mode_valid_p (V1024BImode) && TARGET_MIN_VLEN >= 1024")
   (V2048BI "riscv_vector::vls_mode_valid_p (V2048BImode) && TARGET_MIN_VLEN >= 2048")
   (V4096BI "riscv_vector::vls_mode_valid_p (V4096BImode) && TARGET_MIN_VLEN >= 4096")])
+
+(define_mode_iterator VDI [
+  (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
+  (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
+])
\ No newline at end of file
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index 3e08e18d355..2733ea7728f 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -53,7 +53,7 @@ 
 			  vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
 			  vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
 			  vssegtux,vssegtox,vlsegdff,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vrol,\
-                          vror,vwsll")
+                          vror,vwsll,vclmul,vclmulh")
 	 (const_string "true")]
 	(const_string "false")))
 
@@ -76,7 +76,7 @@ 
 			  vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
 			  vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
 			  vssegtux,vssegtox,vlsegdff,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vrol,\
-                          vror,vwsll")
+                          vror,vwsll,vclmul,vclmulh")
 	 (const_string "true")]
 	(const_string "false")))
 
@@ -701,7 +701,7 @@ 
 				vired,viwred,vfredu,vfredo,vfwredu,vfwredo,vimovxv,vfmovfv,\
 				vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
 				vgather,vldff,viwmuladd,vfwmuladd,vlsegde,vlsegds,vlsegdux,vlsegdox,vlsegdff,\
-                                vandn,vbrev,vbrev8,vrev8,vclz,vctz,vrol,vror,vwsll")
+                                vandn,vbrev,vbrev8,vrev8,vclz,vctz,vrol,vror,vwsll,vclmul,vclmulh")
 	       (const_int 2)
 
 	       (eq_attr "type" "vimerge,vfmerge,vcompress")
@@ -759,7 +759,7 @@ 
 			  vfsgnj,vfmerge,vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
 			  vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
 			  vgather,viwmuladd,vfwmuladd,vlsegds,vlsegdux,vlsegdox,vandn,vrol,\
-                          vror,vwsll")
+                          vror,vwsll,vclmul,vclmulh")
 	   (const_int 5)
 
 	 (eq_attr "type" "vicmp,vimuladd,vfcmp,vfmuladd")
@@ -790,7 +790,7 @@ 
 			  vfwalu,vfwmul,vfsgnj,vfmerge,vired,viwred,vfredu,\
 			  vfredo,vfwredu,vfwredo,vslideup,vslidedown,vislide1up,\
 			  vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd,\
-			  vlsegds,vlsegdux,vlsegdox,vandn,vrol,vror,vwsll")
+			  vlsegds,vlsegdux,vlsegdox,vandn,vrol,vror,vwsll,vclmul,vclmulh")
 	   (symbol_ref "riscv_vector::get_ta(operands[6])")
 
 	 (eq_attr "type" "vimuladd,vfmuladd")
@@ -820,7 +820,7 @@ 
 			  vfwalu,vfwmul,vfsgnj,vfcmp,vslideup,vslidedown,\
 			  vislide1up,vislide1down,vfslide1up,vfslide1down,vgather,\
 			  viwmuladd,vfwmuladd,vlsegds,vlsegdux,vlsegdox,vandn,vrol,\
-                          vror,vwsll")
+                          vror,vwsll,vclmul,vclmulh")
 	   (symbol_ref "riscv_vector::get_ma(operands[7])")
 
 	 (eq_attr "type" "vimuladd,vfmuladd")
@@ -855,7 +855,7 @@ 
 			  vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd,\
 			  vlsegds,vlsegdux,vlsegdox,vandn,vrol,vror,vwsll")
 	   (const_int 8)
-	 (eq_attr "type" "vstux,vstox,vssegts,vssegtux,vssegtox")
+	 (eq_attr "type" "vstux,vstox,vssegts,vssegtux,vssegtox,vclmul,vclmulh")
 	   (const_int 5)
 
 	 (eq_attr "type" "vimuladd,vfmuladd")
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul.c
new file mode 100644
index 00000000000..ba3e5cf858e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul.c
@@ -0,0 +1,208 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbc -mabi=lp64d -O3 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1(vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2(vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2(vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4(vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m4(vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8(vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8(vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_m(mask, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1_m(mask, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2_m(mask, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2_m(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4_m(mask, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m4_m(mask, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8_m(mask, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8_m(mask, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m4_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 16 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 16 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul_overloaded.c
new file mode 100644
index 00000000000..1e25831f3f5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmul_overloaded.c
@@ -0,0 +1,208 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbc -mabi=lp64d -O3 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul(vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul(vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul(vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul(vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul(mask, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 16 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 16 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh.c
new file mode 100644
index 00000000000..c14b8a56490
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh.c
@@ -0,0 +1,208 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbc -mabi=lp64d -O3 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1(vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2(vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2(vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4(vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4(vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8(vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8(vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_m(mask, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1_m(mask, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2_m(mask, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2_m(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4_m(mask, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4_m(mask, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8_m(mask, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_m(mask, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 16 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 16 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh_overloaded.c
new file mode 100644
index 00000000000..ed3c4388af6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvbc/vclmulh_overloaded.c
@@ -0,0 +1,208 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvbc -mabi=lp64d -O3 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh(vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh(vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh(vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh(vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh(mask, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 16 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 8 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 16 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 24 } } */
+/* { dg-final { scan-assembler-times {vclmulh\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 16 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvk.exp b/gcc/testsuite/gcc.target/riscv/zvk/zvk.exp
index f0c5431d00c..2426825baae 100644
--- a/gcc/testsuite/gcc.target/riscv/zvk/zvk.exp
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvk.exp
@@ -36,6 +36,8 @@  dg-init
 # Main loop.
 dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvbb/*.\[cS\]]] \
         "" $DEFAULT_CFLAGS
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvbc/*.\[cS\]]] \
+        "" $DEFAULT_CFLAGS
 
 # All done.
 dg-finish