[v4,1/3] RISC-V: Add crypto vector builtin function.

Message ID: 20231220070530.5381-1-wangfeng@eswincomputing.com
State: Unresolved
Series: [v4,1/3] RISC-V: Add crypto vector builtin function.

Checks

Context                Check    Description
snail/gcc-patch-check  warning  Git am fail log

Commit Message

Feng Wang Dec. 20, 2023, 7:05 a.m. UTC
Patch v4: Merge the crypto vector functions.def into the vector one.
Patch v3: Define a shape for vaesz and merge vector-crypto-types.def
          into riscv-vector-builtins-types.def.
Patch v2: Optimize the function_shape class for the crypto vector.

This patch adds the intrinsic functions for the vector crypto extensions,
based on the intrinsic documentation:
https://github.com/riscv-non-isa/rvv-intrinsic-doc/blob/eopc/vector-crypto/auto-generated/vector-crypto/intrinsic_funcs.md
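
As a usage sketch (not part of the patch), assuming a compiler built with
this series and the relevant extensions enabled, e.g.
-march=rv64gcv_zvbb_zvkned, the new intrinsics can be called as below; the
names follow the intrinsic documentation above:

  #include <riscv_vector.h>

  vuint32m1_t
  f (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl)
  {
    /* Zvbb/Zvkb: bitwise and-not, then rotate right by 8.  */
    vuint32m1_t t = __riscv_vandn_vv_u32m1 (vs2, vs1, vl);
    t = __riscv_vror_vx_u32m1 (t, 8, vl);
    /* Zvkned: AES-128 forward key schedule, round 1 (uimm operand).  */
    t = __riscv_vaeskf1_vi_u32m1 (t, 1, vl);
    /* Zvkned: AES final-round encryption; vd holds the round state.  */
    return __riscv_vaesef_vv_u32m1 (vd, t, vl);
  }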

Co-authored-by: Songhe Zhu <zhusonghe@eswincomputing.com>
Co-authored-by: Ciyan Pan <panciyan@eswincomputing.com>

gcc/ChangeLog:

	* config/riscv/riscv-vector-builtins-bases.cc (class vandn): Add new
	function_base for crypto vector.
	(class bitmanip): Ditto.
	(class b_reverse): Ditto.
	(class vwsll): Ditto.
	(class clmul): Ditto.
	(class vg_nhab): Ditto.
	(class crypto_vv): Ditto.
	(class crypto_vi): Ditto.
	(class vaeskf2_vsm3c): Ditto.
	(class vsm3me): Ditto.
	(BASE): Add BASE declaration for crypto vector.
	* config/riscv/riscv-vector-builtins-bases.h: Ditto.
	* config/riscv/riscv-vector-builtins-functions.def
	(REQUIRED_EXTENSIONS): Add crypto vector intrinsic definition.
	(vbrev): Ditto.
	(vclz): Ditto.
	(vctz): Ditto.
	(vwsll): Ditto.
	(vandn): Ditto.
	(vbrev8): Ditto.
	(vrev8): Ditto.
	(vrol): Ditto.
	(vror): Ditto.
	(vclmul): Ditto.
	(vclmulh): Ditto.
	(vghsh): Ditto.
	(vgmul): Ditto.
	(vaesef): Ditto.
	(vaesem): Ditto.
	(vaesdf): Ditto.
	(vaesdm): Ditto.
	(vaesz): Ditto.
	(vaeskf1): Ditto.
	(vaeskf2): Ditto.
	(vsha2ms): Ditto.
	(vsha2ch): Ditto.
	(vsha2cl): Ditto.
	(vsm4k): Ditto.
	(vsm4r): Ditto.
	(vsm3me): Ditto.
	(vsm3c): Ditto.
	* config/riscv/riscv-vector-builtins-shapes.cc (struct crypto_vv_def):
	Add new function_shape for crypto vector.
	(struct crypto_vi_def): Ditto.
	(struct crypto_vv_no_op_type_def): Ditto.
	(SHAPE): Add SHAPE declaration of crypto vector.
	* config/riscv/riscv-vector-builtins-shapes.h: Ditto.
	* config/riscv/riscv-vector-builtins-types.def
	(DEF_RVV_CRYPTO_SEW32_OPS): Add new data type for crypto vector.
	(DEF_RVV_CRYPTO_SEW64_OPS): Ditto.
	(vuint32mf2_t): Ditto.
	(vuint32m1_t): Ditto.
	(vuint32m2_t): Ditto.
	(vuint32m4_t): Ditto.
	(vuint32m8_t): Ditto.
	(vuint64m1_t): Ditto.
	(vuint64m2_t): Ditto.
	(vuint64m4_t): Ditto.
	(vuint64m8_t): Ditto.
	* config/riscv/riscv-vector-builtins.cc (DEF_RVV_CRYPTO_SEW32_OPS):
	Add new data struct for crypto vector.
	(DEF_RVV_CRYPTO_SEW64_OPS): Ditto.
	(registered_function::overloaded_hash): Process the size_t uimm
	argument for C overloaded functions.
	* config/riscv/riscv-vector-builtins.def (vi): Add vi OP_TYPE.
---
 .../riscv/riscv-vector-builtins-bases.cc      | 264 +++++++++++++++++-
 .../riscv/riscv-vector-builtins-bases.h       |  28 ++
 .../riscv/riscv-vector-builtins-functions.def |  94 +++++++
 .../riscv/riscv-vector-builtins-shapes.cc     |  87 +++++-
 .../riscv/riscv-vector-builtins-shapes.h      |   4 +
 .../riscv/riscv-vector-builtins-types.def     |  25 ++
 gcc/config/riscv/riscv-vector-builtins.cc     | 133 ++++++++-
 gcc/config/riscv/riscv-vector-builtins.def    |   1 +
 8 files changed, 633 insertions(+), 3 deletions(-)

Patch

diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc
index d70468542ee..d12bb89f91c 100644
--- a/gcc/config/riscv/riscv-vector-builtins-bases.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc
@@ -2127,6 +2127,212 @@  public:
   }
 };
 
+/* The implementations below are for the vector crypto extensions.  */
+/* Implements vandn.[vv,vx].  */
+class vandn : public function_base
+{
+public:
+  rtx expand (function_expander &e) const override
+  {
+    switch (e.op_info->op)
+      {
+      case OP_TYPE_vv:
+        return e.use_exact_insn (code_for_pred_vandn (e.vector_mode ()));
+      case OP_TYPE_vx:
+        return e.use_exact_insn (code_for_pred_vandn_scalar (e.vector_mode ()));
+      default:
+        gcc_unreachable ();
+      }
+  }
+};
+
+/* Implements vrol/vror/vclz/vctz.  */
+template<rtx_code CODE>
+class bitmanip : public function_base
+{
+public:
+  bool apply_tail_policy_p () const override
+  {
+    return (CODE == CLZ || CODE == CTZ) ? false : true;
+  }
+  bool apply_mask_policy_p () const override
+  {
+    return (CODE == CLZ || CODE == CTZ) ? false : true;
+  }
+  bool has_merge_operand_p () const override
+  {
+    return (CODE == CLZ || CODE == CTZ) ? false : true;
+  }
+
+  rtx expand (function_expander &e) const override
+  {
+    switch (e.op_info->op)
+    {
+      case OP_TYPE_v:
+      case OP_TYPE_vv:
+        return e.use_exact_insn (code_for_pred_v (CODE, e.vector_mode ()));
+      case OP_TYPE_vx:
+        return e.use_exact_insn (code_for_pred_v_scalar (CODE, e.vector_mode ()));
+      default:
+        gcc_unreachable ();
+    }
+  }
+};
+
+/* Implements vbrev/vbrev8/vrev8.  */
+template<int UNSPEC>
+class b_reverse : public function_base
+{
+public:
+  rtx expand (function_expander &e) const override
+  {
+      return e.use_exact_insn (code_for_pred_v (UNSPEC, e.vector_mode ()));
+  }
+};
+
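+/* Implements vwsll.[vv,vx].  */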
+class vwsll : public function_base
+{
+public:
+  rtx expand (function_expander &e) const override
+  {
+    switch (e.op_info->op)
+      {
+      case OP_TYPE_vv:
+        return e.use_exact_insn (code_for_pred_vwsll (e.vector_mode ()));
+      case OP_TYPE_vx:
+        return e.use_exact_insn (code_for_pred_vwsll_scalar (e.vector_mode ()));
+      default:
+        gcc_unreachable ();
+      }
+  }
+};
+
+/* Implements vclmul/vclmulh.  */
+template<int UNSPEC>
+class clmul : public function_base
+{
+public:
+  rtx expand (function_expander &e) const override
+  {
+    switch (e.op_info->op)
+      {
+      case OP_TYPE_vv:
+        return e.use_exact_insn (
+                 code_for_pred_vclmul (UNSPEC, e.vector_mode ()));
+      case OP_TYPE_vx:
+        return e.use_exact_insn
+                 (code_for_pred_vclmul_scalar (UNSPEC, e.vector_mode ()));
+      default:
+        gcc_unreachable ();
+      }
+  }
+};
+
+/* Implements vghsh/vsha2ms/vsha2c[hl].  */
+template<int UNSPEC>
+class vg_nhab : public function_base
+{
+public:
+  bool apply_mask_policy_p () const override { return false; }
+  bool use_mask_predication_p () const override { return false; }
+  bool has_merge_operand_p () const override { return false; }
+
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_exact_insn (code_for_pred_v (UNSPEC, e.vector_mode ()));
+  }
+};
+
+/* Implements vgmul/vaes*. */
+template<int UNSPEC>
+class crypto_vv : public function_base
+{
+public:
+  bool apply_mask_policy_p () const override { return false; }
+  bool use_mask_predication_p () const override { return false; }
+  bool has_merge_operand_p () const override { return false; }
+
+  rtx expand (function_expander &e) const override
+  {
+    poly_uint64 nunits = 0U;
+    switch (e.op_info->op)
+    {
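+      /* The insn patterns for the vv and vs variants of each instruction
+         are registered at consecutive unspec values: UNSPEC+1 selects the
+         vv variant and UNSPEC+2 the vs variant.  vgmul only has a vv form
+         and uses the base UNSPEC directly.  */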
+      case OP_TYPE_vv:
+        if (UNSPEC == UNSPEC_VGMUL)
+          return e.use_exact_insn
+                   (code_for_pred_crypto_vv (UNSPEC, UNSPEC, e.vector_mode ()));
+        else
+          return e.use_exact_insn
+                   (code_for_pred_crypto_vv (UNSPEC + 1, UNSPEC + 1, e.vector_mode ()));
+      case OP_TYPE_vs:
+        /* Calculate the ratio between arg0 and arg1.  */
+        gcc_assert (multiple_p (GET_MODE_BITSIZE (e.arg_mode (0)),
+                                GET_MODE_BITSIZE (e.arg_mode (1)), &nunits));
+        if (maybe_eq (nunits, 1U))
+          return e.use_exact_insn (code_for_pred_crypto_vvx1_scalar
+                                   (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
+        else if (maybe_eq (nunits, 2U))
+          return e.use_exact_insn (code_for_pred_crypto_vvx2_scalar
+                                   (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
+        else if (maybe_eq (nunits, 4U))
+          return e.use_exact_insn (code_for_pred_crypto_vvx4_scalar
+                                   (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
+        else if (maybe_eq (nunits, 8U))
+          return e.use_exact_insn (code_for_pred_crypto_vvx8_scalar
+                                   (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
+        else
+          return e.use_exact_insn (code_for_pred_crypto_vvx16_scalar
+                                   (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
+      default:
+        gcc_unreachable ();
+    }
+  }
+};
+
+/* Implements vaeskf1/vsm4k. */
+template<int UNSPEC>
+class crypto_vi : public function_base
+{
+public:
+  bool apply_mask_policy_p () const override { return false; }
+  bool use_mask_predication_p () const override { return false; }
+
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_exact_insn
+             (code_for_pred_crypto_vi_scalar (UNSPEC, e.vector_mode ()));
+  }
+};
+
+/* Implements vaeskf2/vsm3c. */
+template<int UNSPEC>
+class vaeskf2_vsm3c : public function_base
+{
+public:
+  bool apply_mask_policy_p () const override { return false; }
+  bool use_mask_predication_p () const override { return false; }
+  bool has_merge_operand_p () const override { return false; }
+
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_exact_insn
+             (code_for_pred_vi_nomaskedoff_scalar (UNSPEC, e.vector_mode ()));
+  }
+};
+
+/* Implements vsm3me. */
+class vsm3me : public function_base
+{
+public:
+  bool apply_mask_policy_p () const override { return false; }
+  bool use_mask_predication_p () const override { return false; }
+
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_exact_insn (code_for_pred_vsm3me (e.vector_mode ()));
+  }
+};
+
 static CONSTEXPR const vsetvl<false> vsetvl_obj;
 static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
 static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
@@ -2384,6 +2590,35 @@  static CONSTEXPR const seg_indexed_store<UNSPEC_UNORDERED> vsuxseg_obj;
 static CONSTEXPR const seg_indexed_store<UNSPEC_ORDERED> vsoxseg_obj;
 static CONSTEXPR const vlsegff vlsegff_obj;
 
+/* Crypto vector.  */
+static CONSTEXPR const vandn vandn_obj;
+static CONSTEXPR const bitmanip<ROTATE>   vrol_obj;
+static CONSTEXPR const bitmanip<ROTATERT> vror_obj;
+static CONSTEXPR const b_reverse<UNSPEC_VBREV>   vbrev_obj;
+static CONSTEXPR const b_reverse<UNSPEC_VBREV8>  vbrev8_obj;
+static CONSTEXPR const b_reverse<UNSPEC_VREV8>   vrev8_obj;
+static CONSTEXPR const bitmanip<CLZ> vclz_obj;
+static CONSTEXPR const bitmanip<CTZ> vctz_obj;
+static CONSTEXPR const vwsll vwsll_obj;
+static CONSTEXPR const clmul<UNSPEC_VCLMUL>      vclmul_obj;
+static CONSTEXPR const clmul<UNSPEC_VCLMULH>     vclmulh_obj;
+static CONSTEXPR const vg_nhab<UNSPEC_VGHSH>     vghsh_obj;
+static CONSTEXPR const crypto_vv<UNSPEC_VGMUL>   vgmul_obj;
+static CONSTEXPR const crypto_vv<UNSPEC_VAESEF>  vaesef_obj;
+static CONSTEXPR const crypto_vv<UNSPEC_VAESEM>  vaesem_obj;
+static CONSTEXPR const crypto_vv<UNSPEC_VAESDF>  vaesdf_obj;
+static CONSTEXPR const crypto_vv<UNSPEC_VAESDM>  vaesdm_obj;
+static CONSTEXPR const crypto_vv<UNSPEC_VAESZ>   vaesz_obj;
+static CONSTEXPR const crypto_vi<UNSPEC_VAESKF1> vaeskf1_obj;
+static CONSTEXPR const vaeskf2_vsm3c<UNSPEC_VAESKF2> vaeskf2_obj;
+static CONSTEXPR const vg_nhab<UNSPEC_VSHA2MS>   vsha2ms_obj;
+static CONSTEXPR const vg_nhab<UNSPEC_VSHA2CH>   vsha2ch_obj;
+static CONSTEXPR const vg_nhab<UNSPEC_VSHA2CL>   vsha2cl_obj;
+static CONSTEXPR const crypto_vi<UNSPEC_VSM4K>   vsm4k_obj;
+static CONSTEXPR const crypto_vv<UNSPEC_VSM4R>   vsm4r_obj;
+static CONSTEXPR const vsm3me vsm3me_obj;
+static CONSTEXPR const vaeskf2_vsm3c<UNSPEC_VSM3C>   vsm3c_obj;
+
 /* Declare the function base NAME, pointing it to an instance
    of class <NAME>_obj.  */
 #define BASE(NAME) \
@@ -2645,5 +2880,32 @@  BASE (vloxseg)
 BASE (vsuxseg)
 BASE (vsoxseg)
 BASE (vlsegff)
-
+/* Crypto vector.  */
+BASE (vandn)
+BASE (vbrev)
+BASE (vbrev8)
+BASE (vrev8)
+BASE (vclz)
+BASE (vctz)
+BASE (vrol)
+BASE (vror)
+BASE (vwsll)
+BASE (vclmul)
+BASE (vclmulh)
+BASE (vghsh)
+BASE (vgmul)
+BASE (vaesef)
+BASE (vaesem)
+BASE (vaesdf)
+BASE (vaesdm)
+BASE (vaesz)
+BASE (vaeskf1)
+BASE (vaeskf2)
+BASE (vsha2ms)
+BASE (vsha2ch)
+BASE (vsha2cl)
+BASE (vsm4k)
+BASE (vsm4r)
+BASE (vsm3me)
+BASE (vsm3c)
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.h b/gcc/config/riscv/riscv-vector-builtins-bases.h
index 131041ea66f..51b53a3d4a9 100644
--- a/gcc/config/riscv/riscv-vector-builtins-bases.h
+++ b/gcc/config/riscv/riscv-vector-builtins-bases.h
@@ -280,6 +280,34 @@  extern const function_base *const vloxseg;
 extern const function_base *const vsuxseg;
 extern const function_base *const vsoxseg;
 extern const function_base *const vlsegff;
+/* The function_base below are for the vector crypto extensions.  */
+extern const function_base *const vandn;
+extern const function_base *const vbrev;
+extern const function_base *const vbrev8;
+extern const function_base *const vrev8;
+extern const function_base *const vclz;
+extern const function_base *const vctz;
+extern const function_base *const vrol;
+extern const function_base *const vror;
+extern const function_base *const vwsll;
+extern const function_base *const vclmul;
+extern const function_base *const vclmulh;
+extern const function_base *const vghsh;
+extern const function_base *const vgmul;
+extern const function_base *const vaesef;
+extern const function_base *const vaesem;
+extern const function_base *const vaesdf;
+extern const function_base *const vaesdm;
+extern const function_base *const vaesz;
+extern const function_base *const vaeskf1;
+extern const function_base *const vaeskf2;
+extern const function_base *const vsha2ms;
+extern const function_base *const vsha2ch;
+extern const function_base *const vsha2cl;
+extern const function_base *const vsm4k;
+extern const function_base *const vsm4r;
+extern const function_base *const vsm3me;
+extern const function_base *const vsm3c;
 }
 
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins-functions.def b/gcc/config/riscv/riscv-vector-builtins-functions.def
index 03421d5bc10..229764253a0 100644
--- a/gcc/config/riscv/riscv-vector-builtins-functions.def
+++ b/gcc/config/riscv/riscv-vector-builtins-functions.def
@@ -653,4 +653,98 @@  DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_p
 DEF_RVV_FUNCTION (vlsegff, seg_fault_load, full_preds, tuple_v_scalar_const_ptr_size_ptr_ops)
 #undef REQUIRED_EXTENSIONS
 
+/* Definition of the crypto vector intrinsic functions.  */
+// ZVBB and ZVKB
+#define REQUIRED_EXTENSIONS ZVBB_EXT
+DEF_RVV_FUNCTION (vbrev,  alu, full_preds, u_vv_ops)
+DEF_RVV_FUNCTION (vclz,   alu, none_m_preds, u_vv_ops)
+DEF_RVV_FUNCTION (vctz,   alu, none_m_preds, u_vv_ops)
+DEF_RVV_FUNCTION (vwsll,  alu, full_preds, u_wvv_ops)
+DEF_RVV_FUNCTION (vwsll,  alu, full_preds, u_shift_wvx_ops)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS ZVBB_OR_ZVKB_EXT
+DEF_RVV_FUNCTION (vandn,  alu, full_preds, u_vvv_ops)
+DEF_RVV_FUNCTION (vandn,  alu, full_preds, u_vvx_ops)
+DEF_RVV_FUNCTION (vbrev8, alu, full_preds, u_vv_ops)
+DEF_RVV_FUNCTION (vrev8,  alu, full_preds, u_vv_ops)
+DEF_RVV_FUNCTION (vrol,   alu, full_preds, u_vvv_ops)
+DEF_RVV_FUNCTION (vror,   alu, full_preds, u_vvv_ops)
+DEF_RVV_FUNCTION (vror,   alu, full_preds, u_shift_vvx_ops)
+DEF_RVV_FUNCTION (vrol,   alu, full_preds, u_shift_vvx_ops)
+#undef REQUIRED_EXTENSIONS
+// ZVBC
+#define REQUIRED_EXTENSIONS ZVBC_EXT
+DEF_RVV_FUNCTION (vclmul,  alu, full_preds, u_vvv_crypto_sew64_ops)
+DEF_RVV_FUNCTION (vclmul,  alu, full_preds, u_vvx_crypto_sew64_ops)
+DEF_RVV_FUNCTION (vclmulh, alu, full_preds, u_vvv_crypto_sew64_ops)
+DEF_RVV_FUNCTION (vclmulh, alu, full_preds, u_vvx_crypto_sew64_ops)
+#undef REQUIRED_EXTENSIONS
+// ZVKG
+#define REQUIRED_EXTENSIONS ZVKG_EXT
+DEF_RVV_FUNCTION(vghsh, no_mask_policy, none_tu_preds, u_vvvv_crypto_sew32_ops)
+DEF_RVV_FUNCTION(vgmul, no_mask_policy, none_tu_preds, u_vvv_crypto_sew32_ops)
+#undef REQUIRED_EXTENSIONS
+// ZVKNED
+#define REQUIRED_EXTENSIONS ZVKNED_EXT
+DEF_RVV_FUNCTION (vaesef,   crypto_vv, none_tu_preds, u_vvv_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vaesef,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vaesef,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x2_ops)
+DEF_RVV_FUNCTION (vaesef,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x4_ops)
+DEF_RVV_FUNCTION (vaesef,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x8_ops)
+DEF_RVV_FUNCTION (vaesef,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops)
+DEF_RVV_FUNCTION (vaesem,   crypto_vv, none_tu_preds, u_vvv_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vaesem,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vaesem,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x2_ops)
+DEF_RVV_FUNCTION (vaesem,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x4_ops)
+DEF_RVV_FUNCTION (vaesem,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x8_ops)
+DEF_RVV_FUNCTION (vaesem,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops)
+DEF_RVV_FUNCTION (vaesdf,   crypto_vv, none_tu_preds, u_vvv_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vaesdf,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vaesdf,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x2_ops)
+DEF_RVV_FUNCTION (vaesdf,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x4_ops)
+DEF_RVV_FUNCTION (vaesdf,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x8_ops)
+DEF_RVV_FUNCTION (vaesdf,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops)
+DEF_RVV_FUNCTION (vaesdm,   crypto_vv, none_tu_preds, u_vvv_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vaesdm,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vaesdm,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x2_ops)
+DEF_RVV_FUNCTION (vaesdm,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x4_ops)
+DEF_RVV_FUNCTION (vaesdm,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x8_ops)
+DEF_RVV_FUNCTION (vaesdm,   crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops)
+DEF_RVV_FUNCTION (vaesz,    crypto_vv_no_op_type, none_tu_preds, u_vvs_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vaesz,    crypto_vv_no_op_type, none_tu_preds, u_vvs_crypto_sew32_lmul_x2_ops)
+DEF_RVV_FUNCTION (vaesz,    crypto_vv_no_op_type, none_tu_preds, u_vvs_crypto_sew32_lmul_x4_ops)
+DEF_RVV_FUNCTION (vaesz,    crypto_vv_no_op_type, none_tu_preds, u_vvs_crypto_sew32_lmul_x8_ops)
+DEF_RVV_FUNCTION (vaesz,    crypto_vv_no_op_type, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops)
+DEF_RVV_FUNCTION (vaeskf1,  crypto_vi, none_tu_preds, u_vv_size_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vaeskf2,  crypto_vi, none_tu_preds, u_vvv_size_crypto_sew32_ops)
+#undef REQUIRED_EXTENSIONS
+
+// ZVKNHA and ZVKNHB
+#define REQUIRED_EXTENSIONS ZVKNHA_OR_ZVKNHB_EXT
+DEF_RVV_FUNCTION (vsha2ms,  no_mask_policy, none_tu_preds, u_vvvv_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vsha2ch,  no_mask_policy, none_tu_preds, u_vvvv_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vsha2cl,  no_mask_policy, none_tu_preds, u_vvvv_crypto_sew32_ops)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS ZVKNHB_EXT
+DEF_RVV_FUNCTION (vsha2ms,  no_mask_policy, none_tu_preds, u_vvvv_crypto_sew64_ops)
+DEF_RVV_FUNCTION (vsha2ch,  no_mask_policy, none_tu_preds, u_vvvv_crypto_sew64_ops)
+DEF_RVV_FUNCTION (vsha2cl,  no_mask_policy, none_tu_preds, u_vvvv_crypto_sew64_ops)
+#undef REQUIRED_EXTENSIONS
+// ZVKSED
+#define REQUIRED_EXTENSIONS ZVKSED_EXT
+DEF_RVV_FUNCTION (vsm4k, crypto_vi, none_tu_preds, u_vv_size_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvv_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x2_ops)
+DEF_RVV_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x4_ops)
+DEF_RVV_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x8_ops)
+DEF_RVV_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops)
+#undef REQUIRED_EXTENSIONS
+// ZVKSH
+#define REQUIRED_EXTENSIONS ZVKSH_EXT
+DEF_RVV_FUNCTION (vsm3me, no_mask_policy, none_tu_preds, u_vvv_crypto_sew32_ops)
+DEF_RVV_FUNCTION (vsm3c,  crypto_vi, none_tu_preds, u_vvv_size_crypto_sew32_ops)
+#undef REQUIRED_EXTENSIONS
 #undef DEF_RVV_FUNCTION
diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.cc b/gcc/config/riscv/riscv-vector-builtins-shapes.cc
index 4a754e0228f..6ba42e986ad 100644
--- a/gcc/config/riscv/riscv-vector-builtins-shapes.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-shapes.cc
@@ -984,6 +984,89 @@  struct seg_fault_load_def : public build_base
   }
 };
 
+/* vsm4r/vaes* class.  */
+struct crypto_vv_def : public build_base
+{
+  char *get_name (function_builder &b, const function_instance &instance,
+                  bool overloaded_p) const override
+  {
+    /* Return nullptr if it cannot be overloaded.  */
+    if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
+      return nullptr;
+    b.append_base_name (instance.base_name);
+    b.append_name (operand_suffixes[instance.op_info->op]);
+
+    if (!overloaded_p)
+    {
+      if (instance.op_info->op == OP_TYPE_vv)
+        b.append_name (type_suffixes[instance.type.index].vector);
+      else
+      {
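+        /* For the vs form, the suffix carries both the vs2 argument type
+           and the (possibly LMUL-extended) return type, giving names such
+           as __riscv_vaesef_vs_u32m1_u32m2.  */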
+        vector_type_index arg0_type_idx
+          = instance.op_info->args[1].get_function_type_index
+            (instance.type.index);
+        b.append_name (type_suffixes[arg0_type_idx].vector);
+        vector_type_index ret_type_idx
+          = instance.op_info->ret.get_function_type_index
+            (instance.type.index);
+        b.append_name (type_suffixes[ret_type_idx].vector);
+      }
+    }
+
+    b.append_name (predication_suffixes[instance.pred]);
+    return b.finish_name ();
+  }
+};
+
+/* vaeskf1/vaeskf2/vsm4k/vsm3c class.  */
+struct crypto_vi_def : public build_base
+{
+  char *get_name (function_builder &b, const function_instance &instance,
+                  bool overloaded_p) const override
+  {
+    /* Return nullptr if it cannot be overloaded.  */
+    if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
+      return nullptr;
+    b.append_base_name (instance.base_name);
+    if (!overloaded_p)
+    {
+      b.append_name (operand_suffixes[instance.op_info->op]);
+      b.append_name (type_suffixes[instance.type.index].vector);
+    }
+    b.append_name (predication_suffixes[instance.pred]);
+    return b.finish_name ();
+  }
+};
+
+/* vaesz class.  */
+struct crypto_vv_no_op_type_def : public build_base
+{
+  char *get_name (function_builder &b, const function_instance &instance,
+                  bool overloaded_p) const override
+  {
+    /* Return nullptr if it cannot be overloaded.  */
+    if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
+      return nullptr;
+    b.append_base_name (instance.base_name);
+
+    if (!overloaded_p)
+    {
+      b.append_name (operand_suffixes[instance.op_info->op]);
+      vector_type_index arg0_type_idx
+        = instance.op_info->args[1].get_function_type_index
+          (instance.type.index);
+      b.append_name (type_suffixes[arg0_type_idx].vector);
+      vector_type_index ret_type_idx
+        = instance.op_info->ret.get_function_type_index
+          (instance.type.index);
+      b.append_name (type_suffixes[ret_type_idx].vector);
+    }
+
+    b.append_name (predication_suffixes[instance.pred]);
+    return b.finish_name ();
+  }
+};
+
 SHAPE(vsetvl, vsetvl)
 SHAPE(vsetvl, vsetvlmax)
 SHAPE(loadstore, loadstore)
@@ -1012,5 +1095,7 @@  SHAPE(vlenb, vlenb)
 SHAPE(seg_loadstore, seg_loadstore)
 SHAPE(seg_indexed_loadstore, seg_indexed_loadstore)
 SHAPE(seg_fault_load, seg_fault_load)
-
+SHAPE(crypto_vv, crypto_vv)
+SHAPE(crypto_vi, crypto_vi)
+SHAPE(crypto_vv_no_op_type, crypto_vv_no_op_type)
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.h b/gcc/config/riscv/riscv-vector-builtins-shapes.h
index df9884bb572..189bcbdeff3 100644
--- a/gcc/config/riscv/riscv-vector-builtins-shapes.h
+++ b/gcc/config/riscv/riscv-vector-builtins-shapes.h
@@ -52,6 +52,10 @@  extern const function_shape *const vlenb;
 extern const function_shape *const seg_loadstore;
 extern const function_shape *const seg_indexed_loadstore;
 extern const function_shape *const seg_fault_load;
+/* The function_shape below are for the vector crypto extensions.  */
+extern const function_shape *const crypto_vv;
+extern const function_shape *const crypto_vi;
+extern const function_shape *const crypto_vv_no_op_type;
 }
 
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins-types.def b/gcc/config/riscv/riscv-vector-builtins-types.def
index 6aa45ae9a7e..796d89dbbf4 100644
--- a/gcc/config/riscv/riscv-vector-builtins-types.def
+++ b/gcc/config/riscv/riscv-vector-builtins-types.def
@@ -339,6 +339,18 @@  along with GCC; see the file COPYING3. If not see
 #define DEF_RVV_TUPLE_OPS(TYPE, REQUIRE)
 #endif
 
+/* Use "DEF_RVV_CRYPTO_SEW32_OPS" macro include all SEW=32 types
+   which will be iterated and registered as intrinsic functions.  */
+#ifndef DEF_RVV_CRYPTO_SEW32_OPS
+#define DEF_RVV_CRYPTO_SEW32_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_CRYPTO_SEW64_OPS" macro include all SEW=64 types
+   which will be iterated and registered as intrinsic functions.  */
+#ifndef DEF_RVV_CRYPTO_SEW64_OPS
+#define DEF_RVV_CRYPTO_SEW64_OPS(TYPE, REQUIRE)
+#endif
+
 DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
 DEF_RVV_I_OPS (vint8mf4_t, 0)
 DEF_RVV_I_OPS (vint8mf2_t, 0)
@@ -1355,6 +1367,17 @@  DEF_RVV_TUPLE_OPS (vfloat64m2x3_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_TUPLE_OPS (vfloat64m2x4_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_TUPLE_OPS (vfloat64m4x2_t, RVV_REQUIRE_ELEN_FP_64)
 
+DEF_RVV_CRYPTO_SEW32_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_CRYPTO_SEW32_OPS (vuint32m1_t, 0)
+DEF_RVV_CRYPTO_SEW32_OPS (vuint32m2_t, 0)
+DEF_RVV_CRYPTO_SEW32_OPS (vuint32m4_t, 0)
+DEF_RVV_CRYPTO_SEW32_OPS (vuint32m8_t, 0)
+
+DEF_RVV_CRYPTO_SEW64_OPS (vuint64m1_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_CRYPTO_SEW64_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_CRYPTO_SEW64_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_CRYPTO_SEW64_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
+
 #undef DEF_RVV_I_OPS
 #undef DEF_RVV_U_OPS
 #undef DEF_RVV_F_OPS
@@ -1406,3 +1429,5 @@  DEF_RVV_TUPLE_OPS (vfloat64m4x2_t, RVV_REQUIRE_ELEN_FP_64)
 #undef DEF_RVV_LMUL2_OPS
 #undef DEF_RVV_LMUL4_OPS
 #undef DEF_RVV_TUPLE_OPS
+#undef DEF_RVV_CRYPTO_SEW32_OPS
+#undef DEF_RVV_CRYPTO_SEW64_OPS
diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc
index 4e2c66c2de7..8087c3faf7e 100644
--- a/gcc/config/riscv/riscv-vector-builtins.cc
+++ b/gcc/config/riscv/riscv-vector-builtins.cc
@@ -521,6 +521,19 @@  static const rvv_type_info tuple_ops[] = {
 #include "riscv-vector-builtins-types.def"
   {NUM_VECTOR_TYPES, 0}};
 
+/* The types below are registered for the vector crypto intrinsic functions.  */
+/* A list of SEW=32 types for the vector crypto intrinsic functions.  */
+static const rvv_type_info crypto_sew32_ops[] = {
+#define DEF_RVV_CRYPTO_SEW32_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+  {NUM_VECTOR_TYPES, 0}};
+
+/* A list of SEW=64 types for the vector crypto intrinsic functions.  */
+static const rvv_type_info crypto_sew64_ops[] = {
+#define DEF_RVV_CRYPTO_SEW64_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+  {NUM_VECTOR_TYPES, 0}};
+
 static CONSTEXPR const rvv_arg_type_info rvv_arg_type_info_end
   = rvv_arg_type_info (NUM_BASE_TYPES);
 
@@ -754,6 +767,11 @@  static CONSTEXPR const rvv_arg_type_info v_size_args[]
   = {rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info (RVV_BASE_size),
      rvv_arg_type_info_end};
 
+/* A list of args for vector_type func (double demote type, size_t)
+   function.  */
+static CONSTEXPR const rvv_arg_type_info wv_size_args[]
+  = {rvv_arg_type_info (RVV_BASE_double_trunc_vector),
+     rvv_arg_type_info (RVV_BASE_size), rvv_arg_type_info_end};
+
 /* A list of args for vector_type func (vector_type, vector_type, size)
  * function.  */
 static CONSTEXPR const rvv_arg_type_info vv_size_args[]
@@ -1044,6 +1062,14 @@  static CONSTEXPR const rvv_op_info u_v_ops
      rvv_arg_type_info (RVV_BASE_vector), /* Return type */
      end_args /* Args */};
 
+/* A static operand information for vector_type func (vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info u_vv_ops
+  = {u_ops,					/* Types */
+     OP_TYPE_v,					/* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     v_args /* Args */};
+
 /* A static operand information for unsigned long func (vector_type)
  * function registration. */
 static CONSTEXPR const rvv_op_info b_ulong_m_ops
@@ -2174,6 +2200,14 @@  static CONSTEXPR const rvv_op_info u_wvv_ops
      rvv_arg_type_info (RVV_BASE_vector), /* Return type */
      wvv_args /* Args */};
 
+/* A static operand information for vector_type func (double demote type,
+ * size_t) function registration. */
+static CONSTEXPR const rvv_op_info u_shift_wvx_ops
+  = {wextu_ops,				  /* Types */
+     OP_TYPE_vx,			  /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     wv_size_args /* Args */};
+
 /* A static operand information for vector_type func (double demote type, double
  * demote scalar_type) function registration. */
 static CONSTEXPR const rvv_op_info i_wvx_ops
@@ -2604,6 +2638,101 @@  static CONSTEXPR const rvv_op_info all_v_vcreate_lmul4_x2_ops
      rvv_arg_type_info (RVV_BASE_vlmul_ext_x2), /* Return type */
      ext_vcreate_args /* Args */};
 
+/* Lists of args for vector_type func (LMUL-extended vector_type,
+   vector_type) function registration.  Some instructions only support
+   SEW=32, such as the vector crypto Zvkg extension.  */
+static CONSTEXPR const rvv_arg_type_info vs_lmul_x2_args[]
+  = {rvv_arg_type_info (RVV_BASE_vlmul_ext_x2),
+     rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
+
+static CONSTEXPR const rvv_arg_type_info vs_lmul_x4_args[]
+  = {rvv_arg_type_info (RVV_BASE_vlmul_ext_x4),
+     rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
+
+static CONSTEXPR const rvv_arg_type_info vs_lmul_x8_args[]
+  = {rvv_arg_type_info (RVV_BASE_vlmul_ext_x8),
+     rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
+
+static CONSTEXPR const rvv_arg_type_info vs_lmul_x16_args[]
+  = {rvv_arg_type_info (RVV_BASE_vlmul_ext_x16),
+     rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
+
+static CONSTEXPR const rvv_op_info u_vvv_crypto_sew32_ops
+  = {crypto_sew32_ops,			   /* Types */
+     OP_TYPE_vv,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     vv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info u_vvvv_crypto_sew32_ops
+  = {crypto_sew32_ops,			   /* Types */
+     OP_TYPE_vv,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     vvv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info u_vvv_size_crypto_sew32_ops
+  = {crypto_sew32_ops,			   /* Types */
+     OP_TYPE_vi,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     vv_size_args /* Args */};
+
+static CONSTEXPR const rvv_op_info u_vv_size_crypto_sew32_ops
+  = {crypto_sew32_ops,			   /* Types */
+     OP_TYPE_vi,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     v_size_args /* Args */};
+
+static CONSTEXPR const rvv_op_info u_vvs_crypto_sew32_ops
+  = {crypto_sew32_ops,			   /* Types */
+     OP_TYPE_vs,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     vv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info u_vvs_crypto_sew32_lmul_x2_ops
+  = {crypto_sew32_ops,			   /* Types */
+     OP_TYPE_vs,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vlmul_ext_x2), /* Return type */
+     vs_lmul_x2_args /* Args */};
+
+static CONSTEXPR const rvv_op_info u_vvs_crypto_sew32_lmul_x4_ops
+  = {crypto_sew32_ops,			   /* Types */
+     OP_TYPE_vs,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vlmul_ext_x4), /* Return type */
+     vs_lmul_x4_args /* Args */};
+
+static CONSTEXPR const rvv_op_info u_vvs_crypto_sew32_lmul_x8_ops
+  = {crypto_sew32_ops,			   /* Types */
+     OP_TYPE_vs,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vlmul_ext_x8), /* Return type */
+     vs_lmul_x8_args /* Args */};
+
+static CONSTEXPR const rvv_op_info u_vvs_crypto_sew32_lmul_x16_ops
+  = {crypto_sew32_ops,			   /* Types */
+     OP_TYPE_vs,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vlmul_ext_x16), /* Return type */
+     vs_lmul_x16_args /* Args */};
+
+/* A static operand information for vector_type func (vector_type,
+   vector_type) function registration.  Some instructions only support
+   SEW=64, such as vclmul.vv and vclmul.vx from the vector crypto Zvbc
+   extension.  */
+static CONSTEXPR const rvv_op_info u_vvv_crypto_sew64_ops
+  = {crypto_sew64_ops,			   /* Types */
+     OP_TYPE_vv,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     vv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info u_vvx_crypto_sew64_ops
+  = {crypto_sew64_ops,			   /* Types */
+     OP_TYPE_vx,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     vx_args /* Args */};
+
+static CONSTEXPR const rvv_op_info u_vvvv_crypto_sew64_ops
+  = {crypto_sew64_ops,			   /* Types */
+     OP_TYPE_vv,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     vvv_args /* Args */};
+
 /* A list of all RVV base function types.  */
 static CONSTEXPR const function_type_info function_types[] = {
 #define DEF_RVV_TYPE_INDEX(                                                    \
@@ -4176,7 +4305,9 @@  registered_function::overloaded_hash (const vec<tree, va_gc> &arglist)
        __riscv_vset(vint8m2_t dest, size_t index, vint8m1_t value); The reason
        is the same as above. */
       if ((instance.base == bases::vget && (i == (len - 1)))
-	  || (instance.base == bases::vset && (i == (len - 2))))
+	  || ((instance.base == bases::vset
+	       || instance.shape == shapes::crypto_vi)
+	      && (i == (len - 2))))
 	argument_types.safe_push (size_type_node);
       /* Vector fixed-point arithmetic instructions requiring argument vxrm.
 	     For example: vuint32m4_t __riscv_vaaddu(vuint32m4_t vs2,
diff --git a/gcc/config/riscv/riscv-vector-builtins.def b/gcc/config/riscv/riscv-vector-builtins.def
index 6661629aad8..0c3ee3b2986 100644
--- a/gcc/config/riscv/riscv-vector-builtins.def
+++ b/gcc/config/riscv/riscv-vector-builtins.def
@@ -558,6 +558,7 @@  DEF_RVV_TYPE (vfloat64m8_t, 17, __rvv_float64m8_t, double, RVVM8DF, _f64m8,
 
 DEF_RVV_OP_TYPE (vv)
 DEF_RVV_OP_TYPE (vx)
+DEF_RVV_OP_TYPE (vi)
 DEF_RVV_OP_TYPE (v)
 DEF_RVV_OP_TYPE (wv)
 DEF_RVV_OP_TYPE (wx)