RISC-V: Add integer compare C/C++ intrinsic support

Message ID 20230213074113.266716-1-juzhe.zhong@rivai.ai
State Accepted
Series: RISC-V: Add integer compare C/C++ intrinsic support

Checks

snail/gcc-patch-check: success (Github commit url)

Commit Message

juzhe.zhong@rivai.ai Feb. 13, 2023, 7:41 a.m. UTC
  From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>

gcc/ChangeLog:

        * config/riscv/predicates.md (vector_mask_operand): Refine the code.
        (vector_all_trues_mask_operand): New predicate.
        (vector_undef_operand): New predicate.
        (ltge_operator): New predicate.
        (comparison_except_ltge_operator): New predicate.
        (comparison_except_eqge_operator): New predicate.
        (ge_operator): New predicate.
        * config/riscv/riscv-v.cc (has_vi_variant_p): Add compare support.
        * config/riscv/riscv-vector-builtins-bases.cc (class icmp): New class.
        (BASE): Ditto.
        * config/riscv/riscv-vector-builtins-bases.h: Ditto.
        * config/riscv/riscv-vector-builtins-functions.def (vmseq): Ditto.
        (vmsne): Ditto.
        (vmslt): Ditto.
        (vmsgt): Ditto.
        (vmsle): Ditto.
        (vmsge): Ditto.
        (vmsltu): Ditto.
        (vmsgtu): Ditto.
        (vmsleu): Ditto.
        (vmsgeu): Ditto.
        * config/riscv/riscv-vector-builtins-shapes.cc (struct return_mask_def): Adjust for compare support.
        * config/riscv/riscv-vector-builtins.cc (function_expander::use_compare_insn): New function.
        * config/riscv/riscv-vector-builtins.h (function_expander::add_integer_operand): Ditto.
        * config/riscv/riscv.cc (riscv_print_operand): Add compare support.
        * config/riscv/riscv.md: Add viminmax and vfminmax type attributes.
        * config/riscv/vector-iterators.md (vm): New mode attribute.
        (and_ior): New code iterator.
        (ninsn): New code attribute.
        * config/riscv/vector.md (@pred_cmp<mode>): New pattern.
        (*pred_cmp<mode>): Ditto.
        (*pred_cmp<mode>_narrow): Ditto.
        (@pred_ltge<mode>): Ditto.
        (*pred_ltge<mode>): Ditto.
        (*pred_ltge<mode>_narrow): Ditto.
        (@pred_cmp<mode>_scalar): Ditto.
        (*pred_cmp<mode>_scalar): Ditto.
        (*pred_cmp<mode>_scalar_narrow): Ditto.
        (@pred_eqne<mode>_scalar): Ditto.
        (*pred_eqne<mode>_scalar): Ditto.
        (*pred_eqne<mode>_scalar_narrow): Ditto.
        (*pred_cmp<mode>_extended_scalar): Ditto.
        (*pred_cmp<mode>_extended_scalar_narrow): Ditto.
        (*pred_eqne<mode>_extended_scalar): Ditto.
        (*pred_eqne<mode>_extended_scalar_narrow): Ditto.
        (@pred_ge<mode>_scalar): Ditto.
        (@pred_<optab><mode>): Ditto.
        (@pred_n<optab><mode>): Ditto.
        (@pred_<optab>n<mode>): Ditto.
        (@pred_not<mode>): Ditto.
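
For reference, a minimal usage sketch of the intrinsics this patch enables
(assuming the v0.11 intrinsic naming with the __riscv_ prefix; the exact
spelling depends on the intrinsic API version the toolchain ships):

#include <riscv_vector.h>

/* Mask of lanes where a[i] == b[i].  */
vbool32_t
eq_mask (vint32m1_t a, vint32m1_t b, size_t vl)
{
  return __riscv_vmseq_vv_i32m1_b32 (a, b, vl);
}

/* Masked form: only lanes active in M are compared.  */
vbool32_t
eq_mask_m (vbool32_t m, vint32m1_t a, vint32m1_t b, size_t vl)
{
  return __riscv_vmseq_vv_i32m1_b32_m (m, a, b, vl);
}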

---
 gcc/config/riscv/predicates.md                |  25 +-
 gcc/config/riscv/riscv-v.cc                   |  32 +-
 .../riscv/riscv-vector-builtins-bases.cc      |  54 ++
 .../riscv/riscv-vector-builtins-bases.h       |  10 +
 .../riscv/riscv-vector-builtins-functions.def |  20 +
 .../riscv/riscv-vector-builtins-shapes.cc     |   2 +
 gcc/config/riscv/riscv-vector-builtins.cc     |  77 ++
 gcc/config/riscv/riscv-vector-builtins.h      |   9 +
 gcc/config/riscv/riscv.cc                     |   7 +-
 gcc/config/riscv/riscv.md                     |  10 +-
 gcc/config/riscv/vector-iterators.md          |  20 +-
 gcc/config/riscv/vector.md                    | 774 +++++++++++++++++-
 12 files changed, 999 insertions(+), 41 deletions(-)
  

Patch

diff --git a/gcc/config/riscv/predicates.md b/gcc/config/riscv/predicates.md
index fe2c5ba3c5c..bbbf523d588 100644
--- a/gcc/config/riscv/predicates.md
+++ b/gcc/config/riscv/predicates.md
@@ -283,14 +283,21 @@ 
 		|| satisfies_constraint_vi (op)
 		|| satisfies_constraint_Wc0 (op)"))))
 
-(define_predicate "vector_mask_operand"
+(define_predicate "vector_all_trues_mask_operand"
   (ior (match_operand 0 "register_operand")
        (match_test "op == CONSTM1_RTX (GET_MODE (op))")))
 
+(define_predicate "vector_mask_operand"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "vector_all_trues_mask_operand")))
+
+(define_predicate "vector_undef_operand"
+  (match_test "GET_CODE (op) == UNSPEC
+		    && (XINT (op, 1) == UNSPEC_VUNDEF)"))
+
 (define_predicate "vector_merge_operand"
   (ior (match_operand 0 "register_operand")
-       (match_test "GET_CODE (op) == UNSPEC
-		    && (XINT (op, 1) == UNSPEC_VUNDEF)")))
+       (match_operand 0 "vector_undef_operand")))
 
 (define_predicate "vector_arith_operand"
   (ior (match_operand 0 "register_operand")
@@ -307,6 +314,18 @@ 
        (and (match_code "const_vector")
             (match_test "riscv_vector::const_vec_all_same_in_range_p (op, 0, 31)"))))
 
+(define_predicate "ltge_operator"
+  (match_code "lt,ltu,ge,geu"))
+
+(define_predicate "comparison_except_ltge_operator"
+  (match_code "eq,ne,le,leu,gt,gtu"))
+
+(define_predicate "comparison_except_eqge_operator"
+  (match_code "le,leu,gt,gtu,lt,ltu"))
+
+(define_predicate "ge_operator"
+  (match_code "ge,geu"))
+
 ;; pmode_reg_or_uimm5_operand can be used by vsll.vx/vsrl.vx/vsra.vx instructions.
 ;; Since it has the same predicate with vector_length_operand which allows register
 ;; or immediate (0 ~ 31), we define this predicate same as vector_length_operand here.
diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index 83cb1f83606..cc26888d58b 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -427,11 +427,33 @@  neg_simm5_p (rtx x)
 bool
 has_vi_variant_p (rtx_code code, rtx x)
 {
-  if (code != PLUS && code != MINUS && code != AND && code != IOR && code != XOR
-      && code != SS_PLUS && code != SS_MINUS && code != US_PLUS
-      && code != US_MINUS)
-    return false;
-  return simm5_p (x);
+  switch (code)
+    {
+    case PLUS:
+    case MINUS:
+    case AND:
+    case IOR:
+    case XOR:
+    case SS_PLUS:
+    case SS_MINUS:
+    case US_PLUS:
+    case US_MINUS:
+    case EQ:
+    case NE:
+    case LE:
+    case LEU:
+    case GT:
+    case GTU:
+      return simm5_p (x);
+
+    case LT:
+    case LTU:
+    case GE:
+    case GEU:
+      return neg_simm5_p (x);
+    default:
+      return false;
+    }
 }
 
 } // namespace riscv_vector
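
The simm5_p/neg_simm5_p split above mirrors the ISA: vmseq/vmsne and
vmsle{u}/vmsgt{u} have .vi forms taking a 5-bit signed immediate, while
vmslt{u}/vmsge{u} do not and are synthesized by adjusting the immediate by
one (va < x becomes va <= x - 1), which shifts the usable range.  A hedged
source-level sketch of the effect:

#include <riscv_vector.h>

vbool32_t
lt_const (vint32m1_t x, size_t vl)
{
  /* There is no vmslt.vi; x < 5 can be emitted as vmsle.vi with
     immediate 4, hence the neg_simm5_p range check above.  */
  return __riscv_vmslt_vx_i32m1_b32 (x, 5, vl);
}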
diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc
index 6eb6dab3149..4f3531d4486 100644
--- a/gcc/config/riscv/riscv-vector-builtins-bases.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc
@@ -505,6 +505,40 @@  public:
   }
 };
 
+/* Implements vmseq/vmsne/vmslt{u}/vmsgt{u}/vmsle{u}/vmsge{u}.  */
+template<rtx_code CODE>
+class icmp : public function_base
+{
+public:
+  rtx expand (function_expander &e) const override
+  {
+    switch (e.op_info->op)
+      {
+	case OP_TYPE_vx: {
+	  if (CODE == GE || CODE == GEU)
+	    return e.use_compare_insn (CODE, code_for_pred_ge_scalar (
+					       e.vector_mode ()));
+	  else if (CODE == EQ || CODE == NE)
+	    return e.use_compare_insn (CODE, code_for_pred_eqne_scalar (
+					       e.vector_mode ()));
+	  else
+	    return e.use_compare_insn (CODE, code_for_pred_cmp_scalar (
+					       e.vector_mode ()));
+	}
+	case OP_TYPE_vv: {
+	  if (CODE == LT || CODE == LTU || CODE == GE || CODE == GEU)
+	    return e.use_compare_insn (CODE,
+				       code_for_pred_ltge (e.vector_mode ()));
+	  else
+	    return e.use_compare_insn (CODE,
+				       code_for_pred_cmp (e.vector_mode ()));
+	}
+      default:
+	gcc_unreachable ();
+      }
+  }
+};
+
 static CONSTEXPR const vsetvl<false> vsetvl_obj;
 static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
 static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
@@ -572,6 +606,16 @@  static CONSTEXPR const vnshift<ASHIFTRT> vnsra_obj;
 static CONSTEXPR const vncvt_x vncvt_x_obj;
 static CONSTEXPR const vmerge vmerge_obj;
 static CONSTEXPR const vmv_v vmv_v_obj;
+static CONSTEXPR const icmp<EQ> vmseq_obj;
+static CONSTEXPR const icmp<NE> vmsne_obj;
+static CONSTEXPR const icmp<LT> vmslt_obj;
+static CONSTEXPR const icmp<GT> vmsgt_obj;
+static CONSTEXPR const icmp<LE> vmsle_obj;
+static CONSTEXPR const icmp<GE> vmsge_obj;
+static CONSTEXPR const icmp<LTU> vmsltu_obj;
+static CONSTEXPR const icmp<GTU> vmsgtu_obj;
+static CONSTEXPR const icmp<LEU> vmsleu_obj;
+static CONSTEXPR const icmp<GEU> vmsgeu_obj;
 static CONSTEXPR const binop<SS_PLUS> vsadd_obj;
 static CONSTEXPR const binop<SS_MINUS> vssub_obj;
 static CONSTEXPR const binop<US_PLUS> vsaddu_obj;
@@ -658,6 +702,16 @@  BASE (vnsra)
 BASE (vncvt_x)
 BASE (vmerge)
 BASE (vmv_v)
+BASE (vmseq)
+BASE (vmsne)
+BASE (vmslt)
+BASE (vmsgt)
+BASE (vmsle)
+BASE (vmsge)
+BASE (vmsltu)
+BASE (vmsgtu)
+BASE (vmsleu)
+BASE (vmsgeu)
 BASE (vsadd)
 BASE (vssub)
 BASE (vsaddu)
diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.h b/gcc/config/riscv/riscv-vector-builtins-bases.h
index dcc706ea805..6233b4122f8 100644
--- a/gcc/config/riscv/riscv-vector-builtins-bases.h
+++ b/gcc/config/riscv/riscv-vector-builtins-bases.h
@@ -91,6 +91,16 @@  extern const function_base *const vnsra;
 extern const function_base *const vncvt_x;
 extern const function_base *const vmerge;
 extern const function_base *const vmv_v;
+extern const function_base *const vmseq;
+extern const function_base *const vmsne;
+extern const function_base *const vmslt;
+extern const function_base *const vmsgt;
+extern const function_base *const vmsle;
+extern const function_base *const vmsge;
+extern const function_base *const vmsltu;
+extern const function_base *const vmsgtu;
+extern const function_base *const vmsleu;
+extern const function_base *const vmsgeu;
 extern const function_base *const vsadd;
 extern const function_base *const vssub;
 extern const function_base *const vsaddu;
diff --git a/gcc/config/riscv/riscv-vector-builtins-functions.def b/gcc/config/riscv/riscv-vector-builtins-functions.def
index 66fa63530f3..f61f48d36a0 100644
--- a/gcc/config/riscv/riscv-vector-builtins-functions.def
+++ b/gcc/config/riscv/riscv-vector-builtins-functions.def
@@ -157,6 +157,26 @@  DEF_RVV_FUNCTION (vmerge, no_mask_policy, tu_preds, all_vvvm_ops)
 DEF_RVV_FUNCTION (vmerge, no_mask_policy, tu_preds, iu_vvxm_ops)
 DEF_RVV_FUNCTION (vmv_v, move, tu_preds, all_v_ops)
 DEF_RVV_FUNCTION (vmv_v, move, tu_preds, iu_x_ops)
+DEF_RVV_FUNCTION (vmseq, return_mask, none_m_mu_preds, iu_mvv_ops)
+DEF_RVV_FUNCTION (vmsne, return_mask, none_m_mu_preds, iu_mvv_ops)
+DEF_RVV_FUNCTION (vmslt, return_mask, none_m_mu_preds, i_mvv_ops)
+DEF_RVV_FUNCTION (vmsgt, return_mask, none_m_mu_preds, i_mvv_ops)
+DEF_RVV_FUNCTION (vmsle, return_mask, none_m_mu_preds, i_mvv_ops)
+DEF_RVV_FUNCTION (vmsge, return_mask, none_m_mu_preds, i_mvv_ops)
+DEF_RVV_FUNCTION (vmsltu, return_mask, none_m_mu_preds, u_mvv_ops)
+DEF_RVV_FUNCTION (vmsgtu, return_mask, none_m_mu_preds, u_mvv_ops)
+DEF_RVV_FUNCTION (vmsleu, return_mask, none_m_mu_preds, u_mvv_ops)
+DEF_RVV_FUNCTION (vmsgeu, return_mask, none_m_mu_preds, u_mvv_ops)
+DEF_RVV_FUNCTION (vmseq, return_mask, none_m_mu_preds, iu_mvx_ops)
+DEF_RVV_FUNCTION (vmsne, return_mask, none_m_mu_preds, iu_mvx_ops)
+DEF_RVV_FUNCTION (vmslt, return_mask, none_m_mu_preds, i_mvx_ops)
+DEF_RVV_FUNCTION (vmsgt, return_mask, none_m_mu_preds, i_mvx_ops)
+DEF_RVV_FUNCTION (vmsle, return_mask, none_m_mu_preds, i_mvx_ops)
+DEF_RVV_FUNCTION (vmsge, return_mask, none_m_mu_preds, i_mvx_ops)
+DEF_RVV_FUNCTION (vmsltu, return_mask, none_m_mu_preds, u_mvx_ops)
+DEF_RVV_FUNCTION (vmsgtu, return_mask, none_m_mu_preds, u_mvx_ops)
+DEF_RVV_FUNCTION (vmsleu, return_mask, none_m_mu_preds, u_mvx_ops)
+DEF_RVV_FUNCTION (vmsgeu, return_mask, none_m_mu_preds, u_mvx_ops)
 /* 12. Vector Fixed-Point Arithmetic Instructions. */
 DEF_RVV_FUNCTION (vsadd, alu, full_preds, i_vvv_ops)
 DEF_RVV_FUNCTION (vssub, alu, full_preds, i_vvv_ops)
diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.cc b/gcc/config/riscv/riscv-vector-builtins-shapes.cc
index e1d8f4f13f0..2836170323a 100644
--- a/gcc/config/riscv/riscv-vector-builtins-shapes.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-shapes.cc
@@ -288,6 +288,8 @@  struct return_mask_def : public build_base
 	b.append_name (type_suffixes[ret_type_idx].vector);
       }
 
+    if (overloaded_p && instance.pred == PRED_TYPE_m)
+      return b.finish_name ();
     b.append_name (predication_suffixes[instance.pred]);
     return b.finish_name ();
   }
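
The return_mask_def change above lets the overloaded C++ API drop the
explicit _m suffix for masked compares: the mask argument alone selects the
masked form.  A sketch, assuming the overloaded __riscv_vmseq spelling:

#include <riscv_vector.h>

vbool32_t
f (vbool32_t m, vint32m1_t a, vint32m1_t b, size_t vl)
{
  /* Overload resolution picks the masked variant from the extra
     leading mask argument; no _m suffix is written.  */
  return __riscv_vmseq (m, a, b, vl);
}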
diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc
index 486b8b722b5..3d842c6e3a3 100644
--- a/gcc/config/riscv/riscv-vector-builtins.cc
+++ b/gcc/config/riscv/riscv-vector-builtins.cc
@@ -438,6 +438,10 @@  static CONSTEXPR const predication_type_index tu_preds[]
 static CONSTEXPR const predication_type_index none_m_preds[]
   = {PRED_TYPE_none, PRED_TYPE_m, NUM_PRED_TYPES};
 
+/* vop/vop_m/vop_mu will be registered.  */
+static CONSTEXPR const predication_type_index none_m_mu_preds[]
+  = {PRED_TYPE_none, PRED_TYPE_m, PRED_TYPE_mu, NUM_PRED_TYPES};
+
 /* A static operand information for size_t func (void) function registration. */
 static CONSTEXPR const rvv_op_info i_none_size_void_ops
   = {i_ops,				/* Types */
@@ -621,6 +625,22 @@  static CONSTEXPR const rvv_op_info iu_mvv_ops
      rvv_arg_type_info (RVV_BASE_mask), /* Return type */
      vv_args /* Args */};
 
+/* A static operand information for mask_type func (vector_type, vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info i_mvv_ops
+  = {i_ops,				/* Types */
+     OP_TYPE_vv,			/* Suffix */
+     rvv_arg_type_info (RVV_BASE_mask), /* Return type */
+     vv_args /* Args */};
+
+/* A static operand information for mask_type func (vector_type, vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info u_mvv_ops
+  = {u_ops,				/* Types */
+     OP_TYPE_vv,			/* Suffix */
+     rvv_arg_type_info (RVV_BASE_mask), /* Return type */
+     vv_args /* Args */};
+
 /* A static operand information for mask_type func (vector_type, scalar_type)
  * function registration. */
 static CONSTEXPR const rvv_op_info iu_mvx_ops
@@ -629,6 +649,22 @@  static CONSTEXPR const rvv_op_info iu_mvx_ops
      rvv_arg_type_info (RVV_BASE_mask), /* Return type */
      vx_args /* Args */};
 
+/* A static operand information for mask_type func (vector_type, scalar_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info i_mvx_ops
+  = {i_ops,				/* Types */
+     OP_TYPE_vx,			/* Suffix */
+     rvv_arg_type_info (RVV_BASE_mask), /* Return type */
+     vx_args /* Args */};
+
+/* A static operand information for mask_type func (vector_type, scalar_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info u_mvx_ops
+  = {u_ops,				/* Types */
+     OP_TYPE_vx,			/* Suffix */
+     rvv_arg_type_info (RVV_BASE_mask), /* Return type */
+     vx_args /* Args */};
+
 /* A static operand information for vector_type func (vector_type, vector_type)
  * function registration. */
 static CONSTEXPR const rvv_op_info i_vvv_ops
@@ -1902,6 +1938,47 @@  function_expander::use_contiguous_store_insn (insn_code icode)
   return generate_insn (icode);
 }
 
+/* Implement the call using instruction ICODE, with the comparison rtx
+   built from RCODE and the first two value arguments.  */
+rtx
+function_expander::use_compare_insn (rtx_code rcode, insn_code icode)
+{
+  machine_mode mode = TYPE_MODE (builtin_types[type.index].vector);
+  machine_mode mask_mode = TYPE_MODE (TREE_TYPE (exp));
+
+  /* Record the offset to get the argument.  */
+  int arg_offset = 0;
+
+  if (use_real_mask_p (pred))
+    add_input_operand (arg_offset++);
+  else
+    add_all_one_mask_operand (mask_mode);
+
+  if (use_real_merge_p (pred))
+    add_input_operand (arg_offset++);
+  else
+    add_vundef_operand (mask_mode);
+
+  rtx op1 = expand_normal (CALL_EXPR_ARG (exp, arg_offset++));
+  rtx op2 = expand_normal (CALL_EXPR_ARG (exp, arg_offset++));
+  rtx comparison = gen_rtx_fmt_ee (rcode, mask_mode, op1, op2);
+  if (!VECTOR_MODE_P (GET_MODE (op2)))
+    comparison = gen_rtx_fmt_ee (rcode, mask_mode, op1,
+				 gen_rtx_VEC_DUPLICATE (mode, op2));
+  add_fixed_operand (comparison);
+  add_fixed_operand (op1);
+  if (CONST_INT_P (op2))
+    add_integer_operand (op2);
+  else
+    add_fixed_operand (op2);
+  for (int argno = arg_offset; argno < call_expr_nargs (exp); argno++)
+    add_input_operand (argno);
+
+  add_input_operand (Pmode, get_mask_policy_for_pred (pred));
+  add_input_operand (Pmode, get_avl_type_rtx (avl_type::NONVLMAX));
+  return generate_insn (icode);
+}
+
 /* Generate instruction ICODE, given that its operands have already
    been added to M_OPS.  Return the value of the first operand.  */
 rtx
diff --git a/gcc/config/riscv/riscv-vector-builtins.h b/gcc/config/riscv/riscv-vector-builtins.h
index f00b2c51020..9807fdf2938 100644
--- a/gcc/config/riscv/riscv-vector-builtins.h
+++ b/gcc/config/riscv/riscv-vector-builtins.h
@@ -338,6 +338,7 @@  public:
   void add_all_one_mask_operand (machine_mode);
   void add_vundef_operand (machine_mode);
   void add_fixed_operand (rtx);
+  void add_integer_operand (rtx);
   void add_mem_operand (machine_mode, unsigned);
 
   machine_mode vector_mode (void) const;
@@ -346,6 +347,7 @@  public:
   rtx use_exact_insn (insn_code);
   rtx use_contiguous_load_insn (insn_code);
   rtx use_contiguous_store_insn (insn_code);
+  rtx use_compare_insn (rtx_code, insn_code);
   rtx generate_insn (insn_code);
 
   /* The function call expression.  */
@@ -467,6 +469,13 @@  function_expander::add_fixed_operand (rtx x)
   create_fixed_operand (&m_ops[opno++], x);
 }
 
+/* Add an integer operand X.  */
+inline void
+function_expander::add_integer_operand (rtx x)
+{
+  create_integer_operand (&m_ops[opno++], INTVAL (x));
+}
+
 /* Return the machine_mode of the corresponding vector type.  */
 inline machine_mode
 function_expander::vector_mode (void) const
diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index 88a73297ffa..8078dd4727f 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -4263,7 +4263,8 @@  riscv_print_operand (FILE *file, rtx op, int letter)
 	      output_operand_lossage ("invalid vector constant");
 	    else if (satisfies_constraint_Wc0 (op))
 	      asm_fprintf (file, "0");
-	    else if (satisfies_constraint_vi (op))
+	    else if (satisfies_constraint_vi (op)
+		     || satisfies_constraint_vj (op))
 	      asm_fprintf (file, "%wd", INTVAL (elt));
 	    else
 	      output_operand_lossage ("invalid vector constant");
@@ -4376,6 +4377,10 @@  riscv_print_operand (FILE *file, rtx op, int letter)
         fputs ("i", file);
       break;
 
+    case 'B':
+      fputs (GET_RTX_NAME (code), file);
+      break;
+
     case 'S':
       {
 	rtx newop = GEN_INT (ctz_hwi (INTVAL (op)));
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index c8adc5af5d2..487059ebe97 100644
--- a/gcc/config/riscv/riscv.md
+++ b/gcc/config/riscv/riscv.md
@@ -267,7 +267,8 @@ 
 ;; vicalu      vector arithmetic with carry or borrow instructions
 ;; vshift      vector single-width bit shift instructions
 ;; vnshift     vector narrowing integer shift instructions
-;; vicmp       vector integer comparison/min/max instructions
+;; viminmax    vector integer min/max instructions
+;; vicmp       vector integer comparison instructions
 ;; vimul       vector single-width integer multiply instructions
 ;; vidiv       vector single-width integer divide instructions
 ;; viwmul      vector widening integer multiply instructions
@@ -291,7 +292,8 @@ 
 ;; vfwmuladd   vector widening floating-point multiply-add instructions
 ;; vfsqrt      vector floating-point square-root instructions
 ;; vfrecp      vector floating-point reciprocal square-root instructions
-;; vfcmp       vector floating-point comparison/min/max instructions
+;; vfminmax    vector floating-point min/max instructions
+;; vfcmp       vector floating-point comparison instructions
 ;; vfsgnj      vector floating-point sign-injection instructions
 ;; vfclass     vector floating-point classify instruction
 ;; vfmerge     vector floating-point merge instruction
@@ -335,11 +337,11 @@ 
    fmadd,fdiv,fcmp,fcvt,fsqrt,multi,auipc,sfb_alu,nop,ghost,bitmanip,rotate,
    atomic,rdvlenb,rdvl,vsetvl,vlde,vste,vldm,vstm,vlds,vsts,
    vldux,vldox,vstux,vstox,vldff,vldr,vstr,
-   vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,
+   vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax,
    vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,
    vsalu,vaalu,vsmul,vsshift,vnclip,
    vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,
-   vfcmp,vfsgnj,vfclass,vfmerge,vfmov,
+   vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov,
    vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,
    vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,
    vired,viwred,vfred,vfredo,vfwred,vfwredo,
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index 9087129c70a..c8b24150f4e 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -225,6 +225,15 @@ 
   (VNx1DF "VNx1BI") (VNx2DF "VNx2BI") (VNx4DF "VNx4BI") (VNx8DF "VNx8BI")
 ])
 
+(define_mode_attr vm [
+  (VNx1QI "vnx1bi") (VNx2QI "vnx2bi") (VNx4QI "vnx4bi") (VNx8QI "vnx8bi") (VNx16QI "vnx16bi") (VNx32QI "vnx32bi") (VNx64QI "vnx64bi")
+  (VNx1HI "vnx1bi") (VNx2HI "vnx2bi") (VNx4HI "vnx4bi") (VNx8HI "vnx8bi") (VNx16HI "vnx16bi") (VNx32HI "vnx32bi")
+  (VNx1SI "vnx1bi") (VNx2SI "vnx2bi") (VNx4SI "vnx4bi") (VNx8SI "vnx8bi") (VNx16SI "vnx16bi")
+  (VNx1DI "vnx1bi") (VNx2DI "vnx2bi") (VNx4DI "vnx4bi") (VNx8DI "vnx8bi")
+  (VNx1SF "vnx1bi") (VNx2SF "vnx2bi") (VNx4SF "vnx4bi") (VNx8SF "vnx8bi") (VNx16SF "vnx16bi")
+  (VNx1DF "vnx1bi") (VNx2DF "vnx2bi") (VNx4DF "vnx4bi") (VNx8DF "vnx8bi")
+])
+
 (define_mode_attr VEL [
   (VNx1QI "QI") (VNx2QI "QI") (VNx4QI "QI") (VNx8QI "QI") (VNx16QI "QI") (VNx32QI "QI") (VNx64QI "QI")
   (VNx1HI "HI") (VNx2HI "HI") (VNx4HI "HI") (VNx8HI "HI") (VNx16HI "HI") (VNx32HI "HI")
@@ -322,6 +331,9 @@ 
 (define_code_attr macc_nmsac [(plus "macc") (minus "nmsac")])
 (define_code_attr madd_nmsub [(plus "madd") (minus "nmsub")])
 
+(define_code_iterator and_ior [and ior])
+(define_code_attr ninsn [(and "nand") (ior "nor") (xor "xnor")])
+
 (define_code_attr binop_rhs1_predicate [
 			(plus "register_operand")
 			(minus "vector_arith_operand")
@@ -419,10 +431,10 @@ 
 			(ashift "vshift")
 			(ashiftrt "vshift")
 			(lshiftrt "vshift")
-			(smin "vicmp")
-			(smax "vicmp")
-			(umin "vicmp")
-			(umax "vicmp")
+			(smin "viminmax")
+			(smax "viminmax")
+			(umin "viminmax")
+			(umax "viminmax")
 			(mult "vimul")
 			(div "vidiv")
 			(mod "vidiv")
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index 68472797a9c..239ffc851e3 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -41,11 +41,11 @@ 
 (define_attr "has_vtype_op" "false,true"
   (cond [(eq_attr "type" "vlde,vste,vldm,vstm,vlds,vsts,\
 			  vldux,vldox,vstux,vstox,vldff,\
-			  vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,\
+			  vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax,\
 			  vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,\
 			  vsalu,vaalu,vsmul,vsshift,vnclip,\
 			  vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,\
-			  vfcmp,vfsgnj,vfclass,vfmerge,vfmov,\
+			  vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov,\
 			  vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,\
 			  vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,\
 			  vired,viwred,vfred,vfredo,vfwred,vfwredo,\
@@ -61,11 +61,11 @@ 
 (define_attr "has_vl_op" "false,true"
   (cond [(eq_attr "type" "vlde,vste,vldm,vstm,vlds,vsts,\
 			  vldux,vldox,vstux,vstox,vldff,\
-			  vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,\
+			  vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax,\
 			  vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,\
 			  vsalu,vaalu,vsmul,vsshift,vnclip,\
 			  vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,\
-			  vfcmp,vfsgnj,vfclass,vfmerge,vfmov,\
+			  vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov,\
 			  vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,\
 			  vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,\
 			  vired,viwred,vfred,vfredo,vfwred,vfwredo,\
@@ -146,7 +146,7 @@ 
 			  vialu,vshift,vicmp,vimul,vidiv,vsalu,\
 			  vext,viwalu,viwmul,vicalu,vnshift,\
 			  vimuladd,vimerge,vaalu,vsmul,vsshift,\
-			  vnclip")
+			  vnclip,viminmax")
 	   (const_int INVALID_ATTRIBUTE)
 	 (eq_attr "mode" "VNx1QI,VNx1BI")
 	   (symbol_ref "riscv_vector::get_ratio(E_VNx1QImode)")
@@ -196,8 +196,8 @@ 
 
 ;; The index of operand[] to get the merge op.
 (define_attr "merge_op_idx" ""
-	(cond [(eq_attr "type" "vlde,vimov,vfmov,vldm,vlds,vmalu,vldux,vldox,\
-				vialu,vshift,vicmp,vimul,vidiv,vsalu,vext,viwalu,\
+	(cond [(eq_attr "type" "vlde,vimov,vfmov,vldm,vlds,vmalu,vldux,vldox,vicmp,\
+				vialu,vshift,viminmax,vimul,vidiv,vsalu,vext,viwalu,\
 				viwmul,vnshift,vimuladd,vaalu,vsmul,vsshift,vnclip")
 	       (const_int 2)
 
@@ -218,10 +218,13 @@ 
              (const_int 5)
              (const_int 4))
 
-	 (eq_attr "type" "vldux,vldox,vialu,vshift,vicmp,vimul,vidiv,vsalu,\
+	 (eq_attr "type" "vldux,vldox,vialu,vshift,viminmax,vimul,vidiv,vsalu,\
 			  viwalu,viwmul,vnshift,vimuladd,vimerge,vaalu,vsmul,\
 			  vsshift,vnclip")
-	   (const_int 5)]
+	   (const_int 5)
+
+	 (eq_attr "type" "vicmp")
+	   (const_int 6)]
   (const_int INVALID_ATTRIBUTE)))
 
 ;; The tail policy op value.
@@ -236,7 +239,7 @@ 
 	     (symbol_ref "riscv_vector::get_ta(operands[6])")
 	     (symbol_ref "riscv_vector::get_ta(operands[5])"))
 
-	 (eq_attr "type" "vldux,vldox,vialu,vshift,vicmp,vimul,vidiv,vsalu,\
+	 (eq_attr "type" "vldux,vldox,vialu,vshift,viminmax,vimul,vidiv,vsalu,\
 			  viwalu,viwmul,vnshift,vimuladd,vimerge,vaalu,vsmul,\
 			  vsshift,vnclip")
 	   (symbol_ref "riscv_vector::get_ta(operands[6])")]
@@ -254,9 +257,9 @@ 
 	     (symbol_ref "riscv_vector::get_ma(operands[7])")
 	     (symbol_ref "riscv_vector::get_ma(operands[6])"))
 
-	 (eq_attr "type" "vldux,vldox,vialu,vshift,vicmp,vimul,vidiv,vsalu,\
+	 (eq_attr "type" "vldux,vldox,vialu,vshift,viminmax,vimul,vidiv,vsalu,\
 			  viwalu,viwmul,vnshift,vimuladd,vaalu,vsmul,vsshift,\
-			  vnclip")
+			  vnclip,vicmp")
 	   (symbol_ref "riscv_vector::get_ma(operands[7])")]
 	(const_int INVALID_ATTRIBUTE)))
 
@@ -274,9 +277,9 @@ 
 	     (const_int INVALID_ATTRIBUTE)
 	     (symbol_ref "INTVAL (operands[7])"))
 
-	 (eq_attr "type" "vldux,vldox,vialu,vshift,vicmp,vimul,vidiv,vsalu,\
+	 (eq_attr "type" "vldux,vldox,vialu,vshift,viminmax,vimul,vidiv,vsalu,\
 			  viwalu,viwmul,vnshift,vimuladd,vaalu,vsmul,vsshift,\
-			  vnclip")
+			  vnclip,vicmp")
 	   (symbol_ref "INTVAL (operands[8])")
 	 (eq_attr "type" "vstux,vstox")
 	   (symbol_ref "INTVAL (operands[5])")]
@@ -763,16 +766,16 @@ 
 ;; constraint alternative 3 match vmclr.m.
 ;; constraint alternative 4 match vmset.m.
 (define_insn_and_split "@pred_mov<mode>"
-  [(set (match_operand:VB 0 "nonimmediate_operand"       "=vr,   m,  vr,  vr,  vr")
+  [(set (match_operand:VB 0 "nonimmediate_operand"               "=vr,   m,  vr,  vr,  vr")
 	(if_then_else:VB
 	  (unspec:VB
-	    [(match_operand:VB 1 "vector_mask_operand"   "Wc1, Wc1, Wc1, Wc1, Wc1")
-	     (match_operand 4 "vector_length_operand"    " rK,  rK,  rK,  rK,  rK")
-	     (match_operand 5 "const_int_operand"        "  i,   i,   i,   i,   i")
+	    [(match_operand:VB 1 "vector_all_trues_mask_operand" "Wc1, Wc1, Wc1, Wc1, Wc1")
+	     (match_operand 4 "vector_length_operand"            " rK,  rK,  rK,  rK,  rK")
+	     (match_operand 5 "const_int_operand"                "  i,   i,   i,   i,   i")
 	     (reg:SI VL_REGNUM)
 	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
-	  (match_operand:VB 3 "vector_move_operand"      "  m,  vr,  vr, Wc0, Wc1")
-	  (match_operand:VB 2 "vector_merge_operand"     " vu,  vu,  vu,  vu,  vu")))]
+	  (match_operand:VB 3 "vector_move_operand"              "  m,  vr,  vr, Wc0, Wc1")
+	  (match_operand:VB 2 "vector_undef_operand"             " vu,  vu,  vu,  vu,  vu")))]
   "TARGET_VECTOR"
   "@
    vlm.v\t%0,%3
@@ -790,14 +793,14 @@ 
 ;; Dedicated pattern for vsm.v instruction since we can't reuse pred_mov pattern to include
 ;; memory operand as input which will produce inferior codegen.
 (define_insn "@pred_store<mode>"
-  [(set (match_operand:VB 0 "memory_operand"            "+m")
+  [(set (match_operand:VB 0 "memory_operand"                      "+m")
 	(if_then_else:VB
 	  (unspec:VB
-	    [(match_operand:VB 1 "vector_mask_operand" "Wc1")
-	     (match_operand 3 "vector_length_operand"  " rK")
+	    [(match_operand:VB 1 "vector_all_trues_mask_operand" "Wc1")
+	     (match_operand 3 "vector_length_operand"            " rK")
 	     (reg:SI VL_REGNUM)
 	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
-	  (match_operand:VB 2 "register_operand"       " vr")
+	  (match_operand:VB 2 "register_operand"                 " vr")
 	  (match_dup 0)))]
   "TARGET_VECTOR"
   "vsm.v\t%2,%0"
@@ -3399,3 +3402,726 @@ 
   "vnclip<v_su>.w%o4\t%0,%3,%4%p1"
   [(set_attr "type" "vnclip")
    (set_attr "mode" "<V_DOUBLE_TRUNC>")])
+
+;; -------------------------------------------------------------------------------
+;; ---- Predicated comparison operations
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 11.8 Vector Integer Comparison Instructions
+;; -------------------------------------------------------------------------------
+
+(define_expand "@pred_cmp<mode>"
+  [(set (match_operand:<VM> 0 "register_operand")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand")
+	     (match_operand 6 "vector_length_operand")
+	     (match_operand 7 "const_int_operand")
+	     (match_operand 8 "const_int_operand")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "comparison_except_ltge_operator"
+	     [(match_operand:VI 4 "register_operand")
+	      (match_operand:VI 5 "vector_arith_operand")])
+	  (match_operand:<VM> 2 "vector_merge_operand")))]
+  "TARGET_VECTOR"
+  {})
+
+;; We don't use early-clobber for LMUL <= 1 to get better codegen.
+(define_insn "*pred_cmp<mode>"
+  [(set (match_operand:<VM> 0 "register_operand"                "=vr,   vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1,vmWc1")
+	     (match_operand 6 "vector_length_operand"         "   rK,   rK")
+	     (match_operand 7 "const_int_operand"             "    i,    i")
+	     (match_operand 8 "const_int_operand"             "    i,    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "comparison_except_ltge_operator"
+	     [(match_operand:VI 4 "register_operand"          "   vr,   vr")
+	      (match_operand:VI 5 "vector_arith_operand"      "   vr,   vi")])
+	  (match_operand:<VM> 2 "vector_merge_operand"        "  0vu,  0vu")))]
+  "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.v%o5\t%0,%4,%v5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+;; We use early-clobber for source LMUL > dest LMUL.
+(define_insn "*pred_cmp<mode>_narrow"
+  [(set (match_operand:<VM> 0 "register_operand"              "=&vr,   &vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1,vmWc1")
+	     (match_operand 6 "vector_length_operand"         "   rK,   rK")
+	     (match_operand 7 "const_int_operand"             "    i,    i")
+	     (match_operand 8 "const_int_operand"             "    i,    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "comparison_except_ltge_operator"
+	     [(match_operand:VI 4 "register_operand"          "   vr,   vr")
+	      (match_operand:VI 5 "vector_arith_operand"      "   vr,   vi")])
+	  (match_operand:<VM> 2 "vector_merge_operand"        "  0vu,  0vu")))]
+  "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.v%o5\t%0,%4,%v5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+(define_expand "@pred_ltge<mode>"
+  [(set (match_operand:<VM> 0 "register_operand")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand")
+	     (match_operand 6 "vector_length_operand")
+	     (match_operand 7 "const_int_operand")
+	     (match_operand 8 "const_int_operand")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "ltge_operator"
+	     [(match_operand:VI 4 "register_operand")
+	      (match_operand:VI 5 "vector_neg_arith_operand")])
+	  (match_operand:<VM> 2 "vector_merge_operand")))]
+  "TARGET_VECTOR"
+  {})
+
+;; We don't use early-clobber for LMUL <= 1 to get better codegen.
+(define_insn "*pred_ltge<mode>"
+  [(set (match_operand:<VM> 0 "register_operand"                "=vr,   vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1,vmWc1")
+	     (match_operand 6 "vector_length_operand"         "   rK,   rK")
+	     (match_operand 7 "const_int_operand"             "    i,    i")
+	     (match_operand 8 "const_int_operand"             "    i,    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "ltge_operator"
+	     [(match_operand:VI 4 "register_operand"          "   vr,   vr")
+	      (match_operand:VI 5 "vector_neg_arith_operand"  "   vr,   vj")])
+	  (match_operand:<VM> 2 "vector_merge_operand"        "  0vu,  0vu")))]
+  "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.v%o5\t%0,%4,%v5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+;; We use early-clobber for source LMUL > dest LMUL.
+(define_insn "*pred_ltge<mode>_narrow"
+  [(set (match_operand:<VM> 0 "register_operand"              "=&vr,   &vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1,vmWc1")
+	     (match_operand 6 "vector_length_operand"         "   rK,   rK")
+	     (match_operand 7 "const_int_operand"             "    i,    i")
+	     (match_operand 8 "const_int_operand"             "    i,    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "ltge_operator"
+	     [(match_operand:VI 4 "register_operand"          "   vr,   vr")
+	      (match_operand:VI 5 "vector_neg_arith_operand"  "   vr,   vj")])
+	  (match_operand:<VM> 2 "vector_merge_operand"        "  0vu,  0vu")))]
+  "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.v%o5\t%0,%4,%v5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+(define_expand "@pred_cmp<mode>_scalar"
+  [(set (match_operand:<VM> 0 "register_operand")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand")
+	     (match_operand 6 "vector_length_operand")
+	     (match_operand 7 "const_int_operand")
+	     (match_operand 8 "const_int_operand")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "comparison_except_eqge_operator"
+	     [(match_operand:VI_QHS 4 "register_operand")
+	      (vec_duplicate:VI_QHS
+	        (match_operand:<VEL> 5 "register_operand"))])
+	  (match_operand:<VM> 2 "vector_merge_operand")))]
+  "TARGET_VECTOR"
+  {})
+
+;; We don't use early-clobber for LMUL <= 1 to get better codegen.
+(define_insn "*pred_cmp<mode>_scalar"
+  [(set (match_operand:<VM> 0 "register_operand"                "=vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1")
+	     (match_operand 6 "vector_length_operand"         "   rK")
+	     (match_operand 7 "const_int_operand"             "    i")
+	     (match_operand 8 "const_int_operand"             "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "comparison_except_eqge_operator"
+	     [(match_operand:VI_QHS 4 "register_operand"      "   vr")
+	      (vec_duplicate:VI_QHS
+	        (match_operand:<VEL> 5 "register_operand"     "    r"))])
+	  (match_operand:<VM> 2 "vector_merge_operand"        "  0vu")))]
+  "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.vx\t%0,%4,%5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+;; We use early-clobber for source LMUL > dest LMUL.
+(define_insn "*pred_cmp<mode>_scalar_narrow"
+  [(set (match_operand:<VM> 0 "register_operand"               "=&vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1")
+	     (match_operand 6 "vector_length_operand"         "   rK")
+	     (match_operand 7 "const_int_operand"             "    i")
+	     (match_operand 8 "const_int_operand"             "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "comparison_except_eqge_operator"
+	     [(match_operand:VI_QHS 4 "register_operand"      "   vr")
+	      (vec_duplicate:VI_QHS
+	        (match_operand:<VEL> 5 "register_operand"     "    r"))])
+	  (match_operand:<VM> 2 "vector_merge_operand"        "  0vu")))]
+  "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.vx\t%0,%4,%5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+(define_expand "@pred_eqne<mode>_scalar"
+  [(set (match_operand:<VM> 0 "register_operand")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand")
+	     (match_operand 6 "vector_length_operand")
+	     (match_operand 7 "const_int_operand")
+	     (match_operand 8 "const_int_operand")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "equality_operator"
+	     [(vec_duplicate:VI_QHS
+	        (match_operand:<VEL> 5 "register_operand"))
+	      (match_operand:VI_QHS 4 "register_operand")])
+	  (match_operand:<VM> 2 "vector_merge_operand")))]
+  "TARGET_VECTOR"
+  {})
+
+;; We don't use early-clobber for LMUL <= 1 to get better codegen.
+(define_insn "*pred_eqne<mode>_scalar"
+  [(set (match_operand:<VM> 0 "register_operand"                "=vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1")
+	     (match_operand 6 "vector_length_operand"         "   rK")
+	     (match_operand 7 "const_int_operand"             "    i")
+	     (match_operand 8 "const_int_operand"             "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "equality_operator"
+	     [(vec_duplicate:VI_QHS
+	        (match_operand:<VEL> 5 "register_operand"     "    r"))
+	      (match_operand:VI_QHS 4 "register_operand"      "   vr")])
+	  (match_operand:<VM> 2 "vector_merge_operand"        "  0vu")))]
+  "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.vx\t%0,%4,%5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+;; We use early-clobber for source LMUL > dest LMUL.
+(define_insn "*pred_eqne<mode>_scalar_narrow"
+  [(set (match_operand:<VM> 0 "register_operand"               "=&vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1")
+	     (match_operand 6 "vector_length_operand"         "   rK")
+	     (match_operand 7 "const_int_operand"             "    i")
+	     (match_operand 8 "const_int_operand"             "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "equality_operator"
+	     [(vec_duplicate:VI_QHS
+	        (match_operand:<VEL> 5 "register_operand"     "    r"))
+	      (match_operand:VI_QHS 4 "register_operand"      "   vr")])
+	  (match_operand:<VM> 2 "vector_merge_operand"        "  0vu")))]
+  "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.vx\t%0,%4,%5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+;; Handle the case where GET_MODE_INNER (mode) == DImode.  A separate
+;; expander is needed since we must deal with SEW = 64 on RV32 systems.
+(define_expand "@pred_cmp<mode>_scalar"
+  [(set (match_operand:<VM> 0 "register_operand")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand")
+	     (match_operand 6 "vector_length_operand")
+	     (match_operand 7 "const_int_operand")
+	     (match_operand 8 "const_int_operand")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "comparison_except_eqge_operator"
+	     [(match_operand:VI_D 4 "register_operand")
+	      (vec_duplicate:VI_D
+	        (match_operand:<VEL> 5 "reg_or_int_operand"))])
+	  (match_operand:<VM> 2 "vector_merge_operand")))]
+  "TARGET_VECTOR"
+  {
+    enum rtx_code code = GET_CODE (operands[3]);
+
+    if (riscv_vector::has_vi_variant_p (code, operands[5]))
+      operands[5] = force_reg (<VEL>mode, operands[5]);
+    else if (!TARGET_64BIT)
+      {
+	rtx v = gen_reg_rtx (<MODE>mode);
+
+	if (riscv_vector::simm32_p (operands[5]))
+	  {
+	    if (!rtx_equal_p (operands[5], const0_rtx))
+	      operands[5] = force_reg (Pmode, operands[5]);
+	    operands[5] = gen_rtx_SIGN_EXTEND (<VEL>mode, operands[5]);
+	  }
+	else
+	  {
+	    if (CONST_INT_P (operands[5]))
+	      operands[5] = force_reg (<VEL>mode, operands[5]);
+
+	    riscv_vector::emit_nonvlmax_op (code_for_pred_broadcast (<MODE>mode),
+			v, operands[5], operands[6], <VM>mode);
+	    if (code == LT || code == LTU)
+		emit_insn (gen_pred_ltge<mode> (operands[0], operands[1],
+			   operands[2], operands[3], operands[4], v,
+			   operands[6], operands[7], operands[8]));
+	    else
+		emit_insn (gen_pred_cmp<mode> (operands[0], operands[1],
+			   operands[2], operands[3], operands[4], v,
+			   operands[6], operands[7], operands[8]));	      
+	    DONE;
+	  }
+      }
+    else
+      operands[5] = force_reg (<VEL>mode, operands[5]);
+  })
+
+(define_expand "@pred_eqne<mode>_scalar"
+  [(set (match_operand:<VM> 0 "register_operand")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand")
+	     (match_operand 6 "vector_length_operand")
+	     (match_operand 7 "const_int_operand")
+	     (match_operand 8 "const_int_operand")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "equality_operator"
+	     [(vec_duplicate:VI_D
+	        (match_operand:<VEL> 5 "reg_or_int_operand"))
+	      (match_operand:VI_D 4 "register_operand")])
+	  (match_operand:<VM> 2 "vector_merge_operand")))]
+  "TARGET_VECTOR"
+  {
+    enum rtx_code code = GET_CODE (operands[3]);
+
+    if (riscv_vector::has_vi_variant_p (code, operands[5]))
+      operands[5] = force_reg (<VEL>mode, operands[5]);
+    else if (!TARGET_64BIT)
+      {
+	rtx v = gen_reg_rtx (<MODE>mode);
+
+	if (riscv_vector::simm32_p (operands[5]))
+	  {
+	    if (!rtx_equal_p (operands[5], const0_rtx))
+	      operands[5] = force_reg (Pmode, operands[5]);
+	    operands[5] = gen_rtx_SIGN_EXTEND (<VEL>mode, operands[5]);
+	  }
+	else
+	  {
+	    if (CONST_INT_P (operands[5]))
+	      operands[5] = force_reg (<VEL>mode, operands[5]);
+
+	    riscv_vector::emit_nonvlmax_op (code_for_pred_broadcast (<MODE>mode),
+			v, operands[5], operands[6], <VM>mode);
+	    emit_insn (gen_pred_cmp<mode> (operands[0], operands[1],
+		       operands[2], operands[3], operands[4], v,
+		       operands[6], operands[7], operands[8]));	      
+	    DONE;
+	  }
+      }
+    else
+      operands[5] = force_reg (<VEL>mode, operands[5]);
+  })
+
+;; We don't use early-clobber for LMUL <= 1 to get better codegen.
+(define_insn "*pred_cmp<mode>_scalar"
+  [(set (match_operand:<VM> 0 "register_operand"                "=vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1")
+	     (match_operand 6 "vector_length_operand"         "   rK")
+	     (match_operand 7 "const_int_operand"             "    i")
+	     (match_operand 8 "const_int_operand"             "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "comparison_except_eqge_operator"
+	     [(match_operand:VI_D 4 "register_operand"        "    vr")
+	      (vec_duplicate:VI_D
+	        (match_operand:<VEL> 5 "register_operand"     "    r"))])
+	  (match_operand:<VM> 2 "vector_merge_operand"        "  0vu")))]
+  "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.vx\t%0,%4,%5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+;; We use early-clobber for source LMUL > dest LMUL.
+(define_insn "*pred_cmp<mode>_scalar_narrow"
+  [(set (match_operand:<VM> 0 "register_operand"               "=&vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1")
+	     (match_operand 6 "vector_length_operand"         "   rK")
+	     (match_operand 7 "const_int_operand"             "    i")
+	     (match_operand 8 "const_int_operand"             "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "comparison_except_eqge_operator"
+	     [(match_operand:VI_D 4 "register_operand"        "   vr")
+	      (vec_duplicate:VI_D
+	        (match_operand:<VEL> 5 "register_operand"     "    r"))])
+	  (match_operand:<VM> 2 "vector_merge_operand"        "  0vu")))]
+  "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.vx\t%0,%4,%5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+;; We don't use early-clobber for LMUL <= 1 to get better codegen.
+(define_insn "*pred_eqne<mode>_scalar"
+  [(set (match_operand:<VM> 0 "register_operand"                "=vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1")
+	     (match_operand 6 "vector_length_operand"         "   rK")
+	     (match_operand 7 "const_int_operand"             "    i")
+	     (match_operand 8 "const_int_operand"             "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "equality_operator"
+	     [(vec_duplicate:VI_D
+	        (match_operand:<VEL> 5 "register_operand"     "    r"))
+	      (match_operand:VI_D 4 "register_operand"        "   vr")])
+	  (match_operand:<VM> 2 "vector_merge_operand"        "  0vu")))]
+  "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.vx\t%0,%4,%5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+;; We use early-clobber for source LMUL > dest LMUL.
+(define_insn "*pred_eqne<mode>_scalar_narrow"
+  [(set (match_operand:<VM> 0 "register_operand"               "=&vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1")
+	     (match_operand 6 "vector_length_operand"         "   rK")
+	     (match_operand 7 "const_int_operand"             "    i")
+	     (match_operand 8 "const_int_operand"             "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "equality_operator"
+	     [(vec_duplicate:VI_D
+	        (match_operand:<VEL> 5 "register_operand"     "    r"))
+	      (match_operand:VI_D 4 "register_operand"        "   vr")])
+	  (match_operand:<VM> 2 "vector_merge_operand"        "  0vu")))]
+  "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.vx\t%0,%4,%5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+;; We don't use early-clobber for LMUL <= 1 to get better codegen.
+(define_insn "*pred_cmp<mode>_extended_scalar"
+  [(set (match_operand:<VM> 0 "register_operand"                 "=vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"       "vmWc1")
+	     (match_operand 6 "vector_length_operand"          "   rK")
+	     (match_operand 7 "const_int_operand"              "    i")
+	     (match_operand 8 "const_int_operand"              "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "comparison_except_eqge_operator"
+	     [(match_operand:VI_D 4 "register_operand"         "   vr")
+	      (vec_duplicate:VI_D
+	        (sign_extend:<VEL>
+	          (match_operand:<VSUBEL> 5 "register_operand" "    r")))])
+	  (match_operand:<VM> 2 "vector_merge_operand"         "  0vu")))]
+  "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.vx\t%0,%4,%5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "*pred_cmp<mode>_extended_scalar_narrow"
+  [(set (match_operand:<VM> 0 "register_operand"                "=&vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"       "vmWc1")
+	     (match_operand 6 "vector_length_operand"          "   rK")
+	     (match_operand 7 "const_int_operand"              "    i")
+	     (match_operand 8 "const_int_operand"              "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "comparison_except_eqge_operator"
+	     [(match_operand:VI_D 4 "register_operand"         "   vr")
+	      (vec_duplicate:VI_D
+	        (sign_extend:<VEL>
+	          (match_operand:<VSUBEL> 5 "register_operand" "    r")))])
+	  (match_operand:<VM> 2 "vector_merge_operand"         "  0vu")))]
+  "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.vx\t%0,%4,%5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+;; We don't use early-clobber for LMUL <= 1 to get better codegen.
+(define_insn "*pred_eqne<mode>_extended_scalar"
+  [(set (match_operand:<VM> 0 "register_operand"                 "=vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"       "vmWc1")
+	     (match_operand 6 "vector_length_operand"          "   rK")
+	     (match_operand 7 "const_int_operand"              "    i")
+	     (match_operand 8 "const_int_operand"              "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "equality_operator"
+	     [(vec_duplicate:VI_D
+	        (sign_extend:<VEL>
+	          (match_operand:<VSUBEL> 5 "register_operand" "    r")))
+	      (match_operand:VI_D 4 "register_operand"         "   vr")])
+	  (match_operand:<VM> 2 "vector_merge_operand"         "  0vu")))]
+  "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.vx\t%0,%4,%5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "*pred_eqne<mode>_extended_scalar_narrow"
+  [(set (match_operand:<VM> 0 "register_operand"                "=&vr")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"       "vmWc1")
+	     (match_operand 6 "vector_length_operand"          "   rK")
+	     (match_operand 7 "const_int_operand"              "    i")
+	     (match_operand 8 "const_int_operand"              "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "equality_operator"
+	     [(vec_duplicate:VI_D
+	        (sign_extend:<VEL>
+	          (match_operand:<VSUBEL> 5 "register_operand" "    r")))
+	      (match_operand:VI_D 4 "register_operand"         "   vr")])
+	  (match_operand:<VM> 2 "vector_merge_operand"         "  0vu")))]
+  "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), BYTES_PER_RISCV_VECTOR)"
+  "vms%B3.vx\t%0,%4,%5%p1"
+  [(set_attr "type" "vicmp")
+   (set_attr "mode" "<MODE>")])
+
+;; GE, vmsge.vx/vmsgeu.vx
+;;
+;; unmasked va >= x
+;;  - pseudoinstruction: vmsge{u}.vx vd, va, x
+;;  - expansion: vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
+;;
+;; masked va >= x, vd != v0
+;;  - pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t
+;;  - expansion: vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
+;;
+;; masked va >= x, vd == v0
+;;  - pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t, vt
+;;  - expansion: vmslt{u}.vx vt, va, x;  vmandn.mm vd, vd, vt
+(define_expand "@pred_ge<mode>_scalar"
+  [(set (match_operand:<VM> 0 "register_operand")
+	(if_then_else:<VM>
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand")
+	     (match_operand 6 "vector_length_operand")
+	     (match_operand 7 "const_int_operand")
+	     (match_operand 8 "const_int_operand")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (match_operator:<VM> 3 "ge_operator"
+	     [(match_operand:VI 4 "register_operand")
+	      (vec_duplicate:VI
+	        (match_operand:<VEL> 5 "reg_or_int_operand"))])
+	  (match_operand:<VM> 2 "vector_merge_operand")))]
+  "TARGET_VECTOR"
+{
+  enum rtx_code code = GET_CODE (operands[3]);
+  rtx undef = gen_rtx_UNSPEC (<VM>mode, gen_rtvec (1, const0_rtx), UNSPEC_VUNDEF);
+  if (code == GEU && rtx_equal_p (operands[5], const0_rtx))
+    {
+      /* If vmsgeu with 0 immediate, expand it to vmset.  */
+      if (satisfies_constraint_Wc1 (operands[1]))
+	emit_insn (
+	  gen_pred_mov (<VM>mode, operands[0], CONSTM1_RTX (<VM>mode), undef,
+			CONSTM1_RTX (<VM>mode), operands[6], operands[8]));
+      else
+	{
+	  /* If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.  */
+	  if (rtx_equal_p (operands[1], operands[2]))
+	    emit_move_insn (operands[0], operands[1]);
+	  else if (register_operand (operands[2], <VM>mode))
+	    emit_insn (gen_pred (IOR, <VM>mode, operands[0],
+				 CONSTM1_RTX (<VM>mode), undef, operands[1],
+				 operands[2], operands[6], operands[8]));
+	  else
+	    emit_insn (gen_pred (IOR, <VM>mode, operands[0],
+				 CONSTM1_RTX (<VM>mode), undef, operands[1],
+				 operands[1], operands[6], operands[8]));
+	}
+    }
+  else if (riscv_vector::neg_simm5_p (operands[5]))
+    emit_insn (
+      gen_pred_ltge<mode> (operands[0], operands[1], operands[2], operands[3],
+			   operands[4],
+			   gen_const_vec_duplicate (<MODE>mode, operands[5]),
+			   operands[6], operands[7], operands[8]));
+  else
+    {
+      if (code == GE)
+	operands[3] = gen_rtx_fmt_ee (LT, <VM>mode, XEXP (operands[3], 0),
+				      XEXP (operands[3], 1));
+      else
+	operands[3] = gen_rtx_fmt_ee (LTU, <VM>mode, XEXP (operands[3], 0),
+				      XEXP (operands[3], 1));
+      if (GET_MODE_BITSIZE (<VEL>mode) <= GET_MODE_BITSIZE (Pmode))
+	operands[5] = force_reg (<VEL>mode, operands[5]);
+
+      if (satisfies_constraint_Wc1 (operands[1]))
+	{
+	  /* unmasked va >= x
+	    - pseudoinstruction: vmsge{u}.vx vd, va, x
+	    - expansion: vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd.  */
+	  emit_insn (
+	    gen_pred_cmp<mode>_scalar (operands[0], operands[1], operands[2],
+					operands[3], operands[4], operands[5],
+					operands[6], operands[7], operands[8]));
+	  emit_insn (gen_pred_nand<vm> (operands[0], CONSTM1_RTX (<VM>mode),
+					undef, operands[0], operands[0],
+					operands[6], operands[8]));
+	}
+      else
+	{
+	  if (rtx_equal_p (operands[1], operands[2]))
+	    {
+	      /* masked va >= x, vd == v0
+		- pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t, vt
+		- expansion: vmslt{u}.vx vt, va, x;  vmandn.mm vd, vd, vt.  */
+	      rtx reg = gen_reg_rtx (<VM>mode);
+	      emit_insn (gen_pred_cmp<mode>_scalar (
+		reg, CONSTM1_RTX (<VM>mode), undef, operands[3], operands[4],
+		operands[5], operands[6], operands[7], operands[8]));
+	      emit_insn (
+		gen_pred_andn<vm> (operands[0], CONSTM1_RTX (<VM>mode), undef,
+				   operands[1], reg, operands[6], operands[8]));
+	    }
+	  else
+	    {
+	      /* masked va >= x, vd != v0
+		- pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t
+		- expansion: vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0.
+	      */
+	      emit_insn (gen_pred_cmp<mode>_scalar (
+		operands[0], operands[1], operands[2], operands[3], operands[4],
+		operands[5], operands[6], operands[7], operands[8]));
+	      emit_insn (gen_pred (XOR, <VM>mode, operands[0],
+				   CONSTM1_RTX (<VM>mode), undef, operands[0],
+				   operands[1], operands[6], operands[8]));
+	    }
+	}
+    }
+  DONE;
+})
+
+;; -------------------------------------------------------------------------------
+;; ---- Predicated BOOL mask operations
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 15.1 Vector Mask-Register Logical Instructions
+;; -------------------------------------------------------------------------------
+
+;; We keep this pattern the same as pred_mov so that we can gain more optimizations.
+;; For example, vmxor.mm v1,v1,v1 can be optimized into vmclr.m, which
+;; is generated by pred_mov.
+(define_insn "@pred_<optab><mode>"
+  [(set (match_operand:VB 0 "register_operand"                   "=vr")
+	(if_then_else:VB
+	  (unspec:VB
+	    [(match_operand:VB 1 "vector_all_trues_mask_operand" "Wc1")
+	     (match_operand 5 "vector_length_operand"            " rK")
+	     (match_operand 6 "const_int_operand"                "  i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (any_bitwise:VB
+	    (match_operand:VB 3 "register_operand"               " vr")
+	    (match_operand:VB 4 "register_operand"               " vr"))
+	  (match_operand:VB 2 "vector_undef_operand"             " vu")))]
+  "TARGET_VECTOR"
+  "vm<insn>.mm\t%0,%3,%4"
+  [(set_attr "type" "vmalu")
+   (set_attr "mode" "<MODE>")
+   (set_attr "vl_op_idx" "5")
+   (set (attr "avl_type") (symbol_ref "INTVAL (operands[6])"))])
+
+(define_insn "@pred_n<optab><mode>"
+  [(set (match_operand:VB 0 "register_operand"                   "=vr")
+	(if_then_else:VB
+	  (unspec:VB
+	    [(match_operand:VB 1 "vector_all_trues_mask_operand" "Wc1")
+	     (match_operand 5 "vector_length_operand"            " rK")
+	     (match_operand 6 "const_int_operand"                "  i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (not:VB
+	    (any_bitwise:VB
+	      (match_operand:VB 3 "register_operand"             " vr")
+	      (match_operand:VB 4 "register_operand"             " vr")))
+	  (match_operand:VB 2 "vector_undef_operand"             " vu")))]
+  "TARGET_VECTOR"
+  "vm<ninsn>.mm\t%0,%3,%4"
+  [(set_attr "type" "vmalu")
+   (set_attr "mode" "<MODE>")
+   (set_attr "vl_op_idx" "5")
+   (set (attr "avl_type") (symbol_ref "INTVAL (operands[6])"))])
+
+(define_insn "@pred_<optab>n<mode>"
+  [(set (match_operand:VB 0 "register_operand"                   "=vr")
+	(if_then_else:VB
+	  (unspec:VB
+	    [(match_operand:VB 1 "vector_all_trues_mask_operand" "Wc1")
+	     (match_operand 5 "vector_length_operand"            " rK")
+	     (match_operand 6 "const_int_operand"                "  i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (and_ior:VB
+	    (match_operand:VB 3 "register_operand"               " vr")
+	    (not:VB
+	      (match_operand:VB 4 "register_operand"             " vr")))
+	  (match_operand:VB 2 "vector_undef_operand"             " vu")))]
+  "TARGET_VECTOR"
+  "vm<insn>n.mm\t%0,%3,%4"
+  [(set_attr "type" "vmalu")
+   (set_attr "mode" "<MODE>")
+   (set_attr "vl_op_idx" "5")
+   (set (attr "avl_type") (symbol_ref "INTVAL (operands[6])"))])
+
+(define_insn "@pred_not<mode>"
+  [(set (match_operand:VB 0 "register_operand"                   "=vr")
+	(if_then_else:VB
+	  (unspec:VB
+	    [(match_operand:VB 1 "vector_all_trues_mask_operand" "Wc1")
+	     (match_operand 4 "vector_length_operand"            " rK")
+	     (match_operand 5 "const_int_operand"                "  i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (not:VB
+	    (match_operand:VB 3 "register_operand"               " vr"))
+	  (match_operand:VB 2 "vector_undef_operand"             " vu")))]
+  "TARGET_VECTOR"
+  "vmnot.mm\t%0,%3"
+  [(set_attr "type" "vmalu")
+   (set_attr "mode" "<MODE>")
+   (set_attr "vl_op_idx" "4")
+   (set (attr "avl_type") (symbol_ref "INTVAL (operands[5])"))])
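
To tie the pieces together: @pred_ge<mode>_scalar plus the mask patterns
above are what back the vmsge{u}.vx intrinsics, since the ISA provides no
native vmsge.vx encoding.  A hedged end-to-end sketch (intrinsic name
assumed per the v0.11 spec):

#include <riscv_vector.h>

vbool32_t
ge_scalar (vint32m1_t va, int32_t x, size_t vl)
{
  /* Unmasked va >= x: per the expansion comments above, this is
     emitted as vmslt.vx vd,va,x followed by vmnand.mm vd,vd,vd.  */
  return __riscv_vmsge_vx_i32m1_b32 (va, x, vl);
}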