@@ -549,7 +549,7 @@ riscv*)
extra_objs="${extra_objs} riscv-vector-builtins.o riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o"
extra_objs="${extra_objs} thead.o riscv-target-attr.o"
d_target_objs="riscv-d.o"
- extra_headers="riscv_vector.h"
+ extra_headers="riscv_vector.h riscv_th_vector.h"
target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.cc"
target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.h"
;;
@@ -2579,7 +2579,7 @@
[(match_operand 0 "register_operand")
(match_operand 1 "memory_operand")
(match_operand:ANYI 2 "const_int_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
{
riscv_vector::expand_rawmemchr(<MODE>mode, operands[0], operands[1],
operands[2]);
@@ -428,7 +428,9 @@
;; Predicates for the V extension.
(define_special_predicate "vector_length_operand"
(ior (match_operand 0 "pmode_register_operand")
- (match_operand 0 "const_csr_operand")))
+ (and (ior (match_test "TARGET_XTHEADVECTOR && rtx_equal_p (op, const0_rtx)")
+ (match_test "!TARGET_XTHEADVECTOR"))
+ (match_operand 0 "const_csr_operand"))))
(define_special_predicate "autovec_length_operand"
(ior (match_operand 0 "pmode_register_operand")
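Spelled out, the new vector_length_operand accepts any Pmode register, and accepts a CSR-range constant only when XTheadVector is disabled, with the constant zero as the sole exception. A minimal C restatement of that logic, where the boolean parameters are hypothetical stand-ins for the RTL tests rather than GCC APIs:

    /* Sketch only: mirrors the ior/and structure of the predicate above.  */
    static _Bool
    vector_length_operand_ok (_Bool is_pmode_reg, _Bool is_const_csr,
                              _Bool is_const_zero, _Bool xtheadvector)
    {
      if (is_pmode_reg)
        return 1;
      /* Constants must be in CSR range; under XTheadVector only 0 is valid.  */
      return is_const_csr && (!xtheadvector || is_const_zero);
    }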
@@ -195,7 +195,8 @@ riscv_pragma_intrinsic (cpp_reader *)
const char *name = TREE_STRING_POINTER (x);
- if (strcmp (name, "vector") == 0)
+ if (strcmp (name, "vector") == 0
+ || strcmp (name, "xtheadvector") == 0)
{
if (!TARGET_VECTOR)
{
@@ -773,7 +773,8 @@ riscv_expand_block_move_scalar (rtx dest, rtx src, rtx length)
bool
riscv_expand_block_move (rtx dest, rtx src, rtx length)
{
- if (TARGET_VECTOR && stringop_strategy & STRATEGY_VECTOR)
+ if ((TARGET_VECTOR && !TARGET_XTHEADVECTOR)
+ && stringop_strategy & STRATEGY_VECTOR)
{
bool ok = riscv_vector::expand_block_move (dest, src, length);
if (ok)
@@ -1772,7 +1772,7 @@ get_prefer_tail_policy ()
compiler pick up either agnostic or undisturbed. Maybe we
will have a compile option like -mprefer=agnostic to set
this value???. */
- return TAIL_ANY;
+ return TARGET_XTHEADVECTOR ? TAIL_AGNOSTIC : TAIL_ANY;
}
/* Get prefer mask policy. */
@@ -1783,7 +1783,7 @@ get_prefer_mask_policy ()
compiler pick up either agnostic or undisturbed. Maybe we
will have a compile option like -mprefer=agnostic to set
this value???. */
- return MASK_ANY;
+ return TARGET_XTHEADVECTOR ? MASK_UNDISTURBED : MASK_ANY;
}
/* Get avl_type rtx. */
@@ -4383,7 +4383,7 @@ cmp_lmul_gt_one (machine_mode mode)
bool
vls_mode_valid_p (machine_mode vls_mode)
{
- if (!TARGET_VECTOR)
+ if (!TARGET_VECTOR || TARGET_XTHEADVECTOR)
return false;
if (riscv_autovec_preference == RVV_SCALABLE)
@@ -115,23 +115,37 @@ public:
tree type = builtin_types[e.type.index].vector;
machine_mode mode = TYPE_MODE (type);
- /* Normalize same RATO (SEW/LMUL) into same vsetvl instruction.
-
- - e8,mf8/e16,mf4/e32,mf2/e64,m1 --> e8mf8
- - e8,mf4/e16,mf2/e32,m1/e64,m2 --> e8mf4
- - e8,mf2/e16,m1/e32,m2/e64,m4 --> e8mf2
- - e8,m1/e16,m2/e32,m4/e64,m8 --> e8m1
- - e8,m2/e16,m4/e32,m8 --> e8m2
- - e8,m4/e16,m8 --> e8m4
- - e8,m8 --> e8m8
- */
- /* SEW. */
- e.add_input_operand (Pmode, gen_int_mode (8, Pmode));
-
- /* LMUL. */
- machine_mode e8_mode
- = get_vector_mode (QImode, GET_MODE_NUNITS (mode)).require ();
- e.add_input_operand (Pmode, gen_int_mode (get_vlmul (e8_mode), Pmode));
+
+ if (TARGET_XTHEADVECTOR)
+ {
+ machine_mode inner_mode = GET_MODE_INNER (mode);
+ /* SEW. */
+ e.add_input_operand (Pmode,
+ gen_int_mode (GET_MODE_BITSIZE (inner_mode), Pmode));
+ /* LMUL. */
+ e.add_input_operand (Pmode,
+ gen_int_mode (get_vlmul (mode), Pmode));
+ }
+ else
+ {
+      /* Normalize same RATIO (SEW/LMUL) into same vsetvl instruction.
+
+ - e8,mf8/e16,mf4/e32,mf2/e64,m1 --> e8mf8
+ - e8,mf4/e16,mf2/e32,m1/e64,m2 --> e8mf4
+ - e8,mf2/e16,m1/e32,m2/e64,m4 --> e8mf2
+ - e8,m1/e16,m2/e32,m4/e64,m8 --> e8m1
+ - e8,m2/e16,m4/e32,m8 --> e8m2
+ - e8,m4/e16,m8 --> e8m4
+ - e8,m8 --> e8m8
+ */
+ /* SEW. */
+ e.add_input_operand (Pmode, gen_int_mode (8, Pmode));
+
+ /* LMUL. */
+ machine_mode e8_mode
+ = get_vector_mode (QImode, GET_MODE_NUNITS (mode)).require ();
+ e.add_input_operand (Pmode, gen_int_mode (get_vlmul (e8_mode), Pmode));
+ }
/* TAIL_ANY. */
e.add_input_operand (Pmode,
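The normalization kept in the else branch relies on SEW/LMUL being an invariant: two (SEW, LMUL) settings request the same vsetvl configuration exactly when their ratio agrees. A standalone C sketch of that arithmetic (the eighths-based LMUL encoding, mf8 = 1 up to m8 = 64, is chosen for this sketch only):

    #include <assert.h>

    /* RATIO = SEW / LMUL, with LMUL scaled by 8 to keep fractions integral.  */
    static int
    ratio (int sew, int lmul_eighths)
    {
      return sew * 8 / lmul_eighths;
    }

    int
    main (void)
    {
      /* First row of the table above: e8,mf8 / e16,mf4 / e32,mf2 / e64,m1.  */
      assert (ratio (8, 1) == 64 && ratio (16, 2) == 64);
      assert (ratio (32, 4) == 64 && ratio (64, 8) == 64);
      /* The e8,m1 row: e8,m1 through e64,m8 all give ratio 8.  */
      assert (ratio (8, 8) == 8 && ratio (64, 64) == 8);
      return 0;
    }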
@@ -33,6 +33,25 @@
namespace riscv_vector {
+/* Check whether the RETURN_TYPE and ARGUMENT_TYPES are
+   valid (i.e. non-null) for the function.  */
+
+static bool
+check_type (tree return_type, vec<tree> &argument_types)
+{
+ tree arg;
+ unsigned i;
+
+ if (!return_type)
+ return false;
+
+ FOR_EACH_VEC_ELT (argument_types, i, arg)
+ if (!arg)
+ return false;
+
+ return true;
+}
+
/* Add one function instance for GROUP, using operand suffix at index OI,
mode suffix at index PAIR && bi and predication suffix at index pred_idx. */
static void
@@ -49,6 +68,10 @@ build_one (function_builder &b, const function_group_info &group,
group.ops_infos.types[vec_type_idx].index);
b.allocate_argument_types (function_instance, argument_types);
b.apply_predication (function_instance, return_type, argument_types);
+
+ if (TARGET_XTHEADVECTOR && !check_type (return_type, argument_types))
+ return;
+
b.add_overloaded_function (function_instance, *group.shape);
b.add_unique_function (function_instance, (*group.shape), return_type,
argument_types);
@@ -68,9 +68,9 @@ Encode the ratio of SEW/LMUL into the mask types.
#endif
/* Disable modes if TARGET_MIN_VLEN == 32. */
-ENTRY (RVVMF64BI, TARGET_MIN_VLEN > 32, LMUL_F8, 64)
-ENTRY (RVVMF32BI, true, LMUL_F4, 32)
-ENTRY (RVVMF16BI, true, LMUL_F2, 16)
+ENTRY (RVVMF64BI, TARGET_MIN_VLEN > 32, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F8, 64)
+ENTRY (RVVMF32BI, true, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F4, 32)
+ENTRY (RVVMF16BI, true, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F2, 16)
ENTRY (RVVMF8BI, true, LMUL_1, 8)
ENTRY (RVVMF4BI, true, LMUL_2, 4)
ENTRY (RVVMF2BI, true, LMUL_4, 2)
@@ -81,39 +81,39 @@ ENTRY (RVVM8QI, true, LMUL_8, 1)
ENTRY (RVVM4QI, true, LMUL_4, 2)
ENTRY (RVVM2QI, true, LMUL_2, 4)
ENTRY (RVVM1QI, true, LMUL_1, 8)
-ENTRY (RVVMF2QI, true, LMUL_F2, 16)
-ENTRY (RVVMF4QI, true, LMUL_F4, 32)
-ENTRY (RVVMF8QI, TARGET_MIN_VLEN > 32, LMUL_F8, 64)
+ENTRY (RVVMF2QI, !TARGET_XTHEADVECTOR, LMUL_F2, 16)
+ENTRY (RVVMF4QI, !TARGET_XTHEADVECTOR, LMUL_F4, 32)
+ENTRY (RVVMF8QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F8, 64)
/* Disable modes if TARGET_MIN_VLEN == 32. */
ENTRY (RVVM8HI, true, LMUL_8, 2)
ENTRY (RVVM4HI, true, LMUL_4, 4)
ENTRY (RVVM2HI, true, LMUL_2, 8)
ENTRY (RVVM1HI, true, LMUL_1, 16)
-ENTRY (RVVMF2HI, true, LMUL_F2, 32)
-ENTRY (RVVMF4HI, TARGET_MIN_VLEN > 32, LMUL_F4, 64)
+ENTRY (RVVMF2HI, !TARGET_XTHEADVECTOR, LMUL_F2, 32)
+ENTRY (RVVMF4HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
/* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_FP_16. */
ENTRY (RVVM8HF, TARGET_VECTOR_ELEN_FP_16, LMUL_8, 2)
ENTRY (RVVM4HF, TARGET_VECTOR_ELEN_FP_16, LMUL_4, 4)
ENTRY (RVVM2HF, TARGET_VECTOR_ELEN_FP_16, LMUL_2, 8)
ENTRY (RVVM1HF, TARGET_VECTOR_ELEN_FP_16, LMUL_1, 16)
-ENTRY (RVVMF2HF, TARGET_VECTOR_ELEN_FP_16, LMUL_F2, 32)
-ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, LMUL_F4, 64)
+ENTRY (RVVMF2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, LMUL_F2, 32)
+ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
/* Disable modes if TARGET_MIN_VLEN == 32. */
ENTRY (RVVM8SI, true, LMUL_8, 4)
ENTRY (RVVM4SI, true, LMUL_4, 8)
ENTRY (RVVM2SI, true, LMUL_2, 16)
ENTRY (RVVM1SI, true, LMUL_1, 32)
-ENTRY (RVVMF2SI, TARGET_MIN_VLEN > 32, LMUL_F2, 64)
+ENTRY (RVVMF2SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
/* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_FP_32. */
ENTRY (RVVM8SF, TARGET_VECTOR_ELEN_FP_32, LMUL_8, 4)
ENTRY (RVVM4SF, TARGET_VECTOR_ELEN_FP_32, LMUL_4, 8)
ENTRY (RVVM2SF, TARGET_VECTOR_ELEN_FP_32, LMUL_2, 16)
ENTRY (RVVM1SF, TARGET_VECTOR_ELEN_FP_32, LMUL_1, 32)
-ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, LMUL_F2, 64)
+ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
/* Disable modes if !TARGET_VECTOR_ELEN_64. */
ENTRY (RVVM8DI, TARGET_VECTOR_ELEN_64, LMUL_8, 8)
@@ -140,127 +140,127 @@ ENTRY (RVVM1DF, TARGET_VECTOR_ELEN_FP_64, LMUL_1, 64)
#endif
TUPLE_ENTRY (RVVM1x8QI, true, RVVM1QI, 8, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x8QI, true, RVVMF2QI, 8, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x8QI, true, RVVMF4QI, 8, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x8QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 8, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x8QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 8, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x8QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 8, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x8QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 8, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x7QI, true, RVVM1QI, 7, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x7QI, true, RVVMF2QI, 7, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x7QI, true, RVVMF4QI, 7, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x7QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 7, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x7QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 7, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x7QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 7, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x7QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 7, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x6QI, true, RVVM1QI, 6, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x6QI, true, RVVMF2QI, 6, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x6QI, true, RVVMF4QI, 6, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x6QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 6, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x6QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 6, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x6QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 6, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x6QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 6, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x5QI, true, RVVM1QI, 5, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x5QI, true, RVVMF2QI, 5, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x5QI, true, RVVMF4QI, 5, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x5QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 5, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x5QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 5, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x5QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 5, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x5QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 5, LMUL_F8, 64)
TUPLE_ENTRY (RVVM2x4QI, true, RVVM2QI, 4, LMUL_2, 4)
TUPLE_ENTRY (RVVM1x4QI, true, RVVM1QI, 4, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x4QI, true, RVVMF2QI, 4, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x4QI, true, RVVMF4QI, 4, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x4QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 4, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x4QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 4, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x4QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 4, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x4QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 4, LMUL_F8, 64)
TUPLE_ENTRY (RVVM2x3QI, true, RVVM2QI, 3, LMUL_2, 4)
TUPLE_ENTRY (RVVM1x3QI, true, RVVM1QI, 3, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x3QI, true, RVVMF2QI, 3, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x3QI, true, RVVMF4QI, 3, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x3QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 3, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x3QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 3, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x3QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 3, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x3QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 3, LMUL_F8, 64)
TUPLE_ENTRY (RVVM4x2QI, true, RVVM4QI, 2, LMUL_4, 2)
TUPLE_ENTRY (RVVM2x2QI, true, RVVM2QI, 2, LMUL_2, 4)
TUPLE_ENTRY (RVVM1x2QI, true, RVVM1QI, 2, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x2QI, true, RVVMF2QI, 2, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x2QI, true, RVVMF4QI, 2, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x2QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 2, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x2QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 2, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x2QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 2, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x2QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 2, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x8HI, true, RVVM1HI, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8HI, true, RVVMF2HI, 8, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x8HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 8, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x8HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x8HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 8, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x7HI, true, RVVM1HI, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7HI, true, RVVMF2HI, 7, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x7HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 7, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x7HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x7HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 7, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x6HI, true, RVVM1HI, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6HI, true, RVVMF2HI, 6, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x6HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 6, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x6HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x6HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 6, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x5HI, true, RVVM1HI, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5HI, true, RVVMF2HI, 5, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x5HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 5, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x5HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x5HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 5, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x4HI, true, RVVM2HI, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4HI, true, RVVM1HI, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4HI, true, RVVMF2HI, 4, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x4HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 4, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x4HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x4HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 4, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x3HI, true, RVVM2HI, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3HI, true, RVVM1HI, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3HI, true, RVVMF2HI, 3, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x3HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 3, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x3HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x3HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 3, LMUL_F4, 64)
TUPLE_ENTRY (RVVM4x2HI, true, RVVM4HI, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2HI, true, RVVM2HI, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2HI, true, RVVM1HI, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2HI, true, RVVMF2HI, 2, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x2HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 2, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x2HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x2HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 2, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x8HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 8, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 8, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x8HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 8, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x7HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 7, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 7, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x7HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 7, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x6HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 6, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 6, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x6HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 6, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x5HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 5, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 5, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x5HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 5, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 4, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 4, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x4HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 4, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 3, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 3, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x3HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 3, LMUL_F4, 64)
TUPLE_ENTRY (RVVM4x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM4HF, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 2, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 2, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 2, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x8SI, true, RVVM1SI, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x8SI, (TARGET_MIN_VLEN > 32) && !TARGET_XTHEADVECTOR, RVVMF2SI, 8, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x7SI, true, RVVM1SI, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x7SI, (TARGET_MIN_VLEN > 32) && !TARGET_XTHEADVECTOR, RVVMF2SI, 7, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x6SI, true, RVVM1SI, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x6SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 6, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x5SI, true, RVVM1SI, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x5SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 5, LMUL_F2, 32)
TUPLE_ENTRY (RVVM2x4SI, true, RVVM2SI, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4SI, true, RVVM1SI, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x4SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 4, LMUL_F2, 32)
TUPLE_ENTRY (RVVM2x3SI, true, RVVM2SI, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3SI, true, RVVM1SI, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x3SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 3, LMUL_F2, 32)
TUPLE_ENTRY (RVVM4x2SI, true, RVVM4SI, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2SI, true, RVVM2SI, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2SI, true, RVVM1SI, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x2SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 2, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x8SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 8, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x7SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 7, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x6SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 6, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x5SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 5, LMUL_F2, 32)
TUPLE_ENTRY (RVVM2x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 4, LMUL_F2, 32)
TUPLE_ENTRY (RVVM2x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 3, LMUL_F2, 32)
TUPLE_ENTRY (RVVM4x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM4SF, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 2, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x8DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 8, LMUL_1, 16)
TUPLE_ENTRY (RVVM1x7DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 7, LMUL_1, 16)
@@ -1406,6 +1406,9 @@ riscv_v_adjust_bytesize (machine_mode mode, int scale)
{
if (riscv_v_ext_vector_mode_p (mode))
{
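+      /* With XTheadVector, fractional LMUL modes are disabled (see the
+         ENTRY changes above), so every remaining vector mode is assumed
+         to occupy a whole vector register.  */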
+ if (TARGET_XTHEADVECTOR)
+ return BYTES_PER_RISCV_VECTOR;
+
poly_int64 nunits = GET_MODE_NUNITS (mode);
poly_int64 mode_size = GET_MODE_SIZE (mode);
@@ -9962,7 +9965,7 @@ riscv_use_divmod_expander (void)
static machine_mode
riscv_preferred_simd_mode (scalar_mode mode)
{
- if (TARGET_VECTOR)
+ if (TARGET_VECTOR && !TARGET_XTHEADVECTOR)
return riscv_vector::preferred_simd_mode (mode);
return word_mode;
@@ -10313,7 +10316,7 @@ riscv_mode_priority (int, int n)
unsigned int
riscv_autovectorize_vector_modes (vector_modes *modes, bool all)
{
- if (TARGET_VECTOR)
+ if (TARGET_VECTOR && !TARGET_XTHEADVECTOR)
return riscv_vector::autovectorize_vector_modes (modes, all);
return default_autovectorize_vector_modes (modes, all);
@@ -10496,6 +10499,16 @@ extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset)
return false;
}
+/* Implement TARGET_VECTOR_MODE_SUPPORTED_ANY_TARGET_P.  */
+
+static bool
+riscv_vector_mode_supported_any_target_p (machine_mode)
+{
+ if (TARGET_XTHEADVECTOR)
+ return false;
+ return true;
+}
+
/* Initialize the GCC target structure. */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
@@ -10839,6 +10852,9 @@ extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset)
#undef TARGET_PREFERRED_ELSE_VALUE
#define TARGET_PREFERRED_ELSE_VALUE riscv_preferred_else_value
+#undef TARGET_VECTOR_MODE_SUPPORTED_ANY_TARGET_P
+#define TARGET_VECTOR_MODE_SUPPORTED_ANY_TARGET_P riscv_vector_mode_supported_any_target_p
+
struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-riscv.h"
new file mode 100644
@@ -0,0 +1,49 @@
+/* RISC-V 'XTheadVector' Extension intrinsics include file.
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <" target="_blank">http://www.gnu.org/licenses/>. */
+
+#ifndef __RISCV_TH_VECTOR_H
+#define __RISCV_TH_VECTOR_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifndef __riscv_xtheadvector
+#error "XTheadVector intrinsics require the xtheadvector extension."
+#else
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* NOTE: This implementation of riscv_th_vector.h is intentionally short. It does
+ not define the RVV types and intrinsic functions directly in C and C++
+ code, but instead uses the following pragma to tell GCC to insert the
+ necessary type and function definitions itself. The net effect is the
+ same, and the file is a complete implementation of riscv_th_vector.h. */
+#pragma riscv intrinsic "xtheadvector"
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+#endif // __riscv_xtheadvector
+#endif // __RISCV_TH_VECTOR_H
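As a usage sketch, a translation unit built with xtheadvector enabled needs only the include; the pragma above makes GCC register the vector types and intrinsics itself, mirroring riscv_vector.h. The type name below is assumed to follow the standard RVV naming that the pragma registers, rather than verified against this revision of the patch:

    #include <riscv_th_vector.h>

    void
    use_type (void)
    {
      /* Declaring this compiles only because the pragma in the header
         registered the type (name assumed per the riscv_vector.h
         convention).  */
      vint8m1_t v __attribute__ ((unused));
    }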
new file mode 100644
@@ -0,0 +1,69 @@
+(define_c_enum "unspec" [
+ UNSPEC_TH_VWLDST
+])
+
+(define_mode_iterator V_VLS_VT [V VLS VT])
+(define_mode_iterator V_VB_VLS_VT [V VB VLS VT])
+
+(define_split
+ [(set (match_operand:V_VB_VLS_VT 0 "reg_or_mem_operand")
+ (match_operand:V_VB_VLS_VT 1 "reg_or_mem_operand"))]
+ "TARGET_XTHEADVECTOR"
+ [(const_int 0)]
+ {
+ emit_insn (gen_pred_th_whole_mov (<MODE>mode, operands[0], operands[1],
+				      RVV_VLMAX, GEN_INT (riscv_vector::VLMAX)));
+ DONE;
+ })
+
+(define_insn_and_split "@pred_th_whole_mov<mode>"
+ [(set (match_operand:V_VLS_VT 0 "reg_or_mem_operand" "=vr,vr, m")
+ (unspec:V_VLS_VT
+ [(match_operand:V_VLS_VT 1 "reg_or_mem_operand" " vr, m,vr")
+ (match_operand 2 "vector_length_operand" " rK, rK, rK")
+ (match_operand 3 "const_1_operand" " i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)]
+ UNSPEC_TH_VWLDST))]
+ "TARGET_XTHEADVECTOR"
+ "@
+ vmv.v.v\t%0,%1
+ vle.v\t%0,%1
+ vse.v\t%1,%0"
+ "&& REG_P (operands[0]) && REG_P (operands[1])
+ && REGNO (operands[0]) == REGNO (operands[1])"
+ [(const_int 0)]
+ ""
+ [(set_attr "type" "vimov,vlds,vlds")
+ (set_attr "mode" "<MODE>")
+ (set (attr "ta") (symbol_ref "riscv_vector::TAIL_UNDISTURBED"))
+ (set (attr "ma") (symbol_ref "riscv_vector::MASK_UNDISTURBED"))
+ (set (attr "avl_type_idx") (const_int 3))
+ (set_attr "vl_op_idx" "2")])
+
+(define_insn_and_split "@pred_th_whole_mov<mode>"
+ [(set (match_operand:VB 0 "reg_or_mem_operand" "=vr,vr, m")
+ (unspec:VB
+ [(match_operand:VB 1 "reg_or_mem_operand" " vr, m,vr")
+ (match_operand 2 "vector_length_operand" " rK, rK, rK")
+ (match_operand 3 "const_1_operand" " i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)]
+ UNSPEC_TH_VWLDST))]
+ "TARGET_XTHEADVECTOR"
+ "@
+ vmv.v.v\t%0,%1
+ vle.v\t%0,%1
+ vse.v\t%1,%0"
+ "&& REG_P (operands[0]) && REG_P (operands[1])
+ && REGNO (operands[0]) == REGNO (operands[1])"
+ [(const_int 0)]
+ ""
+ [(set_attr "type" "vimov,vlds,vlds")
+ (set_attr "mode" "<MODE>")
+ (set (attr "ta") (symbol_ref "riscv_vector::TAIL_UNDISTURBED"))
+ (set (attr "ma") (symbol_ref "riscv_vector::MASK_UNDISTURBED"))
+ (set (attr "avl_type_idx") (const_int 3))
+ (set_attr "vl_op_idx" "2")
+ (set (attr "sew") (const_int 8))
+ (set (attr "vlmul") (symbol_ref "riscv_vector::LMUL_1"))])
@@ -109,11 +109,11 @@
])
(define_mode_iterator VI [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -128,11 +128,11 @@
;; allow the instruction and mode to be matched during combine et al.
(define_mode_iterator VF [
(RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
- (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -140,11 +140,11 @@
(define_mode_iterator VF_ZVFHMIN [
(RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -271,16 +271,16 @@
])
(define_mode_iterator VEEWEXT2 [
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -290,10 +290,10 @@
])
(define_mode_iterator VEEWEXT4 [
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -311,59 +311,59 @@
])
(define_mode_iterator VEEWTRUNC2 [
- RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
(RVVM4SI "TARGET_64BIT")
(RVVM2SI "TARGET_64BIT")
(RVVM1SI "TARGET_64BIT")
- (RVVMF2SI "TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
(RVVM4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_64BIT")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_64BIT")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_64BIT")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
])
(define_mode_iterator VEEWTRUNC4 [
- RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM2HI "TARGET_64BIT")
(RVVM1HI "TARGET_64BIT")
- (RVVMF2HI "TARGET_64BIT")
- (RVVMF4HI "TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF2HI "!TARGET_XTHEADVECTOR && TARGET_64BIT")
+ (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
(RVVM2HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
(RVVM1HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
- (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
])
(define_mode_iterator VEEWTRUNC8 [
(RVVM1QI "TARGET_64BIT")
- (RVVMF2QI "TARGET_64BIT")
- (RVVMF4QI "TARGET_64BIT")
- (RVVMF8QI "TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF2QI "!TARGET_XTHEADVECTOR && TARGET_64BIT")
+ (RVVMF4QI "!TARGET_XTHEADVECTOR && TARGET_64BIT")
+ (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
])
(define_mode_iterator VEI16 [
- RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -452,11 +452,11 @@
])
(define_mode_iterator VFULLI [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_FULL_V") (RVVM4DI "TARGET_FULL_V") (RVVM2DI "TARGET_FULL_V") (RVVM1DI "TARGET_FULL_V")
@@ -509,17 +509,17 @@
])
(define_mode_iterator VI_QH [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
])
(define_mode_iterator VI_QHS [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(V1QI "riscv_vector::vls_mode_valid_p (V1QImode)")
(V2QI "riscv_vector::vls_mode_valid_p (V2QImode)")
@@ -560,11 +560,11 @@
])
(define_mode_iterator VI_QHS_NO_M8 [
- RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(V1QI "riscv_vector::vls_mode_valid_p (V1QImode)")
(V2QI "riscv_vector::vls_mode_valid_p (V2QImode)")
@@ -603,11 +603,11 @@
(define_mode_iterator VF_HS [
(RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
- (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_ZVFH")
(V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_ZVFH")
@@ -638,12 +638,12 @@
(RVVM4HF "TARGET_ZVFH")
(RVVM2HF "TARGET_ZVFH")
(RVVM1HF "TARGET_ZVFH")
- (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_MIN_VLEN > 32")
(RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_ZVFH")
(V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_ZVFH")
@@ -674,11 +674,11 @@
])
(define_mode_iterator V_VLSI_QHS [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(V1QI "riscv_vector::vls_mode_valid_p (V1QImode)")
(V2QI "riscv_vector::vls_mode_valid_p (V2QImode)")
@@ -756,27 +756,27 @@
;; E.g. when index mode = RVVM8QImde and Pmode = SImode, if it is not zero_extend or
;; scalar != 1, such gather/scatter is not allowed since we don't have RVVM32SImode.
(define_mode_iterator RATIO64 [
- (RVVMF8QI "TARGET_MIN_VLEN > 32")
- (RVVMF4HI "TARGET_MIN_VLEN > 32")
- (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
+ (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
+ (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM1DI "TARGET_VECTOR_ELEN_64")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
])
(define_mode_iterator RATIO32 [
- RVVMF4QI
- RVVMF2HI
+ (RVVMF4QI "!TARGET_XTHEADVECTOR")
+ (RVVMF2HI "!TARGET_XTHEADVECTOR")
RVVM1SI
(RVVM2DI "TARGET_VECTOR_ELEN_64")
- (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64")
])
(define_mode_iterator RATIO16 [
- RVVMF2QI
+ (RVVMF2QI "!TARGET_XTHEADVECTOR")
RVVM1HI
RVVM2SI
(RVVM4DI "TARGET_VECTOR_ELEN_64")
@@ -814,21 +814,21 @@
])
(define_mode_iterator RATIO64I [
- (RVVMF8QI "TARGET_MIN_VLEN > 32")
- (RVVMF4HI "TARGET_MIN_VLEN > 32")
- (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
+ (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
+ (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM1DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
])
(define_mode_iterator RATIO32I [
- RVVMF4QI
- RVVMF2HI
+ (RVVMF4QI "!TARGET_XTHEADVECTOR")
+ (RVVMF2HI "!TARGET_XTHEADVECTOR")
RVVM1SI
(RVVM2DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
])
(define_mode_iterator RATIO16I [
- RVVMF2QI
+ (RVVMF2QI "!TARGET_XTHEADVECTOR")
RVVM1HI
RVVM2SI
(RVVM4DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
@@ -873,21 +873,21 @@
])
(define_mode_iterator V_FRACT [
- RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16") (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
- (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
])
(define_mode_iterator VWEXTI [
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -933,7 +933,7 @@
(RVVM4SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -966,7 +966,7 @@
(RVVM4SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32")
(RVVM1SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -996,7 +996,7 @@
(define_mode_iterator VWCONVERTI [
(RVVM8SI "TARGET_ZVFH") (RVVM4SI "TARGET_ZVFH") (RVVM2SI "TARGET_ZVFH") (RVVM1SI "TARGET_ZVFH")
- (RVVMF2SI "TARGET_ZVFH")
+ (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_ZVFH")
(RVVM8DI "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
(RVVM4DI "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
@@ -1045,7 +1045,7 @@
])
(define_mode_iterator VQEXTI [
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -1456,11 +1456,11 @@
;; VINDEXED [VI8 VI16 VI32 (VI64 "TARGET_64BIT")].
(define_mode_iterator VINDEXED [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
(RVVM4DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
@@ -1468,12 +1468,12 @@
(RVVM1DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
(RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
- (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32") (RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_64BIT")
(RVVM4DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_64BIT")
@@ -3173,11 +3173,11 @@
(define_mode_iterator V_VLS_F_CONVERT_SI [
(RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH") (RVVM1HF "TARGET_ZVFH")
- (RVVMF2HF "TARGET_ZVFH") (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH") (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32") (RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
@@ -3290,12 +3290,12 @@
])
(define_mode_iterator V_VLS_F_CONVERT_DI [
- (RVVM2HF "TARGET_ZVFH") (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVM2HF "TARGET_ZVFH") (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_MIN_VLEN > 32")
(RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -83,7 +83,7 @@
;; check. However, we need default value of SEW for vsetvl instruction since there
;; is no field for ratio in the vsetvl instruction encoding.
(define_attr "sew" ""
- (cond [(eq_attr "mode" "RVVMF64BI,RVVMF32BI,RVVMF16BI,RVVMF8BI,RVVMF4BI,RVVMF2BI,RVVM1BI,\
+ (cond [(eq_attr "mode" "RVVMF8BI,RVVMF4BI,RVVMF2BI,RVVM1BI,\
RVVM8QI,RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI,\
RVVM1x8QI,RVVMF2x8QI,RVVMF4x8QI,RVVMF8x8QI,\
RVVM1x7QI,RVVMF2x7QI,RVVMF4x7QI,RVVMF8x7QI,\
@@ -95,6 +95,18 @@
V1QI,V2QI,V4QI,V8QI,V16QI,V32QI,V64QI,V128QI,V256QI,V512QI,V1024QI,V2048QI,V4096QI,\
V1BI,V2BI,V4BI,V8BI,V16BI,V32BI,V64BI,V128BI,V256BI,V512BI,V1024BI,V2048BI,V4096BI")
(const_int 8)
+ (eq_attr "mode" "RVVMF16BI")
+ (if_then_else (match_test "TARGET_XTHEADVECTOR")
+ (const_int 16)
+ (const_int 8))
+ (eq_attr "mode" "RVVMF32BI")
+ (if_then_else (match_test "TARGET_XTHEADVECTOR")
+ (const_int 32)
+ (const_int 8))
+ (eq_attr "mode" "RVVMF64BI")
+ (if_then_else (match_test "TARGET_XTHEADVECTOR")
+ (const_int 64)
+ (const_int 8))
(eq_attr "mode" "RVVM8HI,RVVM4HI,RVVM2HI,RVVM1HI,RVVMF2HI,RVVMF4HI,\
RVVM1x8HI,RVVMF2x8HI,RVVMF4x8HI,\
RVVM1x7HI,RVVMF2x7HI,RVVMF4x7HI,\
@@ -155,9 +167,9 @@
(eq_attr "mode" "RVVM4QI,RVVMF2BI") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2QI,RVVMF4BI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1QI,RVVMF8BI") (symbol_ref "riscv_vector::LMUL_1")
- (eq_attr "mode" "RVVMF2QI,RVVMF16BI") (symbol_ref "riscv_vector::LMUL_F2")
- (eq_attr "mode" "RVVMF4QI,RVVMF32BI") (symbol_ref "riscv_vector::LMUL_F4")
- (eq_attr "mode" "RVVMF8QI,RVVMF64BI") (symbol_ref "riscv_vector::LMUL_F8")
+ (eq_attr "mode" "RVVMF2QI,RVVMF16BI") (symbol_ref "TARGET_XTHEADVECTOR ? riscv_vector::LMUL_1 : riscv_vector::LMUL_F2")
+ (eq_attr "mode" "RVVMF4QI,RVVMF32BI") (symbol_ref "TARGET_XTHEADVECTOR ? riscv_vector::LMUL_1 : riscv_vector::LMUL_F4")
+ (eq_attr "mode" "RVVMF8QI,RVVMF64BI") (symbol_ref "TARGET_XTHEADVECTOR ? riscv_vector::LMUL_1 : riscv_vector::LMUL_F8")
(eq_attr "mode" "RVVM8HI") (symbol_ref "riscv_vector::LMUL_8")
(eq_attr "mode" "RVVM4HI") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2HI") (symbol_ref "riscv_vector::LMUL_2")
@@ -428,6 +440,10 @@
vislide1up,vislide1down,vfslide1up,vfslide1down,\
vgather,vcompress,vlsegdux,vlsegdox,vssegtux,vssegtox")
(const_int INVALID_ATTRIBUTE)
+ (and (eq_attr "type" "vlde,vste,vlsegde,vssegte,vlsegds,vssegts,\
+ vlsegdff,vssegtux,vlsegdox,vlsegdux")
+ (match_test "TARGET_XTHEADVECTOR"))
+ (const_int INVALID_ATTRIBUTE)
(eq_attr "mode" "RVVM8QI,RVVM1BI") (const_int 1)
(eq_attr "mode" "RVVM4QI,RVVMF2BI") (const_int 2)
(eq_attr "mode" "RVVM2QI,RVVMF4BI") (const_int 4)
@@ -888,6 +904,8 @@
(symbol_ref "riscv_vector::FRM_DYN")]
(symbol_ref "riscv_vector::FRM_NONE")))
+(include "thead-vector.md")
+
;; -----------------------------------------------------------------
;; ---- Miscellaneous Operations
;; -----------------------------------------------------------------
@@ -1061,6 +1079,12 @@
- We can not leave it to TARGET_SECONDARY_RELOAD since it happens
before spilling. The clobber scratch is used by spilling fractional
registers in IRA/LRA so it's too early. */
+ if (TARGET_XTHEADVECTOR)
+ {
+ emit_insn (gen_pred_th_whole_mov (<MODE>mode, operands[0], operands[1],
+					RVV_VLMAX, GEN_INT (riscv_vector::VLMAX)));
+ DONE;
+ }
if (riscv_vector::legitimize_move (operands[0], &operands[1]))
DONE;
@@ -1097,7 +1121,7 @@
(define_insn "*mov<mode>_whole"
[(set (match_operand:V_WHOLE 0 "reg_or_mem_operand" "=vr, m,vr")
(match_operand:V_WHOLE 1 "reg_or_mem_operand" " m,vr,vr"))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"@
vl%m1re<sew>.v\t%0,%1
vs%m1r.v\t%1,%0
@@ -1118,6 +1142,13 @@
(match_operand:VB 1 "general_operand"))]
"TARGET_VECTOR"
{
+ if (TARGET_XTHEADVECTOR)
+ {
+ emit_insn (gen_pred_th_whole_mov (<MODE>mode, operands[0], operands[1],
+					RVV_VLMAX, GEN_INT (riscv_vector::VLMAX)));
+ DONE;
+ }
+
if (riscv_vector::legitimize_move (operands[0], &operands[1]))
DONE;
})
@@ -1125,7 +1156,7 @@
(define_insn "*mov<mode>"
[(set (match_operand:VB 0 "register_operand" "=vr")
(match_operand:VB 1 "register_operand" " vr"))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"vmv1r.v\t%0,%1"
[(set_attr "type" "vmov")
(set_attr "mode" "<MODE>")])
@@ -1511,7 +1542,7 @@
(match_dup 4)
(match_dup 5)] UNSPEC_VSETVL))]
"TARGET_VECTOR"
- "vset%i1vli\t%0,%1,e%2,%m3,t%p4,m%p5"
+ { return TARGET_XTHEADVECTOR ? "vsetvli\t%0,%1,e%2,%m3" : "vset%i1vli\t%0,%1,e%2,%m3,t%p4,m%p5"; }
[(set_attr "type" "vsetvl")
(set_attr "mode" "<MODE>")
(set (attr "sew") (symbol_ref "INTVAL (operands[2])"))
@@ -1529,7 +1560,7 @@
(match_operand 2 "const_int_operand" "i")
(match_operand 3 "const_int_operand" "i")] UNSPEC_VSETVL))]
"TARGET_VECTOR"
- "vsetvli\tzero,zero,e%0,%m1,t%p2,m%p3"
+ { return TARGET_XTHEADVECTOR ? "vsetvli\tzero,zero,e%0,%m1" : "vsetvli\tzero,zero,e%0,%m1,t%p2,m%p3"; }
[(set_attr "type" "vsetvl")
(set_attr "mode" "SI")
(set (attr "sew") (symbol_ref "INTVAL (operands[0])"))
@@ -1551,7 +1582,7 @@
(match_operand 3 "const_int_operand" "i")
(match_operand 4 "const_int_operand" "i")] UNSPEC_VSETVL))]
"TARGET_VECTOR"
- "vset%i0vli\tzero,%0,e%1,%m2,t%p3,m%p4"
+ { return TARGET_XTHEADVECTOR ? "vsetvli\tzero,%0,e%1,%m2" : "vset%i0vli\tzero,%0,e%1,%m2,t%p3,m%p4"; }
[(set_attr "type" "vsetvl")
(set_attr "mode" "<MODE>")
(set (attr "sew") (symbol_ref "INTVAL (operands[1])"))
@@ -3680,7 +3711,7 @@
(any_extend:VWEXTI
(match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84, vr, vr"))
(match_operand:VWEXTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"v<sz>ext.vf2\t%0,%3%p1"
[(set_attr "type" "vext")
(set_attr "mode" "<MODE>")
@@ -3701,7 +3732,7 @@
(any_extend:VQEXTI
(match_operand:<V_QUAD_TRUNC> 3 "register_operand" "W43,W43,W43,W43,W86,W86,W86,W86, vr, vr"))
(match_operand:VQEXTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"v<sz>ext.vf4\t%0,%3%p1"
[(set_attr "type" "vext")
(set_attr "mode" "<MODE>")
@@ -3722,7 +3753,7 @@
(any_extend:VOEXTI
(match_operand:<V_OCT_TRUNC> 3 "register_operand" "W87,W87,W87,W87, vr, vr"))
(match_operand:VOEXTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"v<sz>ext.vf8\t%0,%3%p1"
[(set_attr "type" "vext")
(set_attr "mode" "<MODE>")
@@ -1,4 +1,4 @@
-/* { dg-do compile } */
+/* { dg-do compile { target { ! riscv_xtheadvector } } } */
/* { dg-skip-if "test rvv intrinsic" { *-*-* } { "*" } { "-march=rv*v*" } } */
void foo0 () {__rvv_bool64_t t;}
@@ -1,4 +1,4 @@
/* { dg-do compile } */
/* { dg-options "-O3 -march=rv32gc -mabi=ilp32d" } */
-#pragma riscv intrinsic "vector" /* { dg-error {#pragma riscv intrinsic' option 'vector' needs 'V' extension enabled} } */
+#pragma riscv intrinsic "vector" /* { dg-error {#pragma riscv intrinsic' option 'vector' needs 'V' or 'XTHEADVECTOR' extension enabled} } */
@@ -1952,6 +1952,18 @@ proc check_effective_target_riscv_zbb { } {
}]
}
+# Return 1 if the target arch supports the XTheadVector extension, 0 otherwise.
+# Cache the result.
+
+proc check_effective_target_riscv_xtheadvector { } {
+ return [check_no_compiler_messages riscv_ext_xtheadvector assembly {
+ #ifndef __riscv_xtheadvector
+ #error "Not __riscv_xtheadvector"
+ #endif
+ }]
+}
+
# Return 1 if we can execute code when using dg-add-options riscv_v
proc check_effective_target_riscv_v_ok { } {