@@ -549,7 +549,7 @@ riscv*)
extra_objs="${extra_objs} riscv-vector-builtins.o riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o"
extra_objs="${extra_objs} thead.o riscv-target-attr.o"
d_target_objs="riscv-d.o"
- extra_headers="riscv_vector.h"
+ extra_headers="riscv_vector.h riscv_th_vector.h"
target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.cc"
target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.h"
;;
@@ -2579,7 +2579,7 @@
[(match_operand 0 "register_operand")
(match_operand 1 "memory_operand")
(match_operand:ANYI 2 "const_int_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
{
riscv_vector::expand_rawmemchr(<MODE>mode, operands[0], operands[1],
operands[2]);
@@ -64,8 +64,9 @@
(match_operand 0 "register_operand")))
(define_predicate "vector_csr_operand"
- (ior (match_operand 0 "const_csr_operand")
- (match_operand 0 "register_operand")))
+ (ior (and (match_test "!TARGET_XTHEADVECTOR || rtx_equal_p (op, const0_rtx)")
+ (match_operand 0 "const_csr_operand"))
+ (match_operand 0 "register_operand")))
;; V has 32-bit unsigned immediates. This happens to be the same constraint as
;; the csr_operand, but it's not CSR related.
@@ -432,7 +433,8 @@
;; Predicates for the V extension.
(define_special_predicate "vector_length_operand"
(ior (match_operand 0 "pmode_register_operand")
- (match_operand 0 "const_csr_operand")))
+ (and (match_test "!TARGET_XTHEADVECTOR || rtx_equal_p (op, const0_rtx)")
+ (match_operand 0 "const_csr_operand"))))
(define_special_predicate "autovec_length_operand"
(ior (match_operand 0 "pmode_register_operand")
@@ -808,6 +808,9 @@ expand_block_move (rtx dst_in, rtx src_in, rtx length_in)
bnez a2, loop # Any more?
ret # Return
*/
+ if (TARGET_XTHEADVECTOR)
+ return false;
+
gcc_assert (TARGET_VECTOR);
HOST_WIDE_INT potential_ew
@@ -1523,6 +1523,13 @@ legitimize_move (rtx dest, rtx *srcp)
return true;
}
+ if (TARGET_XTHEADVECTOR)
+ {
+ emit_insn (gen_pred_th_whole_mov (mode, dest, src,
+				      RVV_VLMAX, GEN_INT (VLMAX)));
+ return true;
+ }
+
if (riscv_v_ext_vls_mode_p (mode))
{
if (GET_MODE_NUNITS (mode).to_constant () <= 31)
@@ -1772,7 +1779,7 @@ get_prefer_tail_policy ()
compiler pick up either agnostic or undisturbed. Maybe we
will have a compile option like -mprefer=agnostic to set
this value???. */
- return TAIL_ANY;
+ return TARGET_XTHEADVECTOR ? TAIL_AGNOSTIC : TAIL_ANY;
}
/* Get prefer mask policy. */
@@ -1783,7 +1790,7 @@ get_prefer_mask_policy ()
compiler pick up either agnostic or undisturbed. Maybe we
will have a compile option like -mprefer=agnostic to set
this value???. */
- return MASK_ANY;
+ return TARGET_XTHEADVECTOR ? MASK_UNDISTURBED : MASK_ANY;
}
/* Get avl_type rtx. */
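For context, a minimal sketch (an assumed caller, not part of this patch) of how these
two defaults feed the vsetvl operands; it mirrors the gen_int_mode calls visible in the
vsetvl emission hunk further below:

    /* Hypothetical consumer of the policy defaults above.  */
    rtx ta = gen_int_mode (get_prefer_tail_policy (), Pmode);
    rtx ma = gen_int_mode (get_prefer_mask_policy (), Pmode);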
@@ -4383,7 +4390,7 @@ cmp_lmul_gt_one (machine_mode mode)
bool
vls_mode_valid_p (machine_mode vls_mode)
{
- if (!TARGET_VECTOR)
+ if (!TARGET_VECTOR || TARGET_XTHEADVECTOR)
return false;
if (riscv_autovec_preference == RVV_SCALABLE)
@@ -33,6 +33,25 @@
namespace riscv_vector {
+/* Check whether the RETURN_TYPE and ARGUMENT_TYPES are
+ valid for the function. */
+
+static bool
+check_type (tree return_type, vec<tree> &argument_types)
+{
+ tree arg;
+ unsigned i;
+
+ if (!return_type)
+ return false;
+
+ FOR_EACH_VEC_ELT (argument_types, i, arg)
+ if (!arg)
+ return false;
+
+ return true;
+}
+
/* Add one function instance for GROUP, using operand suffix at index OI,
mode suffix at index PAIR && bi and predication suffix at index pred_idx. */
static void
@@ -49,6 +68,10 @@ build_one (function_builder &b, const function_group_info &group,
group.ops_infos.types[vec_type_idx].index);
b.allocate_argument_types (function_instance, argument_types);
b.apply_predication (function_instance, return_type, argument_types);
+
+ if (TARGET_XTHEADVECTOR && !check_type (return_type, argument_types))
+ return;
+
b.add_overloaded_function (function_instance, *group.shape);
b.add_unique_function (function_instance, (*group.shape), return_type,
argument_types);
@@ -68,9 +68,9 @@ Encode the ratio of SEW/LMUL into the mask types.
#endif
/* Disable modes if TARGET_MIN_VLEN == 32. */
-ENTRY (RVVMF64BI, TARGET_MIN_VLEN > 32, LMUL_F8, 64)
-ENTRY (RVVMF32BI, true, LMUL_F4, 32)
-ENTRY (RVVMF16BI, true, LMUL_F2, 16)
+ENTRY (RVVMF64BI, TARGET_MIN_VLEN > 32, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F8, 64)
+ENTRY (RVVMF32BI, true, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F4, 32)
+ENTRY (RVVMF16BI, true, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F2, 16)
ENTRY (RVVMF8BI, true, LMUL_1, 8)
ENTRY (RVVMF4BI, true, LMUL_2, 4)
ENTRY (RVVMF2BI, true, LMUL_4, 2)
@@ -81,39 +81,39 @@ ENTRY (RVVM8QI, true, LMUL_8, 1)
ENTRY (RVVM4QI, true, LMUL_4, 2)
ENTRY (RVVM2QI, true, LMUL_2, 4)
ENTRY (RVVM1QI, true, LMUL_1, 8)
-ENTRY (RVVMF2QI, true, LMUL_F2, 16)
-ENTRY (RVVMF4QI, true, LMUL_F4, 32)
-ENTRY (RVVMF8QI, TARGET_MIN_VLEN > 32, LMUL_F8, 64)
+ENTRY (RVVMF2QI, !TARGET_XTHEADVECTOR, LMUL_F2, 16)
+ENTRY (RVVMF4QI, !TARGET_XTHEADVECTOR, LMUL_F4, 32)
+ENTRY (RVVMF8QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F8, 64)
/* Disable modes if TARGET_MIN_VLEN == 32. */
ENTRY (RVVM8HI, true, LMUL_8, 2)
ENTRY (RVVM4HI, true, LMUL_4, 4)
ENTRY (RVVM2HI, true, LMUL_2, 8)
ENTRY (RVVM1HI, true, LMUL_1, 16)
-ENTRY (RVVMF2HI, true, LMUL_F2, 32)
-ENTRY (RVVMF4HI, TARGET_MIN_VLEN > 32, LMUL_F4, 64)
+ENTRY (RVVMF2HI, !TARGET_XTHEADVECTOR, LMUL_F2, 32)
+ENTRY (RVVMF4HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
/* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_FP_16. */
ENTRY (RVVM8HF, TARGET_VECTOR_ELEN_FP_16, LMUL_8, 2)
ENTRY (RVVM4HF, TARGET_VECTOR_ELEN_FP_16, LMUL_4, 4)
ENTRY (RVVM2HF, TARGET_VECTOR_ELEN_FP_16, LMUL_2, 8)
ENTRY (RVVM1HF, TARGET_VECTOR_ELEN_FP_16, LMUL_1, 16)
-ENTRY (RVVMF2HF, TARGET_VECTOR_ELEN_FP_16, LMUL_F2, 32)
-ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, LMUL_F4, 64)
+ENTRY (RVVMF2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, LMUL_F2, 32)
+ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
/* Disable modes if TARGET_MIN_VLEN == 32. */
ENTRY (RVVM8SI, true, LMUL_8, 4)
ENTRY (RVVM4SI, true, LMUL_4, 8)
ENTRY (RVVM2SI, true, LMUL_2, 16)
ENTRY (RVVM1SI, true, LMUL_1, 32)
-ENTRY (RVVMF2SI, TARGET_MIN_VLEN > 32, LMUL_F2, 64)
+ENTRY (RVVMF2SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
/* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_FP_32. */
ENTRY (RVVM8SF, TARGET_VECTOR_ELEN_FP_32, LMUL_8, 4)
ENTRY (RVVM4SF, TARGET_VECTOR_ELEN_FP_32, LMUL_4, 8)
ENTRY (RVVM2SF, TARGET_VECTOR_ELEN_FP_32, LMUL_2, 16)
ENTRY (RVVM1SF, TARGET_VECTOR_ELEN_FP_32, LMUL_1, 32)
-ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, LMUL_F2, 64)
+ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
/* Disable modes if !TARGET_VECTOR_ELEN_64. */
ENTRY (RVVM8DI, TARGET_VECTOR_ELEN_64, LMUL_8, 8)
@@ -140,127 +140,127 @@ ENTRY (RVVM1DF, TARGET_VECTOR_ELEN_FP_64, LMUL_1, 64)
#endif
TUPLE_ENTRY (RVVM1x8QI, true, RVVM1QI, 8, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x8QI, true, RVVMF2QI, 8, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x8QI, true, RVVMF4QI, 8, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x8QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 8, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x8QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 8, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x8QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 8, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x8QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 8, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x7QI, true, RVVM1QI, 7, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x7QI, true, RVVMF2QI, 7, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x7QI, true, RVVMF4QI, 7, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x7QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 7, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x7QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 7, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x7QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 7, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x7QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 7, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x6QI, true, RVVM1QI, 6, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x6QI, true, RVVMF2QI, 6, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x6QI, true, RVVMF4QI, 6, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x6QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 6, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x6QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 6, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x6QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 6, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x6QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 6, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x5QI, true, RVVM1QI, 5, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x5QI, true, RVVMF2QI, 5, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x5QI, true, RVVMF4QI, 5, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x5QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 5, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x5QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 5, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x5QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 5, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x5QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 5, LMUL_F8, 64)
TUPLE_ENTRY (RVVM2x4QI, true, RVVM2QI, 4, LMUL_2, 4)
TUPLE_ENTRY (RVVM1x4QI, true, RVVM1QI, 4, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x4QI, true, RVVMF2QI, 4, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x4QI, true, RVVMF4QI, 4, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x4QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 4, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x4QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 4, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x4QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 4, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x4QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 4, LMUL_F8, 64)
TUPLE_ENTRY (RVVM2x3QI, true, RVVM2QI, 3, LMUL_2, 4)
TUPLE_ENTRY (RVVM1x3QI, true, RVVM1QI, 3, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x3QI, true, RVVMF2QI, 3, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x3QI, true, RVVMF4QI, 3, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x3QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 3, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x3QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 3, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x3QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 3, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x3QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 3, LMUL_F8, 64)
TUPLE_ENTRY (RVVM4x2QI, true, RVVM4QI, 2, LMUL_4, 2)
TUPLE_ENTRY (RVVM2x2QI, true, RVVM2QI, 2, LMUL_2, 4)
TUPLE_ENTRY (RVVM1x2QI, true, RVVM1QI, 2, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x2QI, true, RVVMF2QI, 2, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x2QI, true, RVVMF4QI, 2, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x2QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 2, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x2QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 2, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x2QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 2, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x2QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 2, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x8HI, true, RVVM1HI, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8HI, true, RVVMF2HI, 8, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x8HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 8, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x8HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x8HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 8, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x7HI, true, RVVM1HI, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7HI, true, RVVMF2HI, 7, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x7HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 7, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x7HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x7HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 7, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x6HI, true, RVVM1HI, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6HI, true, RVVMF2HI, 6, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x6HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 6, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x6HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x6HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 6, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x5HI, true, RVVM1HI, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5HI, true, RVVMF2HI, 5, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x5HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 5, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x5HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x5HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 5, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x4HI, true, RVVM2HI, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4HI, true, RVVM1HI, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4HI, true, RVVMF2HI, 4, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x4HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 4, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x4HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x4HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 4, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x3HI, true, RVVM2HI, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3HI, true, RVVM1HI, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3HI, true, RVVMF2HI, 3, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x3HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 3, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x3HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x3HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 3, LMUL_F4, 64)
TUPLE_ENTRY (RVVM4x2HI, true, RVVM4HI, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2HI, true, RVVM2HI, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2HI, true, RVVM1HI, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2HI, true, RVVMF2HI, 2, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x2HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 2, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x2HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x2HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 2, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x8HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 8, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 8, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x8HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 8, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x7HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 7, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 7, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x7HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 7, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x6HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 6, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 6, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x6HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 6, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x5HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 5, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 5, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x5HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 5, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 4, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 4, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x4HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 4, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 3, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 3, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x3HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 3, LMUL_F4, 64)
TUPLE_ENTRY (RVVM4x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM4HF, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 2, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 2, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 2, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x8SI, true, RVVM1SI, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x8SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 8, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x7SI, true, RVVM1SI, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x7SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 7, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x6SI, true, RVVM1SI, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x6SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 6, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x5SI, true, RVVM1SI, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x5SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 5, LMUL_F2, 32)
TUPLE_ENTRY (RVVM2x4SI, true, RVVM2SI, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4SI, true, RVVM1SI, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x4SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 4, LMUL_F2, 32)
TUPLE_ENTRY (RVVM2x3SI, true, RVVM2SI, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3SI, true, RVVM1SI, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x3SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 3, LMUL_F2, 32)
TUPLE_ENTRY (RVVM4x2SI, true, RVVM4SI, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2SI, true, RVVM2SI, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2SI, true, RVVM1SI, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x2SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 2, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x8SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 8, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x7SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 7, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x6SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 6, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x5SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 5, LMUL_F2, 32)
TUPLE_ENTRY (RVVM2x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 4, LMUL_F2, 32)
TUPLE_ENTRY (RVVM2x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 3, LMUL_F2, 32)
TUPLE_ENTRY (RVVM4x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM4SF, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 2, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x8DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 8, LMUL_1, 16)
TUPLE_ENTRY (RVVM1x7DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 7, LMUL_1, 16)
@@ -1117,6 +1117,17 @@ public:
avl = GEN_INT (0);
rtx sew = gen_int_mode (get_sew (), Pmode);
rtx vlmul = gen_int_mode (get_vlmul (), Pmode);
+
+  if (TARGET_XTHEADVECTOR)
+    {
+      if (change_vtype_only_p ())
+        return gen_th_vsetvl_vtype_change_only (sew, vlmul);
+      else if (has_vl () && !ignore_vl)
+        return gen_th_vsetvl (Pmode, get_vl (), avl, sew, vlmul);
+      else
+        return gen_th_vsetvl_discard_result (Pmode, avl, sew, vlmul);
+    }
+
rtx ta = gen_int_mode (get_ta (), Pmode);
rtx ma = gen_int_mode (get_ma (), Pmode);
@@ -1406,6 +1406,9 @@ riscv_v_adjust_bytesize (machine_mode mode, int scale)
{
if (riscv_v_ext_vector_mode_p (mode))
{
+ if (TARGET_XTHEADVECTOR)
+ return BYTES_PER_RISCV_VECTOR;
+
poly_int64 nunits = GET_MODE_NUNITS (mode);
poly_int64 mode_size = GET_MODE_SIZE (mode);
@@ -9970,7 +9973,7 @@ riscv_use_divmod_expander (void)
static machine_mode
riscv_preferred_simd_mode (scalar_mode mode)
{
- if (TARGET_VECTOR)
+ if (TARGET_VECTOR && !TARGET_XTHEADVECTOR)
return riscv_vector::preferred_simd_mode (mode);
return word_mode;
@@ -10321,7 +10324,7 @@ riscv_mode_priority (int, int n)
unsigned int
riscv_autovectorize_vector_modes (vector_modes *modes, bool all)
{
- if (TARGET_VECTOR)
+ if (TARGET_VECTOR && !TARGET_XTHEADVECTOR)
return riscv_vector::autovectorize_vector_modes (modes, all);
return default_autovectorize_vector_modes (modes, all);
@@ -10504,6 +10507,16 @@ extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset)
return false;
}
+/* Implements target hook vector_mode_supported_any_target_p. */
+
+static bool
+riscv_vector_mode_supported_any_target_p (machine_mode mode)
+{
+ if (TARGET_XTHEADVECTOR)
+ return false;
+ return true;
+}
+
/* Initialize the GCC target structure. */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
@@ -10847,6 +10860,9 @@ extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset)
#undef TARGET_PREFERRED_ELSE_VALUE
#define TARGET_PREFERRED_ELSE_VALUE riscv_preferred_else_value
+#undef TARGET_VECTOR_MODE_SUPPORTED_ANY_TARGET_P
+#define TARGET_VECTOR_MODE_SUPPORTED_ANY_TARGET_P riscv_vector_mode_supported_any_target_p
+
struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-riscv.h"
new file mode 100644
@@ -0,0 +1,49 @@
+/* RISC-V 'XTheadVector' Extension intrinsics include file.
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef __RISCV_TH_VECTOR_H
+#define __RISCV_TH_VECTOR_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifndef __riscv_xtheadvector
+#error "XTheadVector intrinsics require the xtheadvector extension."
+#else
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* NOTE: This implementation of riscv_th_vector.h is intentionally short. It does
+ not define the RVV types and intrinsic functions directly in C and C++
+ code, but instead uses the following pragma to tell GCC to insert the
+ necessary type and function definitions itself. The net effect is the
+ same, and the file is a complete implementation of riscv_th_vector.h. */
+#pragma riscv intrinsic "vector"
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+#endif // __riscv_xtheadvector
+#endif // __RISCV_TH_VECTOR_H
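Usage sketch (not part of the patch): user code can guard the include behind the
feature-test macro checked above, so the same source still compiles when the
extension is absent:

    #ifdef __riscv_xtheadvector
    #include <riscv_th_vector.h>
    #endif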
new file mode 100644
@@ -0,0 +1,120 @@
+(define_c_enum "unspec" [
+ UNSPEC_TH_VWLDST
+])
+
+(define_mode_iterator V_VLS_VT [V VLS VT])
+(define_mode_iterator V_VB_VLS_VT [V VB VLS VT])
+
+(define_split
+ [(set (match_operand:V_VB_VLS_VT 0 "reg_or_mem_operand")
+ (match_operand:V_VB_VLS_VT 1 "reg_or_mem_operand"))]
+ "TARGET_XTHEADVECTOR"
+ [(const_int 0)]
+ {
+ emit_insn (gen_pred_th_whole_mov (<MODE>mode, operands[0], operands[1],
+				      RVV_VLMAX, GEN_INT (riscv_vector::VLMAX)));
+ DONE;
+ })
+
+(define_insn_and_split "@pred_th_whole_mov<mode>"
+ [(set (match_operand:V_VLS_VT 0 "reg_or_mem_operand" "=vr,vr, m")
+ (unspec:V_VLS_VT
+ [(match_operand:V_VLS_VT 1 "reg_or_mem_operand" " vr, m,vr")
+ (match_operand 2 "vector_length_operand" " rK, rK, rK")
+ (match_operand 3 "const_1_operand" " i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)]
+ UNSPEC_TH_VWLDST))]
+ "TARGET_XTHEADVECTOR"
+ "@
+ vmv.v.v\t%0,%1
+ vle.v\t%0,%1
+ vse.v\t%1,%0"
+ "&& REG_P (operands[0]) && REG_P (operands[1])
+ && REGNO (operands[0]) == REGNO (operands[1])"
+ [(const_int 0)]
+ ""
+ [(set_attr "type" "vimov,vlds,vlds")
+ (set_attr "mode" "<MODE>")
+ (set (attr "ta") (symbol_ref "riscv_vector::TAIL_UNDISTURBED"))
+ (set (attr "ma") (symbol_ref "riscv_vector::MASK_UNDISTURBED"))
+ (set (attr "avl_type_idx") (const_int 3))
+ (set_attr "vl_op_idx" "2")])
+
+(define_insn_and_split "@pred_th_whole_mov<mode>"
+ [(set (match_operand:VB 0 "reg_or_mem_operand" "=vr,vr, m")
+ (unspec:VB
+ [(match_operand:VB 1 "reg_or_mem_operand" " vr, m,vr")
+ (match_operand 2 "vector_length_operand" " rK, rK, rK")
+ (match_operand 3 "const_1_operand" " i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)]
+ UNSPEC_TH_VWLDST))]
+ "TARGET_XTHEADVECTOR"
+ "@
+ vmv.v.v\t%0,%1
+ vle.v\t%0,%1
+ vse.v\t%1,%0"
+ "&& REG_P (operands[0]) && REG_P (operands[1])
+ && REGNO (operands[0]) == REGNO (operands[1])"
+ [(const_int 0)]
+ ""
+ [(set_attr "type" "vimov,vlds,vlds")
+ (set_attr "mode" "<MODE>")
+ (set (attr "ta") (symbol_ref "riscv_vector::TAIL_UNDISTURBED"))
+ (set (attr "ma") (symbol_ref "riscv_vector::MASK_UNDISTURBED"))
+ (set (attr "avl_type_idx") (const_int 3))
+ (set_attr "vl_op_idx" "2")
+ (set (attr "sew") (const_int 8))
+ (set (attr "vlmul") (symbol_ref "riscv_vector::LMUL_1"))])
+
+(define_insn "@th_vsetvl<mode>"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "vector_csr_operand" "rK")
+ (match_operand 2 "const_int_operand" "i")
+ (match_operand 3 "const_int_operand" "i")] UNSPEC_VSETVL))
+ (set (reg:SI VL_REGNUM)
+ (unspec:SI [(match_dup 1)
+ (match_dup 2)
+ (match_dup 3)] UNSPEC_VSETVL))
+ (set (reg:SI VTYPE_REGNUM)
+ (unspec:SI [(match_dup 2)
+ (match_dup 3)] UNSPEC_VSETVL))]
+ "TARGET_XTHEADVECTOR"
+ "vsetvli\t%0,%1,e%2,%m3"
+ [(set_attr "type" "vsetvl")
+ (set_attr "mode" "<MODE>")
+ (set (attr "sew") (symbol_ref "INTVAL (operands[2])"))
+ (set (attr "vlmul") (symbol_ref "INTVAL (operands[3])"))])
+
+;; vsetvl zero,zero,vtype instruction.
+;; This pattern has no side effects and does not set the X0 register.
+(define_insn "th_vsetvl_vtype_change_only"
+ [(set (reg:SI VTYPE_REGNUM)
+ (unspec:SI
+ [(match_operand 0 "const_int_operand" "i")
+ (match_operand 1 "const_int_operand" "i")] UNSPEC_VSETVL))]
+ "TARGET_XTHEADVECTOR"
+ "vsetvli\tzero,zero,e%0,%m1"
+ [(set_attr "type" "vsetvl")
+ (set_attr "mode" "SI")
+ (set (attr "sew") (symbol_ref "INTVAL (operands[0])"))
+ (set (attr "vlmul") (symbol_ref "INTVAL (operands[1])"))])
+
+;; vsetvl zero,rs1,vtype instruction.
+;; We need this pattern because we should avoid setting the X0 register
+;; in the vsetvl instruction pattern.
+(define_insn "@th_vsetvl_discard_result<mode>"
+ [(set (reg:SI VL_REGNUM)
+ (unspec:SI [(match_operand:P 0 "vector_csr_operand" "rK")
+ (match_operand 1 "const_int_operand" "i")
+ (match_operand 2 "const_int_operand" "i")] UNSPEC_VSETVL))
+ (set (reg:SI VTYPE_REGNUM)
+ (unspec:SI [(match_dup 1)
+ (match_dup 2)] UNSPEC_VSETVL))]
+ "TARGET_XTHEADVECTOR"
+ "vsetvli\tzero,%0,e%1,%m2"
+ [(set_attr "type" "vsetvl")
+ (set_attr "mode" "<MODE>")
+ (set (attr "sew") (symbol_ref "INTVAL (operands[1])"))
+ (set (attr "vlmul") (symbol_ref "INTVAL (operands[2])"))])
\ No newline at end of file
@@ -109,11 +109,11 @@
])
(define_mode_iterator VI [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -128,11 +128,11 @@
;; allow the instruction and mode to be matched during combine et al.
(define_mode_iterator VF [
(RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
- (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -140,11 +140,11 @@
(define_mode_iterator VF_ZVFHMIN [
(RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -271,16 +271,16 @@
])
(define_mode_iterator VEEWEXT2 [
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -290,10 +290,10 @@
])
(define_mode_iterator VEEWEXT4 [
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -311,59 +311,59 @@
])
(define_mode_iterator VEEWTRUNC2 [
- RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
(RVVM4SI "TARGET_64BIT")
(RVVM2SI "TARGET_64BIT")
(RVVM1SI "TARGET_64BIT")
- (RVVMF2SI "TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
(RVVM4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_64BIT")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_64BIT")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_64BIT")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
])
(define_mode_iterator VEEWTRUNC4 [
- RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM2HI "TARGET_64BIT")
(RVVM1HI "TARGET_64BIT")
- (RVVMF2HI "TARGET_64BIT")
- (RVVMF4HI "TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF2HI "!TARGET_XTHEADVECTOR && TARGET_64BIT")
+ (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
(RVVM2HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
(RVVM1HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
- (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
])
(define_mode_iterator VEEWTRUNC8 [
(RVVM1QI "TARGET_64BIT")
- (RVVMF2QI "TARGET_64BIT")
- (RVVMF4QI "TARGET_64BIT")
- (RVVMF8QI "TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF2QI "!TARGET_XTHEADVECTOR && TARGET_64BIT")
+ (RVVMF4QI "!TARGET_XTHEADVECTOR && TARGET_64BIT")
+ (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
])
(define_mode_iterator VEI16 [
- RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -452,11 +452,11 @@
])
(define_mode_iterator VFULLI [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_FULL_V") (RVVM4DI "TARGET_FULL_V") (RVVM2DI "TARGET_FULL_V") (RVVM1DI "TARGET_FULL_V")
@@ -509,17 +509,17 @@
])
(define_mode_iterator VI_QH [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
])
(define_mode_iterator VI_QHS [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(V1QI "riscv_vector::vls_mode_valid_p (V1QImode)")
(V2QI "riscv_vector::vls_mode_valid_p (V2QImode)")
@@ -560,11 +560,11 @@
])
(define_mode_iterator VI_QHS_NO_M8 [
- RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(V1QI "riscv_vector::vls_mode_valid_p (V1QImode)")
(V2QI "riscv_vector::vls_mode_valid_p (V2QImode)")
@@ -603,11 +603,11 @@
(define_mode_iterator VF_HS [
(RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
- (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_ZVFH")
(V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_ZVFH")
@@ -638,12 +638,12 @@
(RVVM4HF "TARGET_ZVFH")
(RVVM2HF "TARGET_ZVFH")
(RVVM1HF "TARGET_ZVFH")
- (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_MIN_VLEN > 32")
(RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_ZVFH")
(V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_ZVFH")
@@ -674,11 +674,11 @@
])
(define_mode_iterator V_VLSI_QHS [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(V1QI "riscv_vector::vls_mode_valid_p (V1QImode)")
(V2QI "riscv_vector::vls_mode_valid_p (V2QImode)")
@@ -756,27 +756,27 @@
;; E.g. when index mode = RVVM8QImode and Pmode = SImode, if it is not zero_extend or
;; scalar != 1, such gather/scatter is not allowed since we don't have RVVM32SImode.
(define_mode_iterator RATIO64 [
- (RVVMF8QI "TARGET_MIN_VLEN > 32")
- (RVVMF4HI "TARGET_MIN_VLEN > 32")
- (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
+ (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
+ (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM1DI "TARGET_VECTOR_ELEN_64")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
])
(define_mode_iterator RATIO32 [
- RVVMF4QI
- RVVMF2HI
+ (RVVMF4QI "!TARGET_XTHEADVECTOR")
+ (RVVMF2HI "!TARGET_XTHEADVECTOR")
RVVM1SI
(RVVM2DI "TARGET_VECTOR_ELEN_64")
- (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64")
])
(define_mode_iterator RATIO16 [
- RVVMF2QI
+ (RVVMF2QI "!TARGET_XTHEADVECTOR")
RVVM1HI
RVVM2SI
(RVVM4DI "TARGET_VECTOR_ELEN_64")
@@ -814,21 +814,21 @@
])
(define_mode_iterator RATIO64I [
- (RVVMF8QI "TARGET_MIN_VLEN > 32")
- (RVVMF4HI "TARGET_MIN_VLEN > 32")
- (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
+ (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
+ (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM1DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
])
(define_mode_iterator RATIO32I [
- RVVMF4QI
- RVVMF2HI
+ (RVVMF4QI "!TARGET_XTHEADVECTOR")
+ (RVVMF2HI "!TARGET_XTHEADVECTOR")
RVVM1SI
(RVVM2DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
])
(define_mode_iterator RATIO16I [
- RVVMF2QI
+ (RVVMF2QI "!TARGET_XTHEADVECTOR")
RVVM1HI
RVVM2SI
(RVVM4DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
@@ -873,21 +873,21 @@
])
(define_mode_iterator V_FRACT [
- RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16") (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
- (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
])
(define_mode_iterator VWEXTI [
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -933,7 +933,7 @@
(RVVM4SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -966,7 +966,7 @@
(RVVM4SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32")
(RVVM1SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -996,7 +996,7 @@
(define_mode_iterator VWCONVERTI [
(RVVM8SI "TARGET_ZVFH") (RVVM4SI "TARGET_ZVFH") (RVVM2SI "TARGET_ZVFH") (RVVM1SI "TARGET_ZVFH")
- (RVVMF2SI "TARGET_ZVFH")
+ (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_ZVFH")
(RVVM8DI "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
(RVVM4DI "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
@@ -1045,7 +1045,7 @@
])
(define_mode_iterator VQEXTI [
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -1456,11 +1456,11 @@
;; VINDEXED [VI8 VI16 VI32 (VI64 "TARGET_64BIT")].
(define_mode_iterator VINDEXED [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI (RVVMF2QI "!TARGET_XTHEADVECTOR") (RVVMF4QI "!TARGET_XTHEADVECTOR") (RVVMF8QI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI (RVVMF2HI "!TARGET_XTHEADVECTOR") (RVVMF4HI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "!TARGET_XTHEADVECTOR && TARGET_MIN_VLEN > 32")
(RVVM8DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
(RVVM4DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
@@ -1468,12 +1468,12 @@
(RVVM1DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
(RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
- (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32") (RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_64BIT")
(RVVM4DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_64BIT")
@@ -3173,11 +3173,11 @@
(define_mode_iterator V_VLS_F_CONVERT_SI [
(RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH") (RVVM1HF "TARGET_ZVFH")
- (RVVMF2HF "TARGET_ZVFH") (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH") (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_MIN_VLEN > 32")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32") (RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
@@ -3290,12 +3290,12 @@
])
(define_mode_iterator V_VLS_F_CONVERT_DI [
- (RVVM2HF "TARGET_ZVFH") (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVM2HF "TARGET_ZVFH") (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH")
+ (RVVMF4HF "!TARGET_XTHEADVECTOR && TARGET_ZVFH && TARGET_MIN_VLEN > 32")
(RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "!TARGET_XTHEADVECTOR && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -83,7 +83,7 @@
;; check. However, we need default value of SEW for vsetvl instruction since there
;; is no field for ratio in the vsetvl instruction encoding.
(define_attr "sew" ""
- (cond [(eq_attr "mode" "RVVMF64BI,RVVMF32BI,RVVMF16BI,RVVMF8BI,RVVMF4BI,RVVMF2BI,RVVM1BI,\
+ (cond [(eq_attr "mode" "RVVMF8BI,RVVMF4BI,RVVMF2BI,RVVM1BI,\
RVVM8QI,RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI,\
RVVM1x8QI,RVVMF2x8QI,RVVMF4x8QI,RVVMF8x8QI,\
RVVM1x7QI,RVVMF2x7QI,RVVMF4x7QI,RVVMF8x7QI,\
@@ -95,6 +95,18 @@
V1QI,V2QI,V4QI,V8QI,V16QI,V32QI,V64QI,V128QI,V256QI,V512QI,V1024QI,V2048QI,V4096QI,\
V1BI,V2BI,V4BI,V8BI,V16BI,V32BI,V64BI,V128BI,V256BI,V512BI,V1024BI,V2048BI,V4096BI")
(const_int 8)
+ (eq_attr "mode" "RVVMF16BI")
+ (if_then_else (match_test "TARGET_XTHEADVECTOR")
+ (const_int 16)
+ (const_int 8))
+ (eq_attr "mode" "RVVMF32BI")
+ (if_then_else (match_test "TARGET_XTHEADVECTOR")
+ (const_int 32)
+ (const_int 8))
+ (eq_attr "mode" "RVVMF64BI")
+ (if_then_else (match_test "TARGET_XTHEADVECTOR")
+ (const_int 64)
+ (const_int 8))
(eq_attr "mode" "RVVM8HI,RVVM4HI,RVVM2HI,RVVM1HI,RVVMF2HI,RVVMF4HI,\
RVVM1x8HI,RVVMF2x8HI,RVVMF4x8HI,\
RVVM1x7HI,RVVMF2x7HI,RVVMF4x7HI,\
@@ -155,9 +167,9 @@
(eq_attr "mode" "RVVM4QI,RVVMF2BI") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2QI,RVVMF4BI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1QI,RVVMF8BI") (symbol_ref "riscv_vector::LMUL_1")
- (eq_attr "mode" "RVVMF2QI,RVVMF16BI") (symbol_ref "riscv_vector::LMUL_F2")
- (eq_attr "mode" "RVVMF4QI,RVVMF32BI") (symbol_ref "riscv_vector::LMUL_F4")
- (eq_attr "mode" "RVVMF8QI,RVVMF64BI") (symbol_ref "riscv_vector::LMUL_F8")
+ (eq_attr "mode" "RVVMF2QI,RVVMF16BI") (symbol_ref "TARGET_XTHEADVECTOR ? riscv_vector::LMUL_1 : riscv_vector::LMUL_F2")
+ (eq_attr "mode" "RVVMF4QI,RVVMF32BI") (symbol_ref "TARGET_XTHEADVECTOR ? riscv_vector::LMUL_1 : riscv_vector::LMUL_F4")
+ (eq_attr "mode" "RVVMF8QI,RVVMF64BI") (symbol_ref "TARGET_XTHEADVECTOR ? riscv_vector::LMUL_1 : riscv_vector::LMUL_F8")
(eq_attr "mode" "RVVM8HI") (symbol_ref "riscv_vector::LMUL_8")
(eq_attr "mode" "RVVM4HI") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2HI") (symbol_ref "riscv_vector::LMUL_2")
@@ -428,6 +440,10 @@
vislide1up,vislide1down,vfslide1up,vfslide1down,\
vgather,vcompress,vlsegdux,vlsegdox,vssegtux,vssegtox")
(const_int INVALID_ATTRIBUTE)
+ (and (eq_attr "type" "vlde,vste,vlsegde,vssegte,vlsegds,vssegts,\
+ vlsegdff,vssegtux,vlsegdox,vlsegdux")
+ (match_test "TARGET_XTHEADVECTOR"))
+ (const_int INVALID_ATTRIBUTE)
(eq_attr "mode" "RVVM8QI,RVVM1BI") (const_int 1)
(eq_attr "mode" "RVVM4QI,RVVMF2BI") (const_int 2)
(eq_attr "mode" "RVVM2QI,RVVMF4BI") (const_int 4)
@@ -888,6 +904,8 @@
(symbol_ref "riscv_vector::FRM_DYN")]
(symbol_ref "riscv_vector::FRM_NONE")))
+(include "thead-vector.md")
+
;; -----------------------------------------------------------------
;; ---- Miscellaneous Operations
;; -----------------------------------------------------------------
@@ -1097,7 +1115,7 @@
(define_insn "*mov<mode>_whole"
[(set (match_operand:V_WHOLE 0 "reg_or_mem_operand" "=vr, m,vr")
(match_operand:V_WHOLE 1 "reg_or_mem_operand" " m,vr,vr"))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"@
vl%m1re<sew>.v\t%0,%1
vs%m1r.v\t%1,%0
@@ -1125,7 +1143,7 @@
(define_insn "*mov<mode>"
[(set (match_operand:VB 0 "register_operand" "=vr")
(match_operand:VB 1 "register_operand" " vr"))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"vmv1r.v\t%0,%1"
[(set_attr "type" "vmov")
(set_attr "mode" "<MODE>")])
@@ -3680,7 +3698,7 @@
(any_extend:VWEXTI
(match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84, vr, vr"))
(match_operand:VWEXTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"v<sz>ext.vf2\t%0,%3%p1"
[(set_attr "type" "vext")
(set_attr "mode" "<MODE>")
@@ -3701,7 +3719,7 @@
(any_extend:VQEXTI
(match_operand:<V_QUAD_TRUNC> 3 "register_operand" "W43,W43,W43,W43,W86,W86,W86,W86, vr, vr"))
(match_operand:VQEXTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"v<sz>ext.vf4\t%0,%3%p1"
[(set_attr "type" "vext")
(set_attr "mode" "<MODE>")
@@ -3722,7 +3740,7 @@
(any_extend:VOEXTI
(match_operand:<V_OCT_TRUNC> 3 "register_operand" "W87,W87,W87,W87, vr, vr"))
(match_operand:VOEXTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"v<sz>ext.vf8\t%0,%3%p1"
[(set_attr "type" "vext")
(set_attr "mode" "<MODE>")
@@ -1,4 +1,4 @@
-/* { dg-do compile } */
+/* { dg-do compile { target { ! riscv_xtheadvector } } } */
/* { dg-skip-if "test rvv intrinsic" { *-*-* } { "*" } { "-march=rv*v*" } } */
void foo0 () {__rvv_bool64_t t;}
@@ -1,4 +1,4 @@
/* { dg-do compile } */
/* { dg-options "-O3 -march=rv32gc -mabi=ilp32d" } */
-#pragma riscv intrinsic "vector" /* { dg-error {#pragma riscv intrinsic' option 'vector' needs 'V' extension enabled} } */
+#pragma riscv intrinsic "vector" /* { dg-error {#pragma riscv intrinsic' option 'vector' needs 'V' or 'XTHEADVECTOR' extension enabled} } */
@@ -1952,6 +1952,17 @@ proc check_effective_target_riscv_zbb { } {
}]
}
+# Return 1 if the target arch supports the XTheadVector extension, 0 otherwise.
+# Cache the result.
+
+proc check_effective_target_riscv_xtheadvector { } {
+ return [check_no_compiler_messages riscv_ext_xtheadvector assembly {
+ #ifndef __riscv_xtheadvector
+ #error "Not __riscv_xtheadvector"
+ #endif
+ }]
+}
+
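Usage sketch for the new effective target (the -march string is an assumption; the
declaration reuses the one from the adjusted intrinsic test earlier in this patch,
which also shows the negated form "! riscv_xtheadvector"):

    /* { dg-do compile { target riscv_xtheadvector } } */
    /* { dg-options "-march=rv32gc_xtheadvector -mabi=ilp32d" } */
    void foo () {__rvv_bool64_t t;}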
# Return 1 if we can execute code when using dg-add-options riscv_v
proc check_effective_target_riscv_v_ok { } {