@@ -547,7 +547,7 @@ riscv*)
extra_objs="riscv-builtins.o riscv-c.o riscv-sr.o riscv-shorten-memrefs.o riscv-selftests.o riscv-string.o"
extra_objs="${extra_objs} riscv-v.o riscv-vsetvl.o riscv-vector-costs.o riscv-avlprop.o"
extra_objs="${extra_objs} riscv-vector-builtins.o riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o"
- extra_objs="${extra_objs} thead.o riscv-target-attr.o"
+ extra_objs="${extra_objs} thead.o riscv-target-attr.o thead-vector-builtins.o"
d_target_objs="riscv-d.o"
extra_headers="riscv_vector.h riscv_th_vector.h"
target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.cc"
@@ -211,6 +211,104 @@ struct indexed_loadstore_def : public function_shape
}
};
+/* th_loadstore_width_def class. */
+struct th_loadstore_width_def : public build_base
+{
+ void build (function_builder &b,
+ const function_group_info &group) const override
+ {
+    /* Do not register the function when xtheadvector is not enabled.  */
+ if (!TARGET_XTHEADVECTOR)
+ return;
+
+ build_all (b, group);
+ }
+
+ char *get_name (function_builder &b, const function_instance &instance,
+ bool overloaded_p) const override
+ {
+    /* Return nullptr (so no name is registered) when xtheadvector is
+       not enabled.  */
+ if (!TARGET_XTHEADVECTOR)
+ return nullptr;
+
+    /* Return nullptr if the function cannot be overloaded.  */
+ if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
+ return nullptr;
+
+ b.append_base_name (instance.base_name);
+
+    /* vop --> vop_v_<type>.  */
+ if (!overloaded_p)
+ {
+ /* vop --> vop_v. */
+ b.append_name (operand_suffixes[instance.op_info->op]);
+ /* vop_v --> vop_v_<type>. */
+ b.append_name (type_suffixes[instance.type.index].vector);
+ }
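+
+    /* At this point a non-overloaded instance such as th_vlb on
+       vint32m1_t has been named th_vlb_v_i32m1 (see the tests below).  */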
+
+    /* According to the rvv-intrinsic-doc, no "_m" suffix is added for
+       the vop_m C++ overloaded API.  */
+ if (overloaded_p && instance.pred == PRED_TYPE_m)
+ return b.finish_name ();
+ b.append_name (predication_suffixes[instance.pred]);
+ return b.finish_name ();
+ }
+};
+
+/* th_indexed_loadstore_width_def class. */
+struct th_indexed_loadstore_width_def : public function_shape
+{
+ void build (function_builder &b,
+ const function_group_info &group) const override
+ {
+    /* Do not register the function when xtheadvector is not enabled.  */
+ if (!TARGET_XTHEADVECTOR)
+ return;
+
+ for (unsigned int pred_idx = 0; group.preds[pred_idx] != NUM_PRED_TYPES;
+ ++pred_idx)
+ {
+ for (unsigned int vec_type_idx = 0;
+ group.ops_infos.types[vec_type_idx].index != NUM_VECTOR_TYPES;
+ ++vec_type_idx)
+ {
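+	  /* Skip the type when no suitable index vector type exists
+	     for it.  */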
+ tree index_type = group.ops_infos.args[1].get_tree_type (
+ group.ops_infos.types[vec_type_idx].index);
+ if (!index_type)
+ continue;
+ build_one (b, group, pred_idx, vec_type_idx);
+ }
+ }
+ }
+
+ char *get_name (function_builder &b, const function_instance &instance,
+ bool overloaded_p) const override
+ {
+    /* Return nullptr if the function cannot be overloaded.  */
+ if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
+ return nullptr;
+
+ b.append_base_name (instance.base_name);
+    /* vop --> vop_v_<type>.  */
+ if (!overloaded_p)
+ {
+ /* vop --> vop_v. */
+ b.append_name (operand_suffixes[instance.op_info->op]);
+ /* vop_v --> vop_v_<type>. */
+ b.append_name (type_suffixes[instance.type.index].vector);
+ }
+
+    /* According to the rvv-intrinsic-doc, no "_m" suffix is added for
+       the vop_m C++ overloaded API.  */
+ if (overloaded_p && instance.pred == PRED_TYPE_m)
+ return b.finish_name ();
+ b.append_name (predication_suffixes[instance.pred]);
+ return b.finish_name ();
+ }
+};
+
/* alu_def class. */
struct alu_def : public build_base
{
@@ -632,6 +730,31 @@ struct reduc_alu_def : public build_base
}
};
+/* th_extract_def class. */
+struct th_extract_def : public build_base
+{
+ void build (function_builder &b,
+ const function_group_info &group) const override
+ {
+    /* Do not register the function when xtheadvector is not enabled.  */
+ if (!TARGET_XTHEADVECTOR)
+ return;
+
+ build_all (b, group);
+ }
+
+ char *get_name (function_builder &b, const function_instance &instance,
+ bool overloaded_p) const override
+ {
+ b.append_base_name (instance.base_name);
+ if (overloaded_p)
+ return b.finish_name ();
+ b.append_name (type_suffixes[instance.type.index].vector);
+ b.append_name (type_suffixes[instance.type.index].scalar);
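+    /* E.g. th_vext_x_v on vint32m1_t is named th_vext_x_v_i32m1_i32.  */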
+ return b.finish_name ();
+ }
+};
+
/* scalar_move_def class. */
struct scalar_move_def : public build_base
{
@@ -1011,6 +1134,8 @@ SHAPE(vsetvl, vsetvl)
SHAPE(vsetvl, vsetvlmax)
SHAPE(loadstore, loadstore)
SHAPE(indexed_loadstore, indexed_loadstore)
+SHAPE(th_loadstore_width, th_loadstore_width)
+SHAPE(th_indexed_loadstore_width, th_indexed_loadstore_width)
SHAPE(alu, alu)
SHAPE(alu_frm, alu_frm)
SHAPE(widen_alu, widen_alu)
@@ -1023,6 +1148,7 @@ SHAPE(move, move)
SHAPE(mask_alu, mask_alu)
SHAPE(reduc_alu, reduc_alu)
SHAPE(reduc_alu_frm, reduc_alu_frm)
+SHAPE(th_extract, th_extract)
SHAPE(scalar_move, scalar_move)
SHAPE(vundefined, vundefined)
SHAPE(misc, misc)
@@ -28,6 +28,8 @@ extern const function_shape *const vsetvl;
extern const function_shape *const vsetvlmax;
extern const function_shape *const loadstore;
extern const function_shape *const indexed_loadstore;
+extern const function_shape *const th_loadstore_width;
+extern const function_shape *const th_indexed_loadstore_width;
extern const function_shape *const alu;
extern const function_shape *const alu_frm;
extern const function_shape *const widen_alu;
@@ -41,6 +43,7 @@ extern const function_shape *const mask_alu;
extern const function_shape *const reduc_alu;
extern const function_shape *const reduc_alu_frm;
extern const function_shape *const scalar_move;
+extern const function_shape *const th_extract;
extern const function_shape *const vundefined;
extern const function_shape *const misc;
extern const function_shape *const vset;
@@ -24,12 +24,48 @@ along with GCC; see the file COPYING3. If not see
#define DEF_RVV_I_OPS(TYPE, REQUIRE)
#endif
+/* Use "DEF_RVV_I8_OPS" macro include some signed integer (i8/i16/i32/i64)
+ which will be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_I8_OPS
+#define DEF_RVV_I8_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_I16_OPS" macro include some signed integer (i16/i32/i64)
+ which will be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_I16_OPS
+#define DEF_RVV_I16_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_I32_OPS" macro include some signed integer (i32/i64)
+ which will be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_I32_OPS
+#define DEF_RVV_I32_OPS(TYPE, REQUIRE)
+#endif
+
/* Use "DEF_RVV_U_OPS" macro include all unsigned integer which will be
iterated and registered as intrinsic functions. */
#ifndef DEF_RVV_U_OPS
#define DEF_RVV_U_OPS(TYPE, REQUIRE)
#endif
+/* Use "DEF_RVV_U8_OPS" macro include some unsigned integer (i8/i16/i32/i64)
+ which will be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_U8_OPS
+#define DEF_RVV_U8_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_U16_OPS" macro include some unsigned integer (i16/i32/i64)
+ which will be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_U16_OPS
+#define DEF_RVV_U16_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_U32_OPS" macro include some unsigned integer (i32/i64)
+ which will be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_U32_OPS
+#define DEF_RVV_U32_OPS(TYPE, REQUIRE)
+#endif
+
/* Use "DEF_RVV_F_OPS" macro include all floating-point which will be
iterated and registered as intrinsic functions. */
#ifndef DEF_RVV_F_OPS
@@ -362,6 +398,45 @@ DEF_RVV_I_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_I_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_I_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_I8_OPS (vint8m1_t, 0)
+DEF_RVV_I8_OPS (vint8m2_t, 0)
+DEF_RVV_I8_OPS (vint8m4_t, 0)
+DEF_RVV_I8_OPS (vint8m8_t, 0)
+DEF_RVV_I8_OPS (vint16m1_t, 0)
+DEF_RVV_I8_OPS (vint16m2_t, 0)
+DEF_RVV_I8_OPS (vint16m4_t, 0)
+DEF_RVV_I8_OPS (vint16m8_t, 0)
+DEF_RVV_I8_OPS (vint32m1_t, 0)
+DEF_RVV_I8_OPS (vint32m2_t, 0)
+DEF_RVV_I8_OPS (vint32m4_t, 0)
+DEF_RVV_I8_OPS (vint32m8_t, 0)
+DEF_RVV_I8_OPS (vint64m1_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_I8_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_I8_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_I8_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
+
+DEF_RVV_I16_OPS (vint16m1_t, 0)
+DEF_RVV_I16_OPS (vint16m2_t, 0)
+DEF_RVV_I16_OPS (vint16m4_t, 0)
+DEF_RVV_I16_OPS (vint16m8_t, 0)
+DEF_RVV_I16_OPS (vint32m1_t, 0)
+DEF_RVV_I16_OPS (vint32m2_t, 0)
+DEF_RVV_I16_OPS (vint32m4_t, 0)
+DEF_RVV_I16_OPS (vint32m8_t, 0)
+DEF_RVV_I16_OPS (vint64m1_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_I16_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_I16_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_I16_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
+
+DEF_RVV_I32_OPS (vint32m1_t, 0)
+DEF_RVV_I32_OPS (vint32m2_t, 0)
+DEF_RVV_I32_OPS (vint32m4_t, 0)
+DEF_RVV_I32_OPS (vint32m8_t, 0)
+DEF_RVV_I32_OPS (vint64m1_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_I32_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_I32_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_I32_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
+
DEF_RVV_U_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
DEF_RVV_U_OPS (vuint8mf4_t, 0)
DEF_RVV_U_OPS (vuint8mf2_t, 0)
@@ -385,6 +460,45 @@ DEF_RVV_U_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_U_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_U_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_U8_OPS (vuint8m1_t, 0)
+DEF_RVV_U8_OPS (vuint8m2_t, 0)
+DEF_RVV_U8_OPS (vuint8m4_t, 0)
+DEF_RVV_U8_OPS (vuint8m8_t, 0)
+DEF_RVV_U8_OPS (vuint16m1_t, 0)
+DEF_RVV_U8_OPS (vuint16m2_t, 0)
+DEF_RVV_U8_OPS (vuint16m4_t, 0)
+DEF_RVV_U8_OPS (vuint16m8_t, 0)
+DEF_RVV_U8_OPS (vuint32m1_t, 0)
+DEF_RVV_U8_OPS (vuint32m2_t, 0)
+DEF_RVV_U8_OPS (vuint32m4_t, 0)
+DEF_RVV_U8_OPS (vuint32m8_t, 0)
+DEF_RVV_U8_OPS (vuint64m1_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_U8_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_U8_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_U8_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
+
+DEF_RVV_U16_OPS (vuint16m1_t, 0)
+DEF_RVV_U16_OPS (vuint16m2_t, 0)
+DEF_RVV_U16_OPS (vuint16m4_t, 0)
+DEF_RVV_U16_OPS (vuint16m8_t, 0)
+DEF_RVV_U16_OPS (vuint32m1_t, 0)
+DEF_RVV_U16_OPS (vuint32m2_t, 0)
+DEF_RVV_U16_OPS (vuint32m4_t, 0)
+DEF_RVV_U16_OPS (vuint32m8_t, 0)
+DEF_RVV_U16_OPS (vuint64m1_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_U16_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_U16_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_U16_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
+
+DEF_RVV_U32_OPS (vuint32m1_t, 0)
+DEF_RVV_U32_OPS (vuint32m2_t, 0)
+DEF_RVV_U32_OPS (vuint32m4_t, 0)
+DEF_RVV_U32_OPS (vuint32m8_t, 0)
+DEF_RVV_U32_OPS (vuint64m1_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_U32_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_U32_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_U32_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
+
DEF_RVV_F_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
DEF_RVV_F_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_F_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
@@ -1356,7 +1470,13 @@ DEF_RVV_TUPLE_OPS (vfloat64m2x4_t, RVV_REQUIRE_ELEN_FP_64)
DEF_RVV_TUPLE_OPS (vfloat64m4x2_t, RVV_REQUIRE_ELEN_FP_64)
#undef DEF_RVV_I_OPS
+#undef DEF_RVV_I8_OPS
+#undef DEF_RVV_I16_OPS
+#undef DEF_RVV_I32_OPS
#undef DEF_RVV_U_OPS
+#undef DEF_RVV_U8_OPS
+#undef DEF_RVV_U16_OPS
+#undef DEF_RVV_U32_OPS
#undef DEF_RVV_F_OPS
#undef DEF_RVV_B_OPS
#undef DEF_RVV_WEXTI_OPS
@@ -51,6 +51,7 @@
#include "riscv-vector-builtins.h"
#include "riscv-vector-builtins-shapes.h"
#include "riscv-vector-builtins-bases.h"
+#include "thead-vector-builtins.h"
using namespace riscv_vector;
@@ -246,6 +247,63 @@ static const rvv_type_info iu_ops[] = {
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
+/* A list of signed integer types (i8/i16/i32/i64) that will be registered
+   for intrinsic functions.  */
+static const rvv_type_info i8_ops[] = {
+#define DEF_RVV_I8_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of signed integer types (i16/i32/i64) that will be registered
+   for intrinsic functions.  */
+static const rvv_type_info i16_ops[] = {
+#define DEF_RVV_I16_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of signed integer types (i32/i64) that will be registered
+   for intrinsic functions.  */
+static const rvv_type_info i32_ops[] = {
+#define DEF_RVV_I32_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of unsigned integer types (u8/u16/u32/u64) that will be registered
+   for intrinsic functions.  */
+static const rvv_type_info u8_ops[] = {
+#define DEF_RVV_U8_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of unsigned integer types (u16/u32/u64) that will be registered
+   for intrinsic functions.  */
+static const rvv_type_info u16_ops[] = {
+#define DEF_RVV_U16_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of unsigned integer types (u32/u64) that will be registered
+   for intrinsic functions.  */
+static const rvv_type_info u32_ops[] = {
+#define DEF_RVV_U32_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of signed and unsigned integer types (i8/u8 through i64/u64) that
+   will be registered for intrinsic functions.  */
+static const rvv_type_info iu8_ops[] = {
+#define DEF_RVV_I8_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#define DEF_RVV_U8_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of signed and unsigned integer types (i16/u16 through i64/u64) that
+   will be registered for intrinsic functions.  */
+static const rvv_type_info iu16_ops[] = {
+#define DEF_RVV_I16_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#define DEF_RVV_U16_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of signed and unsigned integer types (i32/u32 and i64/u64) that
+   will be registered for intrinsic functions.  */
+static const rvv_type_info iu32_ops[] = {
+#define DEF_RVV_I32_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#define DEF_RVV_U32_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
/* A list of all types will be registered for intrinsic functions. */
static const rvv_type_info all_ops[] = {
#define DEF_RVV_I_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
@@ -913,7 +971,32 @@ static CONSTEXPR const rvv_arg_type_info tuple_vcreate_args[]
/* A list of args for vector_type func (vector_type) function. */
static CONSTEXPR const rvv_arg_type_info ext_vcreate_args[]
- = {rvv_arg_type_info (RVV_BASE_vector),
+ = {rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (const scalar_type *, size_t)
+ * function. */
+static CONSTEXPR const rvv_arg_type_info scalar_const_ptr_size_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar_const_ptr),
+ rvv_arg_type_info (RVV_BASE_size), rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (const scalar_type *, index_type)
+ * function. */
+static CONSTEXPR const rvv_arg_type_info scalar_const_ptr_index_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar_const_ptr),
+ rvv_arg_type_info (RVV_BASE_unsigned_vector), rvv_arg_type_info_end};
+
+/* A list of args for void func (scalar_type *, index_type, vector_type)
+ * function. */
+static CONSTEXPR const rvv_arg_type_info scalar_ptr_index_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar_ptr),
+ rvv_arg_type_info (RVV_BASE_unsigned_vector),
+ rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
+
+/* A list of args for void func (scalar_type *, size_t, vector_type)
+ * function. */
+static CONSTEXPR const rvv_arg_type_info scalar_ptr_size_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar_ptr),
+ rvv_arg_type_info (RVV_BASE_size), rvv_arg_type_info (RVV_BASE_vector),
rvv_arg_type_info_end};
/* A list of none preds that will be registered for intrinsic functions. */
@@ -1429,6 +1512,14 @@ static CONSTEXPR const rvv_op_info iu_shift_vvv_ops
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
shift_vv_args /* Args */};
+/* A static operand information for scalar_type func (vector_type, size_t)
+ * function registration. */
+static CONSTEXPR const rvv_op_info iu_x_s_u_ops
+ = {iu_ops, /* Types */
+ OP_TYPE_vx, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_scalar), /* Return type */
+ v_size_args /* Args */};
+
/* A static operand information for vector_type func (vector_type, size_t)
* function registration. */
static CONSTEXPR const rvv_op_info iu_shift_vvx_ops
@@ -2604,6 +2695,222 @@ static CONSTEXPR const rvv_op_info all_v_vcreate_lmul4_x2_ops
rvv_arg_type_info (RVV_BASE_vlmul_ext_x2), /* Return type */
ext_vcreate_args /* Args */};
+/* A static operand information for vector_type func (const scalar_type *)
+ * function registration. */
+static CONSTEXPR const rvv_op_info i8_v_scalar_const_ptr_ops
+ = {i8_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *)
+ * function registration. */
+static CONSTEXPR const rvv_op_info i16_v_scalar_const_ptr_ops
+ = {i16_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *)
+ * function registration. */
+static CONSTEXPR const rvv_op_info i32_v_scalar_const_ptr_ops
+ = {i32_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *)
+ * function registration. */
+static CONSTEXPR const rvv_op_info u8_v_scalar_const_ptr_ops
+ = {u8_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *)
+ * function registration. */
+static CONSTEXPR const rvv_op_info u16_v_scalar_const_ptr_ops
+ = {u16_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *)
+ * function registration. */
+static CONSTEXPR const rvv_op_info u32_v_scalar_const_ptr_ops
+ = {u32_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * size_t) function registration. */
+static CONSTEXPR const rvv_op_info i8_v_scalar_const_ptr_size_ops
+ = {i8_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_size_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * size_t) function registration. */
+static CONSTEXPR const rvv_op_info i16_v_scalar_const_ptr_size_ops
+ = {i16_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_size_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * size_t) function registration. */
+static CONSTEXPR const rvv_op_info i32_v_scalar_const_ptr_size_ops
+ = {i32_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_size_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * size_t) function registration. */
+static CONSTEXPR const rvv_op_info u8_v_scalar_const_ptr_size_ops
+ = {u8_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_size_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * size_t) function registration. */
+static CONSTEXPR const rvv_op_info u16_v_scalar_const_ptr_size_ops
+ = {u16_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_size_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * size_t) function registration. */
+static CONSTEXPR const rvv_op_info u32_v_scalar_const_ptr_size_ops
+ = {u32_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_size_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * index_type) function registration. */
+static CONSTEXPR const rvv_op_info i8_v_scalar_const_ptr_index_ops
+ = {i8_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_index_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * index_type) function registration. */
+static CONSTEXPR const rvv_op_info u8_v_scalar_const_ptr_index_ops
+ = {u8_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_index_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * index_type) function registration. */
+static CONSTEXPR const rvv_op_info i16_v_scalar_const_ptr_index_ops
+ = {i16_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_index_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * index_type) function registration. */
+static CONSTEXPR const rvv_op_info u16_v_scalar_const_ptr_index_ops
+ = {u16_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_index_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * index_type) function registration. */
+static CONSTEXPR const rvv_op_info i32_v_scalar_const_ptr_index_ops
+ = {i32_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_index_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * index_type) function registration. */
+static CONSTEXPR const rvv_op_info u32_v_scalar_const_ptr_index_ops
+ = {u32_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_index_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, index_type,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info iu8_v_scalar_ptr_index_ops
+ = {iu8_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ scalar_ptr_index_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, index_type,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info iu16_v_scalar_ptr_index_ops
+ = {iu16_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ scalar_ptr_index_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, index_type,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info iu32_v_scalar_ptr_index_ops
+ = {iu32_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ scalar_ptr_index_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info iu8_v_scalar_ptr_ops
+ = {iu8_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ scalar_ptr_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info iu16_v_scalar_ptr_ops
+ = {iu16_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ scalar_ptr_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info iu32_v_scalar_ptr_ops
+ = {iu32_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ scalar_ptr_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, size_t,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info iu8_v_scalar_ptr_size_ops
+ = {iu8_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ scalar_ptr_size_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, size_t,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info iu16_v_scalar_ptr_size_ops
+ = {iu16_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ scalar_ptr_size_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, size_t,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info iu32_v_scalar_ptr_size_ops
+ = {iu32_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ scalar_ptr_size_args /* Args */};
+
/* A list of all RVV base function types. */
static CONSTEXPR const function_type_info function_types[] = {
#define DEF_RVV_TYPE_INDEX( \
@@ -2687,6 +2994,10 @@ static function_group_info function_groups[] = {
#define DEF_RVV_FUNCTION(NAME, SHAPE, PREDS, OPS_INFO) \
{#NAME, &bases::NAME, &shapes::SHAPE, PREDS, OPS_INFO, REQUIRED_EXTENSIONS},
#include "riscv-vector-builtins-functions.def"
+#undef DEF_RVV_FUNCTION
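+/* Re-include the table for the XTheadVector intrinsics, which use the
+   same entry layout.  */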
+#define DEF_RVV_FUNCTION(NAME, SHAPE, PREDS, OPS_INFO) \
+ {#NAME, &bases::NAME, &shapes::SHAPE, PREDS, OPS_INFO, REQUIRED_EXTENSIONS},
+#include "thead-vector-builtins-functions.def"
};
/* The RVV types, with their built-in
@@ -123,6 +123,7 @@ enum required_ext
ZVKNHB_EXT, /* Crypto vector Zvknhb sub-ext */
ZVKSED_EXT, /* Crypto vector Zvksed sub-ext */
ZVKSH_EXT, /* Crypto vector Zvksh sub-ext */
+ XTHEADVECTOR_EXT, /* XTheadVector extension */
};
/* Enumerates the RVV operand types. */
@@ -252,6 +253,8 @@ struct function_group_info
return TARGET_ZVKSED;
case ZVKSH_EXT:
return TARGET_ZVKSH;
+ case XTHEADVECTOR_EXT:
+ return TARGET_XTHEADVECTOR;
default:
gcc_unreachable ();
}
@@ -23,6 +23,8 @@ riscv-vector-builtins.o: $(srcdir)/config/riscv/riscv-vector-builtins.cc \
$(srcdir)/config/riscv/riscv-vector-builtins-shapes.h \
$(srcdir)/config/riscv/riscv-vector-builtins-bases.h \
$(srcdir)/config/riscv/riscv-vector-builtins-types.def \
+ $(srcdir)/config/riscv/thead-vector-builtins.h \
+ $(srcdir)/config/riscv/thead-vector-builtins-functions.def \
$(RISCV_BUILTINS_H)
$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
$(srcdir)/config/riscv/riscv-vector-builtins.cc
@@ -50,6 +52,20 @@ riscv-vector-builtins-bases.o: \
$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
$(srcdir)/config/riscv/riscv-vector-builtins-bases.cc
+thead-vector-builtins.o: \
+ $(srcdir)/config/riscv/thead-vector-builtins.cc \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) $(RTL_H) \
+ $(TM_P_H) memmodel.h insn-codes.h $(OPTABS_H) $(RECOG_H) \
+ $(EXPR_H) $(BASIC_BLOCK_H) $(FUNCTION_H) fold-const.h $(GIMPLE_H) \
+ gimple-iterator.h gimplify.h explow.h $(EMIT_RTL_H) tree-vector-builder.h \
+ rtx-vector-builder.h \
+ $(srcdir)/config/riscv/riscv-vector-builtins-shapes.h \
+ $(srcdir)/config/riscv/riscv-vector-builtins-bases.h \
+ $(srcdir)/config/riscv/thead-vector-builtins.h \
+ $(RISCV_BUILTINS_H)
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ $(srcdir)/config/riscv/thead-vector-builtins.cc
+
riscv-sr.o: $(srcdir)/config/riscv/riscv-sr.cc $(CONFIG_H) \
$(SYSTEM_H) $(TM_H)
$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
new file mode 100644
@@ -0,0 +1,39 @@
+#ifndef DEF_RVV_FUNCTION
+#define DEF_RVV_FUNCTION(NAME, SHAPE, PREDS, OPS_INFO)
+#endif
+
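+/* Each entry gives the intrinsic NAME, the SHAPE that controls its naming
+   and registration, the predication types it supports, and its operand
+   information.  */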
+#define REQUIRED_EXTENSIONS XTHEADVECTOR_EXT
+DEF_RVV_FUNCTION (th_vlb, th_loadstore_width, full_preds, i8_v_scalar_const_ptr_ops)
+DEF_RVV_FUNCTION (th_vlh, th_loadstore_width, full_preds, i16_v_scalar_const_ptr_ops)
+DEF_RVV_FUNCTION (th_vlw, th_loadstore_width, full_preds, i32_v_scalar_const_ptr_ops)
+DEF_RVV_FUNCTION (th_vlbu, th_loadstore_width, full_preds, u8_v_scalar_const_ptr_ops)
+DEF_RVV_FUNCTION (th_vlhu, th_loadstore_width, full_preds, u16_v_scalar_const_ptr_ops)
+DEF_RVV_FUNCTION (th_vlwu, th_loadstore_width, full_preds, u32_v_scalar_const_ptr_ops)
+DEF_RVV_FUNCTION (th_vsb, th_loadstore_width, none_m_preds, iu8_v_scalar_ptr_ops)
+DEF_RVV_FUNCTION (th_vsh, th_loadstore_width, none_m_preds, iu16_v_scalar_ptr_ops)
+DEF_RVV_FUNCTION (th_vsw, th_loadstore_width, none_m_preds, iu32_v_scalar_ptr_ops)
+DEF_RVV_FUNCTION (th_vlsb, th_loadstore_width, full_preds, i8_v_scalar_const_ptr_size_ops)
+DEF_RVV_FUNCTION (th_vlsh, th_loadstore_width, full_preds, i16_v_scalar_const_ptr_size_ops)
+DEF_RVV_FUNCTION (th_vlsw, th_loadstore_width, full_preds, i32_v_scalar_const_ptr_size_ops)
+DEF_RVV_FUNCTION (th_vlsbu, th_loadstore_width, full_preds, u8_v_scalar_const_ptr_size_ops)
+DEF_RVV_FUNCTION (th_vlshu, th_loadstore_width, full_preds, u16_v_scalar_const_ptr_size_ops)
+DEF_RVV_FUNCTION (th_vlswu, th_loadstore_width, full_preds, u32_v_scalar_const_ptr_size_ops)
+DEF_RVV_FUNCTION (th_vssb, th_loadstore_width, none_m_preds, iu8_v_scalar_ptr_size_ops)
+DEF_RVV_FUNCTION (th_vssh, th_loadstore_width, none_m_preds, iu16_v_scalar_ptr_size_ops)
+DEF_RVV_FUNCTION (th_vssw, th_loadstore_width, none_m_preds, iu32_v_scalar_ptr_size_ops)
+DEF_RVV_FUNCTION (th_vlxb, th_indexed_loadstore_width, full_preds, i8_v_scalar_const_ptr_index_ops)
+DEF_RVV_FUNCTION (th_vlxh, th_indexed_loadstore_width, full_preds, i16_v_scalar_const_ptr_index_ops)
+DEF_RVV_FUNCTION (th_vlxw, th_indexed_loadstore_width, full_preds, i32_v_scalar_const_ptr_index_ops)
+DEF_RVV_FUNCTION (th_vlxbu, th_indexed_loadstore_width, full_preds, u8_v_scalar_const_ptr_index_ops)
+DEF_RVV_FUNCTION (th_vlxhu, th_indexed_loadstore_width, full_preds, u16_v_scalar_const_ptr_index_ops)
+DEF_RVV_FUNCTION (th_vlxwu, th_indexed_loadstore_width, full_preds, u32_v_scalar_const_ptr_index_ops)
+DEF_RVV_FUNCTION (th_vsxb, th_indexed_loadstore_width, none_m_preds, iu8_v_scalar_ptr_index_ops)
+DEF_RVV_FUNCTION (th_vsxh, th_indexed_loadstore_width, none_m_preds, iu16_v_scalar_ptr_index_ops)
+DEF_RVV_FUNCTION (th_vsxw, th_indexed_loadstore_width, none_m_preds, iu32_v_scalar_ptr_index_ops)
+DEF_RVV_FUNCTION (th_vsuxb, th_indexed_loadstore_width, none_m_preds, iu8_v_scalar_ptr_index_ops)
+DEF_RVV_FUNCTION (th_vsuxh, th_indexed_loadstore_width, none_m_preds, iu16_v_scalar_ptr_index_ops)
+DEF_RVV_FUNCTION (th_vsuxw, th_indexed_loadstore_width, none_m_preds, iu32_v_scalar_ptr_index_ops)
+DEF_RVV_FUNCTION (th_vext_x_v, th_extract, none_preds, iu_x_s_u_ops)
+#undef REQUIRED_EXTENSIONS
+
+#undef DEF_RVV_FUNCTION
new file mode 100644
@@ -0,0 +1,200 @@
+/* function_base implementation for RISC-V XTheadVector Extension
+ for GNU compiler.
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+ Contributed by Joshua (cooper.joshua@linux.alibaba.com), T-Head
+ Semiconductor Co., Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "rtl.h"
+#include "tm_p.h"
+#include "memmodel.h"
+#include "insn-codes.h"
+#include "optabs.h"
+#include "recog.h"
+#include "expr.h"
+#include "basic-block.h"
+#include "function.h"
+#include "fold-const.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "gimplify.h"
+#include "explow.h"
+#include "emit-rtl.h"
+#include "tree-vector-builder.h"
+#include "rtx-vector-builder.h"
+#include "riscv-vector-builtins.h"
+#include "riscv-vector-builtins-shapes.h"
+#include "riscv-vector-builtins-bases.h"
+#include "thead-vector-builtins.h"
+
+using namespace riscv_vector;
+
+namespace riscv_vector {
+
+/* Implements the codegen for th.vl(b/h/w)[u].v, th.vs(b/h/w).v,
+   th.vls(b/h/w)[u].v, th.vss(b/h/w).v, th.vlx(b/h/w)[u].v and
+   th.vs[u]x(b/h/w).v.  */
+template<bool STORE_P, lst_type LST_TYPE, int UNSPEC>
+class th_loadstore_width : public function_base
+{
+public:
+ bool apply_tail_policy_p () const override { return !STORE_P; }
+ bool apply_mask_policy_p () const override { return !STORE_P; }
+
+ unsigned int call_properties (const function_instance &) const override
+ {
+ if (STORE_P)
+ return CP_WRITE_MEMORY;
+ else
+ return CP_READ_MEMORY;
+ }
+
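+  /* Stores and indexed loads can always be overloaded; unit-stride and
+     strided loads can only be overloaded when predicated.  */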
+ bool can_be_overloaded_p (enum predication_type_index pred) const override
+ {
+ if (STORE_P || LST_TYPE == LST_INDEXED)
+ return true;
+ return pred != PRED_TYPE_none;
+ }
+
+ rtx expand (function_expander &e) const override
+ {
+ gcc_assert (TARGET_XTHEADVECTOR);
+ if (LST_TYPE == LST_INDEXED)
+ {
+ if (STORE_P)
+ return e.use_exact_insn (
+ code_for_pred_indexed_store_width (UNSPEC, UNSPEC,
+ e.vector_mode ()));
+ else
+ return e.use_exact_insn (
+ code_for_pred_indexed_load_width (UNSPEC, e.vector_mode ()));
+ }
+ else if (LST_TYPE == LST_STRIDED)
+ {
+ if (STORE_P)
+ return e.use_contiguous_store_insn (
+ code_for_pred_strided_store_width (UNSPEC, e.vector_mode ()));
+ else
+ return e.use_contiguous_load_insn (
+ code_for_pred_strided_load_width (UNSPEC, e.vector_mode ()));
+ }
+ else
+ {
+ if (STORE_P)
+ return e.use_contiguous_store_insn (
+ code_for_pred_store_width (UNSPEC, e.vector_mode ()));
+ else
+ return e.use_contiguous_load_insn (
+ code_for_pred_mov_width (UNSPEC, e.vector_mode ()));
+ }
+ }
+};
+
+/* Implements vext.x.v. */
+class th_extract : public function_base
+{
+public:
+ bool apply_vl_p () const override { return false; }
+ bool apply_tail_policy_p () const override { return false; }
+ bool apply_mask_policy_p () const override { return false; }
+ bool use_mask_predication_p () const override { return false; }
+ bool has_merge_operand_p () const override { return false; }
+
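+  /* th.vext.x.v rd,vs2,rs1 copies the element of vs2 whose index is held
+     in rs1 into the scalar register rd.  */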
+ rtx expand (function_expander &e) const override
+ {
+ gcc_assert (TARGET_XTHEADVECTOR);
+ return e.use_exact_insn (code_for_pred_th_extract (e.vector_mode ()));
+ }
+};
+
+static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLB> th_vlb_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLBU> th_vlbu_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLH> th_vlh_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLHU> th_vlhu_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLW> th_vlw_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLWU> th_vlwu_obj;
+static CONSTEXPR const th_loadstore_width<true, LST_UNIT_STRIDE, UNSPEC_TH_VLB> th_vsb_obj;
+static CONSTEXPR const th_loadstore_width<true, LST_UNIT_STRIDE, UNSPEC_TH_VLH> th_vsh_obj;
+static CONSTEXPR const th_loadstore_width<true, LST_UNIT_STRIDE, UNSPEC_TH_VLW> th_vsw_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSB> th_vlsb_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSBU> th_vlsbu_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSH> th_vlsh_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSHU> th_vlshu_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSW> th_vlsw_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSWU> th_vlswu_obj;
+static CONSTEXPR const th_loadstore_width<true, LST_STRIDED, UNSPEC_TH_VLSB> th_vssb_obj;
+static CONSTEXPR const th_loadstore_width<true, LST_STRIDED, UNSPEC_TH_VLSH> th_vssh_obj;
+static CONSTEXPR const th_loadstore_width<true, LST_STRIDED, UNSPEC_TH_VLSW> th_vssw_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXB> th_vlxb_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXBU> th_vlxbu_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXH> th_vlxh_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXHU> th_vlxhu_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXW> th_vlxw_obj;
+static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXWU> th_vlxwu_obj;
+static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VLXB> th_vsxb_obj;
+static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VLXH> th_vsxh_obj;
+static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VLXW> th_vsxw_obj;
+static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VSUXB> th_vsuxb_obj;
+static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VSUXH> th_vsuxh_obj;
+static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VSUXW> th_vsuxw_obj;
+static CONSTEXPR const th_extract th_vext_x_v_obj;
+
+/* Declare the function base NAME, pointing it to an instance
+ of class <NAME>_obj. */
+#define BASE(NAME) \
+ namespace bases { const function_base *const NAME = &NAME##_obj; }
+
+BASE (th_vlb)
+BASE (th_vlh)
+BASE (th_vlw)
+BASE (th_vlbu)
+BASE (th_vlhu)
+BASE (th_vlwu)
+BASE (th_vsb)
+BASE (th_vsh)
+BASE (th_vsw)
+BASE (th_vlsb)
+BASE (th_vlsh)
+BASE (th_vlsw)
+BASE (th_vlsbu)
+BASE (th_vlshu)
+BASE (th_vlswu)
+BASE (th_vssb)
+BASE (th_vssh)
+BASE (th_vssw)
+BASE (th_vlxb)
+BASE (th_vlxh)
+BASE (th_vlxw)
+BASE (th_vlxbu)
+BASE (th_vlxhu)
+BASE (th_vlxwu)
+BASE (th_vsxb)
+BASE (th_vsxh)
+BASE (th_vsxw)
+BASE (th_vsuxb)
+BASE (th_vsuxh)
+BASE (th_vsuxw)
+BASE (th_vext_x_v)
+
+} // end namespace riscv_vector
new file mode 100644
@@ -0,0 +1,64 @@
+/* function_base declaration for RISC-V XTheadVector Extension
+ for GNU compiler.
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+ Contributed by Joshua (cooper.joshua@linux.alibaba.com), T-Head
+ Semiconductor Co., Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_THEAD_VECTOR_BUILTINS_H
+#define GCC_THEAD_VECTOR_BUILTINS_H
+
+namespace riscv_vector {
+
+namespace bases {
+extern const function_base *const th_vlb;
+extern const function_base *const th_vlh;
+extern const function_base *const th_vlw;
+extern const function_base *const th_vlbu;
+extern const function_base *const th_vlhu;
+extern const function_base *const th_vlwu;
+extern const function_base *const th_vsb;
+extern const function_base *const th_vsh;
+extern const function_base *const th_vsw;
+extern const function_base *const th_vlsb;
+extern const function_base *const th_vlsh;
+extern const function_base *const th_vlsw;
+extern const function_base *const th_vlsbu;
+extern const function_base *const th_vlshu;
+extern const function_base *const th_vlswu;
+extern const function_base *const th_vssb;
+extern const function_base *const th_vssh;
+extern const function_base *const th_vssw;
+extern const function_base *const th_vlxb;
+extern const function_base *const th_vlxh;
+extern const function_base *const th_vlxw;
+extern const function_base *const th_vlxbu;
+extern const function_base *const th_vlxhu;
+extern const function_base *const th_vlxwu;
+extern const function_base *const th_vsxb;
+extern const function_base *const th_vsxh;
+extern const function_base *const th_vsxw;
+extern const function_base *const th_vsuxb;
+extern const function_base *const th_vsuxh;
+extern const function_base *const th_vsuxw;
+extern const function_base *const th_vext_x_v;
+}
+
+} // end namespace riscv_vector
+
+#endif
@@ -1,7 +1,95 @@
(define_c_enum "unspec" [
+ UNSPEC_TH_VLB
+ UNSPEC_TH_VLBU
+ UNSPEC_TH_VLH
+ UNSPEC_TH_VLHU
+ UNSPEC_TH_VLW
+ UNSPEC_TH_VLWU
+
+ UNSPEC_TH_VLSB
+ UNSPEC_TH_VLSBU
+ UNSPEC_TH_VLSH
+ UNSPEC_TH_VLSHU
+ UNSPEC_TH_VLSW
+ UNSPEC_TH_VLSWU
+
+ UNSPEC_TH_VLXB
+ UNSPEC_TH_VLXBU
+ UNSPEC_TH_VLXH
+ UNSPEC_TH_VLXHU
+ UNSPEC_TH_VLXW
+ UNSPEC_TH_VLXWU
+
+ UNSPEC_TH_VSUXB
+ UNSPEC_TH_VSUXH
+ UNSPEC_TH_VSUXW
+
UNSPEC_TH_VWLDST
])
+(define_int_iterator UNSPEC_TH_VLMEM_OP [
+ UNSPEC_TH_VLB UNSPEC_TH_VLBU
+ UNSPEC_TH_VLH UNSPEC_TH_VLHU
+ UNSPEC_TH_VLW UNSPEC_TH_VLWU
+])
+
+(define_int_iterator UNSPEC_TH_VLSMEM_OP [
+ UNSPEC_TH_VLSB UNSPEC_TH_VLSBU
+ UNSPEC_TH_VLSH UNSPEC_TH_VLSHU
+ UNSPEC_TH_VLSW UNSPEC_TH_VLSWU
+])
+
+(define_int_iterator UNSPEC_TH_VLXMEM_OP [
+ UNSPEC_TH_VLXB UNSPEC_TH_VLXBU
+ UNSPEC_TH_VLXH UNSPEC_TH_VLXHU
+ UNSPEC_TH_VLXW UNSPEC_TH_VLXWU
+])
+
+(define_int_attr vlmem_op_attr [
+ (UNSPEC_TH_VLB "b") (UNSPEC_TH_VLBU "bu")
+ (UNSPEC_TH_VLH "h") (UNSPEC_TH_VLHU "hu")
+ (UNSPEC_TH_VLW "w") (UNSPEC_TH_VLWU "wu")
+ (UNSPEC_TH_VLSB "b") (UNSPEC_TH_VLSBU "bu")
+ (UNSPEC_TH_VLSH "h") (UNSPEC_TH_VLSHU "hu")
+ (UNSPEC_TH_VLSW "w") (UNSPEC_TH_VLSWU "wu")
+ (UNSPEC_TH_VLXB "b") (UNSPEC_TH_VLXBU "bu")
+ (UNSPEC_TH_VLXH "h") (UNSPEC_TH_VLXHU "hu")
+ (UNSPEC_TH_VLXW "w") (UNSPEC_TH_VLXWU "wu")
+ (UNSPEC_TH_VSUXB "b")
+ (UNSPEC_TH_VSUXH "h")
+ (UNSPEC_TH_VSUXW "w")
+])
+
+(define_int_attr vlmem_order_attr [
+ (UNSPEC_TH_VLXB "")
+ (UNSPEC_TH_VLXH "")
+ (UNSPEC_TH_VLXW "")
+ (UNSPEC_TH_VSUXB "u")
+ (UNSPEC_TH_VSUXH "u")
+ (UNSPEC_TH_VSUXW "u")
+])
+
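+;; The store iterators below reuse the load unspecs; only their width
+;; suffix (<vlmem_op_attr>) matters for the store patterns.
+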
+(define_int_iterator UNSPEC_TH_VSMEM_OP [
+ UNSPEC_TH_VLB
+ UNSPEC_TH_VLH
+ UNSPEC_TH_VLW
+])
+
+(define_int_iterator UNSPEC_TH_VSSMEM_OP [
+ UNSPEC_TH_VLSB
+ UNSPEC_TH_VLSH
+ UNSPEC_TH_VLSW
+])
+
+(define_int_iterator UNSPEC_TH_VSXMEM_OP [
+ UNSPEC_TH_VLXB
+ UNSPEC_TH_VLXH
+ UNSPEC_TH_VLXW
+ UNSPEC_TH_VSUXB
+ UNSPEC_TH_VSUXH
+ UNSPEC_TH_VSUXW
+])
+
(define_mode_iterator V_VLS_VT [V VLS VT])
(define_mode_iterator V_VB_VLS_VT [V VB VLS VT])
@@ -100,3 +188,168 @@
}
[(set_attr "type" "vldm,vstm,vmalu,vmalu,vmalu")
(set_attr "mode" "<MODE>")])
+
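+;; In the predicated patterns below, operand 1 is the mask, operand 2 the
+;; merge value and operand 3 the source; the trailing operands are the
+;; vector length and the tail/mask policy and avl-type constants.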
+(define_expand "@pred_mov_width<vlmem_op_attr><mode>"
+ [(set (match_operand:V_VLS 0 "nonimmediate_operand")
+ (if_then_else:V_VLS
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand")
+ (match_operand 4 "vector_length_operand")
+ (match_operand 5 "const_int_operand")
+ (match_operand 6 "const_int_operand")
+ (match_operand 7 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_TH_VLMEM_OP)
+ (match_operand:V_VLS 3 "vector_move_operand")
+ (match_operand:V_VLS 2 "vector_merge_operand")))]
+ "TARGET_XTHEADVECTOR"
+ {})
+
+(define_insn_and_split "*pred_mov_width<vlmem_op_attr><mode>"
+ [(set (match_operand:V_VLS 0 "nonimmediate_operand" "=vr, vr, vd, m, vr, vr")
+ (if_then_else:V_VLS
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1, Wc1, vm, vmWc1, Wc1, Wc1")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK")
+ (match_operand 5 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_TH_VLMEM_OP)
+ (match_operand:V_VLS 3 "reg_or_mem_operand" " m, m, m, vr, vr, vr")
+ (match_operand:V_VLS 2 "vector_merge_operand" " 0, vu, vu, vu, vu, 0")))]
+ "(TARGET_XTHEADVECTOR
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[3], <MODE>mode)))"
+ "@
+ vl<vlmem_op_attr>.v\t%0,%3%p1
+ vl<vlmem_op_attr>.v\t%0,%3
+ vl<vlmem_op_attr>.v\t%0,%3,%1.t
+ vs<vlmem_op_attr>.v\t%3,%0%p1
+ vmv.v.v\t%0,%3
+ vmv.v.v\t%0,%3"
+ "&& register_operand (operands[0], <MODE>mode)
+ && register_operand (operands[3], <MODE>mode)
+ && satisfies_constraint_vu (operands[2])
+ && INTVAL (operands[7]) == riscv_vector::VLMAX"
+ [(set (match_dup 0) (match_dup 3))]
+ ""
+ [(set_attr "type" "vlde,vlde,vlde,vste,vimov,vimov")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_store_width<vlmem_op_attr><mode>"
+ [(set (match_operand:VI 0 "memory_operand" "+m")
+ (if_then_else:VI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
+ (match_operand 3 "vector_length_operand" " rK")
+ (match_operand 4 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_TH_VSMEM_OP)
+ (match_operand:VI 2 "register_operand" " vr")
+ (match_dup 0)))]
+ "TARGET_XTHEADVECTOR"
+ "vs<vlmem_op_attr>.v\t%2,%0%p1"
+ [(set_attr "type" "vste")
+ (set_attr "mode" "<MODE>")
+ (set (attr "avl_type_idx") (const_int 4))
+ (set_attr "vl_op_idx" "3")])
+
+(define_insn "@pred_strided_load_width<vlmem_op_attr><mode>"
+ [(set (match_operand:VI 0 "register_operand" "=vr, vr, vd")
+ (if_then_else:VI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1, Wc1, vm")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_TH_VLSMEM_OP)
+ (unspec:VI
+ [(match_operand:VI 3 "memory_operand" " m, m, m")
+ (match_operand 4 "pmode_reg_or_0_operand" " rJ, rJ, rJ")] UNSPEC_TH_VLSMEM_OP)
+ (match_operand:VI 2 "vector_merge_operand" " 0, vu, vu")))]
+ "TARGET_XTHEADVECTOR"
+ "vls<vlmem_op_attr>.v\t%0,%3,%z4%p1"
+ [(set_attr "type" "vlds")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_strided_store_width<vlmem_op_attr><mode>"
+ [(set (match_operand:VI 0 "memory_operand" "+m")
+ (if_then_else:VI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_TH_VSSMEM_OP)
+ (unspec:VI
+ [(match_operand 2 "pmode_reg_or_0_operand" " rJ")
+ (match_operand:VI 3 "register_operand" " vr")] UNSPEC_TH_VSSMEM_OP)
+ (match_dup 0)))]
+ "TARGET_XTHEADVECTOR"
+ "vss<vlmem_op_attr>.v\t%3,%0,%z2%p1"
+ [(set_attr "type" "vsts")
+ (set_attr "mode" "<MODE>")
+ (set (attr "avl_type_idx") (const_int 5))])
+
+(define_insn "@pred_indexed_load_width<vlmem_op_attr><mode>"
+ [(set (match_operand:VI 0 "register_operand" "=vd, vr,vd, vr")
+ (if_then_else:VI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vm,Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK,rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_TH_VLXMEM_OP)
+ (unspec:VI
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ,rJ, rJ")
+ (mem:BLK (scratch))
+ (match_operand:VI 4 "register_operand" " vr, vr,vr, vr")] UNSPEC_TH_VLXMEM_OP)
+ (match_operand:VI 2 "vector_merge_operand" " vu, vu, 0, 0")))]
+ "TARGET_XTHEADVECTOR"
+ "vlx<vlmem_op_attr>.v\t%0,(%z3),%4%p1"
+ [(set_attr "type" "vldux")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_indexed_<vlmem_order_attr>store_width<vlmem_op_attr><mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_TH_VSXMEM_OP)
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
+ (match_operand:VI 2 "register_operand" " vr")
+ (match_operand:VI 3 "register_operand" " vr")] UNSPEC_TH_VSXMEM_OP))]
+ "TARGET_XTHEADVECTOR"
+ "vs<vlmem_order_attr>x<vlmem_op_attr>.v\t%3,(%z1),%2%p0"
+ [(set_attr "type" "vstux")
+ (set_attr "mode" "<MODE>")])
+
+(define_expand "@pred_th_extract<mode>"
+ [(set (match_operand:<VEL> 0 "register_operand")
+ (unspec:<VEL>
+ [(vec_select:<VEL>
+ (match_operand:V_VLSI 1 "register_operand")
+ (parallel [(match_operand:DI 2 "register_operand" "r")]))
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE))]
+ "TARGET_XTHEADVECTOR"
+{})
+
+(define_insn "*pred_th_extract<mode>"
+ [(set (match_operand:<VEL> 0 "register_operand" "=r")
+ (unspec:<VEL>
+ [(vec_select:<VEL>
+ (match_operand:V_VLSI 1 "register_operand" "vr")
+ (parallel [(match_operand:DI 2 "register_operand" "r")]))
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE))]
+ "TARGET_XTHEADVECTOR"
+ "vext.x.v\t%0,%1,%2"
+ [(set_attr "type" "vimovvx")
+ (set_attr "mode" "<MODE>")])
new file mode 100644
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gc_xtheadvector -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+** th.vsetivli\tzero,4,e32,m1,tu,ma
+** th.vlb\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vlb\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** th.vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** th.vsb\.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f1 (void * in, void *out)
+{
+ vint32m1_t v = __riscv_th_vlb_v_i32m1 (in, 4);
+ vint32m1_t v2 = __riscv_th_vlb_v_i32m1_tu (v, in, 4);
+ vint32m1_t v3 = __riscv_vadd_vv_i32m1 (v2, v2, 4);
+ vint32m1_t v4 = __riscv_vadd_vv_i32m1_tu (v3, v2, v2, 4);
+ __riscv_th_vsb_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+** th.vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** th.vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vsetivli\tzero,4,e32,m1,ta,ma
+** th.vlb.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** th.vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** th.vadd\.vv\tv[1-9][0-9]?,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** th.vsb.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f2 (void * in, void *out)
+{
+ vbool32_t mask = *(vbool32_t*)in;
+ asm volatile ("":::"memory");
+ vint32m1_t v = __riscv_th_vlb_v_i32m1 (in, 4);
+ vint32m1_t v2 = __riscv_th_vlb_v_i32m1_m (mask, in, 4);
+ vint32m1_t v3 = __riscv_vadd_vv_i32m1 (v2, v2, 4);
+ vint32m1_t v4 = __riscv_vadd_vv_i32m1_m (mask, v3, v3, 4);
+ __riscv_th_vsb_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+** th.vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** th.vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vsetivli\tzero,4,e32,m1,tu,mu
+** th.vlb\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vlb.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** th.vadd\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** th.vadd\.vv\tv[1-9][0-9]?,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t
+** th.vsb.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f3 (void * in, void *out)
+{
+ vbool32_t mask = *(vbool32_t*)in;
+ asm volatile ("":::"memory");
+ vint32m1_t v = __riscv_th_vlb_v_i32m1 (in, 4);
+ vint32m1_t v2 = __riscv_th_vlb_v_i32m1_tumu (mask, v, in, 4);
+ vint32m1_t v3 = __riscv_vadd_vv_i32m1 (v2, v2, 4);
+ vint32m1_t v4 = __riscv_vadd_vv_i32m1_tumu (mask, v3, v2, v2, 4);
+ __riscv_th_vsb_v_i32m1 (out, v4, 4);
+}
new file mode 100644
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gc_xtheadvector -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+** th.vsetivli\tzero,4,e32,m1,tu,ma
+** th.vlbu\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vlbu\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vsb\.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f1 (void * in, void *out, uint32_t x)
+{
+ vuint32m1_t v = __riscv_th_vlbu_v_u32m1 (in, 4);
+ vuint32m1_t v2 = __riscv_th_vlbu_v_u32m1_tu (v, in, 4);
+ vuint32m1_t v3 = __riscv_vadd_vx_u32m1 (v2, -16, 4);
+ vuint32m1_t v4 = __riscv_vadd_vx_u32m1_tu (v3, v2, -16, 4);
+ __riscv_th_vsb_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+** th.vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** th.vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vsetivli\tzero,4,e32,m1,ta,ma
+** th.vlbu.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+** th.vsb.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f2 (void * in, void *out, uint32_t x)
+{
+ vbool32_t mask = *(vbool32_t*)in;
+ asm volatile ("":::"memory");
+ vuint32m1_t v = __riscv_th_vlbu_v_u32m1 (in, 4);
+ vuint32m1_t v2 = __riscv_th_vlbu_v_u32m1_m (mask, in, 4);
+ vuint32m1_t v3 = __riscv_vadd_vx_u32m1 (v2, -16, 4);
+ vuint32m1_t v4 = __riscv_vadd_vx_u32m1_m (mask, v3, -16, 4);
+ __riscv_th_vsb_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+** th.vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** th.vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vsetivli\tzero,4,e32,m1,tu,mu
+** th.vlbu\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vlbu.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+** th.vsb.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f3 (void * in, void *out, uint32_t x)
+{
+ vbool32_t mask = *(vbool32_t*)in;
+ asm volatile ("":::"memory");
+ vuint32m1_t v = __riscv_th_vlbu_v_u32m1 (in, 4);
+ vuint32m1_t v2 = __riscv_th_vlbu_v_u32m1_tumu (mask, v, in, 4);
+ vuint32m1_t v3 = __riscv_vadd_vx_u32m1 (v2, -16, 4);
+ vuint32m1_t v4 = __riscv_vadd_vx_u32m1_tumu (mask, v3, v2, -16, 4);
+ __riscv_th_vsb_v_u32m1 (out, v4, 4);
+}
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gc_xtheadvector -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+** th.vsetivli\tzero,4,e32,m1,tu,ma
+** th.vlh\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vlh\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vsh\.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+ vint32m1_t v = __riscv_th_vlh_v_i32m1 (in, 4);
+ vint32m1_t v2 = __riscv_th_vlh_v_i32m1_tu (v, in, 4);
+ vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, -16, 4);
+ vint32m1_t v4 = __riscv_vadd_vx_i32m1_tu (v3, v2, -16, 4);
+ __riscv_th_vsh_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+** th.vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** th.vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vsetivli\tzero,4,e32,m1,ta,ma
+** th.vlh.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+** th.vsh.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+ vbool32_t mask = *(vbool32_t*)in;
+ asm volatile ("":::"memory");
+ vint32m1_t v = __riscv_th_vlh_v_i32m1 (in, 4);
+ vint32m1_t v2 = __riscv_th_vlh_v_i32m1_m (mask, in, 4);
+ vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, -16, 4);
+ vint32m1_t v4 = __riscv_vadd_vx_i32m1_m (mask, v3, -16, 4);
+ __riscv_th_vsh_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+** th.vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** th.vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vsetivli\tzero,4,e32,m1,tu,mu
+** th.vlh\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vlh.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+** th.vsh.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+ vbool32_t mask = *(vbool32_t*)in;
+ asm volatile ("":::"memory");
+ vint32m1_t v = __riscv_th_vlh_v_i32m1 (in, 4);
+ vint32m1_t v2 = __riscv_th_vlh_v_i32m1_tumu (mask, v, in, 4);
+ vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, -16, 4);
+ vint32m1_t v4 = __riscv_vadd_vx_i32m1_tumu (mask, v3, v2, -16, 4);
+ __riscv_th_vsh_v_i32m1 (out, v4, 4);
+}
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gc_xtheadvector -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+** th.vsetivli\tzero,4,e32,m1,tu,ma
+** th.vlhu\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vlhu\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vsh\.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f1 (void * in, void *out, uint32_t x)
+{
+ vuint32m1_t v = __riscv_th_vlhu_v_u32m1 (in, 4);
+ vuint32m1_t v2 = __riscv_th_vlhu_v_u32m1_tu (v, in, 4);
+ vuint32m1_t v3 = __riscv_vadd_vx_u32m1 (v2, -16, 4);
+ vuint32m1_t v4 = __riscv_vadd_vx_u32m1_tu (v3, v2, -16, 4);
+ __riscv_th_vsh_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+** th.vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** th.vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vsetivli\tzero,4,e32,m1,ta,ma
+** th.vlhu.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+** th.vsh.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f2 (void * in, void *out, uint32_t x)
+{
+ vbool32_t mask = *(vbool32_t*)in;
+ asm volatile ("":::"memory");
+ vuint32m1_t v = __riscv_th_vlhu_v_u32m1 (in, 4);
+ vuint32m1_t v2 = __riscv_th_vlhu_v_u32m1_m (mask, in, 4);
+ vuint32m1_t v3 = __riscv_vadd_vx_u32m1 (v2, -16, 4);
+ vuint32m1_t v4 = __riscv_vadd_vx_u32m1_m (mask, v3, -16, 4);
+ __riscv_th_vsh_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+** th.vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** th.vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vsetivli\tzero,4,e32,m1,tu,mu
+** th.vlhu\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vlhu.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+** th.vsh.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f3 (void * in, void *out, uint32_t x)
+{
+ vbool32_t mask = *(vbool32_t*)in;
+ asm volatile ("":::"memory");
+ vuint32m1_t v = __riscv_th_vlhu_v_u32m1 (in, 4);
+ vuint32m1_t v2 = __riscv_th_vlhu_v_u32m1_tumu (mask, v, in, 4);
+ vuint32m1_t v3 = __riscv_vadd_vx_u32m1 (v2, -16, 4);
+ vuint32m1_t v4 = __riscv_vadd_vx_u32m1_tumu (mask, v3, v2, -16, 4);
+ __riscv_th_vsh_v_u32m1 (out, v4, 4);
+}
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gc_xtheadvector -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+** th.vsetivli\tzero,4,e32,m1,tu,ma
+** th.vlw\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vlw\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+** th.vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+** th.vsw\.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+ vint32m1_t v = __riscv_th_vlw_v_i32m1 (in, 4);
+ vint32m1_t v2 = __riscv_th_vlw_v_i32m1_tu (v, in, 4);
+ vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, x, 4);
+ vint32m1_t v4 = __riscv_vadd_vx_i32m1_tu (v3, v2, x, 4);
+ __riscv_th_vsw_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+** th.vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** th.vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vsetivli\tzero,4,e32,m1,ta,ma
+** th.vlw.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** th.vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+** th.vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+** th.vsw.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+ vbool32_t mask = *(vbool32_t*)in;
+ asm volatile ("":::"memory");
+ vint32m1_t v = __riscv_th_vlw_v_i32m1 (in, 4);
+ vint32m1_t v2 = __riscv_th_vlw_v_i32m1_m (mask, in, 4);
+ vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, x, 4);
+ vint32m1_t v4 = __riscv_vadd_vx_i32m1_m (mask, v3, x, 4);
+ __riscv_th_vsw_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+** th.vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** th.vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vsetivli\tzero,4,e32,m1,tu,mu
+** th.vlw\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vlw.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** th.vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+** th.vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+** th.vsw.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+ vbool32_t mask = *(vbool32_t*)in;
+ asm volatile ("":::"memory");
+ vint32m1_t v = __riscv_th_vlw_v_i32m1 (in, 4);
+ vint32m1_t v2 = __riscv_th_vlw_v_i32m1_tumu (mask, v, in, 4);
+ vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, x, 4);
+ vint32m1_t v4 = __riscv_vadd_vx_i32m1_tumu (mask, v3, v2, x, 4);
+ __riscv_th_vsw_v_i32m1 (out, v4, 4);
+}
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gc_xtheadvector -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+** th.vsetivli\tzero,4,e32,m1,tu,ma
+** th.vlwu\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vlwu\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vsw\.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f1 (void * in, void *out, uint32_t x)
+{
+ vuint32m1_t v = __riscv_th_vlwu_v_u32m1 (in, 4);
+ vuint32m1_t v2 = __riscv_th_vlwu_v_u32m1_tu (v, in, 4);
+ vuint32m1_t v3 = __riscv_vadd_vx_u32m1 (v2, -16, 4);
+ vuint32m1_t v4 = __riscv_vadd_vx_u32m1_tu (v3, v2, -16, 4);
+ __riscv_th_vsw_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+** th.vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** th.vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vsetivli\tzero,4,e32,m1,ta,ma
+** th.vlwu.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+** th.vsw.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f2 (void * in, void *out, uint32_t x)
+{
+ vbool32_t mask = *(vbool32_t*)in;
+ asm volatile ("":::"memory");
+ vuint32m1_t v = __riscv_th_vlwu_v_u32m1 (in, 4);
+ vuint32m1_t v2 = __riscv_th_vlwu_v_u32m1_m (mask, in, 4);
+ vuint32m1_t v3 = __riscv_vadd_vx_u32m1 (v2, -16, 4);
+ vuint32m1_t v4 = __riscv_vadd_vx_u32m1_m (mask, v3, -16, 4);
+ __riscv_th_vsw_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+** th.vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** th.vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vsetivli\tzero,4,e32,m1,tu,mu
+** th.vlwu\.v\tv[0-9]+,0\([a-x0-9]+\)
+** th.vlwu.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** th.vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+** th.vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+** th.vsw.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f3 (void * in, void *out, uint32_t x)
+{
+ vbool32_t mask = *(vbool32_t*)in;
+ asm volatile ("":::"memory");
+ vuint32m1_t v = __riscv_th_vlwu_v_u32m1 (in, 4);
+ vuint32m1_t v2 = __riscv_th_vlwu_v_u32m1_tumu (mask, v, in, 4);
+ vuint32m1_t v3 = __riscv_vadd_vx_u32m1 (v2, -16, 4);
+ vuint32m1_t v4 = __riscv_vadd_vx_u32m1_tumu (mask, v3, v2, -16, 4);
+ __riscv_th_vsw_v_u32m1 (out, v4, 4);
+}
\ No newline at end of file