@@ -345,7 +345,7 @@ m32c*-*-*)
;;
aarch64*-*-*)
cpu_type=aarch64
- extra_headers="arm_fp16.h arm_neon.h arm_bf16.h arm_acle.h arm_sve.h arm_sme.h"
+ extra_headers="arm_fp16.h arm_neon.h arm_bf16.h arm_acle.h arm_sve.h arm_sme.h arm_neon_sve_bridge.h"
c_target_objs="aarch64-c.o"
cxx_target_objs="aarch64-c.o"
d_target_objs="aarch64-d.o"
new file mode 100644
@@ -0,0 +1,99 @@
+/* Builtins' description for AArch64 SIMD architecture.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+#ifndef GCC_AARCH64_BUILTINS_H
+#define GCC_AARCH64_BUILTINS_H
+
+enum aarch64_type_qualifiers
+{
+ /* T foo. */
+ qualifier_none = 0x0,
+ /* unsigned T foo. */
+ qualifier_unsigned = 0x1, /* 1 << 0 */
+ /* const T foo. */
+ qualifier_const = 0x2, /* 1 << 1 */
+ /* T *foo. */
+ qualifier_pointer = 0x4, /* 1 << 2 */
+ /* Used when expanding arguments if an operand could
+ be an immediate. */
+ qualifier_immediate = 0x8, /* 1 << 3 */
+ qualifier_maybe_immediate = 0x10, /* 1 << 4 */
+ /* void foo (...). */
+ qualifier_void = 0x20, /* 1 << 5 */
+ /* 1 << 6 is now unused */
+ /* Some builtins should use the T_*mode* encoded in a simd_builtin_datum
+ rather than using the type of the operand. */
+ qualifier_map_mode = 0x80, /* 1 << 7 */
+ /* qualifier_pointer | qualifier_map_mode */
+ qualifier_pointer_map_mode = 0x84,
+ /* qualifier_const | qualifier_pointer | qualifier_map_mode */
+ qualifier_const_pointer_map_mode = 0x86,
+ /* Polynomial types. */
+ qualifier_poly = 0x100,
+ /* Lane indices - must be in range, and flipped for bigendian. */
+ qualifier_lane_index = 0x200,
+ /* Lane indices for single lane structure loads and stores. */
+ qualifier_struct_load_store_lane_index = 0x400,
+ /* Lane indices selected in pairs. - must be in range, and flipped for
+ bigendian. */
+ qualifier_lane_pair_index = 0x800,
+ /* Lane indices selected in quadtuplets. - must be in range, and flipped for
+ bigendian. */
+ qualifier_lane_quadtup_index = 0x1000,
+};
+
+#define ENTRY(E, M, Q, G) E,
+enum aarch64_simd_type
+{
+#include "aarch64-simd-builtin-types.def"
+ ARM_NEON_H_TYPES_LAST
+};
+#undef ENTRY
+
+struct GTY(()) aarch64_simd_type_info
+{
+ enum aarch64_simd_type type;
+
+ /* Internal type name. */
+ const char *name;
+
+  /* Internal type name (mangled).  The mangled names conform to the
+ AAPCS64 (see "Procedure Call Standard for the ARM 64-bit Architecture",
+ Appendix A). To qualify for emission with the mangled names defined in
+ that document, a vector type must not only be of the correct mode but also
+ be of the correct internal AdvSIMD vector type (e.g. __Int8x8_t); these
+ types are registered by aarch64_init_simd_builtin_types (). In other
+ words, vector types defined in other ways e.g. via vector_size attribute
+ will get default mangled names. */
+ const char *mangle;
+
+ /* Internal type. */
+ tree itype;
+
+ /* Element type. */
+ tree eltype;
+
+ /* Machine mode the internal type maps to. */
+ enum machine_mode mode;
+
+ /* Qualifiers. */
+ enum aarch64_type_qualifiers q;
+};
+
+extern aarch64_simd_type_info aarch64_simd_types[];
+
+#endif
\ No newline at end of file
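As a quick cross-check of the composite qualifier values above, a short sketch (illustration only; the patch itself does not add these asserts):

    /* Illustration: the composite qualifiers are plain bitwise ORs of the
       single-bit entries.  */
    static_assert ((qualifier_pointer | qualifier_map_mode) == 0x84,
                   "qualifier_pointer_map_mode");
    static_assert ((qualifier_const | qualifier_pointer | qualifier_map_mode)
                   == 0x86, "qualifier_const_pointer_map_mode");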
@@ -48,6 +48,7 @@
#include "attribs.h"
#include "gimple-fold.h"
#include "builtins.h"
+#include "aarch64-builtins.h"
#define v8qi_UP E_V8QImode
#define v8di_UP E_V8DImode
@@ -184,47 +185,8 @@
#define SIMD_INTR_QUAL(suffix) QUAL_##suffix
#define SIMD_INTR_LENGTH_CHAR(length) LENGTH_##length
-
#define SIMD_MAX_BUILTIN_ARGS 5
-enum aarch64_type_qualifiers
-{
- /* T foo. */
- qualifier_none = 0x0,
- /* unsigned T foo. */
- qualifier_unsigned = 0x1, /* 1 << 0 */
- /* const T foo. */
- qualifier_const = 0x2, /* 1 << 1 */
- /* T *foo. */
- qualifier_pointer = 0x4, /* 1 << 2 */
- /* Used when expanding arguments if an operand could
- be an immediate. */
- qualifier_immediate = 0x8, /* 1 << 3 */
- qualifier_maybe_immediate = 0x10, /* 1 << 4 */
- /* void foo (...). */
- qualifier_void = 0x20, /* 1 << 5 */
- /* 1 << 6 is now unused */
- /* Some builtins should use the T_*mode* encoded in a simd_builtin_datum
- rather than using the type of the operand. */
- qualifier_map_mode = 0x80, /* 1 << 7 */
- /* qualifier_pointer | qualifier_map_mode */
- qualifier_pointer_map_mode = 0x84,
- /* qualifier_const | qualifier_pointer | qualifier_map_mode */
- qualifier_const_pointer_map_mode = 0x86,
- /* Polynomial types. */
- qualifier_poly = 0x100,
- /* Lane indices - must be in range, and flipped for bigendian. */
- qualifier_lane_index = 0x200,
- /* Lane indices for single lane structure loads and stores. */
- qualifier_struct_load_store_lane_index = 0x400,
- /* Lane indices selected in pairs. - must be in range, and flipped for
- bigendian. */
- qualifier_lane_pair_index = 0x800,
- /* Lane indices selected in quadtuplets. - must be in range, and flipped for
- bigendian. */
- qualifier_lane_quadtup_index = 0x1000,
-};
-
/* Flags that describe what a function might do. */
const unsigned int FLAG_NONE = 0U;
const unsigned int FLAG_READ_FPCR = 1U << 0;
@@ -897,47 +859,9 @@ const char *aarch64_scalar_builtin_types[] = {
NULL
};
-#define ENTRY(E, M, Q, G) E,
-enum aarch64_simd_type
-{
-#include "aarch64-simd-builtin-types.def"
- ARM_NEON_H_TYPES_LAST
-};
-#undef ENTRY
-
-struct GTY(()) aarch64_simd_type_info
-{
- enum aarch64_simd_type type;
-
- /* Internal type name. */
- const char *name;
-
- /* Internal type name(mangled). The mangled names conform to the
- AAPCS64 (see "Procedure Call Standard for the ARM 64-bit Architecture",
- Appendix A). To qualify for emission with the mangled names defined in
- that document, a vector type must not only be of the correct mode but also
- be of the correct internal AdvSIMD vector type (e.g. __Int8x8_t); these
- types are registered by aarch64_init_simd_builtin_types (). In other
- words, vector types defined in other ways e.g. via vector_size attribute
- will get default mangled names. */
- const char *mangle;
-
- /* Internal type. */
- tree itype;
-
- /* Element type. */
- tree eltype;
-
- /* Machine mode the internal type maps to. */
- enum machine_mode mode;
-
- /* Qualifiers. */
- enum aarch64_type_qualifiers q;
-};
-
#define ENTRY(E, M, Q, G) \
{E, "__" #E, #G "__" #E, NULL_TREE, NULL_TREE, E_##M##mode, qualifier_##Q},
-static GTY(()) struct aarch64_simd_type_info aarch64_simd_types [] = {
+GTY(()) struct aarch64_simd_type_info aarch64_simd_types [] = {
#include "aarch64-simd-builtin-types.def"
};
#undef ENTRY
@@ -351,6 +351,8 @@ aarch64_pragma_aarch64 (cpp_reader *)
handle_arm_neon_h ();
else if (strcmp (name, "arm_acle.h") == 0)
handle_arm_acle_h ();
+ else if (strcmp (name, "arm_neon_sve_bridge.h") == 0)
+ aarch64_sve::handle_arm_neon_sve_bridge_h ();
else
error ("unknown %<#pragma GCC aarch64%> option %qs", name);
}
new file mode 100644
@@ -0,0 +1,28 @@
+/* Builtin lists for the AArch64 NEON-SVE bridge.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef DEF_NEON_SVE_FUNCTION
+#define DEF_NEON_SVE_FUNCTION(A, B, C, D, E)
+#endif
+
+DEF_NEON_SVE_FUNCTION (svset_neonq, set_neonq, all_data, none, none)
+DEF_NEON_SVE_FUNCTION (svget_neonq, get_neonq, all_data, none, none)
+DEF_NEON_SVE_FUNCTION (svdup_neonq, dup_neonq, all_data, none, none)
+
+#undef DEF_NEON_SVE_FUNCTION
\ No newline at end of file
@@ -1009,8 +1009,10 @@ bool aarch64_general_check_builtin_call (location_t, vec<location_t>,
namespace aarch64_sve {
void init_builtins ();
+ void init_neon_sve_builtins ();
void handle_arm_sve_h ();
void handle_arm_sme_h ();
+ void handle_arm_neon_sve_bridge_h ();
tree builtin_decl (unsigned, bool);
bool builtin_type_p (const_tree);
bool builtin_type_p (const_tree, unsigned int *, unsigned int *);
@@ -299,6 +299,12 @@ namespace aarch64_sve
extern const function_base *const svzip2;
extern const function_base *const svzip2q;
}
+ namespace neon_sve_bridge_functions
+ {
+ extern const function_base *const svset_neonq;
+ extern const function_base *const svget_neonq;
+ extern const function_base *const svdup_neonq;
+ }
}
#endif
@@ -44,6 +44,7 @@
#include "aarch64-sve-builtins-shapes.h"
#include "aarch64-sve-builtins-base.h"
#include "aarch64-sve-builtins-functions.h"
+#include "aarch64-builtins.h"
#include "ssa.h"
#include "gimple-fold.h"
@@ -1099,6 +1100,116 @@ public:
}
};
+class svget_neonq_impl : public function_base
+{
+public:
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (BYTES_BIG_ENDIAN)
+ return NULL;
+ tree rhs_sve_vector = gimple_call_arg (f.call, 0);
+ tree rhs_vector = build3 (BIT_FIELD_REF, TREE_TYPE (f.lhs),
+ rhs_sve_vector, bitsize_int (128), bitsize_int (0));
+ return gimple_build_assign (f.lhs, rhs_vector);
+ }
+ rtx
+ expand (function_expander &e) const override
+ {
+ if (BYTES_BIG_ENDIAN)
+ {
+ machine_mode mode = e.vector_mode (0);
+ insn_code icode = code_for_aarch64_sve_get_neonq (mode);
+ unsigned int nunits = 128 / GET_MODE_UNIT_BITSIZE (mode);
+ rtx indices = aarch64_gen_stepped_int_parallel
+	  (nunits, nunits - 1, -1);
+
+ e.add_output_operand (icode);
+ e.add_input_operand (icode, e.args[0]);
+ e.add_fixed_operand (indices);
+ return e.generate_insn (icode);
+ }
+ return simplify_gen_subreg (e.result_mode (), e.args[0],
+ GET_MODE (e.args[0]), 0);
+ }
+};
+
+class svset_neonq_impl : public function_base
+{
+public:
+ rtx
+ expand (function_expander &e) const override
+ {
+ machine_mode mode = e.vector_mode (0);
+ rtx_vector_builder builder (VNx16BImode, 16, 2);
+ for (unsigned int i = 0; i < 16; i++)
+ builder.quick_push (CONST1_RTX (BImode));
+ for (unsigned int i = 0; i < 16; i++)
+ builder.quick_push (CONST0_RTX (BImode));
+ e.args.quick_push (builder.build ());
+ if (BYTES_BIG_ENDIAN)
+ return e.use_exact_insn (code_for_aarch64_sve_set_neonq (mode));
+ insn_code icode = code_for_vcond_mask (mode, mode);
+ e.args[1] = lowpart_subreg (mode, e.args[1], GET_MODE (e.args[1]));
+ e.add_output_operand (icode);
+ e.add_input_operand (icode, e.args[1]);
+ e.add_input_operand (icode, e.args[0]);
+ e.add_input_operand (icode, e.args[2]);
+ return e.generate_insn (icode);
+ }
+};
+
+class svdup_neonq_impl : public function_base
+{
+public:
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+    if (BYTES_BIG_ENDIAN)
+      return NULL;
+ tree rhs_vector = gimple_call_arg (f.call, 0);
+ unsigned HOST_WIDE_INT neon_nelts
+ = TYPE_VECTOR_SUBPARTS (TREE_TYPE (rhs_vector)).to_constant ();
+    poly_uint64 sve_nelts = TYPE_VECTOR_SUBPARTS (TREE_TYPE (f.lhs));
+ vec_perm_builder builder (sve_nelts, neon_nelts, 1);
+    for (unsigned int i = 0; i < neon_nelts; i++)
+      builder.quick_push (i);
+ vec_perm_indices indices (builder, 1, neon_nelts);
+ tree perm_type = build_vector_type (ssizetype, sve_nelts);
+ return gimple_build_assign (f.lhs, VEC_PERM_EXPR,
+ rhs_vector,
+ rhs_vector,
+ vec_perm_indices_to_tree (perm_type, indices));
+ }
+ rtx
+ expand (function_expander &e) const override
+ {
+ insn_code icode;
+ machine_mode mode = e.vector_mode (0);
+ if (BYTES_BIG_ENDIAN)
+ {
+ icode = code_for_aarch64_vec_duplicate_vq_be (mode);
+ unsigned int nunits = 128 / GET_MODE_UNIT_BITSIZE (mode);
+ rtx indices = aarch64_gen_stepped_int_parallel
+	  (nunits, nunits - 1, -1);
+
+ e.add_output_operand (icode);
+ e.add_input_operand (icode, e.args[0]);
+ e.add_fixed_operand (indices);
+ return e.generate_insn (icode);
+ }
+ icode = code_for_aarch64_vec_duplicate_vq_le (mode);
+ e.add_output_operand (icode);
+ e.add_input_operand (icode, e.args[0]);
+ return e.generate_insn (icode);
+ }
+};
+
class svindex_impl : public function_base
{
public:
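One observation on svset_neonq_impl::expand above (a note on the existing code, not new code): the VNx16BI constant it builds is the predicate that selects exactly the low 128 bits, which is why the tests later in this patch expect a fixed-length ptrue before the sel:

    /* The constant built by rtx_vector_builder (VNx16BImode, 16, 2) is
       { 1, 1, ..., 1 (x16), 0, 0, ... }: the same predicate as
       svptrue_pat_b8 (SV_VL16), i.e. "ptrue pN.b, vl16" (or .h/.s/.d
       with vl8/vl4/vl2 at wider element sizes).  */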
@@ -3122,5 +3233,8 @@ FUNCTION (svzip1q, unspec_based_function, (UNSPEC_ZIP1Q, UNSPEC_ZIP1Q,
FUNCTION (svzip2, svzip_impl, (1))
FUNCTION (svzip2q, unspec_based_function, (UNSPEC_ZIP2Q, UNSPEC_ZIP2Q,
UNSPEC_ZIP2Q))
+NEON_SVE_BRIDGE_FUNCTION (svget_neonq, svget_neonq_impl,)
+NEON_SVE_BRIDGE_FUNCTION (svset_neonq, svset_neonq_impl,)
+NEON_SVE_BRIDGE_FUNCTION (svdup_neonq, svdup_neonq_impl,)
} /* end namespace aarch64_sve */
@@ -840,4 +840,8 @@ public:
namespace { static CONSTEXPR const CLASS NAME##_obj ARGS; } \
namespace functions { const function_base *const NAME = &NAME##_obj; }
+#define NEON_SVE_BRIDGE_FUNCTION(NAME, CLASS, ARGS) \
+ namespace { static CONSTEXPR const CLASS NAME##_obj ARGS; } \
+ namespace neon_sve_bridge_functions { const function_base *const NAME = &NAME##_obj; }
+
#endif
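For reference, a sketch of the expansion the new macro produces for NEON_SVE_BRIDGE_FUNCTION (svget_neonq, svget_neonq_impl,), matching the extern declarations added to namespace neon_sve_bridge_functions earlier in this patch:

    namespace { static CONSTEXPR const svget_neonq_impl svget_neonq_obj; }
    namespace neon_sve_bridge_functions
    {
      const function_base *const svget_neonq = &svget_neonq_obj;
    }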
@@ -126,10 +126,12 @@ namespace aarch64_sve
extern const function_shape *const dot_za_slice_lane;
extern const function_shape *const dot_za_slice_uint_lane;
extern const function_shape *const dupq;
+ extern const function_shape *const dup_neonq;
extern const function_shape *const ext;
extern const function_shape *const extract_pred;
extern const function_shape *const fold_left;
extern const function_shape *const get;
+ extern const function_shape *const get_neonq;
extern const function_shape *const inc_dec;
extern const function_shape *const inc_dec_pat;
extern const function_shape *const inc_dec_pred;
@@ -170,6 +172,7 @@ namespace aarch64_sve
extern const function_shape *const select_pred;
extern const function_shape *const set;
extern const function_shape *const setffr;
+ extern const function_shape *const set_neonq;
extern const function_shape *const shift_left_imm_long;
extern const function_shape *const shift_left_imm_to_uint;
extern const function_shape *const shift_right_imm;
@@ -29,6 +29,7 @@
#include "optabs.h"
#include "aarch64-sve-builtins.h"
#include "aarch64-sve-builtins-shapes.h"
+#include "aarch64-builtins.h"
/* In the comments below, _t0 represents the first type suffix and _t1
represents the second. Square brackets enclose characters that are
@@ -178,6 +179,8 @@ parse_element_type (const function_instance &instance, const char *&format)
s<elt> - a scalar type with the given element suffix
t<elt> - a vector or tuple type with given element suffix [*1]
v<elt> - a vector with the given element suffix
+      D<elt> - a 64-bit NEON vector with the given element suffix
+      Q<elt> - a 128-bit NEON vector with the given element suffix
where <elt> has the format described above parse_element_type
@@ -261,6 +264,20 @@ parse_type (const function_instance &instance, const char *&format)
return acle_vector_types[0][type_suffixes[suffix].vector_type];
}
+ if (ch == 'D')
+ {
+ type_suffix_index suffix = parse_element_type (instance, format);
+ int neon_index = type_suffixes[suffix].neon64_type;
+ return aarch64_simd_types[neon_index].itype;
+ }
+
+ if (ch == 'Q')
+ {
+ type_suffix_index suffix = parse_element_type (instance, format);
+ int neon_index = type_suffixes[suffix].neon128_type;
+ return aarch64_simd_types[neon_index].itype;
+ }
+
gcc_unreachable ();
}
@@ -2476,6 +2493,67 @@ struct get_def : public overloaded_base<0>
};
SHAPE (get)
+/* <t0>xN_t svfoo[_t0](sv<t0>_t). */
+struct get_neonq_def : public overloaded_base<0>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "Q0,v0", group, MODE_none);
+ }
+ tree
+ resolve (function_resolver &r) const override
+ {
+ return r.resolve_unary ();
+ }
+};
+SHAPE (get_neonq)
+
+/* sv<t0>_t svfoo[_t0](sv<t0>_t, <t0>xN_t). */
+struct set_neonq_def : public overloaded_base<0>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "v0,v0,Q0", group, MODE_none);
+ }
+ tree
+ resolve (function_resolver &r) const override
+ {
+ unsigned int i, nargs;
+ type_suffix_index type;
+ if (!r.check_gp_argument (2, i, nargs)
+ || (type = r.infer_neon128_vector_type (i + 1)) == NUM_TYPE_SUFFIXES)
+ return error_mark_node;
+ return r.resolve_to (r.mode_suffix_id, type);
+ }
+};
+SHAPE (set_neonq)
+
+/* sv<t0>_t svfoo[_t0](<t0>xN_t). */
+struct dup_neonq_def : public overloaded_base<0>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "v0,Q0", group, MODE_none);
+ }
+ tree
+ resolve (function_resolver &r) const override
+ {
+ unsigned int i, nargs;
+ type_suffix_index type;
+ if (!r.check_gp_argument (1, i, nargs)
+ || (type = r.infer_neon128_vector_type (i)) == NUM_TYPE_SUFFIXES)
+ return error_mark_node;
+ return r.resolve_to (r.mode_suffix_id, type);
+ }
+};
+SHAPE (dup_neonq)
+
/* sv<t0>_t svfoo[_t0](sv<t0>_t, uint64_t)
<t0>_t svfoo[_n_t0](<t0>_t, uint64_t)
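To make the new signature strings concrete, this is how they read for the _s64 instances (a sketch; 'v' is an SVE vector, the new 'Q' is a 128-bit NEON vector, and '0' refers to the first type suffix):

    /* "Q0,v0"    */  int64x2_t svget_neonq_s64 (svint64_t);
    /* "v0,v0,Q0" */  svint64_t svset_neonq_s64 (svint64_t, int64x2_t);
    /* "v0,Q0"    */  svint64_t svdup_neonq_s64 (int64x2_t);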
@@ -53,6 +53,7 @@
#include "aarch64-sve-builtins-sve2.h"
#include "aarch64-sve-builtins-sme.h"
#include "aarch64-sve-builtins-shapes.h"
+#include "aarch64-builtins.h"
namespace aarch64_sve {
@@ -129,7 +130,8 @@ CONSTEXPR const mode_suffix_info mode_suffixes[] = {
/* Static information about each type_suffix_index. */
CONSTEXPR const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
-#define DEF_SVE_TYPE_SUFFIX(NAME, ACLE_TYPE, CLASS, BITS, MODE) \
+#define DEF_SVE_NEON_TYPE_SUFFIX(NAME, ACLE_TYPE, CLASS, BITS, MODE, \
+ NEON64, NEON128) \
{ "_" #NAME, \
VECTOR_TYPE_##ACLE_TYPE, \
TYPE_##CLASS, \
@@ -142,7 +144,12 @@ CONSTEXPR const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
TYPE_##CLASS == TYPE_bool, \
false, \
0, \
- MODE },
+ MODE, \
+ NEON64, \
+ NEON128 },
+#define DEF_SVE_TYPE_SUFFIX(NAME, ACLE_TYPE, CLASS, BITS, MODE) \
+ DEF_SVE_NEON_TYPE_SUFFIX (NAME, ACLE_TYPE, CLASS, BITS, MODE, \
+ ARM_NEON_H_TYPES_LAST, ARM_NEON_H_TYPES_LAST)
#define DEF_SME_ZA_SUFFIX(NAME, BITS, MODE) \
{ "_" #NAME, \
NUM_VECTOR_TYPES, \
@@ -156,10 +163,12 @@ CONSTEXPR const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
false, \
true, \
0, \
- MODE },
+ MODE, \
+ ARM_NEON_H_TYPES_LAST, \
+ ARM_NEON_H_TYPES_LAST },
#include "aarch64-sve-builtins.def"
{ "", NUM_VECTOR_TYPES, TYPE_bool, 0, 0, false, false, false, false,
- false, false, 0, VOIDmode }
+ false, false, 0, VOIDmode, ARM_NEON_H_TYPES_LAST, ARM_NEON_H_TYPES_LAST }
};
CONSTEXPR const group_suffix_info group_suffixes[] = {
@@ -884,6 +893,14 @@ static CONSTEXPR const function_group_info function_groups[] = {
#include "aarch64-sve-builtins.def"
};
+/* A list of all NEON-SVE bridge ACLE functions.  */
+static CONSTEXPR const function_group_info neon_sve_function_groups[] = {
+#define DEF_NEON_SVE_FUNCTION(NAME, SHAPE, TYPES, GROUPS, PREDS) \
+ { #NAME, &neon_sve_bridge_functions::NAME, &shapes::SHAPE, types_##TYPES, \
+ groups_##GROUPS, preds_##PREDS, 0 },
+#include "aarch64-neon-sve-bridge-builtins.def"
+};
+
/* The scalar type associated with each vector type. */
extern GTY(()) tree scalar_types[NUM_VECTOR_TYPES + 1];
tree scalar_types[NUM_VECTOR_TYPES + 1];
@@ -2092,6 +2109,35 @@ function_resolver::infer_integer_vector_type (unsigned int argno)
return type;
}
+/* Require argument ARGNO to be a 128-bit NEON vector type.  Return the
+   associated type suffix on success; report an error and return
+   NUM_TYPE_SUFFIXES on failure.  */
+type_suffix_index
+function_resolver::infer_neon128_vector_type (unsigned int argno)
+{
+ tree actual = get_argument_type (argno);
+ if (actual == error_mark_node)
+ return NUM_TYPE_SUFFIXES;
+
+ for (unsigned int suffix_i = 0; suffix_i < NUM_TYPE_SUFFIXES; ++suffix_i)
+ {
+ int neon_index = type_suffixes[suffix_i].neon128_type;
+ if (neon_index != ARM_NEON_H_TYPES_LAST)
+ {
+ tree type = aarch64_simd_types[neon_index].itype;
+ if (type && matches_type_p (type, actual))
+	  if (type && matches_type_p (type, actual))
+	    return type_suffix_index (suffix_i);
+ }
+
+ error_at (location, "passing %qT to argument %d of %qE, which"
+ " expects a 128 bit NEON vector type", actual, argno + 1, fndecl);
+ return NUM_TYPE_SUFFIXES;
+}
+
/* Like infer_vector_type, but also require the type to be an unsigned
integer. */
type_suffix_index
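A concrete example of the new resolution hook (a sketch of the intended behaviour):

    /* For svset_neonq (sveS64, s64), where s64 has type int64x2_t,
       infer_neon128_vector_type matches the Int64x2_t entry in
       aarch64_simd_types, returns the s64 type suffix, and the call
       then resolves to svset_neonq_s64.  */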
@@ -4457,6 +4503,14 @@ init_builtins ()
}
}
+/* Initialize the NEON-SVE bridge functions at start-up, if LTO is in use.  */
+void
+init_neon_sve_builtins ()
+{
+ if (in_lto_p)
+ handle_arm_neon_sve_bridge_h ();
+}
+
/* Register vector type TYPE under its arm_sve.h name. */
static void
register_vector_type (vector_type_index type)
@@ -4588,6 +4642,16 @@ handle_arm_sve_h ()
builder.register_function_group (function_groups[i]);
}
+/* Implement #pragma GCC aarch64 "arm_neon_sve_bridge.h". */
+void
+handle_arm_neon_sve_bridge_h ()
+{
+ /* Define the functions. */
+ function_builder builder;
+ for (unsigned int i = 0; i < ARRAY_SIZE (neon_sve_function_groups); ++i)
+ builder.register_function_group (neon_sve_function_groups[i]);
+}
+
/* Return the function decl with SVE function subcode CODE, or error_mark_node
if no such function exists. */
tree
@@ -41,6 +41,11 @@
#define DEF_SVE_FUNCTION_GS(A, B, C, D, E)
#endif
+#ifndef DEF_SVE_NEON_TYPE_SUFFIX
+#define DEF_SVE_NEON_TYPE_SUFFIX(A, B, C, D, E, F, G) \
+ DEF_SVE_TYPE_SUFFIX(A, B, C, D, E)
+#endif
+
#ifndef DEF_SVE_FUNCTION
#define DEF_SVE_FUNCTION(NAME, SHAPE, TYPES, PREDS) \
DEF_SVE_FUNCTION_GS (NAME, SHAPE, TYPES, none, PREDS)
@@ -107,23 +112,35 @@ DEF_SVE_TYPE_SUFFIX (b8, svbool_t, bool, 8, VNx16BImode)
DEF_SVE_TYPE_SUFFIX (b16, svbool_t, bool, 16, VNx8BImode)
DEF_SVE_TYPE_SUFFIX (b32, svbool_t, bool, 32, VNx4BImode)
DEF_SVE_TYPE_SUFFIX (b64, svbool_t, bool, 64, VNx2BImode)
-DEF_SVE_TYPE_SUFFIX (bf16, svbfloat16_t, bfloat, 16, VNx8BFmode)
DEF_SVE_TYPE_SUFFIX (c, svcount_t, count, 8, VNx16BImode)
DEF_SVE_TYPE_SUFFIX (c8, svcount_t, count, 8, VNx16BImode)
DEF_SVE_TYPE_SUFFIX (c16, svcount_t, count, 16, VNx16BImode)
DEF_SVE_TYPE_SUFFIX (c32, svcount_t, count, 32, VNx16BImode)
DEF_SVE_TYPE_SUFFIX (c64, svcount_t, count, 64, VNx16BImode)
-DEF_SVE_TYPE_SUFFIX (f16, svfloat16_t, float, 16, VNx8HFmode)
-DEF_SVE_TYPE_SUFFIX (f32, svfloat32_t, float, 32, VNx4SFmode)
-DEF_SVE_TYPE_SUFFIX (f64, svfloat64_t, float, 64, VNx2DFmode)
-DEF_SVE_TYPE_SUFFIX (s8, svint8_t, signed, 8, VNx16QImode)
-DEF_SVE_TYPE_SUFFIX (s16, svint16_t, signed, 16, VNx8HImode)
-DEF_SVE_TYPE_SUFFIX (s32, svint32_t, signed, 32, VNx4SImode)
-DEF_SVE_TYPE_SUFFIX (s64, svint64_t, signed, 64, VNx2DImode)
-DEF_SVE_TYPE_SUFFIX (u8, svuint8_t, unsigned, 8, VNx16QImode)
-DEF_SVE_TYPE_SUFFIX (u16, svuint16_t, unsigned, 16, VNx8HImode)
-DEF_SVE_TYPE_SUFFIX (u32, svuint32_t, unsigned, 32, VNx4SImode)
-DEF_SVE_TYPE_SUFFIX (u64, svuint64_t, unsigned, 64, VNx2DImode)
+DEF_SVE_NEON_TYPE_SUFFIX (bf16, svbfloat16_t, bfloat, 16, VNx8BFmode,
+ Bfloat16x4_t, Bfloat16x8_t)
+DEF_SVE_NEON_TYPE_SUFFIX (f16, svfloat16_t, float, 16, VNx8HFmode,
+ Float16x4_t, Float16x8_t)
+DEF_SVE_NEON_TYPE_SUFFIX (f32, svfloat32_t, float, 32, VNx4SFmode,
+ Float32x2_t, Float32x4_t)
+DEF_SVE_NEON_TYPE_SUFFIX (f64, svfloat64_t, float, 64, VNx2DFmode,
+ Float64x1_t, Float64x2_t)
+DEF_SVE_NEON_TYPE_SUFFIX (s8, svint8_t, signed, 8, VNx16QImode,
+ Int8x8_t, Int8x16_t)
+DEF_SVE_NEON_TYPE_SUFFIX (s16, svint16_t, signed, 16, VNx8HImode,
+ Int16x4_t, Int16x8_t)
+DEF_SVE_NEON_TYPE_SUFFIX (s32, svint32_t, signed, 32, VNx4SImode,
+ Int32x2_t, Int32x4_t)
+DEF_SVE_NEON_TYPE_SUFFIX (s64, svint64_t, signed, 64, VNx2DImode,
+ Int64x1_t, Int64x2_t)
+DEF_SVE_NEON_TYPE_SUFFIX (u8, svuint8_t, unsigned, 8, VNx16QImode,
+ Uint8x8_t, Uint8x16_t)
+DEF_SVE_NEON_TYPE_SUFFIX (u16, svuint16_t, unsigned, 16, VNx8HImode,
+ Uint16x4_t, Uint16x8_t)
+DEF_SVE_NEON_TYPE_SUFFIX (u32, svuint32_t, unsigned, 32, VNx4SImode,
+ Uint32x2_t, Uint32x4_t)
+DEF_SVE_NEON_TYPE_SUFFIX (u64, svuint64_t, unsigned, 64, VNx2DImode,
+ Uint64x1_t, Uint64x2_t)
/* Associate _za with bytes. This is needed for svldr_vnum_za and
svstr_vnum_za, whose ZA offset can be in the range [0, 15], as for za8. */
@@ -159,6 +176,7 @@ DEF_SVE_GROUP_SUFFIX (vg4x4, 4, 4)
#undef DEF_SVE_FUNCTION_GS
#undef DEF_SVE_GROUP_SUFFIX
#undef DEF_SME_ZA_SUFFIX
+#undef DEF_SVE_NEON_TYPE_SUFFIX
#undef DEF_SVE_TYPE_SUFFIX
#undef DEF_SVE_TYPE
#undef DEF_SVE_MODE
@@ -20,6 +20,8 @@
#ifndef GCC_AARCH64_SVE_BUILTINS_H
#define GCC_AARCH64_SVE_BUILTINS_H
+#include "aarch64-builtins.h"
+
/* The full name of an SVE ACLE function is the concatenation of:
- the base name ("svadd", etc.)
@@ -229,6 +231,14 @@ struct mode_suffix_info
units_index displacement_units;
};
+#define ENTRY(E, M, Q, G) E,
+enum aarch64_simd_type
+{
+#include "aarch64-simd-builtin-types.def"
+ ARM_NEON_H_TYPES_LAST
+};
+#undef ENTRY
+
/* Static information about a type suffix. */
struct type_suffix_info
{
@@ -262,6 +272,11 @@ struct type_suffix_info
/* The associated vector or predicate mode. */
machine_mode vector_mode : 16;
+
+ /* The corresponding 64-bit and 128-bit arm_neon.h types, or
+ ARM_NEON_H_TYPES_LAST if none. */
+ aarch64_simd_type neon64_type;
+ aarch64_simd_type neon128_type;
};
/* Static information about a group suffix. */
@@ -498,6 +513,7 @@ public:
sve_type infer_vector_or_tuple_type (unsigned int, unsigned int);
type_suffix_index infer_vector_type (unsigned int);
type_suffix_index infer_integer_vector_type (unsigned int);
+ type_suffix_index infer_neon128_vector_type (unsigned int);
type_suffix_index infer_unsigned_vector_type (unsigned int);
type_suffix_index infer_sd_vector_type (unsigned int);
sve_type infer_tuple_type (unsigned int);
@@ -10950,3 +10950,36 @@
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
)
+
+(define_insn_and_split "@aarch64_sve_get_neonq_<mode>"
+ [(set (match_operand:<V128> 0 "register_operand" "=w")
+ (vec_select:<V128>
+ (match_operand:SVE_FULL 1 "register_operand" "w")
+ (match_operand 2 "descending_int_parallel")))]
+ "TARGET_SVE
+ && BYTES_BIG_ENDIAN
+ && known_eq (INTVAL (XVECEXP (operands[2], 0, 0)),
+ GET_MODE_NUNITS (<V128>mode) - 1)"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0) (match_dup 1))]
+ {
+ operands[1] = gen_rtx_REG (<V128>mode, REGNO (operands[1]));
+ }
+)
+
+(define_insn "@aarch64_sve_set_neonq_<mode>"
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
+ [(match_operand:SVE_FULL 1 "register_operand" "w")
+ (match_operand:<V128> 2 "register_operand" "w")
+ (match_operand:<VPRED> 3 "register_operand" "Upl")]
+ UNSPEC_SET_NEONQ))]
+ "TARGET_SVE
+ && BYTES_BIG_ENDIAN"
+ {
+ operands[2] = lowpart_subreg (<MODE>mode, operands[2],
+ GET_MODE (operands[2]));
+ return "sel\t%0.<Vetype>, %3, %2.<Vetype>, %1.<Vetype>";
+ }
+)
\ No newline at end of file
@@ -15378,6 +15378,7 @@ aarch64_init_builtins ()
{
aarch64_general_init_builtins ();
aarch64_sve::init_builtins ();
+ aarch64_sve::init_neon_sve_builtins ();
#ifdef SUBTARGET_INIT_BUILTINS
SUBTARGET_INIT_BUILTINS;
#endif
new file mode 100644
@@ -0,0 +1,38 @@
+/* AArch64 NEON-SVE Bridge intrinsics include file.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _ARM_NEON_SVE_BRIDGE_H_
+#define _ARM_NEON_SVE_BRIDGE_H_
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+
+/* NOTE: This implementation of arm_neon_sve_bridge.h is intentionally
+   short.  It does not define the types and intrinsics directly in C and
+   C++ code, but instead uses the following pragma to tell GCC to insert
+   the necessary type and function definitions itself.  The net effect is
+   the same, and this file is a complete implementation of the header.  */
+#pragma GCC aarch64 "arm_neon_sve_bridge.h"
+
+#endif
\ No newline at end of file
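To illustrate the intended use of the three intrinsics this header provides, a minimal sketch (the function names are invented for the example; compile with e.g. -march=armv8.2-a+sve):

    #include <arm_neon_sve_bridge.h>

    /* Reuse a NEON across-lane add on the low 128 bits of an SVE vector.  */
    int32_t
    low_lane_sum (svint32_t x)
    {
      int32x4_t lo = svget_neonq_s32 (x);	/* Extract the low 128 bits.  */
      return vaddvq_s32 (lo);			/* NEON horizontal add.  */
    }

    /* Insert a NEON vector into the low 128 bits of an SVE vector.  */
    svint32_t
    insert_low (svint32_t x, int32x4_t v)
    {
      return svset_neonq_s32 (x, v);
    }

    /* Broadcast a NEON vector to every 128-bit chunk of an SVE vector.  */
    svint32_t
    broadcast_q (int32x4_t v)
    {
      return svdup_neonq_s32 (v);
    }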
@@ -806,6 +806,7 @@
UNSPEC_FTSMUL ; Used in aarch64-sve.md.
UNSPEC_FTSSEL ; Used in aarch64-sve.md.
UNSPEC_SMATMUL ; Used in aarch64-sve.md.
+ UNSPEC_SET_NEONQ ; Used in aarch64-sve.md.
UNSPEC_UMATMUL ; Used in aarch64-sve.md.
UNSPEC_USMATMUL ; Used in aarch64-sve.md.
UNSPEC_TRN1Q ; Used in aarch64-sve.md.
new file mode 100644
@@ -0,0 +1,64 @@
+/* { dg-do run { target aarch64_sve_hw } } */
+/* { dg-options "-march=armv8.2-a+sve" } */
+
+#include <arm_neon_sve_bridge.h>
+
+extern void abort (void);
+
+int
+svget_neonq_test ()
+{
+ int64_t val1 = 987654321;
+ int64_t val2 = 123456789;
+ svint64_t sveInput = svdupq_n_s64 (val1, val2);
+ int64x2_t neonReturn = svget_neonq_s64 (sveInput);
+ int64_t val1Return = vgetq_lane_s64 (neonReturn, 0);
+ int64_t val2Return = vgetq_lane_s64 (neonReturn, 1);
+ if (val1 == val1Return && val2 == val2Return)
+ return 0;
+ return 1;
+}
+
+int
+svset_neonq_test ()
+{
+ int64_t val1 = 987654321;
+ int64_t val2 = 123456789;
+  int64x2_t NeonInput = vdupq_n_s64 (0);
+  NeonInput = vsetq_lane_s64 (val1, NeonInput, 0);
+  NeonInput = vsetq_lane_s64 (val2, NeonInput, 1);
+  svint64_t sveReturn = svset_neonq_s64 (svdup_n_s64 (0), NeonInput);
+  int64_t val1Return = svlasta_s64 (svptrue_b64 (), sveReturn);
+  int64_t val2Return = svlastb_s64 (svptrue_pat_b8 (SV_VL16), sveReturn);
+ if (val1 == val1Return && val2 == val2Return)
+ return 0;
+ return 1;
+}
+
+int
+svdup_neonq_test ()
+{
+ int64_t val1 = 987654321;
+ int64_t val2 = 123456789;
+  int64x2_t NeonInput = vdupq_n_s64 (0);
+  NeonInput = vsetq_lane_s64 (val1, NeonInput, 0);
+  NeonInput = vsetq_lane_s64 (val2, NeonInput, 1);
+  svint64_t sveReturn = svdup_neonq_s64 (NeonInput);
+  int64_t val1Return = svlasta_s64 (svptrue_b64 (), sveReturn);
+  int64_t val2Return = svlastb_s64 (svptrue_b64 (), sveReturn);
+ if (val1 == val1Return && val2 == val2Return)
+ return 0;
+ return 1;
+}
+
+int
+main ()
+{
+ if (svget_neonq_test () == 1)
+ abort ();
+ if (svset_neonq_test () == 1)
+ abort ();
+ if (svdup_neonq_test () == 1)
+ abort ();
+ return 0;
+}
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** dup_neonq_bf16_z0:
+** dup z0.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_bf16_z0, bfloat16x8_t, svbfloat16_t,
+ z0 = svdup_neonq_bf16 (z4),
+ z0 = svdup_neonq (z4))
+
+/*
+** dup_neonq_bf16_z4:
+** dup z4.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_bf16_z4, bfloat16x8_t, svbfloat16_t,
+ z4_res = svdup_neonq_bf16 (z4),
+ z4_res = svdup_neonq (z4))
+
+/*
+** dup_neonq_bf16_z5:
+** dup z5.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_bf16_z5, bfloat16x8_t, svbfloat16_t,
+ z5_res = svdup_neonq_bf16 (z4),
+ z5_res = svdup_neonq (z4))
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** dup_neonq_f16_z0:
+** dup z0.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_f16_z0, float16x8_t, svfloat16_t,
+ z0 = svdup_neonq_f16 (z4),
+ z0 = svdup_neonq (z4))
+
+/*
+** dup_neonq_f16_z4:
+** dup z4.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_f16_z4, float16x8_t, svfloat16_t,
+ z4_res = svdup_neonq_f16 (z4),
+ z4_res = svdup_neonq (z4))
+
+/*
+** dup_neonq_f16_z5:
+** dup z5.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_f16_z5, float16x8_t, svfloat16_t,
+ z5_res = svdup_neonq_f16 (z4),
+ z5_res = svdup_neonq (z4))
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** dup_neonq_f32_z0:
+** dup z0.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_f32_z0, float32x4_t, svfloat32_t,
+ z0 = svdup_neonq_f32 (z4),
+ z0 = svdup_neonq (z4))
+
+/*
+** dup_neonq_f32_z4:
+** dup z4.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_f32_z4, float32x4_t, svfloat32_t,
+ z4_res = svdup_neonq_f32 (z4),
+ z4_res = svdup_neonq (z4))
+
+/*
+** dup_neonq_f32_z5:
+** dup z5.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_f32_z5, float32x4_t, svfloat32_t,
+ z5_res = svdup_neonq_f32 (z4),
+ z5_res = svdup_neonq (z4))
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** dup_neonq_f64_z0:
+** dup z0.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_f64_z0, float64x2_t, svfloat64_t,
+ z0 = svdup_neonq_f64 (z4),
+ z0 = svdup_neonq (z4))
+
+/*
+** dup_neonq_f64_z4:
+** dup z4.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_f64_z4, float64x2_t, svfloat64_t,
+ z4_res = svdup_neonq_f64 (z4),
+ z4_res = svdup_neonq (z4))
+
+/*
+** dup_neonq_f64_z5:
+** dup z5.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_f64_z5, float64x2_t, svfloat64_t,
+ z5_res = svdup_neonq_f64 (z4),
+ z5_res = svdup_neonq (z4))
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** dup_neonq_s16_z0:
+** dup z0.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_s16_z0, int16x8_t, svint16_t,
+ z0 = svdup_neonq_s16 (z4),
+ z0 = svdup_neonq (z4))
+
+/*
+** dup_neonq_s16_z4:
+** dup z4.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_s16_z4, int16x8_t, svint16_t,
+ z4_res = svdup_neonq_s16 (z4),
+ z4_res = svdup_neonq (z4))
+
+/*
+** dup_neonq_s16_z5:
+** dup z5.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_s16_z5, int16x8_t, svint16_t,
+ z5_res = svdup_neonq_s16 (z4),
+ z5_res = svdup_neonq (z4))
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** dup_neonq_s32_z0:
+** dup z0.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_s32_z0, int32x4_t, svint32_t,
+ z0 = svdup_neonq_s32 (z4),
+ z0 = svdup_neonq (z4))
+
+/*
+** dup_neonq_s32_z4:
+** dup z4.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_s32_z4, int32x4_t, svint32_t,
+ z4_res = svdup_neonq_s32 (z4),
+ z4_res = svdup_neonq (z4))
+
+/*
+** dup_neonq_s32_z5:
+** dup z5.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_s32_z5, int32x4_t, svint32_t,
+ z5_res = svdup_neonq_s32 (z4),
+ z5_res = svdup_neonq (z4))
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** dup_neonq_s64_z0:
+** dup z0.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_s64_z0, int64x2_t, svint64_t,
+ z0 = svdup_neonq_s64 (z4),
+ z0 = svdup_neonq (z4))
+
+/*
+** dup_neonq_s64_z4:
+** dup z4.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_s64_z4, int64x2_t, svint64_t,
+ z4_res = svdup_neonq_s64 (z4),
+ z4_res = svdup_neonq (z4))
+
+/*
+** dup_neonq_s64_z5:
+** dup z5.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_s64_z5, int64x2_t, svint64_t,
+ z5_res = svdup_neonq_s64 (z4),
+ z5_res = svdup_neonq (z4))
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** dup_neonq_s8_z0:
+** dup z0.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_s8_z0, int8x16_t, svint8_t,
+ z0 = svdup_neonq_s8 (z4),
+ z0 = svdup_neonq (z4))
+
+/*
+** dup_neonq_s8_z4:
+** dup z4.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_s8_z4, int8x16_t, svint8_t,
+ z4_res = svdup_neonq_s8 (z4),
+ z4_res = svdup_neonq (z4))
+
+/*
+** dup_neonq_s8_z5:
+** dup z5.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_s8_z5, int8x16_t, svint8_t,
+ z5_res = svdup_neonq_s8 (z4),
+ z5_res = svdup_neonq (z4))
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** dup_neonq_u16_z0:
+** dup z0.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_u16_z0, uint16x8_t, svuint16_t,
+ z0 = svdup_neonq_u16 (z4),
+ z0 = svdup_neonq (z4))
+
+/*
+** dup_neonq_u16_z4:
+** dup z4.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_u16_z4, uint16x8_t, svuint16_t,
+ z4_res = svdup_neonq_u16 (z4),
+ z4_res = svdup_neonq (z4))
+
+/*
+** dup_neonq_u16_z5:
+** dup z5.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_u16_z5, uint16x8_t, svuint16_t,
+ z5_res = svdup_neonq_u16 (z4),
+ z5_res = svdup_neonq (z4))
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** dup_neonq_u32_z0:
+** dup z0.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_u32_z0, uint32x4_t, svuint32_t,
+ z0 = svdup_neonq_u32 (z4),
+ z0 = svdup_neonq (z4))
+
+/*
+** dup_neonq_u32_z4:
+** dup z4.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_u32_z4, uint32x4_t, svuint32_t,
+ z4_res = svdup_neonq_u32 (z4),
+ z4_res = svdup_neonq (z4))
+
+/*
+** dup_neonq_u32_z5:
+** dup z5.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_u32_z5, uint32x4_t, svuint32_t,
+ z5_res = svdup_neonq_u32 (z4),
+ z5_res = svdup_neonq (z4))
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** dup_neonq_u64_z0:
+** dup z0.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_u64_z0, uint64x2_t, svuint64_t,
+ z0 = svdup_neonq_u64 (z4),
+ z0 = svdup_neonq (z4))
+
+/*
+** dup_neonq_u64_z4:
+** dup z4.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_u64_z4, uint64x2_t, svuint64_t,
+ z4_res = svdup_neonq_u64 (z4),
+ z4_res = svdup_neonq (z4))
+
+/*
+** dup_neonq_u64_z5:
+** dup z5.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_u64_z5, uint64x2_t, svuint64_t,
+ z5_res = svdup_neonq_u64 (z4),
+ z5_res = svdup_neonq (z4))
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** dup_neonq_u8_z0:
+** dup z0.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_u8_z0, uint8x16_t, svuint8_t,
+ z0 = svdup_neonq_u8 (z4),
+ z0 = svdup_neonq (z4))
+
+/*
+** dup_neonq_u8_z4:
+** dup z4.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_u8_z4, uint8x16_t, svuint8_t,
+ z4_res = svdup_neonq_u8 (z4),
+ z4_res = svdup_neonq (z4))
+
+/*
+** dup_neonq_u8_z5:
+** dup z5.q, z4.q\[0\]
+** ret
+*/
+TEST_DUP_NEONQ (dup_neonq_u8_z5, uint8x16_t, svuint8_t,
+ z5_res = svdup_neonq_u8 (z4),
+ z5_res = svdup_neonq (z4))
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** get_neonq_bf16_z0:
+** mov v0.16b, v4.16b
+** ret
+*/
+TEST_GET (get_neonq_bf16_z0, svbfloat16_t, bfloat16x8_t,
+ z0 = svget_neonq_bf16 (z4),
+ z0 = svget_neonq (z4))
+
+/*
+** get_neonq_bf16_z4:
+** ret
+*/
+TEST_GET (get_neonq_bf16_z4, svbfloat16_t, bfloat16x8_t,
+ z4_res = svget_neonq_bf16 (z4),
+ z4_res = svget_neonq (z4))
+
+/*
+** get_neonq_bf16_z5:
+** (
+** mov z5.d, z4.d
+** |
+** mov v5.16b, v4.16b
+** )
+** ret
+*/
+TEST_GET (get_neonq_bf16_z5, svbfloat16_t, bfloat16x8_t,
+ z5_res = svget_neonq_bf16 (z4),
+ z5_res = svget_neonq (z4))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** get_neonq_f16_z0:
+** mov v0.16b, v4.16b
+** ret
+*/
+TEST_GET (get_neonq_f16_z0, svfloat16_t, float16x8_t,
+ z0 = svget_neonq_f16 (z4),
+ z0 = svget_neonq (z4))
+
+/*
+** get_neonq_f16_z4:
+** ret
+*/
+TEST_GET (get_neonq_f16_z4, svfloat16_t, float16x8_t,
+ z4_res = svget_neonq_f16 (z4),
+ z4_res = svget_neonq (z4))
+
+/*
+** get_neonq_f16_z5:
+** (
+** mov z5.d, z4.d
+** |
+** mov v5.16b, v4.16b
+** )
+** ret
+*/
+TEST_GET (get_neonq_f16_z5, svfloat16_t, float16x8_t,
+ z5_res = svget_neonq_f16 (z4),
+ z5_res = svget_neonq (z4))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** get_neonq_f32_z0:
+** mov v0.16b, v4.16b
+** ret
+*/
+TEST_GET (get_neonq_f32_z0, svfloat32_t, float32x4_t,
+ z0 = svget_neonq_f32 (z4),
+ z0 = svget_neonq (z4))
+
+/*
+** get_neonq_f32_z4:
+** ret
+*/
+TEST_GET (get_neonq_f32_z4, svfloat32_t, float32x4_t,
+ z4_res = svget_neonq_f32 (z4),
+ z4_res = svget_neonq (z4))
+
+/*
+** get_neonq_f32_z5:
+** (
+** mov z5.d, z4.d
+** |
+** mov v5.16b, v4.16b
+** )
+** ret
+*/
+TEST_GET (get_neonq_f32_z5, svfloat32_t, float32x4_t,
+ z5_res = svget_neonq_f32 (z4),
+ z5_res = svget_neonq (z4))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** get_neonq_f64_z0:
+** mov v0.16b, v4.16b
+** ret
+*/
+TEST_GET (get_neonq_f64_z0, svfloat64_t, float64x2_t,
+ z0 = svget_neonq_f64 (z4),
+ z0 = svget_neonq (z4))
+
+/*
+** get_neonq_f64_z4:
+** ret
+*/
+TEST_GET (get_neonq_f64_z4, svfloat64_t, float64x2_t,
+ z4_res = svget_neonq_f64 (z4),
+ z4_res = svget_neonq (z4))
+
+/*
+** get_neonq_f64_z5:
+** (
+** mov z5.d, z4.d
+** |
+** mov v5.16b, v4.16b
+** )
+** ret
+*/
+TEST_GET (get_neonq_f64_z5, svfloat64_t, float64x2_t,
+ z5_res = svget_neonq_f64 (z4),
+ z5_res = svget_neonq (z4))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** get_neonq_s16_z0:
+** mov v0.16b, v4.16b
+** ret
+*/
+TEST_GET (get_neonq_s16_z0, svint16_t, int16x8_t,
+ z0 = svget_neonq_s16 (z4),
+ z0 = svget_neonq (z4))
+
+/*
+** get_neonq_s16_z4:
+** ret
+*/
+TEST_GET (get_neonq_s16_z4, svint16_t, int16x8_t,
+ z4_res = svget_neonq_s16 (z4),
+ z4_res = svget_neonq (z4))
+
+/*
+** get_neonq_s16_z5:
+** (
+** mov z5.d, z4.d
+** |
+** mov v5.16b, v4.16b
+** )
+** ret
+*/
+TEST_GET (get_neonq_s16_z5, svint16_t, int16x8_t,
+ z5_res = svget_neonq_s16 (z4),
+ z5_res = svget_neonq (z4))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** get_neonq_s32_z0:
+** mov v0.16b, v4.16b
+** ret
+*/
+TEST_GET (get_neonq_s32_z0, svint32_t, int32x4_t,
+ z0 = svget_neonq_s32 (z4),
+ z0 = svget_neonq (z4))
+
+/*
+** get_neonq_s32_z4:
+** ret
+*/
+TEST_GET (get_neonq_s32_z4, svint32_t, int32x4_t,
+ z4_res = svget_neonq_s32 (z4),
+ z4_res = svget_neonq (z4))
+
+/*
+** get_neonq_s32_z5:
+** (
+** mov z5.d, z4.d
+** |
+** mov v5.16b, v4.16b
+** )
+** ret
+*/
+TEST_GET (get_neonq_s32_z5, svint32_t, int32x4_t,
+ z5_res = svget_neonq_s32 (z4),
+ z5_res = svget_neonq (z4))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** get_neonq_s64_z0:
+** mov v0.16b, v4.16b
+** ret
+*/
+TEST_GET (get_neonq_s64_z0, svint64_t, int64x2_t,
+ z0 = svget_neonq_s64 (z4),
+ z0 = svget_neonq (z4))
+
+/*
+** get_neonq_s64_z4:
+** ret
+*/
+TEST_GET (get_neonq_s64_z4, svint64_t, int64x2_t,
+ z4_res = svget_neonq_s64 (z4),
+ z4_res = svget_neonq (z4))
+
+/*
+** get_neonq_s64_z5:
+** (
+** mov z5.d, z4.d
+** |
+** mov v5.16b, v4.16b
+** )
+** ret
+*/
+TEST_GET (get_neonq_s64_z5, svint64_t, int64x2_t,
+ z5_res = svget_neonq_s64 (z4),
+ z5_res = svget_neonq (z4))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** get_neonq_s8_z0:
+** mov v0.16b, v4.16b
+** ret
+*/
+TEST_GET (get_neonq_s8_z0, svint8_t, int8x16_t,
+ z0 = svget_neonq_s8 (z4),
+ z0 = svget_neonq (z4))
+
+/*
+** get_neonq_s8_z4:
+** ret
+*/
+TEST_GET (get_neonq_s8_z4, svint8_t, int8x16_t,
+ z4_res = svget_neonq_s8 (z4),
+ z4_res = svget_neonq (z4))
+
+/*
+** get_neonq_s8_z5:
+** (
+** mov z5.d, z4.d
+** |
+** mov v5.16b, v4.16b
+** )
+** ret
+*/
+TEST_GET (get_neonq_s8_z5, svint8_t, int8x16_t,
+ z5_res = svget_neonq_s8 (z4),
+ z5_res = svget_neonq (z4))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** get_neonq_u16_z0:
+** mov v0.16b, v4.16b
+** ret
+*/
+TEST_GET (get_neonq_u16_z0, svuint16_t, uint16x8_t,
+ z0 = svget_neonq_u16 (z4),
+ z0 = svget_neonq (z4))
+
+/*
+** get_neonq_u16_z4:
+** ret
+*/
+TEST_GET (get_neonq_u16_z4, svuint16_t, uint16x8_t,
+ z4_res = svget_neonq_u16 (z4),
+ z4_res = svget_neonq (z4))
+
+/*
+** get_neonq_u16_z5:
+** (
+** mov z5.d, z4.d
+** |
+** mov v5.16b, v4.16b
+** )
+** ret
+*/
+TEST_GET (get_neonq_u16_z5, svuint16_t, uint16x8_t,
+ z5_res = svget_neonq_u16 (z4),
+ z5_res = svget_neonq (z4))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** get_neonq_u32_z0:
+** mov v0.16b, v4.16b
+** ret
+*/
+TEST_GET (get_neonq_u32_z0, svuint32_t, uint32x4_t,
+ z0 = svget_neonq_u32 (z4),
+ z0 = svget_neonq (z4))
+
+/*
+** get_neonq_u32_z4:
+** ret
+*/
+TEST_GET (get_neonq_u32_z4, svuint32_t, uint32x4_t,
+ z4_res = svget_neonq_u32 (z4),
+ z4_res = svget_neonq (z4))
+
+/*
+** get_neonq_u32_z5:
+** (
+** mov z5.d, z4.d
+** |
+** mov v5.16b, v4.16b
+** )
+** ret
+*/
+TEST_GET (get_neonq_u32_z5, svuint32_t, uint32x4_t,
+ z5_res = svget_neonq_u32 (z4),
+ z5_res = svget_neonq (z4))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** get_neonq_u64_z0:
+** mov v0.16b, v4.16b
+** ret
+*/
+TEST_GET (get_neonq_u64_z0, svuint64_t, uint64x2_t,
+ z0 = svget_neonq_u64 (z4),
+ z0 = svget_neonq (z4))
+
+/*
+** get_neonq_u64_z4:
+** ret
+*/
+TEST_GET (get_neonq_u64_z4, svuint64_t, uint64x2_t,
+ z4_res = svget_neonq_u64 (z4),
+ z4_res = svget_neonq (z4))
+
+/*
+** get_neonq_u64_z5:
+** (
+** mov z5.d, z4.d
+** |
+** mov v5.16b, v4.16b
+** )
+** ret
+*/
+TEST_GET (get_neonq_u64_z5, svuint64_t, uint64x2_t,
+ z5_res = svget_neonq_u64 (z4),
+ z5_res = svget_neonq (z4))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,33 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** get_neonq_u8_z0:
+** mov v0.16b, v4.16b
+** ret
+*/
+TEST_GET (get_neonq_u8_z0, svuint8_t, uint8x16_t,
+ z0 = svget_neonq_u8 (z4),
+ z0 = svget_neonq (z4))
+
+/*
+** get_neonq_u8_z4:
+** ret
+*/
+TEST_GET (get_neonq_u8_z4, svuint8_t, uint8x16_t,
+ z4_res = svget_neonq_u8 (z4),
+ z4_res = svget_neonq (z4))
+
+/*
+** get_neonq_u8_z5:
+** (
+** mov z5.d, z4.d
+** |
+** mov v5.16b, v4.16b
+** )
+** ret
+*/
+TEST_GET (get_neonq_u8_z5, svuint8_t, uint8x16_t,
+ z5_res = svget_neonq_u8 (z4),
+ z5_res = svget_neonq (z4))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,23 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** set_neonq_bf16_z24:
+** ptrue (p[0-9]+).h, vl8
+** sel z24.h, \1, z0.h, z4.h
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_bf16_z24, svbfloat16_t, bfloat16x8_t,
+ z24 = svset_neonq_bf16 (z4, z0),
+ z24 = svset_neonq (z4, z0))
+
+/*
+** set_neonq_bf16_z4:
+** ptrue (p[0-9]+).h, vl8
+** sel z4.h, \1, z0.h, z4.h
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_bf16_z4, svbfloat16_t, bfloat16x8_t,
+ z4_res = svset_neonq_bf16 (z4, z0),
+ z4_res = svset_neonq (z4, z0))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,23 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** set_neonq_f16_z24:
+** ptrue (p[0-9]+).h, vl8
+** sel z24.h, \1, z0.h, z4.h
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_f16_z24, svfloat16_t, float16x8_t,
+ z24 = svset_neonq_f16 (z4, z0),
+ z24 = svset_neonq (z4, z0))
+
+/*
+** set_neonq_f16_z4:
+** ptrue (p[0-9]+).h, vl8
+** sel z4.h, \1, z0.h, z4.h
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_f16_z4, svfloat16_t, float16x8_t,
+ z4_res = svset_neonq_f16 (z4, z0),
+ z4_res = svset_neonq (z4, z0))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,23 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** set_neonq_f32_z24:
+** ptrue (p[0-9]+).s, vl4
+** sel z24.s, \1, z0.s, z4.s
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_f32_z24, svfloat32_t, float32x4_t,
+ z24 = svset_neonq_f32 (z4, z0),
+ z24 = svset_neonq (z4, z0))
+
+/*
+** set_neonq_f32_z4:
+** ptrue (p[0-9]+).s, vl4
+** sel z4.s, \1, z0.s, z4.s
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_f32_z4, svfloat32_t, float32x4_t,
+ z4_res = svset_neonq_f32 (z4, z0),
+ z4_res = svset_neonq (z4, z0))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,23 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** set_neonq_f64_z24:
+** ptrue (p[0-9]+).d, vl2
+** sel z24.d, \1, z0.d, z4.d
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_f64_z24, svfloat64_t, float64x2_t,
+ z24 = svset_neonq_f64 (z4, z0),
+ z24 = svset_neonq (z4, z0))
+
+/*
+** set_neonq_f64_z4:
+** ptrue (p[0-9]+).d, vl2
+** sel z4.d, \1, z0.d, z4.d
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_f64_z4, svfloat64_t, float64x2_t,
+ z4_res = svset_neonq_f64 (z4, z0),
+ z4_res = svset_neonq (z4, z0))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,23 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** set_neonq_s16_z24:
+** ptrue (p[0-9]+).h, vl8
+** sel z24.h, \1, z0.h, z4.h
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_s16_z24, svint16_t, int16x8_t,
+ z24 = svset_neonq_s16 (z4, z0),
+ z24 = svset_neonq (z4, z0))
+
+/*
+** set_neonq_s16_z4:
+** ptrue (p[0-9]+).h, vl8
+** sel z4.h, \1, z0.h, z4.h
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_s16_z4, svint16_t, int16x8_t,
+ z4_res = svset_neonq_s16 (z4, z0),
+ z4_res = svset_neonq (z4, z0))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,23 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** set_neonq_s32_z24:
+** ptrue (p[0-9]+).s, vl4
+** sel z24.s, \1, z0.s, z4.s
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_s32_z24, svint32_t, int32x4_t,
+ z24 = svset_neonq_s32 (z4, z0),
+ z24 = svset_neonq (z4, z0))
+
+/*
+** set_neonq_s32_z4:
+** ptrue (p[0-9]+).s, vl4
+** sel z4.s, \1, z0.s, z4.s
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_s32_z4, svint32_t, int32x4_t,
+ z4_res = svset_neonq_s32 (z4, z0),
+ z4_res = svset_neonq (z4, z0))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,23 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** set_neonq_s64_z24:
+** ptrue (p[0-9]+).d, vl2
+** sel z24.d, \1, z0.d, z4.d
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_s64_z24, svint64_t, int64x2_t,
+ z24 = svset_neonq_s64 (z4, z0),
+ z24 = svset_neonq (z4, z0))
+
+/*
+** set_neonq_s64_z4:
+** ptrue (p[0-9]+).d, vl2
+** sel z4.d, \1, z0.d, z4.d
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_s64_z4, svint64_t, int64x2_t,
+ z4_res = svset_neonq_s64 (z4, z0),
+ z4_res = svset_neonq (z4, z0))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,23 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** set_neonq_s8_z24:
+** ptrue (p[0-9]+).b, vl16
+** sel z24.b, \1, z0.b, z4.b
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_s8_z24, svint8_t, int8x16_t,
+ z24 = svset_neonq_s8 (z4, z0),
+ z24 = svset_neonq (z4, z0))
+
+/*
+** set_neonq_s8_z4:
+** ptrue (p[0-9]+).b, vl16
+** sel z4.b, \1, z0.b, z4.b
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_s8_z4, svint8_t, int8x16_t,
+ z4_res = svset_neonq_s8 (z4, z0),
+ z4_res = svset_neonq (z4, z0))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,23 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** set_neonq_u16_z24:
+** ptrue (p[0-9]+).h, vl8
+** sel z24.h, \1, z0.h, z4.h
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_u16_z24, svuint16_t, uint16x8_t,
+ z24 = svset_neonq_u16 (z4, z0),
+ z24 = svset_neonq (z4, z0))
+
+/*
+** set_neonq_u16_z4:
+** ptrue (p[0-9]+).h, vl8
+** sel z4.h, \1, z0.h, z4.h
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_u16_z4, svuint16_t, uint16x8_t,
+ z4_res = svset_neonq_u16 (z4, z0),
+ z4_res = svset_neonq (z4, z0))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,23 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** set_neonq_u32_z24:
+** ptrue (p[0-9]+).s, vl4
+** sel z24.s, \1, z0.s, z4.s
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_u32_z24, svuint32_t, uint32x4_t,
+ z24 = svset_neonq_u32 (z4, z0),
+ z24 = svset_neonq (z4, z0))
+
+/*
+** set_neonq_u32_z4:
+** ptrue (p[0-9]+).s, vl4
+** sel z4.s, \1, z0.s, z4.s
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_u32_z4, svuint32_t, uint32x4_t,
+ z4_res = svset_neonq_u32 (z4, z0),
+ z4_res = svset_neonq (z4, z0))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,23 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** set_neonq_u64_z24:
+** ptrue (p[0-9]+).d, vl2
+** sel z24.d, \1, z0.d, z4.d
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_u64_z24, svuint64_t, uint64x2_t,
+ z24 = svset_neonq_u64 (z4, z0),
+ z24 = svset_neonq (z4, z0))
+
+/*
+** set_neonq_u64_z4:
+** ptrue (p[0-9]+).d, vl2
+** sel z4.d, \1, z0.d, z4.d
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_u64_z4, svuint64_t, uint64x2_t,
+ z4_res = svset_neonq_u64 (z4, z0),
+ z4_res = svset_neonq (z4, z0))
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,23 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** set_neonq_u8_z24:
+** ptrue (p[0-9]+).b, vl16
+** sel z24.b, \1, z0.b, z4.b
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_u8_z24, svuint8_t, uint8x16_t,
+ z24 = svset_neonq_u8 (z4, z0),
+ z24 = svset_neonq (z4, z0))
+
+/*
+** set_neonq_u8_z4:
+** ptrue (p[0-9]+).b, vl16
+** sel z4.b, \1, z0.b, z4.b
+** ret
+*/
+TEST_SET_NEONQ (set_neonq_u8_z4, svuint8_t, uint8x16_t,
+ z4_res = svset_neonq_u8 (z4, z0),
+ z4_res = svset_neonq (z4, z0))
\ No newline at end of file
@@ -1,7 +1,7 @@
#ifndef TEST_SVE_ACLE_H
#define TEST_SVE_ACLE_H 1
-#include <arm_sve.h>
+#include <arm_neon_sve_bridge.h>
#if defined (TEST_OVERLOADS)
#define INVOKE(CODE1, CODE2) CODE2
@@ -615,6 +615,28 @@
__asm volatile ("" :: "Upa" (p4), "Upa" (p8)); \
}
+#define TEST_SET_NEONQ(NAME, TTYPE, ZTYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (ZTYPE z0, ZTYPE z1, ZTYPE z2, ZTYPE z3, \
+ TTYPE z4)) \
+ { \
+ register TTYPE z24 __asm ("z24"); \
+ register TTYPE z4_res __asm ("z4"); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "w" (z24), "w" (z4_res)); \
+ }
+
+#define TEST_DUP_NEONQ(NAME, TTYPE, ZTYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (ZTYPE unused0, ZTYPE unused1, \
+ ZTYPE unused2, ZTYPE unused3, TTYPE z4)) \
+ { \
+ register ZTYPE z0 __asm ("z0"); \
+ register ZTYPE z4_res __asm ("z4"); \
+ register ZTYPE z5_res __asm ("z5"); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "w" (z0), "w" (z4_res), \
+ "w" (z5_res)); \
+ }
+
#define TEST_TBL2(NAME, TTYPE, ZTYPE, UTYPE, CODE1, CODE2) \
PROTO (NAME, ZTYPE, (TTYPE z0, TTYPE z2, UTYPE z4)) \
{ \
new file mode 100644
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv8.2-a+sve -std=c99 -Wall -Wextra" } */
+
+#include <arm_neon_sve_bridge.h>
+
+float64x2_t
+f1 (int8x16_t s8, svint8_t sveS8, int64x2_t s64, int8x8x2_t s8x2)
+{
+ float64x2_t f64;
+
+ sveS8 = svdup_neonq (s8);
+ sveS8 = svdup_neonq (); /* { dg-error {too few arguments to function 'svdup_neonq'} } */
+ sveS8 = svdup_neonq (s8, 1); /* { dg-error {too many arguments to function 'svdup_neonq'} } */
+ sveS8 = svdup_neonq (sveS8); /* { dg-error {passing 'svint8_t' to argument 1 of 'svdup_neonq', which expects a 128 bit NEON vector type} } */
+ f64 = svdup_neonq (s8); /* { dg-error {incompatible types when assigning to type 'float64x2_t' from type 'svint8_t'} } */
+ sveS8 = svdup_neonq (s8x2); /* { dg-error {passing 'int8x8x2_t' to argument 1 of 'svdup_neonq', which expects a 128 bit NEON vector type} } */
+ sveS8 = svdup_neonq (s64); /* { dg-error {incompatible types when assigning to type 'svint8_t' from type 'svint64_t'} } */
+
+ return f64;
+}
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv8.2-a+sve -std=c99 -Wall -Wextra" } */
+
+#include <arm_neon_sve_bridge.h>
+
+float64x2_t
+f1 (int8x16_t s8, svint8_t sveS8, svint8x2_t sveS8x2, svint64_t sveS64)
+{
+ float64x2_t f64;
+
+ s8 = svget_neonq (sveS8);
+ s8 = svget_neonq (); /* { dg-error {too few arguments to function 'svget_neonq'} } */
+ s8 = svget_neonq (sveS8, 1); /* { dg-error {too many arguments to function 'svget_neonq'} } */
+ s8 = svget_neonq (s8); /* { dg-error {passing 'int8x16_t' to argument 1 of 'svget_neonq', which expects an SVE type} } */
+ f64 = svget_neonq (sveS8); /* { dg-error {incompatible types when assigning to type 'float64x2_t' from type '__Int8x16_t'} } */
+ s8 = svget_neonq (sveS8x2); /* { dg-error {passing 'svint8x2_t' to argument 1 of 'svget_neonq', which expects a single SVE vector rather than a tuple} } */
+ s8 = svget_neonq (sveS64); /* { dg-error {incompatible types when assigning to type 'int8x16_t' from type '__Int64x2_t} } */
+
+ return f64;
+}
\ No newline at end of file
new file mode 100644
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv8.2-a+sve -std=c99 -Wall -Wextra" } */
+
+#include <arm_neon_sve_bridge.h>
+
+float64x2_t
+f1 (int8x16_t s8, svint8_t sveS8, svint8x2_t sveS8x2, svint64_t sveS64,
+ int64x2_t s64, svbfloat16_t sveBF16, bfloat16x8_t bf16, int8x8_t s8_64bit,
+ svbool_t svbool)
+{
+ float64x2_t f64;
+
+ sveS8 = svset_neonq (sveS8, s8);
+ sveS64 = svset_neonq (sveS64, s64);
+ sveBF16 = svset_neonq (sveBF16, bf16);
+ sveS8 = svset_neonq (); /* { dg-error {too few arguments to function 'svset_neonq'} } */
+ sveS8 = svset_neonq (sveS8, s8, 1); /* { dg-error {too many arguments to function 'svset_neonq'} } */
+ sveS8 = svset_neonq (s8, s8); /* { dg-error {incompatible type for argument 1 of 'svset_neonq_s8'} } */
+ f64 = svset_neonq (sveS8, s8); /* { dg-error {incompatible types when assigning to type 'float64x2_t' from type 'svint8_t'} } */
+ sveS8 = svset_neonq (sveS8x2, s8); /* { dg-error {incompatible type for argument 1 of 'svset_neonq_s8'} } */
+ sveS8 = svset_neonq (sveS8, sveS8); /* { dg-error {passing 'svint8_t' to argument 2 of 'svset_neonq', which expects a 128 bit NEON vector type} } */
+ sveS8 = svset_neonq (sveS8, s8_64bit); /* { dg-error {passing 'int8x8_t' to argument 2 of 'svset_neonq', which expects a 128 bit NEON vector type} } */
+ sveS8 = svset_neonq (sveS64, s64); /* { dg-error {incompatible types when assigning to type 'svint8_t' from type 'svint64_t} } */
+ sveS8 = svset_neonq (svbool, svbool); /* { dg-error {passing 'svbool_t' to argument 2 of 'svset_neonq', which expects a 128 bit NEON vector type} } */
+
+ return f64;
+}
\ No newline at end of file