@@ -1056,6 +1056,139 @@ class svlast_impl : public quiet<function_base>
public:
CONSTEXPR svlast_impl (int unspec) : m_unspec (unspec) {}
+ bool is_lasta () const { return m_unspec == UNSPEC_LASTA; }
+ bool is_lastb () const { return m_unspec == UNSPEC_LASTB; }
+
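+ /* A helper comment sketching what the function below does: return true if
+ every STEP'th encoded element of the VECTOR_CST V equals its first
+ encoded element. */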
+ bool vect_all_same (tree v, int step) const
+ {
+ int i;
+ int nelts = vector_cst_encoded_nelts (v);
+ tree first_el = VECTOR_CST_ENCODED_ELT (v, 0);
+
+ for (i = 0; i < nelts; i += step)
+ if (!operand_equal_p (VECTOR_CST_ENCODED_ELT (v, i), first_el, 0))
+ return false;
+
+ return true;
+ }
+
+ /* Fold an svlast{a/b} call with a constant predicate to a BIT_FIELD_REF.
+ BIT_FIELD_REF lowers to an Advanced SIMD element extract, so we have to
+ ensure the index of the element being accessed is within the range of
+ an Advanced SIMD vector width. */
+ gimple *fold (gimple_folder & f) const override
+ {
+ tree pred = gimple_call_arg (f.call, 0);
+ tree val = gimple_call_arg (f.call, 1);
+
+ if (TREE_CODE (pred) == VECTOR_CST)
+ {
+ HOST_WIDE_INT pos;
+ int i = 0;
+ int step = f.type_suffix (0).element_bytes;
+ int step_1 = gcd (step, VECTOR_CST_NPATTERNS (pred));
+ int npats = VECTOR_CST_NPATTERNS (pred);
+ unsigned HOST_WIDE_INT enelts = vector_cst_encoded_nelts (pred);
+ tree b = NULL_TREE;
+ unsigned HOST_WIDE_INT nelts;
+
+ /* We can optimize two cases, common to both variable- and fixed-length
+ vectors, without a linear search of the predicate vector:
+ 1. LASTA with an all-true predicate: return element 0.
+ 2. LASTA with an all-false predicate: return element 0. */
+ if (is_lasta () && vect_all_same (pred, step_1))
+ {
+ b = build3 (BIT_FIELD_REF, TREE_TYPE (f.lhs), val,
+ bitsize_int (step * BITS_PER_UNIT), bitsize_int (0));
+ return gimple_build_assign (f.lhs, b);
+ }
+
+ /* Handle the all-false case for LASTB when the SVE vector length is
+ 128 bits: return the highest-numbered element. */
+ if (is_lastb () && known_eq (BYTES_PER_SVE_VECTOR, 16)
+ && vect_all_same (pred, step_1)
+ && integer_zerop (VECTOR_CST_ENCODED_ELT (pred, 0)))
+ {
+ b = build3 (BIT_FIELD_REF, TREE_TYPE (f.lhs), val,
+ bitsize_int (step * BITS_PER_UNIT),
+ bitsize_int ((16 - step) * BITS_PER_UNIT));
+
+ return gimple_build_assign (f.lhs, b);
+ }
+
+ /* Determine whether there are any repeating non-zero elements in
+ variable-length vectors. */
+ if (!VECTOR_CST_NELTS (pred).is_constant (&nelts))
+ {
+ /* If VECTOR_CST_NELTS_PER_PATTERN (pred) == 2 and every multiple of
+ 'step_1' in
+ [VECTOR_CST_NPATTERNS .. VECTOR_CST_ENCODED_NELTS - 1]
+ is zero, then we can treat the vector as VECTOR_CST_NPATTERNS
+ elements followed by all inactive elements. */
+ if (VECTOR_CST_NELTS_PER_PATTERN (pred) == 2)
+ {
+ /* Restrict the scope of the later linear search to NPATS when the
+ vector is variable-length. */
+ nelts = npats;
+ for (i = npats; i < enelts; i += step_1)
+ {
+ /* If there are active elements in the repeated pattern of a
+ variable-length vector, then return NULL as there is no
+ way to be sure statically if this falls within the
+ Advanced SIMD range. */
+ if (!integer_zerop (VECTOR_CST_ENCODED_ELT (pred, i)))
+ return NULL;
+ }
+ }
+ else
+ /* If we're here, NELTS_PER_PATTERN != 2, which means there is a
+ repeating non-zero element. */
+ return NULL;
+ }
+
+ /* If we're here, it means either:
+ 1. The vector is variable-length and there's no active element in the
+ repeated part of the pattern, or
+ 2. The vector is fixed-length.
+
+ Fall through to finding the last active element linearly for
+ all cases where the last active element is known to be
+ within a statically-determinable range. */
+ i = MAX ((int) nelts - step, 0);
+ for (; i >= 0; i -= step)
+ if (!integer_zerop (VECTOR_CST_ELT (pred, i)))
+ break;
+
+ if (is_lastb ())
+ {
+ /* For LASTB, the element is the last active element. */
+ pos = i;
+ }
+ else
+ {
+ /* For LASTA, the element is the one after the last active element. */
+ pos = i + step;
+
+ /* If the last active element is the last element, wrap around
+ and return the first Advanced SIMD element. */
+ if (known_ge (pos, BYTES_PER_SVE_VECTOR))
+ pos = 0;
+ }
+
+ /* Out of Advanced SIMD range. */
+ if (pos < 0 || pos > 15)
+ return NULL;
+
+ b = build3 (BIT_FIELD_REF, TREE_TYPE (f.lhs), val,
+ bitsize_int (step * BITS_PER_UNIT),
+ bitsize_int (pos * BITS_PER_UNIT));
+
+ return gimple_build_assign (f.lhs, b);
+ }
+ return NULL;
+ }
+
rtx
expand (function_expander &e) const override
{
new file mode 100644
@@ -0,0 +1,63 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -msve-vector-bits=256" } */
+
+#include <stdint.h>
+#include "arm_sve.h"
+
+#define NAME(name, size, pat, sign, ab) \
+ name ## _ ## size ## _ ## pat ## _ ## sign ## _ ## ab
+
+#define NAMEF(name, size, pat, sign, ab) \
+ name ## _ ## size ## _ ## pat ## _ ## sign ## _ ## ab ## _false
+
+#define SVTYPE(size, sign) \
+ sv ## sign ## int ## size ## _t
+
+#define STYPE(size, sign) sign ## int ## size ##_t
+
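+/* For each element size, ptrue pattern, signedness and svlast{a,b} variant,
+ define one function that uses the constant pattern predicate and a
+ companion "_false" function that uses an all-false predicate. */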
+#define SVELAST_DEF(size, pat, sign, ab, su) \
+ STYPE (size, sign) __attribute__((noinline)) \
+ NAME (foo, size, pat, sign, ab) (SVTYPE (size, sign) x) \
+ { \
+ return svlast ## ab (svptrue_pat_b ## size (pat), x); \
+ } \
+ STYPE (size, sign) __attribute__((noinline)) \
+ NAMEF (foo, size, pat, sign, ab) (SVTYPE (size, sign) x) \
+ { \
+ return svlast ## ab (svpfalse (), x); \
+ }
+
+#define ALL_PATS(SIZE, SIGN, AB, SU) \
+ SVELAST_DEF (SIZE, SV_VL1, SIGN, AB, SU) \
+ SVELAST_DEF (SIZE, SV_VL2, SIGN, AB, SU) \
+ SVELAST_DEF (SIZE, SV_VL3, SIGN, AB, SU) \
+ SVELAST_DEF (SIZE, SV_VL4, SIGN, AB, SU) \
+ SVELAST_DEF (SIZE, SV_VL5, SIGN, AB, SU) \
+ SVELAST_DEF (SIZE, SV_VL6, SIGN, AB, SU) \
+ SVELAST_DEF (SIZE, SV_VL7, SIGN, AB, SU) \
+ SVELAST_DEF (SIZE, SV_VL8, SIGN, AB, SU) \
+ SVELAST_DEF (SIZE, SV_VL16, SIGN, AB, SU)
+
+#define ALL_SIGN(SIZE, AB) \
+ ALL_PATS (SIZE, , AB, s) \
+ ALL_PATS (SIZE, u, AB, u)
+
+#define ALL_SIZE(AB) \
+ ALL_SIGN (8, AB) \
+ ALL_SIGN (16, AB) \
+ ALL_SIGN (32, AB) \
+ ALL_SIGN (64, AB)
+
+#define ALL_POS() \
+ ALL_SIZE (a) \
+ ALL_SIZE (b)
+
+ALL_POS ()
+
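+/* With constant predicates, the svlast{a,b} calls should be folded to
+ Advanced SIMD element extracts (umov/fmov) rather than SVE lasta/lastb. */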
+/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.b} 52 } } */
+/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.h} 50 } } */
+/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.s} 12 } } */
+/* { dg-final { scan-assembler-times {\tfmov\tw0, s0} 24 } } */
+/* { dg-final { scan-assembler-times {\tumov\tx[0-9]+, v[0-9]+\.d} 4 } } */
+/* { dg-final { scan-assembler-times {\tfmov\tx0, d0} 32 } } */
new file mode 100644
@@ -0,0 +1,313 @@
+/* { dg-do run { target aarch64_sve128_hw } } */
+/* { dg-options "-O3 -msve-vector-bits=128 -std=gnu99" } */
+
+#include "svlast.c"
+
+int
+main (void)
+{
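+ /* Expected results of the svlast{a,b} calls in svlast.c when applied to
+ svindex (0, 1) with a 128-bit SVE vector; the "_false" variants use an
+ all-false predicate. */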
+ int8_t res_8_SV_VL1__a = 1;
+ int8_t res_8_SV_VL2__a = 2;
+ int8_t res_8_SV_VL3__a = 3;
+ int8_t res_8_SV_VL4__a = 4;
+ int8_t res_8_SV_VL5__a = 5;
+ int8_t res_8_SV_VL6__a = 6;
+ int8_t res_8_SV_VL7__a = 7;
+ int8_t res_8_SV_VL8__a = 8;
+ int8_t res_8_SV_VL16__a = 0;
+ uint8_t res_8_SV_VL1_u_a = 1;
+ uint8_t res_8_SV_VL2_u_a = 2;
+ uint8_t res_8_SV_VL3_u_a = 3;
+ uint8_t res_8_SV_VL4_u_a = 4;
+ uint8_t res_8_SV_VL5_u_a = 5;
+ uint8_t res_8_SV_VL6_u_a = 6;
+ uint8_t res_8_SV_VL7_u_a = 7;
+ uint8_t res_8_SV_VL8_u_a = 8;
+ uint8_t res_8_SV_VL16_u_a = 0;
+ int16_t res_16_SV_VL1__a = 1;
+ int16_t res_16_SV_VL2__a = 2;
+ int16_t res_16_SV_VL3__a = 3;
+ int16_t res_16_SV_VL4__a = 4;
+ int16_t res_16_SV_VL5__a = 5;
+ int16_t res_16_SV_VL6__a = 6;
+ int16_t res_16_SV_VL7__a = 7;
+ int16_t res_16_SV_VL8__a = 0;
+ int16_t res_16_SV_VL16__a = 0;
+ uint16_t res_16_SV_VL1_u_a = 1;
+ uint16_t res_16_SV_VL2_u_a = 2;
+ uint16_t res_16_SV_VL3_u_a = 3;
+ uint16_t res_16_SV_VL4_u_a = 4;
+ uint16_t res_16_SV_VL5_u_a = 5;
+ uint16_t res_16_SV_VL6_u_a = 6;
+ uint16_t res_16_SV_VL7_u_a = 7;
+ uint16_t res_16_SV_VL8_u_a = 0;
+ uint16_t res_16_SV_VL16_u_a = 0;
+ int32_t res_32_SV_VL1__a = 1;
+ int32_t res_32_SV_VL2__a = 2;
+ int32_t res_32_SV_VL3__a = 3;
+ int32_t res_32_SV_VL4__a = 0;
+ int32_t res_32_SV_VL5__a = 0;
+ int32_t res_32_SV_VL6__a = 0;
+ int32_t res_32_SV_VL7__a = 0;
+ int32_t res_32_SV_VL8__a = 0;
+ int32_t res_32_SV_VL16__a = 0;
+ uint32_t res_32_SV_VL1_u_a = 1;
+ uint32_t res_32_SV_VL2_u_a = 2;
+ uint32_t res_32_SV_VL3_u_a = 3;
+ uint32_t res_32_SV_VL4_u_a = 0;
+ uint32_t res_32_SV_VL5_u_a = 0;
+ uint32_t res_32_SV_VL6_u_a = 0;
+ uint32_t res_32_SV_VL7_u_a = 0;
+ uint32_t res_32_SV_VL8_u_a = 0;
+ uint32_t res_32_SV_VL16_u_a = 0;
+ int64_t res_64_SV_VL1__a = 1;
+ int64_t res_64_SV_VL2__a = 0;
+ int64_t res_64_SV_VL3__a = 0;
+ int64_t res_64_SV_VL4__a = 0;
+ int64_t res_64_SV_VL5__a = 0;
+ int64_t res_64_SV_VL6__a = 0;
+ int64_t res_64_SV_VL7__a = 0;
+ int64_t res_64_SV_VL8__a = 0;
+ int64_t res_64_SV_VL16__a = 0;
+ uint64_t res_64_SV_VL1_u_a = 1;
+ uint64_t res_64_SV_VL2_u_a = 0;
+ uint64_t res_64_SV_VL3_u_a = 0;
+ uint64_t res_64_SV_VL4_u_a = 0;
+ uint64_t res_64_SV_VL5_u_a = 0;
+ uint64_t res_64_SV_VL6_u_a = 0;
+ uint64_t res_64_SV_VL7_u_a = 0;
+ uint64_t res_64_SV_VL8_u_a = 0;
+ uint64_t res_64_SV_VL16_u_a = 0;
+ int8_t res_8_SV_VL1__b = 0;
+ int8_t res_8_SV_VL2__b = 1;
+ int8_t res_8_SV_VL3__b = 2;
+ int8_t res_8_SV_VL4__b = 3;
+ int8_t res_8_SV_VL5__b = 4;
+ int8_t res_8_SV_VL6__b = 5;
+ int8_t res_8_SV_VL7__b = 6;
+ int8_t res_8_SV_VL8__b = 7;
+ int8_t res_8_SV_VL16__b = 15;
+ uint8_t res_8_SV_VL1_u_b = 0;
+ uint8_t res_8_SV_VL2_u_b = 1;
+ uint8_t res_8_SV_VL3_u_b = 2;
+ uint8_t res_8_SV_VL4_u_b = 3;
+ uint8_t res_8_SV_VL5_u_b = 4;
+ uint8_t res_8_SV_VL6_u_b = 5;
+ uint8_t res_8_SV_VL7_u_b = 6;
+ uint8_t res_8_SV_VL8_u_b = 7;
+ uint8_t res_8_SV_VL16_u_b = 15;
+ int16_t res_16_SV_VL1__b = 0;
+ int16_t res_16_SV_VL2__b = 1;
+ int16_t res_16_SV_VL3__b = 2;
+ int16_t res_16_SV_VL4__b = 3;
+ int16_t res_16_SV_VL5__b = 4;
+ int16_t res_16_SV_VL6__b = 5;
+ int16_t res_16_SV_VL7__b = 6;
+ int16_t res_16_SV_VL8__b = 7;
+ int16_t res_16_SV_VL16__b = 7;
+ uint16_t res_16_SV_VL1_u_b = 0;
+ uint16_t res_16_SV_VL2_u_b = 1;
+ uint16_t res_16_SV_VL3_u_b = 2;
+ uint16_t res_16_SV_VL4_u_b = 3;
+ uint16_t res_16_SV_VL5_u_b = 4;
+ uint16_t res_16_SV_VL6_u_b = 5;
+ uint16_t res_16_SV_VL7_u_b = 6;
+ uint16_t res_16_SV_VL8_u_b = 7;
+ uint16_t res_16_SV_VL16_u_b = 7;
+ int32_t res_32_SV_VL1__b = 0;
+ int32_t res_32_SV_VL2__b = 1;
+ int32_t res_32_SV_VL3__b = 2;
+ int32_t res_32_SV_VL4__b = 3;
+ int32_t res_32_SV_VL5__b = 3;
+ int32_t res_32_SV_VL6__b = 3;
+ int32_t res_32_SV_VL7__b = 3;
+ int32_t res_32_SV_VL8__b = 3;
+ int32_t res_32_SV_VL16__b = 3;
+ uint32_t res_32_SV_VL1_u_b = 0;
+ uint32_t res_32_SV_VL2_u_b = 1;
+ uint32_t res_32_SV_VL3_u_b = 2;
+ uint32_t res_32_SV_VL4_u_b = 3;
+ uint32_t res_32_SV_VL5_u_b = 3;
+ uint32_t res_32_SV_VL6_u_b = 3;
+ uint32_t res_32_SV_VL7_u_b = 3;
+ uint32_t res_32_SV_VL8_u_b = 3;
+ uint32_t res_32_SV_VL16_u_b = 3;
+ int64_t res_64_SV_VL1__b = 0;
+ int64_t res_64_SV_VL2__b = 1;
+ int64_t res_64_SV_VL3__b = 1;
+ int64_t res_64_SV_VL4__b = 1;
+ int64_t res_64_SV_VL5__b = 1;
+ int64_t res_64_SV_VL6__b = 1;
+ int64_t res_64_SV_VL7__b = 1;
+ int64_t res_64_SV_VL8__b = 1;
+ int64_t res_64_SV_VL16__b = 1;
+ uint64_t res_64_SV_VL1_u_b = 0;
+ uint64_t res_64_SV_VL2_u_b = 1;
+ uint64_t res_64_SV_VL3_u_b = 1;
+ uint64_t res_64_SV_VL4_u_b = 1;
+ uint64_t res_64_SV_VL5_u_b = 1;
+ uint64_t res_64_SV_VL6_u_b = 1;
+ uint64_t res_64_SV_VL7_u_b = 1;
+ uint64_t res_64_SV_VL8_u_b = 1;
+ uint64_t res_64_SV_VL16_u_b = 1;
+
+ int8_t res_8_SV_VL1__a_false = 0;
+ int8_t res_8_SV_VL2__a_false = 0;
+ int8_t res_8_SV_VL3__a_false = 0;
+ int8_t res_8_SV_VL4__a_false = 0;
+ int8_t res_8_SV_VL5__a_false = 0;
+ int8_t res_8_SV_VL6__a_false = 0;
+ int8_t res_8_SV_VL7__a_false = 0;
+ int8_t res_8_SV_VL8__a_false = 0;
+ int8_t res_8_SV_VL16__a_false = 0;
+ uint8_t res_8_SV_VL1_u_a_false = 0;
+ uint8_t res_8_SV_VL2_u_a_false = 0;
+ uint8_t res_8_SV_VL3_u_a_false = 0;
+ uint8_t res_8_SV_VL4_u_a_false = 0;
+ uint8_t res_8_SV_VL5_u_a_false = 0;
+ uint8_t res_8_SV_VL6_u_a_false = 0;
+ uint8_t res_8_SV_VL7_u_a_false = 0;
+ uint8_t res_8_SV_VL8_u_a_false = 0;
+ uint8_t res_8_SV_VL16_u_a_false = 0;
+ int16_t res_16_SV_VL1__a_false = 0;
+ int16_t res_16_SV_VL2__a_false = 0;
+ int16_t res_16_SV_VL3__a_false = 0;
+ int16_t res_16_SV_VL4__a_false = 0;
+ int16_t res_16_SV_VL5__a_false = 0;
+ int16_t res_16_SV_VL6__a_false = 0;
+ int16_t res_16_SV_VL7__a_false = 0;
+ int16_t res_16_SV_VL8__a_false = 0;
+ int16_t res_16_SV_VL16__a_false = 0;
+ uint16_t res_16_SV_VL1_u_a_false = 0;
+ uint16_t res_16_SV_VL2_u_a_false = 0;
+ uint16_t res_16_SV_VL3_u_a_false = 0;
+ uint16_t res_16_SV_VL4_u_a_false = 0;
+ uint16_t res_16_SV_VL5_u_a_false = 0;
+ uint16_t res_16_SV_VL6_u_a_false = 0;
+ uint16_t res_16_SV_VL7_u_a_false = 0;
+ uint16_t res_16_SV_VL8_u_a_false = 0;
+ uint16_t res_16_SV_VL16_u_a_false = 0;
+ int32_t res_32_SV_VL1__a_false = 0;
+ int32_t res_32_SV_VL2__a_false = 0;
+ int32_t res_32_SV_VL3__a_false = 0;
+ int32_t res_32_SV_VL4__a_false = 0;
+ int32_t res_32_SV_VL5__a_false = 0;
+ int32_t res_32_SV_VL6__a_false = 0;
+ int32_t res_32_SV_VL7__a_false = 0;
+ int32_t res_32_SV_VL8__a_false = 0;
+ int32_t res_32_SV_VL16__a_false = 0;
+ uint32_t res_32_SV_VL1_u_a_false = 0;
+ uint32_t res_32_SV_VL2_u_a_false = 0;
+ uint32_t res_32_SV_VL3_u_a_false = 0;
+ uint32_t res_32_SV_VL4_u_a_false = 0;
+ uint32_t res_32_SV_VL5_u_a_false = 0;
+ uint32_t res_32_SV_VL6_u_a_false = 0;
+ uint32_t res_32_SV_VL7_u_a_false = 0;
+ uint32_t res_32_SV_VL8_u_a_false = 0;
+ uint32_t res_32_SV_VL16_u_a_false = 0;
+ int64_t res_64_SV_VL1__a_false = 0;
+ int64_t res_64_SV_VL2__a_false = 0;
+ int64_t res_64_SV_VL3__a_false = 0;
+ int64_t res_64_SV_VL4__a_false = 0;
+ int64_t res_64_SV_VL5__a_false = 0;
+ int64_t res_64_SV_VL6__a_false = 0;
+ int64_t res_64_SV_VL7__a_false = 0;
+ int64_t res_64_SV_VL8__a_false = 0;
+ int64_t res_64_SV_VL16__a_false = 0;
+ uint64_t res_64_SV_VL1_u_a_false = 0;
+ uint64_t res_64_SV_VL2_u_a_false = 0;
+ uint64_t res_64_SV_VL3_u_a_false = 0;
+ uint64_t res_64_SV_VL4_u_a_false = 0;
+ uint64_t res_64_SV_VL5_u_a_false = 0;
+ uint64_t res_64_SV_VL6_u_a_false = 0;
+ uint64_t res_64_SV_VL7_u_a_false = 0;
+ uint64_t res_64_SV_VL8_u_a_false = 0;
+ uint64_t res_64_SV_VL16_u_a_false = 0;
+ int8_t res_8_SV_VL1__b_false = 15;
+ int8_t res_8_SV_VL2__b_false = 15;
+ int8_t res_8_SV_VL3__b_false = 15;
+ int8_t res_8_SV_VL4__b_false = 15;
+ int8_t res_8_SV_VL5__b_false = 15;
+ int8_t res_8_SV_VL6__b_false = 15;
+ int8_t res_8_SV_VL7__b_false = 15;
+ int8_t res_8_SV_VL8__b_false = 15;
+ int8_t res_8_SV_VL16__b_false = 15;
+ uint8_t res_8_SV_VL1_u_b_false = 15;
+ uint8_t res_8_SV_VL2_u_b_false = 15;
+ uint8_t res_8_SV_VL3_u_b_false = 15;
+ uint8_t res_8_SV_VL4_u_b_false = 15;
+ uint8_t res_8_SV_VL5_u_b_false = 15;
+ uint8_t res_8_SV_VL6_u_b_false = 15;
+ uint8_t res_8_SV_VL7_u_b_false = 15;
+ uint8_t res_8_SV_VL8_u_b_false = 15;
+ uint8_t res_8_SV_VL16_u_b_false = 15;
+ int16_t res_16_SV_VL1__b_false = 7;
+ int16_t res_16_SV_VL2__b_false = 7;
+ int16_t res_16_SV_VL3__b_false = 7;
+ int16_t res_16_SV_VL4__b_false = 7;
+ int16_t res_16_SV_VL5__b_false = 7;
+ int16_t res_16_SV_VL6__b_false = 7;
+ int16_t res_16_SV_VL7__b_false = 7;
+ int16_t res_16_SV_VL8__b_false = 7;
+ int16_t res_16_SV_VL16__b_false = 7;
+ uint16_t res_16_SV_VL1_u_b_false = 7;
+ uint16_t res_16_SV_VL2_u_b_false = 7;
+ uint16_t res_16_SV_VL3_u_b_false = 7;
+ uint16_t res_16_SV_VL4_u_b_false = 7;
+ uint16_t res_16_SV_VL5_u_b_false = 7;
+ uint16_t res_16_SV_VL6_u_b_false = 7;
+ uint16_t res_16_SV_VL7_u_b_false = 7;
+ uint16_t res_16_SV_VL8_u_b_false = 7;
+ uint16_t res_16_SV_VL16_u_b_false = 7;
+ int32_t res_32_SV_VL1__b_false = 3;
+ int32_t res_32_SV_VL2__b_false = 3;
+ int32_t res_32_SV_VL3__b_false = 3;
+ int32_t res_32_SV_VL4__b_false = 3;
+ int32_t res_32_SV_VL5__b_false = 3;
+ int32_t res_32_SV_VL6__b_false = 3;
+ int32_t res_32_SV_VL7__b_false = 3;
+ int32_t res_32_SV_VL8__b_false = 3;
+ int32_t res_32_SV_VL16__b_false = 3;
+ uint32_t res_32_SV_VL1_u_b_false = 3;
+ uint32_t res_32_SV_VL2_u_b_false = 3;
+ uint32_t res_32_SV_VL3_u_b_false = 3;
+ uint32_t res_32_SV_VL4_u_b_false = 3;
+ uint32_t res_32_SV_VL5_u_b_false = 3;
+ uint32_t res_32_SV_VL6_u_b_false = 3;
+ uint32_t res_32_SV_VL7_u_b_false = 3;
+ uint32_t res_32_SV_VL8_u_b_false = 3;
+ uint32_t res_32_SV_VL16_u_b_false = 3;
+ int64_t res_64_SV_VL1__b_false = 1;
+ int64_t res_64_SV_VL2__b_false = 1;
+ int64_t res_64_SV_VL3__b_false = 1;
+ int64_t res_64_SV_VL4__b_false = 1;
+ int64_t res_64_SV_VL5__b_false = 1;
+ int64_t res_64_SV_VL6__b_false = 1;
+ int64_t res_64_SV_VL7__b_false = 1;
+ int64_t res_64_SV_VL8__b_false = 1;
+ int64_t res_64_SV_VL16__b_false = 1;
+ uint64_t res_64_SV_VL1_u_b_false = 1;
+ uint64_t res_64_SV_VL2_u_b_false = 1;
+ uint64_t res_64_SV_VL3_u_b_false = 1;
+ uint64_t res_64_SV_VL4_u_b_false = 1;
+ uint64_t res_64_SV_VL5_u_b_false = 1;
+ uint64_t res_64_SV_VL6_u_b_false = 1;
+ uint64_t res_64_SV_VL7_u_b_false = 1;
+ uint64_t res_64_SV_VL8_u_b_false = 1;
+ uint64_t res_64_SV_VL16_u_b_false = 1;
+
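+ /* Redefine SVELAST_DEF so that ALL_POS () checks each function's result
+ against its expected value, aborting on mismatch. */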
+#undef SVELAST_DEF
+#define SVELAST_DEF(size, pat, sign, ab, su) \
+ if (NAME (foo, size, pat, sign, ab) \
+ (svindex_ ## su ## size (0, 1)) != \
+ NAME (res, size, pat, sign, ab)) \
+ __builtin_abort (); \
+ if (NAMEF (foo, size, pat, sign, ab) \
+ (svindex_ ## su ## size (0, 1)) != \
+ NAMEF (res, size, pat, sign, ab)) \
+ __builtin_abort ();
+
+ ALL_POS ()
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,314 @@
+/* { dg-do run { target aarch64_sve256_hw } } */
+/* { dg-options "-O3 -msve-vector-bits=256 -std=gnu99" } */
+
+#include "svlast.c"
+
+int
+main (void)
+{
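+ /* Expected results of the svlast{a,b} calls in svlast.c when applied to
+ svindex (0, 1) with a 256-bit SVE vector; the "_false" variants use an
+ all-false predicate. */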
+ int8_t res_8_SV_VL1__a = 1;
+ int8_t res_8_SV_VL2__a = 2;
+ int8_t res_8_SV_VL3__a = 3;
+ int8_t res_8_SV_VL4__a = 4;
+ int8_t res_8_SV_VL5__a = 5;
+ int8_t res_8_SV_VL6__a = 6;
+ int8_t res_8_SV_VL7__a = 7;
+ int8_t res_8_SV_VL8__a = 8;
+ int8_t res_8_SV_VL16__a = 16;
+ uint8_t res_8_SV_VL1_u_a = 1;
+ uint8_t res_8_SV_VL2_u_a = 2;
+ uint8_t res_8_SV_VL3_u_a = 3;
+ uint8_t res_8_SV_VL4_u_a = 4;
+ uint8_t res_8_SV_VL5_u_a = 5;
+ uint8_t res_8_SV_VL6_u_a = 6;
+ uint8_t res_8_SV_VL7_u_a = 7;
+ uint8_t res_8_SV_VL8_u_a = 8;
+ uint8_t res_8_SV_VL16_u_a = 16;
+ int16_t res_16_SV_VL1__a = 1;
+ int16_t res_16_SV_VL2__a = 2;
+ int16_t res_16_SV_VL3__a = 3;
+ int16_t res_16_SV_VL4__a = 4;
+ int16_t res_16_SV_VL5__a = 5;
+ int16_t res_16_SV_VL6__a = 6;
+ int16_t res_16_SV_VL7__a = 7;
+ int16_t res_16_SV_VL8__a = 8;
+ int16_t res_16_SV_VL16__a = 0;
+ uint16_t res_16_SV_VL1_u_a = 1;
+ uint16_t res_16_SV_VL2_u_a = 2;
+ uint16_t res_16_SV_VL3_u_a = 3;
+ uint16_t res_16_SV_VL4_u_a = 4;
+ uint16_t res_16_SV_VL5_u_a = 5;
+ uint16_t res_16_SV_VL6_u_a = 6;
+ uint16_t res_16_SV_VL7_u_a = 7;
+ uint16_t res_16_SV_VL8_u_a = 8;
+ uint16_t res_16_SV_VL16_u_a = 0;
+ int32_t res_32_SV_VL1__a = 1;
+ int32_t res_32_SV_VL2__a = 2;
+ int32_t res_32_SV_VL3__a = 3;
+ int32_t res_32_SV_VL4__a = 4;
+ int32_t res_32_SV_VL5__a = 5;
+ int32_t res_32_SV_VL6__a = 6;
+ int32_t res_32_SV_VL7__a = 7;
+ int32_t res_32_SV_VL8__a = 0;
+ int32_t res_32_SV_VL16__a = 0;
+ uint32_t res_32_SV_VL1_u_a = 1;
+ uint32_t res_32_SV_VL2_u_a = 2;
+ uint32_t res_32_SV_VL3_u_a = 3;
+ uint32_t res_32_SV_VL4_u_a = 4;
+ uint32_t res_32_SV_VL5_u_a = 5;
+ uint32_t res_32_SV_VL6_u_a = 6;
+ uint32_t res_32_SV_VL7_u_a = 7;
+ uint32_t res_32_SV_VL8_u_a = 0;
+ uint32_t res_32_SV_VL16_u_a = 0;
+ int64_t res_64_SV_VL1__a = 1;
+ int64_t res_64_SV_VL2__a = 2;
+ int64_t res_64_SV_VL3__a = 3;
+ int64_t res_64_SV_VL4__a = 0;
+ int64_t res_64_SV_VL5__a = 0;
+ int64_t res_64_SV_VL6__a = 0;
+ int64_t res_64_SV_VL7__a = 0;
+ int64_t res_64_SV_VL8__a = 0;
+ int64_t res_64_SV_VL16__a = 0;
+ uint64_t res_64_SV_VL1_u_a = 1;
+ uint64_t res_64_SV_VL2_u_a = 2;
+ uint64_t res_64_SV_VL3_u_a = 3;
+ uint64_t res_64_SV_VL4_u_a = 0;
+ uint64_t res_64_SV_VL5_u_a = 0;
+ uint64_t res_64_SV_VL6_u_a = 0;
+ uint64_t res_64_SV_VL7_u_a = 0;
+ uint64_t res_64_SV_VL8_u_a = 0;
+ uint64_t res_64_SV_VL16_u_a = 0;
+ int8_t res_8_SV_VL1__b = 0;
+ int8_t res_8_SV_VL2__b = 1;
+ int8_t res_8_SV_VL3__b = 2;
+ int8_t res_8_SV_VL4__b = 3;
+ int8_t res_8_SV_VL5__b = 4;
+ int8_t res_8_SV_VL6__b = 5;
+ int8_t res_8_SV_VL7__b = 6;
+ int8_t res_8_SV_VL8__b = 7;
+ int8_t res_8_SV_VL16__b = 15;
+ uint8_t res_8_SV_VL1_u_b = 0;
+ uint8_t res_8_SV_VL2_u_b = 1;
+ uint8_t res_8_SV_VL3_u_b = 2;
+ uint8_t res_8_SV_VL4_u_b = 3;
+ uint8_t res_8_SV_VL5_u_b = 4;
+ uint8_t res_8_SV_VL6_u_b = 5;
+ uint8_t res_8_SV_VL7_u_b = 6;
+ uint8_t res_8_SV_VL8_u_b = 7;
+ uint8_t res_8_SV_VL16_u_b = 15;
+ int16_t res_16_SV_VL1__b = 0;
+ int16_t res_16_SV_VL2__b = 1;
+ int16_t res_16_SV_VL3__b = 2;
+ int16_t res_16_SV_VL4__b = 3;
+ int16_t res_16_SV_VL5__b = 4;
+ int16_t res_16_SV_VL6__b = 5;
+ int16_t res_16_SV_VL7__b = 6;
+ int16_t res_16_SV_VL8__b = 7;
+ int16_t res_16_SV_VL16__b = 15;
+ uint16_t res_16_SV_VL1_u_b = 0;
+ uint16_t res_16_SV_VL2_u_b = 1;
+ uint16_t res_16_SV_VL3_u_b = 2;
+ uint16_t res_16_SV_VL4_u_b = 3;
+ uint16_t res_16_SV_VL5_u_b = 4;
+ uint16_t res_16_SV_VL6_u_b = 5;
+ uint16_t res_16_SV_VL7_u_b = 6;
+ uint16_t res_16_SV_VL8_u_b = 7;
+ uint16_t res_16_SV_VL16_u_b = 15;
+ int32_t res_32_SV_VL1__b = 0;
+ int32_t res_32_SV_VL2__b = 1;
+ int32_t res_32_SV_VL3__b = 2;
+ int32_t res_32_SV_VL4__b = 3;
+ int32_t res_32_SV_VL5__b = 4;
+ int32_t res_32_SV_VL6__b = 5;
+ int32_t res_32_SV_VL7__b = 6;
+ int32_t res_32_SV_VL8__b = 7;
+ int32_t res_32_SV_VL16__b = 7;
+ uint32_t res_32_SV_VL1_u_b = 0;
+ uint32_t res_32_SV_VL2_u_b = 1;
+ uint32_t res_32_SV_VL3_u_b = 2;
+ uint32_t res_32_SV_VL4_u_b = 3;
+ uint32_t res_32_SV_VL5_u_b = 4;
+ uint32_t res_32_SV_VL6_u_b = 5;
+ uint32_t res_32_SV_VL7_u_b = 6;
+ uint32_t res_32_SV_VL8_u_b = 7;
+ uint32_t res_32_SV_VL16_u_b = 7;
+ int64_t res_64_SV_VL1__b = 0;
+ int64_t res_64_SV_VL2__b = 1;
+ int64_t res_64_SV_VL3__b = 2;
+ int64_t res_64_SV_VL4__b = 3;
+ int64_t res_64_SV_VL5__b = 3;
+ int64_t res_64_SV_VL6__b = 3;
+ int64_t res_64_SV_VL7__b = 3;
+ int64_t res_64_SV_VL8__b = 3;
+ int64_t res_64_SV_VL16__b = 3;
+ uint64_t res_64_SV_VL1_u_b = 0;
+ uint64_t res_64_SV_VL2_u_b = 1;
+ uint64_t res_64_SV_VL3_u_b = 2;
+ uint64_t res_64_SV_VL4_u_b = 3;
+ uint64_t res_64_SV_VL5_u_b = 3;
+ uint64_t res_64_SV_VL6_u_b = 3;
+ uint64_t res_64_SV_VL7_u_b = 3;
+ uint64_t res_64_SV_VL8_u_b = 3;
+ uint64_t res_64_SV_VL16_u_b = 3;
+
+ int8_t res_8_SV_VL1__a_false = 0;
+ int8_t res_8_SV_VL2__a_false = 0;
+ int8_t res_8_SV_VL3__a_false = 0;
+ int8_t res_8_SV_VL4__a_false = 0;
+ int8_t res_8_SV_VL5__a_false = 0;
+ int8_t res_8_SV_VL6__a_false = 0;
+ int8_t res_8_SV_VL7__a_false = 0;
+ int8_t res_8_SV_VL8__a_false = 0;
+ int8_t res_8_SV_VL16__a_false = 0;
+ uint8_t res_8_SV_VL1_u_a_false = 0;
+ uint8_t res_8_SV_VL2_u_a_false = 0;
+ uint8_t res_8_SV_VL3_u_a_false = 0;
+ uint8_t res_8_SV_VL4_u_a_false = 0;
+ uint8_t res_8_SV_VL5_u_a_false = 0;
+ uint8_t res_8_SV_VL6_u_a_false = 0;
+ uint8_t res_8_SV_VL7_u_a_false = 0;
+ uint8_t res_8_SV_VL8_u_a_false = 0;
+ uint8_t res_8_SV_VL16_u_a_false = 0;
+ int16_t res_16_SV_VL1__a_false = 0;
+ int16_t res_16_SV_VL2__a_false = 0;
+ int16_t res_16_SV_VL3__a_false = 0;
+ int16_t res_16_SV_VL4__a_false = 0;
+ int16_t res_16_SV_VL5__a_false = 0;
+ int16_t res_16_SV_VL6__a_false = 0;
+ int16_t res_16_SV_VL7__a_false = 0;
+ int16_t res_16_SV_VL8__a_false = 0;
+ int16_t res_16_SV_VL16__a_false = 0;
+ uint16_t res_16_SV_VL1_u_a_false = 0;
+ uint16_t res_16_SV_VL2_u_a_false = 0;
+ uint16_t res_16_SV_VL3_u_a_false = 0;
+ uint16_t res_16_SV_VL4_u_a_false = 0;
+ uint16_t res_16_SV_VL5_u_a_false = 0;
+ uint16_t res_16_SV_VL6_u_a_false = 0;
+ uint16_t res_16_SV_VL7_u_a_false = 0;
+ uint16_t res_16_SV_VL8_u_a_false = 0;
+ uint16_t res_16_SV_VL16_u_a_false = 0;
+ int32_t res_32_SV_VL1__a_false = 0;
+ int32_t res_32_SV_VL2__a_false = 0;
+ int32_t res_32_SV_VL3__a_false = 0;
+ int32_t res_32_SV_VL4__a_false = 0;
+ int32_t res_32_SV_VL5__a_false = 0;
+ int32_t res_32_SV_VL6__a_false = 0;
+ int32_t res_32_SV_VL7__a_false = 0;
+ int32_t res_32_SV_VL8__a_false = 0;
+ int32_t res_32_SV_VL16__a_false = 0;
+ uint32_t res_32_SV_VL1_u_a_false = 0;
+ uint32_t res_32_SV_VL2_u_a_false = 0;
+ uint32_t res_32_SV_VL3_u_a_false = 0;
+ uint32_t res_32_SV_VL4_u_a_false = 0;
+ uint32_t res_32_SV_VL5_u_a_false = 0;
+ uint32_t res_32_SV_VL6_u_a_false = 0;
+ uint32_t res_32_SV_VL7_u_a_false = 0;
+ uint32_t res_32_SV_VL8_u_a_false = 0;
+ uint32_t res_32_SV_VL16_u_a_false = 0;
+ int64_t res_64_SV_VL1__a_false = 0;
+ int64_t res_64_SV_VL2__a_false = 0;
+ int64_t res_64_SV_VL3__a_false = 0;
+ int64_t res_64_SV_VL4__a_false = 0;
+ int64_t res_64_SV_VL5__a_false = 0;
+ int64_t res_64_SV_VL6__a_false = 0;
+ int64_t res_64_SV_VL7__a_false = 0;
+ int64_t res_64_SV_VL8__a_false = 0;
+ int64_t res_64_SV_VL16__a_false = 0;
+ uint64_t res_64_SV_VL1_u_a_false = 0;
+ uint64_t res_64_SV_VL2_u_a_false = 0;
+ uint64_t res_64_SV_VL3_u_a_false = 0;
+ uint64_t res_64_SV_VL4_u_a_false = 0;
+ uint64_t res_64_SV_VL5_u_a_false = 0;
+ uint64_t res_64_SV_VL6_u_a_false = 0;
+ uint64_t res_64_SV_VL7_u_a_false = 0;
+ uint64_t res_64_SV_VL8_u_a_false = 0;
+ uint64_t res_64_SV_VL16_u_a_false = 0;
+ int8_t res_8_SV_VL1__b_false = 31;
+ int8_t res_8_SV_VL2__b_false = 31;
+ int8_t res_8_SV_VL3__b_false = 31;
+ int8_t res_8_SV_VL4__b_false = 31;
+ int8_t res_8_SV_VL5__b_false = 31;
+ int8_t res_8_SV_VL6__b_false = 31;
+ int8_t res_8_SV_VL7__b_false = 31;
+ int8_t res_8_SV_VL8__b_false = 31;
+ int8_t res_8_SV_VL16__b_false = 31;
+ uint8_t res_8_SV_VL1_u_b_false = 31;
+ uint8_t res_8_SV_VL2_u_b_false = 31;
+ uint8_t res_8_SV_VL3_u_b_false = 31;
+ uint8_t res_8_SV_VL4_u_b_false = 31;
+ uint8_t res_8_SV_VL5_u_b_false = 31;
+ uint8_t res_8_SV_VL6_u_b_false = 31;
+ uint8_t res_8_SV_VL7_u_b_false = 31;
+ uint8_t res_8_SV_VL8_u_b_false = 31;
+ uint8_t res_8_SV_VL16_u_b_false = 31;
+ int16_t res_16_SV_VL1__b_false = 15;
+ int16_t res_16_SV_VL2__b_false = 15;
+ int16_t res_16_SV_VL3__b_false = 15;
+ int16_t res_16_SV_VL4__b_false = 15;
+ int16_t res_16_SV_VL5__b_false = 15;
+ int16_t res_16_SV_VL6__b_false = 15;
+ int16_t res_16_SV_VL7__b_false = 15;
+ int16_t res_16_SV_VL8__b_false = 15;
+ int16_t res_16_SV_VL16__b_false = 15;
+ uint16_t res_16_SV_VL1_u_b_false = 15;
+ uint16_t res_16_SV_VL2_u_b_false = 15;
+ uint16_t res_16_SV_VL3_u_b_false = 15;
+ uint16_t res_16_SV_VL4_u_b_false = 15;
+ uint16_t res_16_SV_VL5_u_b_false = 15;
+ uint16_t res_16_SV_VL6_u_b_false = 15;
+ uint16_t res_16_SV_VL7_u_b_false = 15;
+ uint16_t res_16_SV_VL8_u_b_false = 15;
+ uint16_t res_16_SV_VL16_u_b_false = 15;
+ int32_t res_32_SV_VL1__b_false = 7;
+ int32_t res_32_SV_VL2__b_false = 7;
+ int32_t res_32_SV_VL3__b_false = 7;
+ int32_t res_32_SV_VL4__b_false = 7;
+ int32_t res_32_SV_VL5__b_false = 7;
+ int32_t res_32_SV_VL6__b_false = 7;
+ int32_t res_32_SV_VL7__b_false = 7;
+ int32_t res_32_SV_VL8__b_false = 7;
+ int32_t res_32_SV_VL16__b_false = 7;
+ uint32_t res_32_SV_VL1_u_b_false = 7;
+ uint32_t res_32_SV_VL2_u_b_false = 7;
+ uint32_t res_32_SV_VL3_u_b_false = 7;
+ uint32_t res_32_SV_VL4_u_b_false = 7;
+ uint32_t res_32_SV_VL5_u_b_false = 7;
+ uint32_t res_32_SV_VL6_u_b_false = 7;
+ uint32_t res_32_SV_VL7_u_b_false = 7;
+ uint32_t res_32_SV_VL8_u_b_false = 7;
+ uint32_t res_32_SV_VL16_u_b_false = 7;
+ int64_t res_64_SV_VL1__b_false = 3;
+ int64_t res_64_SV_VL2__b_false = 3;
+ int64_t res_64_SV_VL3__b_false = 3;
+ int64_t res_64_SV_VL4__b_false = 3;
+ int64_t res_64_SV_VL5__b_false = 3;
+ int64_t res_64_SV_VL6__b_false = 3;
+ int64_t res_64_SV_VL7__b_false = 3;
+ int64_t res_64_SV_VL8__b_false = 3;
+ int64_t res_64_SV_VL16__b_false = 3;
+ uint64_t res_64_SV_VL1_u_b_false = 3;
+ uint64_t res_64_SV_VL2_u_b_false = 3;
+ uint64_t res_64_SV_VL3_u_b_false = 3;
+ uint64_t res_64_SV_VL4_u_b_false = 3;
+ uint64_t res_64_SV_VL5_u_b_false = 3;
+ uint64_t res_64_SV_VL6_u_b_false = 3;
+ uint64_t res_64_SV_VL7_u_b_false = 3;
+ uint64_t res_64_SV_VL8_u_b_false = 3;
+ uint64_t res_64_SV_VL16_u_b_false = 3;
+
+
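+ /* Redefine SVELAST_DEF so that ALL_POS () checks each function's result
+ against its expected value, aborting on mismatch. */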
+#undef SVELAST_DEF
+#define SVELAST_DEF(size, pat, sign, ab, su) \
+ if (NAME (foo, size, pat, sign, ab) \
+ (svindex_ ## su ## size (0, 1)) != \
+ NAME (res, size, pat, sign, ab)) \
+ __builtin_abort (); \
+ if (NAMEF (foo, size, pat, sign, ab) \
+ (svindex_ ## su ## size (0, 1)) != \
+ NAMEF (res, size, pat, sign, ab)) \
+ __builtin_abort ();
+
+ ALL_POS ()
+
+ return 0;
+}
@@ -186,8 +186,6 @@ CALLER (f16, __SVFloat16_t)
** caller_bf16:
** ...
** bl callee_bf16
-** ptrue (p[0-7])\.b, all
-** lasta h0, \1, z0\.h
** ldp x29, x30, \[sp\], 16
** ret
*/
@@ -186,8 +186,6 @@ CALLER (f16, __SVFloat16_t)
** caller_bf16:
** ...
** bl callee_bf16
-** ptrue (p[0-7])\.b, vl128
-** lasta h0, \1, z0\.h
** ldp x29, x30, \[sp\], 16
** ret
*/
@@ -186,8 +186,6 @@ CALLER (f16, __SVFloat16_t)
** caller_bf16:
** ...
** bl callee_bf16
-** ptrue (p[0-7])\.b, vl16
-** lasta h0, \1, z0\.h
** ldp x29, x30, \[sp\], 16
** ret
*/
@@ -186,8 +186,6 @@ CALLER (f16, __SVFloat16_t)
** caller_bf16:
** ...
** bl callee_bf16
-** ptrue (p[0-7])\.b, vl256
-** lasta h0, \1, z0\.h
** ldp x29, x30, \[sp\], 16
** ret
*/
@@ -186,8 +186,6 @@ CALLER (f16, __SVFloat16_t)
** caller_bf16:
** ...
** bl callee_bf16
-** ptrue (p[0-7])\.b, vl32
-** lasta h0, \1, z0\.h
** ldp x29, x30, \[sp\], 16
** ret
*/
@@ -186,8 +186,6 @@ CALLER (f16, __SVFloat16_t)
** caller_bf16:
** ...
** bl callee_bf16
-** ptrue (p[0-7])\.b, vl64
-** lasta h0, \1, z0\.h
** ldp x29, x30, \[sp\], 16
** ret
*/
@@ -186,8 +186,6 @@ CALLER (f16, svfloat16_t)
** caller_bf16:
** ...
** bl callee_bf16
-** ptrue (p[0-7])\.b, all
-** lasta h0, \1, z0\.h
** ldp x29, x30, \[sp\], 16
** ret
*/
@@ -186,8 +186,6 @@ CALLER (f16, svfloat16_t)
** caller_bf16:
** ...
** bl callee_bf16
-** ptrue (p[0-7])\.b, vl128
-** lasta h0, \1, z0\.h
** ldp x29, x30, \[sp\], 16
** ret
*/
@@ -186,8 +186,6 @@ CALLER (f16, svfloat16_t)
** caller_bf16:
** ...
** bl callee_bf16
-** ptrue (p[0-7])\.b, vl16
-** lasta h0, \1, z0\.h
** ldp x29, x30, \[sp\], 16
** ret
*/
@@ -186,8 +186,6 @@ CALLER (f16, svfloat16_t)
** caller_bf16:
** ...
** bl callee_bf16
-** ptrue (p[0-7])\.b, vl256
-** lasta h0, \1, z0\.h
** ldp x29, x30, \[sp\], 16
** ret
*/
@@ -186,8 +186,6 @@ CALLER (f16, svfloat16_t)
** caller_bf16:
** ...
** bl callee_bf16
-** ptrue (p[0-7])\.b, vl32
-** lasta h0, \1, z0\.h
** ldp x29, x30, \[sp\], 16
** ret
*/
@@ -186,8 +186,6 @@ CALLER (f16, svfloat16_t)
** caller_bf16:
** ...
** bl callee_bf16
-** ptrue (p[0-7])\.b, vl64
-** lasta h0, \1, z0\.h
** ldp x29, x30, \[sp\], 16
** ret
*/