[SVE] Fold svdupq to VEC_PERM_EXPR if elements are not constant
Commit Message
Hi Richard,
Sorry I forgot to commit this patch, which you had approved in:
https://gcc.gnu.org/pipermail/gcc-patches/2023-April/615308.html
Just for context for the following test:
svint32_t f_s32(int32x4_t x)
{
return svdupq_s32 (x[0], x[1], x[2], x[3]);
}
-O3 -mcpu=generic+sve generates the following code after the interleave+zip1 patch:
f_s32:
dup s31, v0.s[1]
mov v30.8b, v0.8b
ins v31.s[1], v0.s[3]
ins v30.s[1], v0.s[2]
zip1 v0.4s, v30.4s, v31.4s
dup z0.q, z0.q[0]
ret
Code-gen with attached patch:
f_s32:
dup z0.q, z0.q[0]
ret
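For reference, the lowering introduced by fold_nonconst_dupq turns the call above into gimple along these lines (SSA names and the exact dump spelling are only illustrative; the shape follows the comment in the patch):

  _5 = {_1, _2, _3, _4};
  _6 = VEC_PERM_EXPR <_5, _5, { 0, 1, 2, 3, ... }>;

where _1.._4 hold x[0]..x[3]: the constructor builds a single 128-bit V4SI vector and the permute repeats it across the full SVE vector, which the backend then matches to the single dup z0.q, z0.q[0] shown above.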
Bootstrapped+tested on aarch64-linux-gnu.
OK to commit?
Thanks,
Prathamesh
[SVE] Fold svdupq to VEC_PERM_EXPR if elements are not constant.
gcc/ChangeLog:
* config/aarch64/aarch64-sve-builtins-base.cc
(svdupq_impl::fold_nonconst_dupq): New method.
(svdupq_impl::fold): Call fold_nonconst_dupq.
gcc/testsuite/ChangeLog:
* gcc.target/aarch64/sve/acle/general/dupq_11.c: New test.
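(In case it helps review: the new test should be runnable on its own from the gcc build directory with something like

  make check-gcc RUNTESTFLAGS="aarch64-sve-acle.exp=dupq_11.c"

though the exact runtest invocation here is from memory and may need adjusting to your setup.)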
Comments
Prathamesh Kulkarni <prathamesh.kulkarni@linaro.org> writes:
> Hi Richard,
> Sorry I forgot to commit this patch, which you had approved in:
> https://gcc.gnu.org/pipermail/gcc-patches/2023-April/615308.html
>
> Just for context for the following test:
> svint32_t f_s32(int32x4_t x)
> {
> return svdupq_s32 (x[0], x[1], x[2], x[3]);
> }
>
> -O3 -mcpu=generic+sve generates the following code after the interleave+zip1 patch:
> f_s32:
> dup s31, v0.s[1]
> mov v30.8b, v0.8b
> ins v31.s[1], v0.s[3]
> ins v30.s[1], v0.s[2]
> zip1 v0.4s, v30.4s, v31.4s
> dup z0.q, z0.q[0]
> ret
>
> Code-gen with attached patch:
> f_s32:
> dup z0.q, z0.q[0]
> ret
>
> Bootstrapped+tested on aarch64-linux-gnu.
> OK to commit?
>
> Thanks,
> Prathamesh
>
> [SVE] Fold svdupq to VEC_PERM_EXPR if elements are not constant.
>
> gcc/ChangeLog:
> * config/aarch64/aarch64-sve-builtins-base.cc
> (svdupq_impl::fold_nonconst_dupq): New method.
> (svdupq_impl::fold): Call fold_nonconst_dupq.
>
> gcc/testsuite/ChangeLog:
> * gcc.target/aarch64/sve/acle/general/dupq_11.c: New test.
OK, thanks.
Richard
> diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> index 95b4cb8a943..9010ecca6da 100644
> --- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> +++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> @@ -817,6 +817,52 @@ public:
>
> class svdupq_impl : public quiet<function_base>
> {
> +private:
> + gimple *
> + fold_nonconst_dupq (gimple_folder &f) const
> + {
> + /* Lower lhs = svdupq (arg0, arg1, ..., arg<N-1>) into:
> + tmp = {arg0, arg1, ..., arg<N-1>}
> + lhs = VEC_PERM_EXPR (tmp, tmp, {0, 1, ..., N-1, 0, 1, ..., N-1, ...}).  */
> +
> + if (f.type_suffix (0).bool_p
> + || BYTES_BIG_ENDIAN)
> + return NULL;
> +
> + tree lhs = gimple_call_lhs (f.call);
> + tree lhs_type = TREE_TYPE (lhs);
> + tree elt_type = TREE_TYPE (lhs_type);
> + scalar_mode elt_mode = SCALAR_TYPE_MODE (elt_type);
> + machine_mode vq_mode = aarch64_vq_mode (elt_mode).require ();
> + tree vq_type = build_vector_type_for_mode (elt_type, vq_mode);
> +
> + unsigned nargs = gimple_call_num_args (f.call);
> + vec<constructor_elt, va_gc> *v;
> + vec_alloc (v, nargs);
> + for (unsigned i = 0; i < nargs; i++)
> + CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, gimple_call_arg (f.call, i));
> + tree vec = build_constructor (vq_type, v);
> + tree tmp = make_ssa_name_fn (cfun, vq_type, 0);
> + gimple *g = gimple_build_assign (tmp, vec);
> +
> + gimple_seq stmts = NULL;
> + gimple_seq_add_stmt_without_update (&stmts, g);
> +
> + poly_uint64 lhs_len = TYPE_VECTOR_SUBPARTS (lhs_type);
> + vec_perm_builder sel (lhs_len, nargs, 1);
> + for (unsigned i = 0; i < nargs; i++)
> + sel.quick_push (i);
> +
> + vec_perm_indices indices (sel, 1, nargs);
> + tree mask_type = build_vector_type (ssizetype, lhs_len);
> + tree mask = vec_perm_indices_to_tree (mask_type, indices);
> +
> + gimple *g2 = gimple_build_assign (lhs, VEC_PERM_EXPR, tmp, tmp, mask);
> + gimple_seq_add_stmt_without_update (&stmts, g2);
> + gsi_replace_with_seq (f.gsi, stmts, false);
> + return g2;
> + }
> +
> public:
> gimple *
> fold (gimple_folder &f) const override
> @@ -832,7 +878,7 @@ public:
> {
> tree elt = gimple_call_arg (f.call, i);
> if (!CONSTANT_CLASS_P (elt))
> - return NULL;
> + return fold_nonconst_dupq (f);
> builder.quick_push (elt);
> for (unsigned int j = 1; j < factor; ++j)
> builder.quick_push (build_zero_cst (TREE_TYPE (vec_type)));
> diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/dupq_11.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/dupq_11.c
> new file mode 100644
> index 00000000000..f19f8deb1e5
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/dupq_11.c
> @@ -0,0 +1,31 @@
> +/* { dg-do compile } */
> +/* { dg-options "-O3 -fdump-tree-optimized" } */
> +
> +#include <arm_sve.h>
> +#include <arm_neon.h>
> +
> +svint8_t f_s8(int8x16_t x)
> +{
> + return svdupq_s8 (x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
> + x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15]);
> +}
> +
> +svint16_t f_s16(int16x8_t x)
> +{
> + return svdupq_s16 (x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7]);
> +}
> +
> +svint32_t f_s32(int32x4_t x)
> +{
> + return svdupq_s32 (x[0], x[1], x[2], x[3]);
> +}
> +
> +svint64_t f_s64(int64x2_t x)
> +{
> + return svdupq_s64 (x[0], x[1]);
> +}
> +
> +/* { dg-final { scan-tree-dump "VEC_PERM_EXPR" "optimized" } } */
> +/* { dg-final { scan-tree-dump-not "svdupq" "optimized" } } */
> +
> +/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.q, z[0-9]+\.q\[0\]\n} 4 } } */
On Wed, 28 Jun 2023 at 00:05, Richard Sandiford
<richard.sandiford@arm.com> wrote:
>
> Prathamesh Kulkarni <prathamesh.kulkarni@linaro.org> writes:
> > Hi Richard,
> > Sorry I forgot to commit this patch, which you had approved in:
> > https://gcc.gnu.org/pipermail/gcc-patches/2023-April/615308.html
> >
> > Just for context for the following test:
> > svint32_t f_s32(int32x4_t x)
> > {
> > return svdupq_s32 (x[0], x[1], x[2], x[3]);
> > }
> >
> > -O3 -mcpu=generic+sve generates the following code after the interleave+zip1 patch:
> > f_s32:
> > dup s31, v0.s[1]
> > mov v30.8b, v0.8b
> > ins v31.s[1], v0.s[3]
> > ins v30.s[1], v0.s[2]
> > zip1 v0.4s, v30.4s, v31.4s
> > dup z0.q, z0.q[0]
> > ret
> >
> > Code-gen with attached patch:
> > f_s32:
> > dup z0.q, z0.q[0]
> > ret
> >
> > Bootstrapped+tested on aarch64-linux-gnu.
> > OK to commit?
> >
> > Thanks,
> > Prathamesh
> >
> > [SVE] Fold svdupq to VEC_PERM_EXPR if elements are not constant.
> >
> > gcc/ChangeLog:
> > * config/aarch64/aarch64-sve-builtins-base.cc
> > (svdupq_impl::fold_nonconst_dupq): New method.
> > (svdupq_impl::fold): Call fold_nonconst_dupq.
> >
> > gcc/testsuite/ChangeLog:
> > * gcc.target/aarch64/sve/acle/general/dupq_11.c: New test.
>
> OK, thanks.
Thanks, pushed to trunk in 231f6b56c77c50f337f2529b3ae51e2083ce461d
Thanks,
Prathamesh
>
> Richard
>
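As a closing aside, nothing changes for calls where every element is already a constant; for example (a hypothetical test of mine, not part of the patch):

svint32_t g_const (void)
{
  /* All-constant arguments: still handled by the pre-existing path in
     svdupq_impl::fold, which folds this to a constant vector with the
     four values repeating, not by the new VEC_PERM_EXPR lowering.  */
  return svdupq_s32 (1, 2, 3, 4);
}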