[20/21] Arm: Add Advanced SIMD cbranch implementation
Commit Message
Hi All,
This adds an implementation for conditional branch optab for AArch32.
For example:
void f1 ()
{
  for (int i = 0; i < N; i++)
    {
      b[i] += a[i];
      if (a[i] > 0)
	break;
    }
}
For 128-bit vectors we generate:
	vcgt.s32	q8, q9, #0
	vpmax.u32	d7, d16, d17
	vpmax.u32	d7, d7, d7
	vmov	r3, s14	@ int
	cmp	r3, #0
and for 64-bit vectors we can omit one vpmax, as we still need to compress to
32 bits.
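
For illustration, the 64-bit case would look roughly like this (a sketch with
illustrative register numbers, not actual compiler output):

	vcgt.s32	d7, d16, #0
	vpmax.u32	d7, d7, d7
	vmov	r3, s14	@ int
	cmp	r3, #0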
Bootstrapped and regtested on arm-none-linux-gnueabihf with no issues.
Ok for master?
Thanks,
Tamar
gcc/ChangeLog:
* config/arm/neon.md (cbranch<mode>4): New.
gcc/testsuite/ChangeLog:
* lib/target-supports.exp (vect_early_break): Add AArch32.
* gcc.target/arm/vect-early-break-cbranch.c: New test.
--- inline copy of patch --
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index d213369ffc38fb88ad0357d848cc7da5af73bab7..130efbc37cfe3128533599dfadc344d2243dcb63 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -408,6 +408,45 @@ (define_insn "vec_extract<mode><V_elem_l>"
[(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
)
+;; Patterns that compare two vectors and conditionally jump.
+;; Advanced SIMD lacks a vector != comparison, but this is quite a common
+;; operation.  To avoid paying the penalty for inverting == we can map our
+;; any comparisons to all, i.e. any(~x) => all(x).
+;;
+;; However, unlike the AArch64 version, we can't optimize this further, as
+;; the chain is too long for combine due to these being unspecs, so it
+;; doesn't fold the operation to something simpler.
+(define_expand "cbranch<mode>4"
+  [(set (pc) (if_then_else
+	       (match_operator 0 "expandable_comparison_operator"
+		[(match_operand:VDQI 1 "register_operand")
+		 (match_operand:VDQI 2 "zero_operand")])
+	       (label_ref (match_operand 3 "" ""))
+	       (pc)))]
+  "TARGET_NEON"
+{
+  rtx mask = operands[1];
+
+  /* For 128-bit vectors we need an additional reduction.  */
+  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
+    {
+      /* Always reduce using a V4SI.  */
+      mask = gen_reg_rtx (V2SImode);
+      rtx low = gen_reg_rtx (V2SImode);
+      rtx high = gen_reg_rtx (V2SImode);
+      emit_insn (gen_neon_vget_lowv4si (low, operands[1]));
+      emit_insn (gen_neon_vget_highv4si (high, operands[1]));
+      emit_insn (gen_neon_vpumaxv2si (mask, low, high));
+    }
+
+  emit_insn (gen_neon_vpumaxv2si (mask, mask, mask));
+
+  rtx val = gen_reg_rtx (SImode);
+  emit_move_insn (val, gen_lowpart (SImode, mask));
+  emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
+  DONE;
+})
+
;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
;; by define_expand in vec-common.md file.
diff --git a/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
new file mode 100644
index 0000000000000000000000000000000000000000..2c05aa10d26ed4ac9785672e6e3b4355cef046dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
@@ -0,0 +1,136 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-require-effective-target arm32 } */
+/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#define N 640
+int a[N] = {0};
+int b[N] = {0};
+
+/*
+** f1:
+** ...
+** vcgt.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f1 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] > 0)
+	break;
+    }
+}
+
+/*
+** f2:
+** ...
+** vcge.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f2 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] >= 0)
+	break;
+    }
+}
+
+/*
+** f3:
+** ...
+** vceq.i32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f3 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] == 0)
+	break;
+    }
+}
+
+/*
+** f4:
+** ...
+** vceq.i32 q[0-9]+, q[0-9]+, #0
+** vmvn q[0-9]+, q[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f4 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] != 0)
+	break;
+    }
+}
+
+/*
+** f5:
+** ...
+** vclt.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f5 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] < 0)
+	break;
+    }
+}
+
+/*
+** f6:
+** ...
+** vcle.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f6 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] <= 0)
+	break;
+    }
+}
+
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 5516188dc0aa86d161d67dea5a7769e3c3d72f85..8f58671e6cfd3546c6a98e40341fe31c6492594b 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -3784,6 +3784,7 @@ proc check_effective_target_vect_early_break { } {
     return [check_cached_effective_target_indexed vect_early_break {
       expr {
 	[istarget aarch64*-*-*]
+	|| [check_effective_target_arm_neon_ok]
       }}]
 }
# Return 1 if the target supports hardware vectorization of complex additions of
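
For readers who want the reduction in more familiar terms, the sequence the
expander emits corresponds roughly to the following arm_neon.h sketch (for
illustration only; the expander works in RTL, and the helper name
any_lane_set is made up here):

#include <arm_neon.h>

/* Collapse a per-lane comparison mask into a single 32-bit value that is
   nonzero iff any lane is set.  This mirrors the vget_low/vget_high plus
   double-vpmax sequence cbranch<mode>4 emits for 128-bit modes.  */
static inline uint32_t
any_lane_set (uint32x4_t mask)
{
  /* 128 -> 64 bits: pairwise unsigned max of the two halves.  */
  uint32x2_t m = vpmax_u32 (vget_low_u32 (mask), vget_high_u32 (mask));
  /* 64 -> 32 bits: one more pairwise max; for 64-bit vectors this would
     be the only vpmax needed.  */
  m = vpmax_u32 (m, m);
  return vget_lane_u32 (m, 0);
}

The result then feeds a plain scalar cmp/bne, which is exactly what the
generated assembly shown above does.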
Comments
Hi Tamar,
> -----Original Message-----
> From: Tamar Christina <Tamar.Christina@arm.com>
> Sent: Monday, November 6, 2023 7:43 AM
> To: gcc-patches@gcc.gnu.org
> Cc: nd <nd@arm.com>; Ramana Radhakrishnan
> <Ramana.Radhakrishnan@arm.com>; Richard Earnshaw
> <Richard.Earnshaw@arm.com>; nickc@redhat.com; Kyrylo Tkachov
> <Kyrylo.Tkachov@arm.com>
> Subject: [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation
>
> Hi All,
>
> This adds an implementation for conditional branch optab for AArch32.
>
> For e.g.
>
> void f1 ()
> {
> for (int i = 0; i < N; i++)
> {
> b[i] += a[i];
> if (a[i] > 0)
> break;
> }
> }
>
> For 128-bit vectors we generate:
>
> vcgt.s32 q8, q9, #0
> vpmax.u32 d7, d16, d17
> vpmax.u32 d7, d7, d7
> vmov r3, s14 @ int
> cmp r3, #0
>
> and of 64-bit vector we can omit one vpmax as we still need to compress to
> 32-bits.
>
> Bootstrapped Regtested on arm-none-linux-gnueabihf and no issues.
>
> Ok for master?
>
This is okay once the prerequisites go in.
Thanks,
Kyrill