[04/10] arm: Stop vadcq, vsbcq intrinsics from overwriting the FPSCR NZ flags
Commit Message
From: Stam Markianos-Wright <stam.markianos-wright@arm.com>
Hi all,
We noticed that calls to the vadcq and vsbcq intrinsics, both of
which use __builtin_arm_set_fpscr_nzcvqc to set the Carry flag in
the FPSCR, would produce the following code:
```
< r2 is the *carry input >
vmrs r3, FPSCR_nzcvqc
bic r3, r3, #536870912
orr r3, r3, r2, lsl #29
vmsr FPSCR_nzcvqc, r3
```
whereas the MVE ACLE instead specifies the following instruction sequence:
```
< Rt is the *carry input >
VMRS Rs,FPSCR_nzcvqc
BFI Rs,Rt,#29,#1
VMSR FPSCR_nzcvqc,Rs
```
The bic + orr pair is slower, and it is also wrong: if the
*carry input is greater than 1, we risk overwriting the top two
bits of the FPSCR register (the N and Z flags).
This turned out to be a problem in the header file, and the solution was
simply to add a `& 0x1u` to the `*carry` input: then the compiler knows
that we only care about the lowest bit and can optimise it to a BFI.
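For illustration, here is a minimal host-side sketch of the arithmetic (not the
actual header code: `fpscr`, `set_carry_old` and `set_carry_new` are made-up
names, and a plain variable stands in for FPSCR_nzcvqc) showing how an unmasked
carry spills into the Z/N bits and how the `& 0x1u` confines the write to bit 29:
```
#include <stdint.h>
#include <stdio.h>

/* Stand-in for FPSCR_nzcvqc: N = bit 31, Z = bit 30, C = bit 29.  */
static uint32_t fpscr;

/* Old header arithmetic: clear bit 29, then OR in the unmasked *carry.  */
static void set_carry_old (unsigned carry)
{
  fpscr = (fpscr & ~0x20000000u) | (carry << 29);
}

/* Fixed arithmetic: the & 0x1u confines the write to bit 29, so the
   compiler is free to implement the update as a single BFI.  */
static void set_carry_new (unsigned carry)
{
  fpscr = (fpscr & ~0x20000000u) | ((carry & 0x1u) << 29);
}

int main (void)
{
  fpscr = 0;
  set_carry_old (3);                          /* 3 << 29 spills into bit 30 */
  printf ("old: 0x%08x\n", (unsigned) fpscr); /* 0x60000000: Z corrupted, C set */

  fpscr = 0;
  set_carry_new (3);                          /* only bit 0 of carry is used */
  printf ("new: 0x%08x\n", (unsigned) fpscr); /* 0x20000000: C set, N/Z untouched */
  return 0;
}
```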
Ok for trunk?
Thanks,
Stam Markianos-Wright
gcc/ChangeLog:
* config/arm/arm_mve.h (__arm_vadcq_s32): Fix arithmetic.
(__arm_vadcq_u32): Likewise.
(__arm_vadcq_m_s32): Likewise.
(__arm_vadcq_m_u32): Likewise.
(__arm_vsbcq_s32): Likewise.
(__arm_vsbcq_u32): Likewise.
(__arm_vsbcq_m_s32): Likewise.
(__arm_vsbcq_m_u32): Likewise.
---
gcc/config/arm/arm_mve.h | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
Comments
Hi Andrea, Stam,
> -----Original Message-----
> From: Andrea Corallo <andrea.corallo@arm.com>
> Sent: Friday, April 28, 2023 12:30 PM
> To: gcc-patches@gcc.gnu.org
> Cc: Kyrylo Tkachov <Kyrylo.Tkachov@arm.com>; Richard Earnshaw
> <Richard.Earnshaw@arm.com>; Stam Markianos-Wright <Stam.Markianos-
> Wright@arm.com>
> Subject: [PATCH 04/10] arm: Stop vadcq, vsbcq intrinsics from overwriting the
> FPSCR NZ flags
>
> From: Stam Markianos-Wright <stam.markianos-wright@arm.com>
>
> Hi all,
>
> We noticed that calls to the vadcq and vsbcq intrinsics, both of
> which use __builtin_arm_set_fpscr_nzcvqc to set the Carry flag in
> the FPSCR, would produce the following code:
>
> ```
> < r2 is the *carry input >
> vmrs r3, FPSCR_nzcvqc
> bic r3, r3, #536870912
> orr r3, r3, r2, lsl #29
> vmsr FPSCR_nzcvqc, r3
> ```
>
> when the MVE ACLE instead gives a different instruction sequence of:
> ```
> < Rt is the *carry input >
> VMRS Rs,FPSCR_nzcvqc
> BFI Rs,Rt,#29,#1
> VMSR FPSCR_nzcvqc,Rs
> ```
>
> the bic + orr pair is slower and it's also wrong, because, if the
> *carry input is greater than 1, then we risk overwriting the top two
> bits of the FPSCR register (the N and Z flags).
>
> This turned out to be a problem in the header file and the solution was
> to simply add a `& 0x1u` to the `*carry` input: then the compiler knows
> that we only care about the lowest bit and can optimise to a BFI.
>
> Ok for trunk?
Ok, but I think this needs testsuite coverage for the bug?
Thanks,
Kyrill
>
> Thanks,
> Stam Markianos-Wright
>
> gcc/ChangeLog:
>
> * config/arm/arm_mve.h (__arm_vadcq_s32): Fix arithmetic.
> (__arm_vadcq_u32): Likewise.
> (__arm_vadcq_m_s32): Likewise.
> (__arm_vadcq_m_u32): Likewise.
> (__arm_vsbcq_s32): Likewise.
> (__arm_vsbcq_u32): Likewise.
> (__arm_vsbcq_m_s32): Likewise.
> (__arm_vsbcq_m_u32): Likewise.
> ---
> gcc/config/arm/arm_mve.h | 16 ++++++++--------
> 1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
> index 1262d668121..8778216304b 100644
> --- a/gcc/config/arm/arm_mve.h
> +++ b/gcc/config/arm/arm_mve.h
> @@ -16055,7 +16055,7 @@ __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
> {
> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | (*__carry << 29));
> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> int32x4_t __res = __builtin_mve_vadcq_sv4si (__a, __b);
> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> return __res;
> @@ -16065,7 +16065,7 @@ __extension__ extern __inline uint32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vadcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
> {
> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | (*__carry << 29));
> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> uint32x4_t __res = __builtin_mve_vadcq_uv4si (__a, __b);
> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> return __res;
> @@ -16075,7 +16075,7 @@ __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vadcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b,
> unsigned * __carry, mve_pred16_t __p)
> {
> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | (*__carry << 29));
> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> int32x4_t __res = __builtin_mve_vadcq_m_sv4si (__inactive, __a, __b, __p);
> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> return __res;
> @@ -16085,7 +16085,7 @@ __extension__ extern __inline uint32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vadcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b,
> unsigned * __carry, mve_pred16_t __p)
> {
> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | (*__carry << 29));
> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> uint32x4_t __res = __builtin_mve_vadcq_m_uv4si (__inactive, __a, __b,
> __p);
> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> return __res;
> @@ -16131,7 +16131,7 @@ __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vsbcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
> {
> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | (*__carry << 29));
> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> int32x4_t __res = __builtin_mve_vsbcq_sv4si (__a, __b);
> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> return __res;
> @@ -16141,7 +16141,7 @@ __extension__ extern __inline uint32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vsbcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
> {
> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | (*__carry << 29));
> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> uint32x4_t __res = __builtin_mve_vsbcq_uv4si (__a, __b);
> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> return __res;
> @@ -16151,7 +16151,7 @@ __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vsbcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b,
> unsigned * __carry, mve_pred16_t __p)
> {
> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | (*__carry << 29));
> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> int32x4_t __res = __builtin_mve_vsbcq_m_sv4si (__inactive, __a, __b, __p);
> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> return __res;
> @@ -16161,7 +16161,7 @@ __extension__ extern __inline uint32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vsbcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b,
> unsigned * __carry, mve_pred16_t __p)
> {
> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | (*__carry << 29));
> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> uint32x4_t __res = __builtin_mve_vsbcq_m_uv4si (__inactive, __a, __b,
> __p);
> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> return __res;
> --
> 2.25.1
On 28/04/2023 17:45, Kyrylo Tkachov wrote:
> Hi Andrea, Stam,
>
>> -----Original Message-----
>> From: Andrea Corallo <andrea.corallo@arm.com>
>> Sent: Friday, April 28, 2023 12:30 PM
>> To: gcc-patches@gcc.gnu.org
>> Cc: Kyrylo Tkachov <Kyrylo.Tkachov@arm.com>; Richard Earnshaw
>> <Richard.Earnshaw@arm.com>; Stam Markianos-Wright <Stam.Markianos-
>> Wright@arm.com>
>> Subject: [PATCH 04/10] arm: Stop vadcq, vsbcq intrinsics from overwriting the
>> FPSCR NZ flags
>>
>> From: Stam Markianos-Wright <stam.markianos-wright@arm.com>
>>
>> Hi all,
>>
>> We noticed that calls to the vadcq and vsbcq intrinsics, both of
>> which use __builtin_arm_set_fpscr_nzcvqc to set the Carry flag in
>> the FPSCR, would produce the following code:
>>
>> ```
>> < r2 is the *carry input >
>> vmrs r3, FPSCR_nzcvqc
>> bic r3, r3, #536870912
>> orr r3, r3, r2, lsl #29
>> vmsr FPSCR_nzcvqc, r3
>> ```
>>
>> when the MVE ACLE instead gives a different instruction sequence of:
>> ```
>> < Rt is the *carry input >
>> VMRS Rs,FPSCR_nzcvqc
>> BFI Rs,Rt,#29,#1
>> VMSR FPSCR_nzcvqc,Rs
>> ```
>>
>> the bic + orr pair is slower and it's also wrong, because, if the
>> *carry input is greater than 1, then we risk overwriting the top two
>> bits of the FPSCR register (the N and Z flags).
>>
>> This turned out to be a problem in the header file and the solution was
>> to simply add a `& 0x1u` to the `*carry` input: then the compiler knows
>> that we only care about the lowest bit and can optimise to a BFI.
>>
>> Ok for trunk?
> Ok, but I think this needs testsuite coverage for the bug?
> Thanks,
> Kyrill
So this can be seen in the new vadcq*, vsbcq* tests:
** ...
** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
** ...
** bfi (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
** ...
** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
** ...
The fact that there's a BFI there rather than a BIC + ORR pair shows
that this is now optimised by the compiler and the bug is no longer
present in those intrinsics... Sorry, I should have linked
that in better in our patch series!
I also added a run test, as it was fairly trivial to write :)
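For reference, the calls being scanned have roughly this shape (a sketch only:
the function name is illustrative, an MVE-enabled target and options along the
lines of -march=armv8.1-m.main+mve are assumed, and the committed tests are the
authoritative versions):
```
#include <arm_mve.h>

/* With the fixed header, the carry update inside vadcq_s32 should now
   compile to vmrs/bfi/vmsr on FPSCR_nzcvqc instead of a bic + orr pair.  */
int32x4_t
test_vadcq_s32 (int32x4_t a, int32x4_t b, unsigned *carry)
{
  return vadcq_s32 (a, b, carry);
}
```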
Thanks,
Stam
>
>> Thanks,
>> Stam Markianos-Wright
>>
>> gcc/ChangeLog:
>>
>> * config/arm/arm_mve.h (__arm_vadcq_s32): Fix arithmetic.
>> (__arm_vadcq_u32): Likewise.
>> (__arm_vadcq_m_s32): Likewise.
>> (__arm_vadcq_m_u32): Likewise.
>> (__arm_vsbcq_s32): Likewise.
>> (__arm_vsbcq_u32): Likewise.
>> (__arm_vsbcq_m_s32): Likewise.
>> (__arm_vsbcq_m_u32): Likewise.
>> ---
>> gcc/config/arm/arm_mve.h | 16 ++++++++--------
>> 1 file changed, 8 insertions(+), 8 deletions(-)
>>
>> diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
>> index 1262d668121..8778216304b 100644
>> --- a/gcc/config/arm/arm_mve.h
>> +++ b/gcc/config/arm/arm_mve.h
>> @@ -16055,7 +16055,7 @@ __extension__ extern __inline int32x4_t
>> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>> __arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
>> {
>> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | (*__carry << 29));
>> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | ((*__carry & 0x1u) << 29));
>> int32x4_t __res = __builtin_mve_vadcq_sv4si (__a, __b);
>> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
>> return __res;
>> @@ -16065,7 +16065,7 @@ __extension__ extern __inline uint32x4_t
>> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>> __arm_vadcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
>> {
>> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | (*__carry << 29));
>> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | ((*__carry & 0x1u) << 29));
>> uint32x4_t __res = __builtin_mve_vadcq_uv4si (__a, __b);
>> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
>> return __res;
>> @@ -16075,7 +16075,7 @@ __extension__ extern __inline int32x4_t
>> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>> __arm_vadcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b,
>> unsigned * __carry, mve_pred16_t __p)
>> {
>> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | (*__carry << 29));
>> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | ((*__carry & 0x1u) << 29));
>> int32x4_t __res = __builtin_mve_vadcq_m_sv4si (__inactive, __a, __b, __p);
>> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
>> return __res;
>> @@ -16085,7 +16085,7 @@ __extension__ extern __inline uint32x4_t
>> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>> __arm_vadcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b,
>> unsigned * __carry, mve_pred16_t __p)
>> {
>> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | (*__carry << 29));
>> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | ((*__carry & 0x1u) << 29));
>> uint32x4_t __res = __builtin_mve_vadcq_m_uv4si (__inactive, __a, __b,
>> __p);
>> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
>> return __res;
>> @@ -16131,7 +16131,7 @@ __extension__ extern __inline int32x4_t
>> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>> __arm_vsbcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
>> {
>> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | (*__carry << 29));
>> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | ((*__carry & 0x1u) << 29));
>> int32x4_t __res = __builtin_mve_vsbcq_sv4si (__a, __b);
>> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
>> return __res;
>> @@ -16141,7 +16141,7 @@ __extension__ extern __inline uint32x4_t
>> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>> __arm_vsbcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
>> {
>> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | (*__carry << 29));
>> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | ((*__carry & 0x1u) << 29));
>> uint32x4_t __res = __builtin_mve_vsbcq_uv4si (__a, __b);
>> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
>> return __res;
>> @@ -16151,7 +16151,7 @@ __extension__ extern __inline int32x4_t
>> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>> __arm_vsbcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b,
>> unsigned * __carry, mve_pred16_t __p)
>> {
>> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | (*__carry << 29));
>> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | ((*__carry & 0x1u) << 29));
>> int32x4_t __res = __builtin_mve_vsbcq_m_sv4si (__inactive, __a, __b, __p);
>> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
>> return __res;
>> @@ -16161,7 +16161,7 @@ __extension__ extern __inline uint32x4_t
>> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>> __arm_vsbcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b,
>> unsigned * __carry, mve_pred16_t __p)
>> {
>> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | (*__carry << 29));
>> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
>> ~0x20000000u) | ((*__carry & 0x1u) << 29));
>> uint32x4_t __res = __builtin_mve_vsbcq_m_uv4si (__inactive, __a, __b,
>> __p);
>> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
>> return __res;
>> --
>> 2.25.1
> -----Original Message-----
> From: Stam Markianos-Wright <Stam.Markianos-Wright@arm.com>
> Sent: Wednesday, May 3, 2023 1:19 PM
> To: Kyrylo Tkachov <Kyrylo.Tkachov@arm.com>; Andrea Corallo
> <Andrea.Corallo@arm.com>; gcc-patches@gcc.gnu.org
> Cc: Richard Earnshaw <Richard.Earnshaw@arm.com>
> Subject: Re: [PATCH 04/10] arm: Stop vadcq, vsbcq intrinsics from overwriting
> the FPSCR NZ flags
>
>
> On 28/04/2023 17:45, Kyrylo Tkachov wrote:
> > Hi Andrea, Stam,
> >
> >> -----Original Message-----
> >> From: Andrea Corallo <andrea.corallo@arm.com>
> >> Sent: Friday, April 28, 2023 12:30 PM
> >> To: gcc-patches@gcc.gnu.org
> >> Cc: Kyrylo Tkachov <Kyrylo.Tkachov@arm.com>; Richard Earnshaw
> >> <Richard.Earnshaw@arm.com>; Stam Markianos-Wright
> <Stam.Markianos-
> >> Wright@arm.com>
> >> Subject: [PATCH 04/10] arm: Stop vadcq, vsbcq intrinsics from overwriting
> the
> >> FPSCR NZ flags
> >>
> >> From: Stam Markianos-Wright <stam.markianos-wright@arm.com>
> >>
> >> Hi all,
> >>
> >> We noticed that calls to the vadcq and vsbcq intrinsics, both of
> >> which use __builtin_arm_set_fpscr_nzcvqc to set the Carry flag in
> >> the FPSCR, would produce the following code:
> >>
> >> ```
> >> < r2 is the *carry input >
> >> vmrs r3, FPSCR_nzcvqc
> >> bic r3, r3, #536870912
> >> orr r3, r3, r2, lsl #29
> >> vmsr FPSCR_nzcvqc, r3
> >> ```
> >>
> >> when the MVE ACLE instead gives a different instruction sequence of:
> >> ```
> >> < Rt is the *carry input >
> >> VMRS Rs,FPSCR_nzcvqc
> >> BFI Rs,Rt,#29,#1
> >> VMSR FPSCR_nzcvqc,Rs
> >> ```
> >>
> >> the bic + orr pair is slower and it's also wrong, because, if the
> >> *carry input is greater than 1, then we risk overwriting the top two
> >> bits of the FPSCR register (the N and Z flags).
> >>
> >> This turned out to be a problem in the header file and the solution was
> >> to simply add a `& 0x1u` to the `*carry` input: then the compiler knows
> >> that we only care about the lowest bit and can optimise to a BFI.
> >>
> >> Ok for trunk?
> > Ok, but I think this needs testsuite coverage for the bug?
> > Thanks,
> > Kyrill
>
> So this can be seen in the new vadcq* , vsbcq* tests:
>
> ** ...
> ** vmrs (?:ip|fp|r[0-9]+), FPSCR_nzcvqc(?: @.*|)
> ** ...
> ** bfi (?:ip|fp|r[0-9]+), (?:ip|fp|r[0-9]+), #29, #1(?: @.*|)
> ** ...
> ** vmsr FPSCR_nzcvqc, (?:ip|fp|r[0-9]+)(?: @.*|)
> ** ...
>
> The fact that there's a BFI there rather than the BIC + ORR shows
> that this has now been optimised by the compiler and the bug isn't
> present in those intrinsics any longer... Sorry, I should have linked
> that in better in our patch series!
>
> Added a runtest, also, as it was fairly trivial to write it out :)
Ok.
Thanks,
Kyrill
>
> Thanks,
> Stam
>
> >
> >> Thanks,
> >> Stam Markianos-Wright
> >>
> >> gcc/ChangeLog:
> >>
> >> * config/arm/arm_mve.h (__arm_vadcq_s32): Fix arithmetic.
> >> (__arm_vadcq_u32): Likewise.
> >> (__arm_vadcq_m_s32): Likewise.
> >> (__arm_vadcq_m_u32): Likewise.
> >> (__arm_vsbcq_s32): Likewise.
> >> (__arm_vsbcq_u32): Likewise.
> >> (__arm_vsbcq_m_s32): Likewise.
> >> (__arm_vsbcq_m_u32): Likewise.
> >> ---
> >> gcc/config/arm/arm_mve.h | 16 ++++++++--------
> >> 1 file changed, 8 insertions(+), 8 deletions(-)
> >>
> >> diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
> >> index 1262d668121..8778216304b 100644
> >> --- a/gcc/config/arm/arm_mve.h
> >> +++ b/gcc/config/arm/arm_mve.h
> >> @@ -16055,7 +16055,7 @@ __extension__ extern __inline int32x4_t
> >> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> >> __arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
> >> {
> >> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | (*__carry << 29));
> >> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> >> int32x4_t __res = __builtin_mve_vadcq_sv4si (__a, __b);
> >> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> >> return __res;
> >> @@ -16065,7 +16065,7 @@ __extension__ extern __inline uint32x4_t
> >> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> >> __arm_vadcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
> >> {
> >> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | (*__carry << 29));
> >> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> >> uint32x4_t __res = __builtin_mve_vadcq_uv4si (__a, __b);
> >> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> >> return __res;
> >> @@ -16075,7 +16075,7 @@ __extension__ extern __inline int32x4_t
> >> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> >> __arm_vadcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b,
> >> unsigned * __carry, mve_pred16_t __p)
> >> {
> >> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | (*__carry << 29));
> >> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> >> int32x4_t __res = __builtin_mve_vadcq_m_sv4si (__inactive, __a, __b,
> __p);
> >> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> >> return __res;
> >> @@ -16085,7 +16085,7 @@ __extension__ extern __inline uint32x4_t
> >> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> >> __arm_vadcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t
> __b,
> >> unsigned * __carry, mve_pred16_t __p)
> >> {
> >> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | (*__carry << 29));
> >> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> >> uint32x4_t __res = __builtin_mve_vadcq_m_uv4si (__inactive, __a, __b,
> >> __p);
> >> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> >> return __res;
> >> @@ -16131,7 +16131,7 @@ __extension__ extern __inline int32x4_t
> >> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> >> __arm_vsbcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
> >> {
> >> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | (*__carry << 29));
> >> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> >> int32x4_t __res = __builtin_mve_vsbcq_sv4si (__a, __b);
> >> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> >> return __res;
> >> @@ -16141,7 +16141,7 @@ __extension__ extern __inline uint32x4_t
> >> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> >> __arm_vsbcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
> >> {
> >> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | (*__carry << 29));
> >> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> >> uint32x4_t __res = __builtin_mve_vsbcq_uv4si (__a, __b);
> >> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> >> return __res;
> >> @@ -16151,7 +16151,7 @@ __extension__ extern __inline int32x4_t
> >> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> >> __arm_vsbcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b,
> >> unsigned * __carry, mve_pred16_t __p)
> >> {
> >> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | (*__carry << 29));
> >> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> >> int32x4_t __res = __builtin_mve_vsbcq_m_sv4si (__inactive, __a, __b,
> __p);
> >> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> >> return __res;
> >> @@ -16161,7 +16161,7 @@ __extension__ extern __inline uint32x4_t
> >> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> >> __arm_vsbcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t
> __b,
> >> unsigned * __carry, mve_pred16_t __p)
> >> {
> >> - __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | (*__carry << 29));
> >> + __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () &
> >> ~0x20000000u) | ((*__carry & 0x1u) << 29));
> >> uint32x4_t __res = __builtin_mve_vsbcq_m_uv4si (__inactive, __a, __b,
> >> __p);
> >> *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> >> return __res;
> >> --
> >> 2.25.1
diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
index 1262d668121..8778216304b 100644
--- a/gcc/config/arm/arm_mve.h
+++ b/gcc/config/arm/arm_mve.h
@@ -16055,7 +16055,7 @@ __extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
{
- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
+ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
int32x4_t __res = __builtin_mve_vadcq_sv4si (__a, __b);
*__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
return __res;
@@ -16065,7 +16065,7 @@ __extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
{
- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
+ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
uint32x4_t __res = __builtin_mve_vadcq_uv4si (__a, __b);
*__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
return __res;
@@ -16075,7 +16075,7 @@ __extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
{
- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
+ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
int32x4_t __res = __builtin_mve_vadcq_m_sv4si (__inactive, __a, __b, __p);
*__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
return __res;
@@ -16085,7 +16085,7 @@ __extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
{
- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
+ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
uint32x4_t __res = __builtin_mve_vadcq_m_uv4si (__inactive, __a, __b, __p);
*__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
return __res;
@@ -16131,7 +16131,7 @@ __extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
{
- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
+ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
int32x4_t __res = __builtin_mve_vsbcq_sv4si (__a, __b);
*__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
return __res;
@@ -16141,7 +16141,7 @@ __extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
{
- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
+ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
uint32x4_t __res = __builtin_mve_vsbcq_uv4si (__a, __b);
*__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
return __res;
@@ -16151,7 +16151,7 @@ __extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
{
- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
+ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
int32x4_t __res = __builtin_mve_vsbcq_m_sv4si (__inactive, __a, __b, __p);
*__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
return __res;
@@ -16161,7 +16161,7 @@ __extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
{
- __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
+ __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
uint32x4_t __res = __builtin_mve_vsbcq_m_uv4si (__inactive, __a, __b, __p);
*__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
return __res;