[03/23] arm: [MVE intrinsics] rework vrshlq vqrshlq
Commit Message
Implement vrshlq, vqrshlq using the new MVE builtins framework.
2022-09-08 Christophe Lyon <christophe.lyon@arm.com>
gcc/
* config/arm/arm-mve-builtins-base.cc (vqrshlq, vrshlq): New.
* config/arm/arm-mve-builtins-base.def (vqrshlq, vrshlq): New.
* config/arm/arm-mve-builtins-base.h (vqrshlq, vrshlq): New.
* config/arm/arm-mve-builtins.cc (has_inactive_argument): Handle
vqrshlq, vrshlq.
* config/arm/arm_mve.h (vrshlq): Remove.
(vrshlq_m_n): Remove.
(vrshlq_m): Remove.
(vrshlq_x): Remove.
(vrshlq_u8): Remove.
(vrshlq_n_u8): Remove.
(vrshlq_s8): Remove.
(vrshlq_n_s8): Remove.
(vrshlq_u16): Remove.
(vrshlq_n_u16): Remove.
(vrshlq_s16): Remove.
(vrshlq_n_s16): Remove.
(vrshlq_u32): Remove.
(vrshlq_n_u32): Remove.
(vrshlq_s32): Remove.
(vrshlq_n_s32): Remove.
(vrshlq_m_n_u8): Remove.
(vrshlq_m_n_s8): Remove.
(vrshlq_m_n_u16): Remove.
(vrshlq_m_n_s16): Remove.
(vrshlq_m_n_u32): Remove.
(vrshlq_m_n_s32): Remove.
(vrshlq_m_s8): Remove.
(vrshlq_m_s32): Remove.
(vrshlq_m_s16): Remove.
(vrshlq_m_u8): Remove.
(vrshlq_m_u32): Remove.
(vrshlq_m_u16): Remove.
(vrshlq_x_s8): Remove.
(vrshlq_x_s16): Remove.
(vrshlq_x_s32): Remove.
(vrshlq_x_u8): Remove.
(vrshlq_x_u16): Remove.
(vrshlq_x_u32): Remove.
(__arm_vrshlq_u8): Remove.
(__arm_vrshlq_n_u8): Remove.
(__arm_vrshlq_s8): Remove.
(__arm_vrshlq_n_s8): Remove.
(__arm_vrshlq_u16): Remove.
(__arm_vrshlq_n_u16): Remove.
(__arm_vrshlq_s16): Remove.
(__arm_vrshlq_n_s16): Remove.
(__arm_vrshlq_u32): Remove.
(__arm_vrshlq_n_u32): Remove.
(__arm_vrshlq_s32): Remove.
(__arm_vrshlq_n_s32): Remove.
(__arm_vrshlq_m_n_u8): Remove.
(__arm_vrshlq_m_n_s8): Remove.
(__arm_vrshlq_m_n_u16): Remove.
(__arm_vrshlq_m_n_s16): Remove.
(__arm_vrshlq_m_n_u32): Remove.
(__arm_vrshlq_m_n_s32): Remove.
(__arm_vrshlq_m_s8): Remove.
(__arm_vrshlq_m_s32): Remove.
(__arm_vrshlq_m_s16): Remove.
(__arm_vrshlq_m_u8): Remove.
(__arm_vrshlq_m_u32): Remove.
(__arm_vrshlq_m_u16): Remove.
(__arm_vrshlq_x_s8): Remove.
(__arm_vrshlq_x_s16): Remove.
(__arm_vrshlq_x_s32): Remove.
(__arm_vrshlq_x_u8): Remove.
(__arm_vrshlq_x_u16): Remove.
(__arm_vrshlq_x_u32): Remove.
(__arm_vrshlq): Remove.
(__arm_vrshlq_m_n): Remove.
(__arm_vrshlq_m): Remove.
(__arm_vrshlq_x): Remove.
(vqrshlq): Remove.
(vqrshlq_m_n): Remove.
(vqrshlq_m): Remove.
(vqrshlq_u8): Remove.
(vqrshlq_n_u8): Remove.
(vqrshlq_s8): Remove.
(vqrshlq_n_s8): Remove.
(vqrshlq_u16): Remove.
(vqrshlq_n_u16): Remove.
(vqrshlq_s16): Remove.
(vqrshlq_n_s16): Remove.
(vqrshlq_u32): Remove.
(vqrshlq_n_u32): Remove.
(vqrshlq_s32): Remove.
(vqrshlq_n_s32): Remove.
(vqrshlq_m_n_u8): Remove.
(vqrshlq_m_n_s8): Remove.
(vqrshlq_m_n_u16): Remove.
(vqrshlq_m_n_s16): Remove.
(vqrshlq_m_n_u32): Remove.
(vqrshlq_m_n_s32): Remove.
(vqrshlq_m_s8): Remove.
(vqrshlq_m_s32): Remove.
(vqrshlq_m_s16): Remove.
(vqrshlq_m_u8): Remove.
(vqrshlq_m_u32): Remove.
(vqrshlq_m_u16): Remove.
(__arm_vqrshlq_u8): Remove.
(__arm_vqrshlq_n_u8): Remove.
(__arm_vqrshlq_s8): Remove.
(__arm_vqrshlq_n_s8): Remove.
(__arm_vqrshlq_u16): Remove.
(__arm_vqrshlq_n_u16): Remove.
(__arm_vqrshlq_s16): Remove.
(__arm_vqrshlq_n_s16): Remove.
(__arm_vqrshlq_u32): Remove.
(__arm_vqrshlq_n_u32): Remove.
(__arm_vqrshlq_s32): Remove.
(__arm_vqrshlq_n_s32): Remove.
(__arm_vqrshlq_m_n_u8): Remove.
(__arm_vqrshlq_m_n_s8): Remove.
(__arm_vqrshlq_m_n_u16): Remove.
(__arm_vqrshlq_m_n_s16): Remove.
(__arm_vqrshlq_m_n_u32): Remove.
(__arm_vqrshlq_m_n_s32): Remove.
(__arm_vqrshlq_m_s8): Remove.
(__arm_vqrshlq_m_s32): Remove.
(__arm_vqrshlq_m_s16): Remove.
(__arm_vqrshlq_m_u8): Remove.
(__arm_vqrshlq_m_u32): Remove.
(__arm_vqrshlq_m_u16): Remove.
(__arm_vqrshlq): Remove.
(__arm_vqrshlq_m_n): Remove.
(__arm_vqrshlq_m): Remove.
---
gcc/config/arm/arm-mve-builtins-base.cc | 2 +
gcc/config/arm/arm-mve-builtins-base.def | 2 +
gcc/config/arm/arm-mve-builtins-base.h | 2 +
gcc/config/arm/arm-mve-builtins.cc | 4 +-
gcc/config/arm/arm_mve.h | 969 +----------------------
5 files changed, 18 insertions(+), 961 deletions(-)
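
Not part of the patch, just context for review: a minimal usage sketch of the reworked intrinsics, assuming an MVE-enabled toolchain (e.g. -march=armv8.1-m.main+mve -mfloat-abi=hard). The wrapper function names below are illustrative only; the intrinsics keep their existing ACLE signatures, and the rework only changes how they are implemented inside the compiler.

#include <arm_mve.h>

/* Per-lane shift by a signed count, with rounding on right (negative-count)
   shifts; the polymorphic name resolves to vrshlq_s16.  */
int16x8_t
shift_s16 (int16x8_t a, int16x8_t b)
{
  return vrshlq (a, b);
}

/* Saturating, rounding variant: unsigned data, signed shift vector
   (resolves to vqrshlq_u32).  */
uint32x4_t
shift_u32 (uint32x4_t a, int32x4_t b)
{
  return vqrshlq (a, b);
}

/* Predicated form: lanes whose predicate bit is clear take the
   corresponding lane of INACTIVE (resolves to vrshlq_m_s8).  */
int8x16_t
shift_m_s8 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
{
  return vrshlq_m (inactive, a, b, p);
}

/* Scalar-shift predicated form: there is no separate inactive argument,
   false-predicated lanes keep A, which is why has_inactive_argument
   returns false for the MODE_n variants in the patch below.  */
uint8x16_t
shift_m_n_u8 (uint8x16_t a, int32_t b, mve_pred16_t p)
{
  return vrshlq_m_n_u8 (a, b, p);
}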
Comments
> -----Original Message-----
> From: Christophe Lyon <christophe.lyon@arm.com>
> Sent: Friday, May 5, 2023 9:39 AM
> To: gcc-patches@gcc.gnu.org; Kyrylo Tkachov <Kyrylo.Tkachov@arm.com>;
> Richard Earnshaw <Richard.Earnshaw@arm.com>; Richard Sandiford
> <Richard.Sandiford@arm.com>
> Cc: Christophe Lyon <Christophe.Lyon@arm.com>
> Subject: [PATCH 03/23] arm: [MVE intrinsics] rework vrshlq vqrshlq
>
> Implement vrshlq, vqrshlq using the new MVE builtins framework.
Ok.
Thanks,
Kyrill
>
> diff --git a/gcc/config/arm/arm-mve-builtins-base.cc b/gcc/config/arm/arm-mve-builtins-base.cc
> index de0cdb4229b..f5e48519b19 100644
> --- a/gcc/config/arm/arm-mve-builtins-base.cc
> +++ b/gcc/config/arm/arm-mve-builtins-base.cc
> @@ -157,10 +157,12 @@ FUNCTION_WITH_RTX_M_N (vmulq, MULT, VMULQ)
> FUNCTION_WITH_RTX_M_N_NO_N_F (vorrq, IOR, VORRQ)
> FUNCTION_WITH_M_N_NO_F (vqaddq, VQADDQ)
> FUNCTION_WITH_M_N_NO_U_F (vqdmulhq, VQDMULHQ)
> +FUNCTION_WITH_M_N_NO_F (vqrshlq, VQRSHLQ)
> FUNCTION_WITH_M_N_NO_F (vqsubq, VQSUBQ)
> FUNCTION (vreinterpretq, vreinterpretq_impl,)
> FUNCTION_WITHOUT_N_NO_F (vrhaddq, VRHADDQ)
> FUNCTION_WITHOUT_N_NO_F (vrmulhq, VRMULHQ)
> +FUNCTION_WITH_M_N_NO_F (vrshlq, VRSHLQ)
> FUNCTION_WITH_RTX_M_N (vsubq, MINUS, VSUBQ)
> FUNCTION (vuninitializedq, vuninitializedq_impl,)
>
> diff --git a/gcc/config/arm/arm-mve-builtins-base.def b/gcc/config/arm/arm-mve-builtins-base.def
> index d256f3ebb2d..e6dc2b00aaa 100644
> --- a/gcc/config/arm/arm-mve-builtins-base.def
> +++ b/gcc/config/arm/arm-mve-builtins-base.def
> @@ -29,10 +29,12 @@ DEF_MVE_FUNCTION (vmulq, binary_opt_n, all_integer, mx_or_none)
> DEF_MVE_FUNCTION (vorrq, binary_orrq, all_integer, mx_or_none)
> DEF_MVE_FUNCTION (vqaddq, binary_opt_n, all_integer, m_or_none)
> DEF_MVE_FUNCTION (vqdmulhq, binary_opt_n, all_signed, m_or_none)
> +DEF_MVE_FUNCTION (vqrshlq, binary_round_lshift, all_integer, m_or_none)
> DEF_MVE_FUNCTION (vqsubq, binary_opt_n, all_integer, m_or_none)
> DEF_MVE_FUNCTION (vreinterpretq, unary_convert, reinterpret_integer, none)
> DEF_MVE_FUNCTION (vrhaddq, binary, all_integer, mx_or_none)
> DEF_MVE_FUNCTION (vrmulhq, binary, all_integer, mx_or_none)
> +DEF_MVE_FUNCTION (vrshlq, binary_round_lshift, all_integer, mx_or_none)
> DEF_MVE_FUNCTION (vsubq, binary_opt_n, all_integer, mx_or_none)
> DEF_MVE_FUNCTION (vuninitializedq, inherent, all_integer_with_64, none)
> #undef REQUIRES_FLOAT
> diff --git a/gcc/config/arm/arm-mve-builtins-base.h b/gcc/config/arm/arm-mve-builtins-base.h
> index d64cb5e1dec..31ba3fece82 100644
> --- a/gcc/config/arm/arm-mve-builtins-base.h
> +++ b/gcc/config/arm/arm-mve-builtins-base.h
> @@ -34,10 +34,12 @@ extern const function_base *const vmulq;
> extern const function_base *const vorrq;
> extern const function_base *const vqaddq;
> extern const function_base *const vqdmulhq;
> +extern const function_base *const vqrshlq;
> extern const function_base *const vqsubq;
> extern const function_base *const vreinterpretq;
> extern const function_base *const vrhaddq;
> extern const function_base *const vrmulhq;
> +extern const function_base *const vrshlq;
> extern const function_base *const vsubq;
> extern const function_base *const vuninitializedq;
>
> diff --git a/gcc/config/arm/arm-mve-builtins.cc b/gcc/config/arm/arm-mve-builtins.cc
> index 0708d4fa94a..91b3ae71f94 100644
> --- a/gcc/config/arm/arm-mve-builtins.cc
> +++ b/gcc/config/arm/arm-mve-builtins.cc
> @@ -669,7 +669,9 @@ function_instance::has_inactive_argument () const
> if (pred != PRED_m)
> return false;
>
> - if (base == functions::vorrq && mode_suffix_id == MODE_n)
> + if ((base == functions::vorrq && mode_suffix_id == MODE_n)
> + || (base == functions::vqrshlq && mode_suffix_id == MODE_n)
> + || (base == functions::vrshlq && mode_suffix_id == MODE_n))
> return false;
>
> return true;
> diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
> index 9c5d14794a1..636945d6ef0 100644
> --- a/gcc/config/arm/arm_mve.h
> +++ b/gcc/config/arm/arm_mve.h
> @@ -79,10 +79,8 @@
> #define vaddvaq(__a, __b) __arm_vaddvaq(__a, __b)
> #define vabdq(__a, __b) __arm_vabdq(__a, __b)
> #define vshlq_r(__a, __b) __arm_vshlq_r(__a, __b)
> -#define vrshlq(__a, __b) __arm_vrshlq(__a, __b)
> #define vqshlq(__a, __b) __arm_vqshlq(__a, __b)
> #define vqshlq_r(__a, __b) __arm_vqshlq_r(__a, __b)
> -#define vqrshlq(__a, __b) __arm_vqrshlq(__a, __b)
> #define vminavq(__a, __b) __arm_vminavq(__a, __b)
> #define vminaq(__a, __b) __arm_vminaq(__a, __b)
> #define vmaxavq(__a, __b) __arm_vmaxavq(__a, __b)
> @@ -153,9 +151,7 @@
> #define vsriq(__a, __b, __imm) __arm_vsriq(__a, __b, __imm)
> #define vsliq(__a, __b, __imm) __arm_vsliq(__a, __b, __imm)
> #define vshlq_m_r(__a, __b, __p) __arm_vshlq_m_r(__a, __b, __p)
> -#define vrshlq_m_n(__a, __b, __p) __arm_vrshlq_m_n(__a, __b, __p)
> #define vqshlq_m_r(__a, __b, __p) __arm_vqshlq_m_r(__a, __b, __p)
> -#define vqrshlq_m_n(__a, __b, __p) __arm_vqrshlq_m_n(__a, __b, __p)
> #define vminavq_p(__a, __b, __p) __arm_vminavq_p(__a, __b, __p)
> #define vminaq_m(__a, __b, __p) __arm_vminaq_m(__a, __b, __p)
> #define vmaxavq_p(__a, __b, __p) __arm_vmaxavq_p(__a, __b, __p)
> @@ -254,10 +250,8 @@
> #define vqrdmlsdhq_m(__inactive, __a, __b, __p)
> __arm_vqrdmlsdhq_m(__inactive, __a, __b, __p)
> #define vqrdmlsdhxq_m(__inactive, __a, __b, __p)
> __arm_vqrdmlsdhxq_m(__inactive, __a, __b, __p)
> #define vqrdmulhq_m(__inactive, __a, __b, __p)
> __arm_vqrdmulhq_m(__inactive, __a, __b, __p)
> -#define vqrshlq_m(__inactive, __a, __b, __p) __arm_vqrshlq_m(__inactive,
> __a, __b, __p)
> #define vqshlq_m_n(__inactive, __a, __imm, __p)
> __arm_vqshlq_m_n(__inactive, __a, __imm, __p)
> #define vqshlq_m(__inactive, __a, __b, __p) __arm_vqshlq_m(__inactive,
> __a, __b, __p)
> -#define vrshlq_m(__inactive, __a, __b, __p) __arm_vrshlq_m(__inactive,
> __a, __b, __p)
> #define vrshrq_m(__inactive, __a, __imm, __p) __arm_vrshrq_m(__inactive,
> __a, __imm, __p)
> #define vshlq_m_n(__inactive, __a, __imm, __p)
> __arm_vshlq_m_n(__inactive, __a, __imm, __p)
> #define vshrq_m(__inactive, __a, __imm, __p) __arm_vshrq_m(__inactive,
> __a, __imm, __p)
> @@ -385,7 +379,6 @@
> #define vrev16q_x(__a, __p) __arm_vrev16q_x(__a, __p)
> #define vrev32q_x(__a, __p) __arm_vrev32q_x(__a, __p)
> #define vrev64q_x(__a, __p) __arm_vrev64q_x(__a, __p)
> -#define vrshlq_x(__a, __b, __p) __arm_vrshlq_x(__a, __b, __p)
> #define vshllbq_x(__a, __imm, __p) __arm_vshllbq_x(__a, __imm, __p)
> #define vshlltq_x(__a, __imm, __p) __arm_vshlltq_x(__a, __imm, __p)
> #define vshlq_x(__a, __b, __p) __arm_vshlq_x(__a, __b, __p)
> @@ -663,12 +656,8 @@
> #define vaddvaq_u8(__a, __b) __arm_vaddvaq_u8(__a, __b)
> #define vabdq_u8(__a, __b) __arm_vabdq_u8(__a, __b)
> #define vshlq_r_u8(__a, __b) __arm_vshlq_r_u8(__a, __b)
> -#define vrshlq_u8(__a, __b) __arm_vrshlq_u8(__a, __b)
> -#define vrshlq_n_u8(__a, __b) __arm_vrshlq_n_u8(__a, __b)
> #define vqshlq_u8(__a, __b) __arm_vqshlq_u8(__a, __b)
> #define vqshlq_r_u8(__a, __b) __arm_vqshlq_r_u8(__a, __b)
> -#define vqrshlq_u8(__a, __b) __arm_vqrshlq_u8(__a, __b)
> -#define vqrshlq_n_u8(__a, __b) __arm_vqrshlq_n_u8(__a, __b)
> #define vminavq_s8(__a, __b) __arm_vminavq_s8(__a, __b)
> #define vminaq_s8(__a, __b) __arm_vminaq_s8(__a, __b)
> #define vmaxavq_s8(__a, __b) __arm_vmaxavq_s8(__a, __b)
> @@ -691,12 +680,8 @@
> #define vqshluq_n_s8(__a, __imm) __arm_vqshluq_n_s8(__a, __imm)
> #define vaddvq_p_s8(__a, __p) __arm_vaddvq_p_s8(__a, __p)
> #define vshlq_r_s8(__a, __b) __arm_vshlq_r_s8(__a, __b)
> -#define vrshlq_s8(__a, __b) __arm_vrshlq_s8(__a, __b)
> -#define vrshlq_n_s8(__a, __b) __arm_vrshlq_n_s8(__a, __b)
> #define vqshlq_s8(__a, __b) __arm_vqshlq_s8(__a, __b)
> #define vqshlq_r_s8(__a, __b) __arm_vqshlq_r_s8(__a, __b)
> -#define vqrshlq_s8(__a, __b) __arm_vqrshlq_s8(__a, __b)
> -#define vqrshlq_n_s8(__a, __b) __arm_vqrshlq_n_s8(__a, __b)
> #define vqrdmulhq_s8(__a, __b) __arm_vqrdmulhq_s8(__a, __b)
> #define vqrdmulhq_n_s8(__a, __b) __arm_vqrdmulhq_n_s8(__a, __b)
> #define vornq_s8(__a, __b) __arm_vornq_s8(__a, __b)
> @@ -743,12 +728,8 @@
> #define vaddvaq_u16(__a, __b) __arm_vaddvaq_u16(__a, __b)
> #define vabdq_u16(__a, __b) __arm_vabdq_u16(__a, __b)
> #define vshlq_r_u16(__a, __b) __arm_vshlq_r_u16(__a, __b)
> -#define vrshlq_u16(__a, __b) __arm_vrshlq_u16(__a, __b)
> -#define vrshlq_n_u16(__a, __b) __arm_vrshlq_n_u16(__a, __b)
> #define vqshlq_u16(__a, __b) __arm_vqshlq_u16(__a, __b)
> #define vqshlq_r_u16(__a, __b) __arm_vqshlq_r_u16(__a, __b)
> -#define vqrshlq_u16(__a, __b) __arm_vqrshlq_u16(__a, __b)
> -#define vqrshlq_n_u16(__a, __b) __arm_vqrshlq_n_u16(__a, __b)
> #define vminavq_s16(__a, __b) __arm_vminavq_s16(__a, __b)
> #define vminaq_s16(__a, __b) __arm_vminaq_s16(__a, __b)
> #define vmaxavq_s16(__a, __b) __arm_vmaxavq_s16(__a, __b)
> @@ -771,12 +752,8 @@
> #define vqshluq_n_s16(__a, __imm) __arm_vqshluq_n_s16(__a, __imm)
> #define vaddvq_p_s16(__a, __p) __arm_vaddvq_p_s16(__a, __p)
> #define vshlq_r_s16(__a, __b) __arm_vshlq_r_s16(__a, __b)
> -#define vrshlq_s16(__a, __b) __arm_vrshlq_s16(__a, __b)
> -#define vrshlq_n_s16(__a, __b) __arm_vrshlq_n_s16(__a, __b)
> #define vqshlq_s16(__a, __b) __arm_vqshlq_s16(__a, __b)
> #define vqshlq_r_s16(__a, __b) __arm_vqshlq_r_s16(__a, __b)
> -#define vqrshlq_s16(__a, __b) __arm_vqrshlq_s16(__a, __b)
> -#define vqrshlq_n_s16(__a, __b) __arm_vqrshlq_n_s16(__a, __b)
> #define vqrdmulhq_s16(__a, __b) __arm_vqrdmulhq_s16(__a, __b)
> #define vqrdmulhq_n_s16(__a, __b) __arm_vqrdmulhq_n_s16(__a, __b)
> #define vornq_s16(__a, __b) __arm_vornq_s16(__a, __b)
> @@ -823,12 +800,8 @@
> #define vaddvaq_u32(__a, __b) __arm_vaddvaq_u32(__a, __b)
> #define vabdq_u32(__a, __b) __arm_vabdq_u32(__a, __b)
> #define vshlq_r_u32(__a, __b) __arm_vshlq_r_u32(__a, __b)
> -#define vrshlq_u32(__a, __b) __arm_vrshlq_u32(__a, __b)
> -#define vrshlq_n_u32(__a, __b) __arm_vrshlq_n_u32(__a, __b)
> #define vqshlq_u32(__a, __b) __arm_vqshlq_u32(__a, __b)
> #define vqshlq_r_u32(__a, __b) __arm_vqshlq_r_u32(__a, __b)
> -#define vqrshlq_u32(__a, __b) __arm_vqrshlq_u32(__a, __b)
> -#define vqrshlq_n_u32(__a, __b) __arm_vqrshlq_n_u32(__a, __b)
> #define vminavq_s32(__a, __b) __arm_vminavq_s32(__a, __b)
> #define vminaq_s32(__a, __b) __arm_vminaq_s32(__a, __b)
> #define vmaxavq_s32(__a, __b) __arm_vmaxavq_s32(__a, __b)
> @@ -851,12 +824,8 @@
> #define vqshluq_n_s32(__a, __imm) __arm_vqshluq_n_s32(__a, __imm)
> #define vaddvq_p_s32(__a, __p) __arm_vaddvq_p_s32(__a, __p)
> #define vshlq_r_s32(__a, __b) __arm_vshlq_r_s32(__a, __b)
> -#define vrshlq_s32(__a, __b) __arm_vrshlq_s32(__a, __b)
> -#define vrshlq_n_s32(__a, __b) __arm_vrshlq_n_s32(__a, __b)
> #define vqshlq_s32(__a, __b) __arm_vqshlq_s32(__a, __b)
> #define vqshlq_r_s32(__a, __b) __arm_vqshlq_r_s32(__a, __b)
> -#define vqrshlq_s32(__a, __b) __arm_vqrshlq_s32(__a, __b)
> -#define vqrshlq_n_s32(__a, __b) __arm_vqrshlq_n_s32(__a, __b)
> #define vqrdmulhq_s32(__a, __b) __arm_vqrdmulhq_s32(__a, __b)
> #define vqrdmulhq_n_s32(__a, __b) __arm_vqrdmulhq_n_s32(__a, __b)
> #define vornq_s32(__a, __b) __arm_vornq_s32(__a, __b)
> @@ -1064,9 +1033,7 @@
> #define vsriq_n_u8(__a, __b, __imm) __arm_vsriq_n_u8(__a, __b, __imm)
> #define vsliq_n_u8(__a, __b, __imm) __arm_vsliq_n_u8(__a, __b, __imm)
> #define vshlq_m_r_u8(__a, __b, __p) __arm_vshlq_m_r_u8(__a, __b, __p)
> -#define vrshlq_m_n_u8(__a, __b, __p) __arm_vrshlq_m_n_u8(__a, __b,
> __p)
> #define vqshlq_m_r_u8(__a, __b, __p) __arm_vqshlq_m_r_u8(__a, __b,
> __p)
> -#define vqrshlq_m_n_u8(__a, __b, __p) __arm_vqrshlq_m_n_u8(__a, __b,
> __p)
> #define vminavq_p_s8(__a, __b, __p) __arm_vminavq_p_s8(__a, __b, __p)
> #define vminaq_m_s8(__a, __b, __p) __arm_vminaq_m_s8(__a, __b, __p)
> #define vmaxavq_p_s8(__a, __b, __p) __arm_vmaxavq_p_s8(__a, __b, __p)
> @@ -1084,10 +1051,8 @@
> #define vcmpeqq_m_s8(__a, __b, __p) __arm_vcmpeqq_m_s8(__a, __b,
> __p)
> #define vcmpeqq_m_n_s8(__a, __b, __p) __arm_vcmpeqq_m_n_s8(__a,
> __b, __p)
> #define vshlq_m_r_s8(__a, __b, __p) __arm_vshlq_m_r_s8(__a, __b, __p)
> -#define vrshlq_m_n_s8(__a, __b, __p) __arm_vrshlq_m_n_s8(__a, __b, __p)
> #define vrev64q_m_s8(__inactive, __a, __p)
> __arm_vrev64q_m_s8(__inactive, __a, __p)
> #define vqshlq_m_r_s8(__a, __b, __p) __arm_vqshlq_m_r_s8(__a, __b, __p)
> -#define vqrshlq_m_n_s8(__a, __b, __p) __arm_vqrshlq_m_n_s8(__a, __b,
> __p)
> #define vqnegq_m_s8(__inactive, __a, __p) __arm_vqnegq_m_s8(__inactive,
> __a, __p)
> #define vqabsq_m_s8(__inactive, __a, __p) __arm_vqabsq_m_s8(__inactive,
> __a, __p)
> #define vnegq_m_s8(__inactive, __a, __p) __arm_vnegq_m_s8(__inactive,
> __a, __p)
> @@ -1147,9 +1112,7 @@
> #define vsriq_n_u16(__a, __b, __imm) __arm_vsriq_n_u16(__a, __b,
> __imm)
> #define vsliq_n_u16(__a, __b, __imm) __arm_vsliq_n_u16(__a, __b,
> __imm)
> #define vshlq_m_r_u16(__a, __b, __p) __arm_vshlq_m_r_u16(__a, __b,
> __p)
> -#define vrshlq_m_n_u16(__a, __b, __p) __arm_vrshlq_m_n_u16(__a, __b,
> __p)
> #define vqshlq_m_r_u16(__a, __b, __p) __arm_vqshlq_m_r_u16(__a, __b,
> __p)
> -#define vqrshlq_m_n_u16(__a, __b, __p) __arm_vqrshlq_m_n_u16(__a,
> __b, __p)
> #define vminavq_p_s16(__a, __b, __p) __arm_vminavq_p_s16(__a, __b,
> __p)
> #define vminaq_m_s16(__a, __b, __p) __arm_vminaq_m_s16(__a, __b, __p)
> #define vmaxavq_p_s16(__a, __b, __p) __arm_vmaxavq_p_s16(__a, __b,
> __p)
> @@ -1167,10 +1130,8 @@
> #define vcmpeqq_m_s16(__a, __b, __p) __arm_vcmpeqq_m_s16(__a, __b,
> __p)
> #define vcmpeqq_m_n_s16(__a, __b, __p) __arm_vcmpeqq_m_n_s16(__a,
> __b, __p)
> #define vshlq_m_r_s16(__a, __b, __p) __arm_vshlq_m_r_s16(__a, __b, __p)
> -#define vrshlq_m_n_s16(__a, __b, __p) __arm_vrshlq_m_n_s16(__a, __b,
> __p)
> #define vrev64q_m_s16(__inactive, __a, __p)
> __arm_vrev64q_m_s16(__inactive, __a, __p)
> #define vqshlq_m_r_s16(__a, __b, __p) __arm_vqshlq_m_r_s16(__a, __b,
> __p)
> -#define vqrshlq_m_n_s16(__a, __b, __p) __arm_vqrshlq_m_n_s16(__a, __b,
> __p)
> #define vqnegq_m_s16(__inactive, __a, __p)
> __arm_vqnegq_m_s16(__inactive, __a, __p)
> #define vqabsq_m_s16(__inactive, __a, __p)
> __arm_vqabsq_m_s16(__inactive, __a, __p)
> #define vnegq_m_s16(__inactive, __a, __p) __arm_vnegq_m_s16(__inactive,
> __a, __p)
> @@ -1230,9 +1191,7 @@
> #define vsriq_n_u32(__a, __b, __imm) __arm_vsriq_n_u32(__a, __b,
> __imm)
> #define vsliq_n_u32(__a, __b, __imm) __arm_vsliq_n_u32(__a, __b,
> __imm)
> #define vshlq_m_r_u32(__a, __b, __p) __arm_vshlq_m_r_u32(__a, __b,
> __p)
> -#define vrshlq_m_n_u32(__a, __b, __p) __arm_vrshlq_m_n_u32(__a, __b,
> __p)
> #define vqshlq_m_r_u32(__a, __b, __p) __arm_vqshlq_m_r_u32(__a, __b,
> __p)
> -#define vqrshlq_m_n_u32(__a, __b, __p) __arm_vqrshlq_m_n_u32(__a,
> __b, __p)
> #define vminavq_p_s32(__a, __b, __p) __arm_vminavq_p_s32(__a, __b,
> __p)
> #define vminaq_m_s32(__a, __b, __p) __arm_vminaq_m_s32(__a, __b, __p)
> #define vmaxavq_p_s32(__a, __b, __p) __arm_vmaxavq_p_s32(__a, __b,
> __p)
> @@ -1250,10 +1209,8 @@
> #define vcmpeqq_m_s32(__a, __b, __p) __arm_vcmpeqq_m_s32(__a, __b,
> __p)
> #define vcmpeqq_m_n_s32(__a, __b, __p) __arm_vcmpeqq_m_n_s32(__a,
> __b, __p)
> #define vshlq_m_r_s32(__a, __b, __p) __arm_vshlq_m_r_s32(__a, __b, __p)
> -#define vrshlq_m_n_s32(__a, __b, __p) __arm_vrshlq_m_n_s32(__a, __b,
> __p)
> #define vrev64q_m_s32(__inactive, __a, __p)
> __arm_vrev64q_m_s32(__inactive, __a, __p)
> #define vqshlq_m_r_s32(__a, __b, __p) __arm_vqshlq_m_r_s32(__a, __b,
> __p)
> -#define vqrshlq_m_n_s32(__a, __b, __p) __arm_vqrshlq_m_n_s32(__a, __b,
> __p)
> #define vqnegq_m_s32(__inactive, __a, __p)
> __arm_vqnegq_m_s32(__inactive, __a, __p)
> #define vqabsq_m_s32(__inactive, __a, __p)
> __arm_vqabsq_m_s32(__inactive, __a, __p)
> #define vnegq_m_s32(__inactive, __a, __p) __arm_vnegq_m_s32(__inactive,
> __a, __p)
> @@ -1646,12 +1603,6 @@
> #define vqrdmulhq_m_s8(__inactive, __a, __b, __p)
> __arm_vqrdmulhq_m_s8(__inactive, __a, __b, __p)
> #define vqrdmulhq_m_s32(__inactive, __a, __b, __p)
> __arm_vqrdmulhq_m_s32(__inactive, __a, __b, __p)
> #define vqrdmulhq_m_s16(__inactive, __a, __b, __p)
> __arm_vqrdmulhq_m_s16(__inactive, __a, __b, __p)
> -#define vqrshlq_m_s8(__inactive, __a, __b, __p)
> __arm_vqrshlq_m_s8(__inactive, __a, __b, __p)
> -#define vqrshlq_m_s32(__inactive, __a, __b, __p)
> __arm_vqrshlq_m_s32(__inactive, __a, __b, __p)
> -#define vqrshlq_m_s16(__inactive, __a, __b, __p)
> __arm_vqrshlq_m_s16(__inactive, __a, __b, __p)
> -#define vqrshlq_m_u8(__inactive, __a, __b, __p)
> __arm_vqrshlq_m_u8(__inactive, __a, __b, __p)
> -#define vqrshlq_m_u32(__inactive, __a, __b, __p)
> __arm_vqrshlq_m_u32(__inactive, __a, __b, __p)
> -#define vqrshlq_m_u16(__inactive, __a, __b, __p)
> __arm_vqrshlq_m_u16(__inactive, __a, __b, __p)
> #define vqshlq_m_n_s8(__inactive, __a, __imm, __p)
> __arm_vqshlq_m_n_s8(__inactive, __a, __imm, __p)
> #define vqshlq_m_n_s32(__inactive, __a, __imm, __p)
> __arm_vqshlq_m_n_s32(__inactive, __a, __imm, __p)
> #define vqshlq_m_n_s16(__inactive, __a, __imm, __p)
> __arm_vqshlq_m_n_s16(__inactive, __a, __imm, __p)
> @@ -1664,12 +1615,6 @@
> #define vqshlq_m_u8(__inactive, __a, __b, __p)
> __arm_vqshlq_m_u8(__inactive, __a, __b, __p)
> #define vqshlq_m_u32(__inactive, __a, __b, __p)
> __arm_vqshlq_m_u32(__inactive, __a, __b, __p)
> #define vqshlq_m_u16(__inactive, __a, __b, __p)
> __arm_vqshlq_m_u16(__inactive, __a, __b, __p)
> -#define vrshlq_m_s8(__inactive, __a, __b, __p)
> __arm_vrshlq_m_s8(__inactive, __a, __b, __p)
> -#define vrshlq_m_s32(__inactive, __a, __b, __p)
> __arm_vrshlq_m_s32(__inactive, __a, __b, __p)
> -#define vrshlq_m_s16(__inactive, __a, __b, __p)
> __arm_vrshlq_m_s16(__inactive, __a, __b, __p)
> -#define vrshlq_m_u8(__inactive, __a, __b, __p)
> __arm_vrshlq_m_u8(__inactive, __a, __b, __p)
> -#define vrshlq_m_u32(__inactive, __a, __b, __p)
> __arm_vrshlq_m_u32(__inactive, __a, __b, __p)
> -#define vrshlq_m_u16(__inactive, __a, __b, __p)
> __arm_vrshlq_m_u16(__inactive, __a, __b, __p)
> #define vrshrq_m_n_s8(__inactive, __a, __imm, __p)
> __arm_vrshrq_m_n_s8(__inactive, __a, __imm, __p)
> #define vrshrq_m_n_s32(__inactive, __a, __imm, __p)
> __arm_vrshrq_m_n_s32(__inactive, __a, __imm, __p)
> #define vrshrq_m_n_s16(__inactive, __a, __imm, __p)
> __arm_vrshrq_m_n_s16(__inactive, __a, __imm, __p)
> @@ -2232,12 +2177,6 @@
> #define vrev64q_x_u8(__a, __p) __arm_vrev64q_x_u8(__a, __p)
> #define vrev64q_x_u16(__a, __p) __arm_vrev64q_x_u16(__a, __p)
> #define vrev64q_x_u32(__a, __p) __arm_vrev64q_x_u32(__a, __p)
> -#define vrshlq_x_s8(__a, __b, __p) __arm_vrshlq_x_s8(__a, __b, __p)
> -#define vrshlq_x_s16(__a, __b, __p) __arm_vrshlq_x_s16(__a, __b, __p)
> -#define vrshlq_x_s32(__a, __b, __p) __arm_vrshlq_x_s32(__a, __b, __p)
> -#define vrshlq_x_u8(__a, __b, __p) __arm_vrshlq_x_u8(__a, __b, __p)
> -#define vrshlq_x_u16(__a, __b, __p) __arm_vrshlq_x_u16(__a, __b, __p)
> -#define vrshlq_x_u32(__a, __b, __p) __arm_vrshlq_x_u32(__a, __b, __p)
> #define vshllbq_x_n_s8(__a, __imm, __p) __arm_vshllbq_x_n_s8(__a,
> __imm, __p)
> #define vshllbq_x_n_s16(__a, __imm, __p) __arm_vshllbq_x_n_s16(__a,
> __imm, __p)
> #define vshllbq_x_n_u8(__a, __imm, __p) __arm_vshllbq_x_n_u8(__a,
> __imm, __p)
> @@ -3300,20 +3239,6 @@ __arm_vshlq_r_u8 (uint8x16_t __a, int32_t __b)
> return __builtin_mve_vshlq_r_uv16qi (__a, __b);
> }
>
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
> -{
> - return __builtin_mve_vrshlq_uv16qi (__a, __b);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_n_u8 (uint8x16_t __a, int32_t __b)
> -{
> - return __builtin_mve_vrshlq_n_uv16qi (__a, __b);
> -}
> -
> __extension__ extern __inline uint8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
> @@ -3328,20 +3253,6 @@ __arm_vqshlq_r_u8 (uint8x16_t __a, int32_t __b)
> return __builtin_mve_vqshlq_r_uv16qi (__a, __b);
> }
>
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
> -{
> - return __builtin_mve_vqrshlq_uv16qi (__a, __b);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_n_u8 (uint8x16_t __a, int32_t __b)
> -{
> - return __builtin_mve_vqrshlq_n_uv16qi (__a, __b);
> -}
> -
> __extension__ extern __inline uint8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vminavq_s8 (uint8_t __a, int8x16_t __b)
> @@ -3496,20 +3407,6 @@ __arm_vshlq_r_s8 (int8x16_t __a, int32_t __b)
> return __builtin_mve_vshlq_r_sv16qi (__a, __b);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_s8 (int8x16_t __a, int8x16_t __b)
> -{
> - return __builtin_mve_vrshlq_sv16qi (__a, __b);
> -}
> -
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_n_s8 (int8x16_t __a, int32_t __b)
> -{
> - return __builtin_mve_vrshlq_n_sv16qi (__a, __b);
> -}
> -
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_s8 (int8x16_t __a, int8x16_t __b)
> @@ -3524,20 +3421,6 @@ __arm_vqshlq_r_s8 (int8x16_t __a, int32_t __b)
> return __builtin_mve_vqshlq_r_sv16qi (__a, __b);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
> -{
> - return __builtin_mve_vqrshlq_sv16qi (__a, __b);
> -}
> -
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_n_s8 (int8x16_t __a, int32_t __b)
> -{
> - return __builtin_mve_vqrshlq_n_sv16qi (__a, __b);
> -}
> -
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqrdmulhq_s8 (int8x16_t __a, int8x16_t __b)
> @@ -3862,20 +3745,6 @@ __arm_vshlq_r_u16 (uint16x8_t __a, int32_t __b)
> return __builtin_mve_vshlq_r_uv8hi (__a, __b);
> }
>
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
> -{
> - return __builtin_mve_vrshlq_uv8hi (__a, __b);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_n_u16 (uint16x8_t __a, int32_t __b)
> -{
> - return __builtin_mve_vrshlq_n_uv8hi (__a, __b);
> -}
> -
> __extension__ extern __inline uint16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
> @@ -3890,20 +3759,6 @@ __arm_vqshlq_r_u16 (uint16x8_t __a, int32_t
> __b)
> return __builtin_mve_vqshlq_r_uv8hi (__a, __b);
> }
>
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
> -{
> - return __builtin_mve_vqrshlq_uv8hi (__a, __b);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_n_u16 (uint16x8_t __a, int32_t __b)
> -{
> - return __builtin_mve_vqrshlq_n_uv8hi (__a, __b);
> -}
> -
> __extension__ extern __inline uint16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vminavq_s16 (uint16_t __a, int16x8_t __b)
> @@ -4058,20 +3913,6 @@ __arm_vshlq_r_s16 (int16x8_t __a, int32_t __b)
> return __builtin_mve_vshlq_r_sv8hi (__a, __b);
> }
>
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_s16 (int16x8_t __a, int16x8_t __b)
> -{
> - return __builtin_mve_vrshlq_sv8hi (__a, __b);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_n_s16 (int16x8_t __a, int32_t __b)
> -{
> - return __builtin_mve_vrshlq_n_sv8hi (__a, __b);
> -}
> -
> __extension__ extern __inline int16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_s16 (int16x8_t __a, int16x8_t __b)
> @@ -4086,20 +3927,6 @@ __arm_vqshlq_r_s16 (int16x8_t __a, int32_t __b)
> return __builtin_mve_vqshlq_r_sv8hi (__a, __b);
> }
>
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
> -{
> - return __builtin_mve_vqrshlq_sv8hi (__a, __b);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_n_s16 (int16x8_t __a, int32_t __b)
> -{
> - return __builtin_mve_vqrshlq_n_sv8hi (__a, __b);
> -}
> -
> __extension__ extern __inline int16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
> @@ -4424,20 +4251,6 @@ __arm_vshlq_r_u32 (uint32x4_t __a, int32_t __b)
> return __builtin_mve_vshlq_r_uv4si (__a, __b);
> }
>
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
> -{
> - return __builtin_mve_vrshlq_uv4si (__a, __b);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_n_u32 (uint32x4_t __a, int32_t __b)
> -{
> - return __builtin_mve_vrshlq_n_uv4si (__a, __b);
> -}
> -
> __extension__ extern __inline uint32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
> @@ -4452,20 +4265,6 @@ __arm_vqshlq_r_u32 (uint32x4_t __a, int32_t
> __b)
> return __builtin_mve_vqshlq_r_uv4si (__a, __b);
> }
>
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
> -{
> - return __builtin_mve_vqrshlq_uv4si (__a, __b);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_n_u32 (uint32x4_t __a, int32_t __b)
> -{
> - return __builtin_mve_vqrshlq_n_uv4si (__a, __b);
> -}
> -
> __extension__ extern __inline uint32_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vminavq_s32 (uint32_t __a, int32x4_t __b)
> @@ -4620,20 +4419,6 @@ __arm_vshlq_r_s32 (int32x4_t __a, int32_t __b)
> return __builtin_mve_vshlq_r_sv4si (__a, __b);
> }
>
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_s32 (int32x4_t __a, int32x4_t __b)
> -{
> - return __builtin_mve_vrshlq_sv4si (__a, __b);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_n_s32 (int32x4_t __a, int32_t __b)
> -{
> - return __builtin_mve_vrshlq_n_sv4si (__a, __b);
> -}
> -
> __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_s32 (int32x4_t __a, int32x4_t __b)
> @@ -4648,20 +4433,6 @@ __arm_vqshlq_r_s32 (int32x4_t __a, int32_t __b)
> return __builtin_mve_vqshlq_r_sv4si (__a, __b);
> }
>
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
> -{
> - return __builtin_mve_vqrshlq_sv4si (__a, __b);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_n_s32 (int32x4_t __a, int32_t __b)
> -{
> - return __builtin_mve_vqrshlq_n_sv4si (__a, __b);
> -}
> -
> __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
> @@ -5633,13 +5404,6 @@ __arm_vshlq_m_r_u8 (uint8x16_t __a, int32_t
> __b, mve_pred16_t __p)
> return __builtin_mve_vshlq_m_r_uv16qi (__a, __b, __p);
> }
>
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_n_uv16qi (__a, __b, __p);
> -}
> -
> __extension__ extern __inline uint8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
> @@ -5647,13 +5411,6 @@ __arm_vqshlq_m_r_u8 (uint8x16_t __a, int32_t
> __b, mve_pred16_t __p)
> return __builtin_mve_vqshlq_m_r_uv16qi (__a, __b, __p);
> }
>
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vqrshlq_m_n_uv16qi (__a, __b, __p);
> -}
> -
> __extension__ extern __inline uint8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vminavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
> @@ -5773,13 +5530,6 @@ __arm_vshlq_m_r_s8 (int8x16_t __a, int32_t __b,
> mve_pred16_t __p)
> return __builtin_mve_vshlq_m_r_sv16qi (__a, __b, __p);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_n_sv16qi (__a, __b, __p);
> -}
> -
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vrev64q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t
> __p)
> @@ -5794,13 +5544,6 @@ __arm_vqshlq_m_r_s8 (int8x16_t __a, int32_t
> __b, mve_pred16_t __p)
> return __builtin_mve_vqshlq_m_r_sv16qi (__a, __b, __p);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vqrshlq_m_n_sv16qi (__a, __b, __p);
> -}
> -
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t
> __p)
> @@ -6215,13 +5958,6 @@ __arm_vshlq_m_r_u16 (uint16x8_t __a, int32_t
> __b, mve_pred16_t __p)
> return __builtin_mve_vshlq_m_r_uv8hi (__a, __b, __p);
> }
>
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_n_uv8hi (__a, __b, __p);
> -}
> -
> __extension__ extern __inline uint16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
> @@ -6229,13 +5965,6 @@ __arm_vqshlq_m_r_u16 (uint16x8_t __a, int32_t
> __b, mve_pred16_t __p)
> return __builtin_mve_vqshlq_m_r_uv8hi (__a, __b, __p);
> }
>
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vqrshlq_m_n_uv8hi (__a, __b, __p);
> -}
> -
> __extension__ extern __inline uint16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vminavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
> @@ -6355,13 +6084,6 @@ __arm_vshlq_m_r_s16 (int16x8_t __a, int32_t
> __b, mve_pred16_t __p)
> return __builtin_mve_vshlq_m_r_sv8hi (__a, __b, __p);
> }
>
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_n_sv8hi (__a, __b, __p);
> -}
> -
> __extension__ extern __inline int16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vrev64q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t
> __p)
> @@ -6376,13 +6098,6 @@ __arm_vqshlq_m_r_s16 (int16x8_t __a, int32_t
> __b, mve_pred16_t __p)
> return __builtin_mve_vqshlq_m_r_sv8hi (__a, __b, __p);
> }
>
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vqrshlq_m_n_sv8hi (__a, __b, __p);
> -}
> -
> __extension__ extern __inline int16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t
> __p)
> @@ -6796,13 +6511,6 @@ __arm_vshlq_m_r_u32 (uint32x4_t __a, int32_t
> __b, mve_pred16_t __p)
> return __builtin_mve_vshlq_m_r_uv4si (__a, __b, __p);
> }
>
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_n_uv4si (__a, __b, __p);
> -}
> -
> __extension__ extern __inline uint32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
> @@ -6810,13 +6518,6 @@ __arm_vqshlq_m_r_u32 (uint32x4_t __a, int32_t
> __b, mve_pred16_t __p)
> return __builtin_mve_vqshlq_m_r_uv4si (__a, __b, __p);
> }
>
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vqrshlq_m_n_uv4si (__a, __b, __p);
> -}
> -
> __extension__ extern __inline uint32_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vminavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
> @@ -6936,13 +6637,6 @@ __arm_vshlq_m_r_s32 (int32x4_t __a, int32_t
> __b, mve_pred16_t __p)
> return __builtin_mve_vshlq_m_r_sv4si (__a, __b, __p);
> }
>
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_n_sv4si (__a, __b, __p);
> -}
> -
> __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vrev64q_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t
> __p)
> @@ -6957,13 +6651,6 @@ __arm_vqshlq_m_r_s32 (int32x4_t __a, int32_t
> __b, mve_pred16_t __p)
> return __builtin_mve_vqshlq_m_r_sv4si (__a, __b, __p);
> }
>
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vqrshlq_m_n_sv4si (__a, __b, __p);
> -}
> -
> __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t
> __p)
> @@ -9029,48 +8716,6 @@ __arm_vqrdmulhq_m_s16 (int16x8_t __inactive,
> int16x8_t __a, int16x8_t __b, mve_p
> return __builtin_mve_vqrdmulhq_m_sv8hi (__inactive, __a, __b, __p);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b,
> mve_pred16_t __p)
> -{
> - return __builtin_mve_vqrshlq_m_sv16qi (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b,
> mve_pred16_t __p)
> -{
> - return __builtin_mve_vqrshlq_m_sv4si (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b,
> mve_pred16_t __p)
> -{
> - return __builtin_mve_vqrshlq_m_sv8hi (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b,
> mve_pred16_t __p)
> -{
> - return __builtin_mve_vqrshlq_m_uv16qi (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b,
> mve_pred16_t __p)
> -{
> - return __builtin_mve_vqrshlq_m_uv4si (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b,
> mve_pred16_t __p)
> -{
> - return __builtin_mve_vqrshlq_m_uv8hi (__inactive, __a, __b, __p);
> -}
> -
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm,
> mve_pred16_t __p)
> @@ -9155,48 +8800,6 @@ __arm_vqshlq_m_u16 (uint16x8_t __inactive,
> uint16x8_t __a, int16x8_t __b, mve_pr
> return __builtin_mve_vqshlq_m_uv8hi (__inactive, __a, __b, __p);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b,
> mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_sv16qi (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b,
> mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_sv4si (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b,
> mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_sv8hi (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b,
> mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_uv16qi (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b,
> mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_uv4si (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b,
> mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_uv8hi (__inactive, __a, __b, __p);
> -}
> -
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vrshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm,
> mve_pred16_t __p)
> @@ -12648,48 +12251,6 @@ __arm_vrev64q_x_u32 (uint32x4_t __a,
> mve_pred16_t __p)
> return __builtin_mve_vrev64q_m_uv4si (__arm_vuninitializedq_u32 (), __a,
> __p);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_sv16qi (__arm_vuninitializedq_s8 (), __a,
> __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_sv8hi (__arm_vuninitializedq_s16 (), __a,
> __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_sv4si (__arm_vuninitializedq_s32 (), __a,
> __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_uv16qi (__arm_vuninitializedq_u8 (), __a,
> __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_uv8hi (__arm_vuninitializedq_u16 (), __a,
> __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> - return __builtin_mve_vrshlq_m_uv4si (__arm_vuninitializedq_u32 (), __a,
> __b, __p);
> -}
> -
> __extension__ extern __inline int16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vshllbq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
> @@ -17203,20 +16764,6 @@ __arm_vshlq_r (uint8x16_t __a, int32_t __b)
> return __arm_vshlq_r_u8 (__a, __b);
> }
>
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq (uint8x16_t __a, int8x16_t __b)
> -{
> - return __arm_vrshlq_u8 (__a, __b);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq (uint8x16_t __a, int32_t __b)
> -{
> - return __arm_vrshlq_n_u8 (__a, __b);
> -}
> -
> __extension__ extern __inline uint8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq (uint8x16_t __a, int8x16_t __b)
> @@ -17231,20 +16778,6 @@ __arm_vqshlq_r (uint8x16_t __a, int32_t __b)
> return __arm_vqshlq_r_u8 (__a, __b);
> }
>
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq (uint8x16_t __a, int8x16_t __b)
> -{
> - return __arm_vqrshlq_u8 (__a, __b);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq (uint8x16_t __a, int32_t __b)
> -{
> - return __arm_vqrshlq_n_u8 (__a, __b);
> -}
> -
> __extension__ extern __inline uint8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vminavq (uint8_t __a, int8x16_t __b)
> @@ -17399,20 +16932,6 @@ __arm_vshlq_r (int8x16_t __a, int32_t __b)
> return __arm_vshlq_r_s8 (__a, __b);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq (int8x16_t __a, int8x16_t __b)
> -{
> - return __arm_vrshlq_s8 (__a, __b);
> -}
> -
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq (int8x16_t __a, int32_t __b)
> -{
> - return __arm_vrshlq_n_s8 (__a, __b);
> -}
> -
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq (int8x16_t __a, int8x16_t __b)
> @@ -17427,20 +16946,6 @@ __arm_vqshlq_r (int8x16_t __a, int32_t __b)
> return __arm_vqshlq_r_s8 (__a, __b);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq (int8x16_t __a, int8x16_t __b)
> -{
> - return __arm_vqrshlq_s8 (__a, __b);
> -}
> -
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq (int8x16_t __a, int32_t __b)
> -{
> - return __arm_vqrshlq_n_s8 (__a, __b);
> -}
> -
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqrdmulhq (int8x16_t __a, int8x16_t __b)
> @@ -17746,63 +17251,35 @@ __extension__ extern __inline uint32_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vaddvaq (uint32_t __a, uint16x8_t __b)
> {
> - return __arm_vaddvaq_u16 (__a, __b);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vabdq (uint16x8_t __a, uint16x8_t __b)
> -{
> - return __arm_vabdq_u16 (__a, __b);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vshlq_r (uint16x8_t __a, int32_t __b)
> -{
> - return __arm_vshlq_r_u16 (__a, __b);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq (uint16x8_t __a, int16x8_t __b)
> -{
> - return __arm_vrshlq_u16 (__a, __b);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq (uint16x8_t __a, int32_t __b)
> -{
> - return __arm_vrshlq_n_u16 (__a, __b);
> + return __arm_vaddvaq_u16 (__a, __b);
> }
>
> __extension__ extern __inline uint16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqshlq (uint16x8_t __a, int16x8_t __b)
> +__arm_vabdq (uint16x8_t __a, uint16x8_t __b)
> {
> - return __arm_vqshlq_u16 (__a, __b);
> + return __arm_vabdq_u16 (__a, __b);
> }
>
> __extension__ extern __inline uint16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqshlq_r (uint16x8_t __a, int32_t __b)
> +__arm_vshlq_r (uint16x8_t __a, int32_t __b)
> {
> - return __arm_vqshlq_r_u16 (__a, __b);
> + return __arm_vshlq_r_u16 (__a, __b);
> }
>
> __extension__ extern __inline uint16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq (uint16x8_t __a, int16x8_t __b)
> +__arm_vqshlq (uint16x8_t __a, int16x8_t __b)
> {
> - return __arm_vqrshlq_u16 (__a, __b);
> + return __arm_vqshlq_u16 (__a, __b);
> }
>
> __extension__ extern __inline uint16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq (uint16x8_t __a, int32_t __b)
> +__arm_vqshlq_r (uint16x8_t __a, int32_t __b)
> {
> - return __arm_vqrshlq_n_u16 (__a, __b);
> + return __arm_vqshlq_r_u16 (__a, __b);
> }
>
> __extension__ extern __inline uint16_t
> @@ -17959,20 +17436,6 @@ __arm_vshlq_r (int16x8_t __a, int32_t __b)
> return __arm_vshlq_r_s16 (__a, __b);
> }
>
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq (int16x8_t __a, int16x8_t __b)
> -{
> - return __arm_vrshlq_s16 (__a, __b);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq (int16x8_t __a, int32_t __b)
> -{
> - return __arm_vrshlq_n_s16 (__a, __b);
> -}
> -
> __extension__ extern __inline int16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq (int16x8_t __a, int16x8_t __b)
> @@ -17987,20 +17450,6 @@ __arm_vqshlq_r (int16x8_t __a, int32_t __b)
> return __arm_vqshlq_r_s16 (__a, __b);
> }
>
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq (int16x8_t __a, int16x8_t __b)
> -{
> - return __arm_vqrshlq_s16 (__a, __b);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq (int16x8_t __a, int32_t __b)
> -{
> - return __arm_vqrshlq_n_s16 (__a, __b);
> -}
> -
> __extension__ extern __inline int16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqrdmulhq (int16x8_t __a, int16x8_t __b)
> @@ -18323,20 +17772,6 @@ __arm_vshlq_r (uint32x4_t __a, int32_t __b)
> return __arm_vshlq_r_u32 (__a, __b);
> }
>
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq (uint32x4_t __a, int32x4_t __b)
> -{
> - return __arm_vrshlq_u32 (__a, __b);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq (uint32x4_t __a, int32_t __b)
> -{
> - return __arm_vrshlq_n_u32 (__a, __b);
> -}
> -
> __extension__ extern __inline uint32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq (uint32x4_t __a, int32x4_t __b)
> @@ -18351,20 +17786,6 @@ __arm_vqshlq_r (uint32x4_t __a, int32_t __b)
> return __arm_vqshlq_r_u32 (__a, __b);
> }
>
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq (uint32x4_t __a, int32x4_t __b)
> -{
> - return __arm_vqrshlq_u32 (__a, __b);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq (uint32x4_t __a, int32_t __b)
> -{
> - return __arm_vqrshlq_n_u32 (__a, __b);
> -}
> -
> __extension__ extern __inline uint32_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vminavq (uint32_t __a, int32x4_t __b)
> @@ -18519,20 +17940,6 @@ __arm_vshlq_r (int32x4_t __a, int32_t __b)
> return __arm_vshlq_r_s32 (__a, __b);
> }
>
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq (int32x4_t __a, int32x4_t __b)
> -{
> - return __arm_vrshlq_s32 (__a, __b);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq (int32x4_t __a, int32_t __b)
> -{
> - return __arm_vrshlq_n_s32 (__a, __b);
> -}
> -
> __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq (int32x4_t __a, int32x4_t __b)
> @@ -18547,20 +17954,6 @@ __arm_vqshlq_r (int32x4_t __a, int32_t __b)
> return __arm_vqshlq_r_s32 (__a, __b);
> }
>
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq (int32x4_t __a, int32x4_t __b)
> -{
> - return __arm_vqrshlq_s32 (__a, __b);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq (int32x4_t __a, int32_t __b)
> -{
> - return __arm_vqrshlq_n_s32 (__a, __b);
> -}
> -
> __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqrdmulhq (int32x4_t __a, int32x4_t __b)
> @@ -19492,13 +18885,6 @@ __arm_vshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
> return __arm_vshlq_m_r_u8 (__a, __b, __p);
> }
>
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_m_n_u8 (__a, __b, __p);
> -}
> -
> __extension__ extern __inline uint8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
> @@ -19506,13 +18892,6 @@ __arm_vqshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
> return __arm_vqshlq_m_r_u8 (__a, __b, __p);
> }
>
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __arm_vqrshlq_m_n_u8 (__a, __b, __p);
> -}
> -
> __extension__ extern __inline uint8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vminavq_p (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
> @@ -19632,13 +19011,6 @@ __arm_vshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p)
> return __arm_vshlq_m_r_s8 (__a, __b, __p);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_m_n_s8 (__a, __b, __p);
> -}
> -
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vrev64q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
> @@ -19653,13 +19025,6 @@ __arm_vqshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p)
> return __arm_vqshlq_m_r_s8 (__a, __b, __p);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __arm_vqrshlq_m_n_s8 (__a, __b, __p);
> -}
> -
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqnegq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
> @@ -20073,13 +19438,6 @@ __arm_vshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
> return __arm_vshlq_m_r_u16 (__a, __b, __p);
> }
>
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_m_n_u16 (__a, __b, __p);
> -}
> -
> __extension__ extern __inline uint16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
> @@ -20087,13 +19445,6 @@ __arm_vqshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
> return __arm_vqshlq_m_r_u16 (__a, __b, __p);
> }
>
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __arm_vqrshlq_m_n_u16 (__a, __b, __p);
> -}
> -
> __extension__ extern __inline uint16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vminavq_p (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
> @@ -20213,13 +19564,6 @@ __arm_vshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p)
> return __arm_vshlq_m_r_s16 (__a, __b, __p);
> }
>
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_m_n_s16 (__a, __b, __p);
> -}
> -
> __extension__ extern __inline int16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vrev64q_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
> @@ -20234,13 +19578,6 @@ __arm_vqshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p)
> return __arm_vqshlq_m_r_s16 (__a, __b, __p);
> }
>
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __arm_vqrshlq_m_n_s16 (__a, __b, __p);
> -}
> -
> __extension__ extern __inline int16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqnegq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
> @@ -20654,13 +19991,6 @@ __arm_vshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
> return __arm_vshlq_m_r_u32 (__a, __b, __p);
> }
>
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_m_n_u32 (__a, __b, __p);
> -}
> -
> __extension__ extern __inline uint32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
> @@ -20668,13 +19998,6 @@ __arm_vqshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
> return __arm_vqshlq_m_r_u32 (__a, __b, __p);
> }
>
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __arm_vqrshlq_m_n_u32 (__a, __b, __p);
> -}
> -
> __extension__ extern __inline uint32_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vminavq_p (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
> @@ -20794,13 +20117,6 @@ __arm_vshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p)
> return __arm_vshlq_m_r_s32 (__a, __b, __p);
> }
>
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_m_n_s32 (__a, __b, __p);
> -}
> -
> __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vrev64q_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
> @@ -20815,13 +20131,6 @@ __arm_vqshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p)
> return __arm_vqshlq_m_r_s32 (__a, __b, __p);
> }
>
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p)
> -{
> - return __arm_vqrshlq_m_n_s32 (__a, __b, __p);
> -}
> -
> __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqnegq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
> @@ -22887,48 +22196,6 @@ __arm_vqrdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred1
> return __arm_vqrdmulhq_m_s16 (__inactive, __a, __b, __p);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> - return __arm_vqrshlq_m_s8 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_vqrshlq_m_s32 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_vqrshlq_m_s16 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> - return __arm_vqrshlq_m_u8 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_vqrshlq_m_u32 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vqrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_vqrshlq_m_u16 (__inactive, __a, __b, __p);
> -}
> -
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vqshlq_m_n (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
> @@ -23013,48 +22280,6 @@ __arm_vqshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16
> return __arm_vqshlq_m_u16 (__inactive, __a, __b, __p);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_m_s8 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_m_s32 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_m_s16 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_m_u8 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_m_u32 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_m_u16 (__inactive, __a, __b, __p);
> -}
> -
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vrshrq_m (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
> @@ -26009,48 +25234,6 @@ __arm_vrev64q_x (uint32x4_t __a, mve_pred16_t __p)
> return __arm_vrev64q_x_u32 (__a, __p);
> }
>
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_x_s8 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_x_s16 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_x_s32 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_x (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_x_u8 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_x (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_x_u16 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vrshlq_x (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_vrshlq_x_u32 (__a, __b, __p);
> -}
> -
> __extension__ extern __inline int16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> __arm_vshllbq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
> @@ -29858,22 +29041,6 @@ extern void *__ARM_undef;
> int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
> int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
>
> -#define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
> - __typeof(p1) __p1 = (p1); \
> - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
> - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
> - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
> - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
> - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
> - int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
> -
> #define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
> _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
> @@ -29908,22 +29075,6 @@ extern void *__ARM_undef;
> int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
> int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
>
> -#define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
> - __typeof(p1) __p1 = (p1); \
> - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
> - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
> - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
> - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
> - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
> - int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
> - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)));})
> -
> #define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
> __typeof(p1) __p1 = (p1); \
> _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0,
> \
> @@ -30181,16 +29332,6 @@ extern void *__ARM_undef;
> int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
> int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
>
> -#define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
> - __typeof(p1) __p1 = (p1); \
> - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> - int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
> - int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
> - int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
> - int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
> - int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
> - int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
> -
> #define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
> _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
> @@ -30200,15 +29341,6 @@ extern void *__ARM_undef;
> int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
> int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
>
> -#define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
> - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> - int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
> - int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
> - int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
> - int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
> - int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
> - int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
> -
> #define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
> __typeof(p1) __p1 = (p1); \
> __typeof(p2) __p2 = (p2); \
> @@ -31649,22 +30781,6 @@ extern void *__ARM_undef;
> int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
> int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
>
> -#define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
> - __typeof(p1) __p1 = (p1); \
> - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
> - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
> - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
> - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
> - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
> - int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
> -
> #define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
> __typeof(p1) __p1 = (p1); \
> _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> @@ -31717,22 +30833,6 @@ extern void *__ARM_undef;
> int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
> int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
>
> -#define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
> - __typeof(p1) __p1 = (p1); \
> - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
> - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
> - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
> - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
> - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
> - int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
> - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
> - int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)));})
> -
> #define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
> __typeof(p1) __p1 = (p1); \
> _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0,
> \
> @@ -32100,15 +31200,6 @@ extern void *__ARM_undef;
> int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
> int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
>
> -#define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
> - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> - int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
> - int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
> - int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
> - int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
> - int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
> - int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
> -
> #define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
> _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
> @@ -32128,16 +31219,6 @@ extern void *__ARM_undef;
> int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
> int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
>
> -#define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
> - __typeof(p1) __p1 = (p1); \
> - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> - int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
> - int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
> - int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
> - int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
> - int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
> - int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
> -
> #define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
> _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
> @@ -33076,16 +32157,6 @@ extern void *__ARM_undef;
> int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
>
> -#define __arm_vrshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
> - __typeof(p2) __p2 = (p2); \
> - _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
> - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> - int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
> -
> #define __arm_vrshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
> _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
> int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
> @@ -33333,17 +32404,6 @@ extern void *__ARM_undef;
> int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
> int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3));})
>
> -#define __arm_vqrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
> - __typeof(p1) __p1 = (p1); \
> - __typeof(p2) __p2 = (p2); \
> - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
> - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> - int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
> -
> #define __arm_vqshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
> __typeof(p1) __p1 = (p1); \
> _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> @@ -33365,17 +32425,6 @@ extern void *__ARM_undef;
> int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
>
> -#define __arm_vrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
> - __typeof(p1) __p1 = (p1); \
> - __typeof(p2) __p2 = (p2); \
> - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
> - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> - int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
> -
> #define __arm_vrshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
> __typeof(p1) __p1 = (p1); \
> _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> --
> 2.34.1
@@ -157,10 +157,12 @@ FUNCTION_WITH_RTX_M_N (vmulq, MULT, VMULQ)
FUNCTION_WITH_RTX_M_N_NO_N_F (vorrq, IOR, VORRQ)
FUNCTION_WITH_M_N_NO_F (vqaddq, VQADDQ)
FUNCTION_WITH_M_N_NO_U_F (vqdmulhq, VQDMULHQ)
+FUNCTION_WITH_M_N_NO_F (vqrshlq, VQRSHLQ)
FUNCTION_WITH_M_N_NO_F (vqsubq, VQSUBQ)
FUNCTION (vreinterpretq, vreinterpretq_impl,)
FUNCTION_WITHOUT_N_NO_F (vrhaddq, VRHADDQ)
FUNCTION_WITHOUT_N_NO_F (vrmulhq, VRMULHQ)
+FUNCTION_WITH_M_N_NO_F (vrshlq, VRSHLQ)
FUNCTION_WITH_RTX_M_N (vsubq, MINUS, VSUBQ)
FUNCTION (vuninitializedq, vuninitializedq_impl,)
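As a quick sanity check of the overloads this hunk re-implements through the framework, here is a minimal usage sketch (illustrative only, assuming an MVE-enabled toolchain; not part of the patch):

#include <arm_mve.h>

/* Exercises the vrshlq/vqrshlq overloads now provided by the framework.  */
int8x16_t
rounding_shifts (int8x16_t a, int8x16_t shifts, int32_t r)
{
  int8x16_t v = vrshlq (a, shifts);   /* per-lane shift, resolves to vrshlq_s8 */
  v = vrshlq (v, r);                  /* scalar shift, resolves to vrshlq_n_s8 */
  return vqrshlq (v, shifts);         /* saturating variant, vqrshlq_s8 */
}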
@@ -29,10 +29,12 @@ DEF_MVE_FUNCTION (vmulq, binary_opt_n, all_integer, mx_or_none)
DEF_MVE_FUNCTION (vorrq, binary_orrq, all_integer, mx_or_none)
DEF_MVE_FUNCTION (vqaddq, binary_opt_n, all_integer, m_or_none)
DEF_MVE_FUNCTION (vqdmulhq, binary_opt_n, all_signed, m_or_none)
+DEF_MVE_FUNCTION (vqrshlq, binary_round_lshift, all_integer, m_or_none)
DEF_MVE_FUNCTION (vqsubq, binary_opt_n, all_integer, m_or_none)
DEF_MVE_FUNCTION (vreinterpretq, unary_convert, reinterpret_integer, none)
DEF_MVE_FUNCTION (vrhaddq, binary, all_integer, mx_or_none)
DEF_MVE_FUNCTION (vrmulhq, binary, all_integer, mx_or_none)
+DEF_MVE_FUNCTION (vrshlq, binary_round_lshift, all_integer, mx_or_none)
DEF_MVE_FUNCTION (vsubq, binary_opt_n, all_integer, mx_or_none)
DEF_MVE_FUNCTION (vuninitializedq, inherent, all_integer_with_64, none)
#undef REQUIRES_FLOAT
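The shapes above give vrshlq both m- and x-predication (mx_or_none) while vqrshlq is m-only (m_or_none), matching the wrappers removed from arm_mve.h below; a hedged sketch of the predicated forms, again assuming an MVE-enabled toolchain:

#include <arm_mve.h>

uint16x8_t
predicated (uint16x8_t a, uint16x8_t b, int16x8_t shifts, mve_pred16_t p)
{
  /* _m merges: inactive lanes are taken from the first (inactive) argument.  */
  uint16x8_t m = vqrshlq_m (a, b, shifts, p);
  /* _x leaves inactive lanes undefined; only vrshlq offers an _x form.  */
  uint16x8_t x = vrshlq_x (b, shifts, p);
  return vaddq (m, x);
}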
@@ -34,10 +34,12 @@ extern const function_base *const vmulq;
extern const function_base *const vorrq;
extern const function_base *const vqaddq;
extern const function_base *const vqdmulhq;
+extern const function_base *const vqrshlq;
extern const function_base *const vqsubq;
extern const function_base *const vreinterpretq;
extern const function_base *const vrhaddq;
extern const function_base *const vrmulhq;
+extern const function_base *const vrshlq;
extern const function_base *const vsubq;
extern const function_base *const vuninitializedq;
@@ -669,7 +669,9 @@ function_instance::has_inactive_argument () const
if (pred != PRED_m)
return false;
- if (base == functions::vorrq && mode_suffix_id == MODE_n)
+ if ((base == functions::vorrq && mode_suffix_id == MODE_n)
+ || (base == functions::vqrshlq && mode_suffix_id == MODE_n)
+ || (base == functions::vrshlq && mode_suffix_id == MODE_n))
return false;
return true;
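The exception added here mirrors the prototypes of the wrappers removed below: the MODE_n (scalar-shift) predicated forms take no separate inactive vector, unlike the vector-shift _m forms. A small illustration of the two prototypes (an assumption-labelled sketch, not code from the patch):

#include <arm_mve.h>

int32x4_t
scalar_vs_vector_merge (int32x4_t inactive, int32x4_t v, int32x4_t shifts,
                        int32_t r, mve_pred16_t p)
{
  /* Vector-shift form: merges into an explicit inactive argument.  */
  int32x4_t a = vrshlq_m (inactive, v, shifts, p);
  /* Scalar-shift (_n) form: no inactive argument; predicated-off lanes
     keep the value already in the first operand.  */
  int32x4_t b = vrshlq_m_n (v, r, p);
  return vqrshlq_m_n (vaddq (a, b), r, p);
}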
@@ -79,10 +79,8 @@
#define vaddvaq(__a, __b) __arm_vaddvaq(__a, __b)
#define vabdq(__a, __b) __arm_vabdq(__a, __b)
#define vshlq_r(__a, __b) __arm_vshlq_r(__a, __b)
-#define vrshlq(__a, __b) __arm_vrshlq(__a, __b)
#define vqshlq(__a, __b) __arm_vqshlq(__a, __b)
#define vqshlq_r(__a, __b) __arm_vqshlq_r(__a, __b)
-#define vqrshlq(__a, __b) __arm_vqrshlq(__a, __b)
#define vminavq(__a, __b) __arm_vminavq(__a, __b)
#define vminaq(__a, __b) __arm_vminaq(__a, __b)
#define vmaxavq(__a, __b) __arm_vmaxavq(__a, __b)
@@ -153,9 +151,7 @@
#define vsriq(__a, __b, __imm) __arm_vsriq(__a, __b, __imm)
#define vsliq(__a, __b, __imm) __arm_vsliq(__a, __b, __imm)
#define vshlq_m_r(__a, __b, __p) __arm_vshlq_m_r(__a, __b, __p)
-#define vrshlq_m_n(__a, __b, __p) __arm_vrshlq_m_n(__a, __b, __p)
#define vqshlq_m_r(__a, __b, __p) __arm_vqshlq_m_r(__a, __b, __p)
-#define vqrshlq_m_n(__a, __b, __p) __arm_vqrshlq_m_n(__a, __b, __p)
#define vminavq_p(__a, __b, __p) __arm_vminavq_p(__a, __b, __p)
#define vminaq_m(__a, __b, __p) __arm_vminaq_m(__a, __b, __p)
#define vmaxavq_p(__a, __b, __p) __arm_vmaxavq_p(__a, __b, __p)
@@ -254,10 +250,8 @@
#define vqrdmlsdhq_m(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m(__inactive, __a, __b, __p)
#define vqrdmlsdhxq_m(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m(__inactive, __a, __b, __p)
#define vqrdmulhq_m(__inactive, __a, __b, __p) __arm_vqrdmulhq_m(__inactive, __a, __b, __p)
-#define vqrshlq_m(__inactive, __a, __b, __p) __arm_vqrshlq_m(__inactive, __a, __b, __p)
#define vqshlq_m_n(__inactive, __a, __imm, __p) __arm_vqshlq_m_n(__inactive, __a, __imm, __p)
#define vqshlq_m(__inactive, __a, __b, __p) __arm_vqshlq_m(__inactive, __a, __b, __p)
-#define vrshlq_m(__inactive, __a, __b, __p) __arm_vrshlq_m(__inactive, __a, __b, __p)
#define vrshrq_m(__inactive, __a, __imm, __p) __arm_vrshrq_m(__inactive, __a, __imm, __p)
#define vshlq_m_n(__inactive, __a, __imm, __p) __arm_vshlq_m_n(__inactive, __a, __imm, __p)
#define vshrq_m(__inactive, __a, __imm, __p) __arm_vshrq_m(__inactive, __a, __imm, __p)
@@ -385,7 +379,6 @@
#define vrev16q_x(__a, __p) __arm_vrev16q_x(__a, __p)
#define vrev32q_x(__a, __p) __arm_vrev32q_x(__a, __p)
#define vrev64q_x(__a, __p) __arm_vrev64q_x(__a, __p)
-#define vrshlq_x(__a, __b, __p) __arm_vrshlq_x(__a, __b, __p)
#define vshllbq_x(__a, __imm, __p) __arm_vshllbq_x(__a, __imm, __p)
#define vshlltq_x(__a, __imm, __p) __arm_vshlltq_x(__a, __imm, __p)
#define vshlq_x(__a, __b, __p) __arm_vshlq_x(__a, __b, __p)
@@ -663,12 +656,8 @@
#define vaddvaq_u8(__a, __b) __arm_vaddvaq_u8(__a, __b)
#define vabdq_u8(__a, __b) __arm_vabdq_u8(__a, __b)
#define vshlq_r_u8(__a, __b) __arm_vshlq_r_u8(__a, __b)
-#define vrshlq_u8(__a, __b) __arm_vrshlq_u8(__a, __b)
-#define vrshlq_n_u8(__a, __b) __arm_vrshlq_n_u8(__a, __b)
#define vqshlq_u8(__a, __b) __arm_vqshlq_u8(__a, __b)
#define vqshlq_r_u8(__a, __b) __arm_vqshlq_r_u8(__a, __b)
-#define vqrshlq_u8(__a, __b) __arm_vqrshlq_u8(__a, __b)
-#define vqrshlq_n_u8(__a, __b) __arm_vqrshlq_n_u8(__a, __b)
#define vminavq_s8(__a, __b) __arm_vminavq_s8(__a, __b)
#define vminaq_s8(__a, __b) __arm_vminaq_s8(__a, __b)
#define vmaxavq_s8(__a, __b) __arm_vmaxavq_s8(__a, __b)
@@ -691,12 +680,8 @@
#define vqshluq_n_s8(__a, __imm) __arm_vqshluq_n_s8(__a, __imm)
#define vaddvq_p_s8(__a, __p) __arm_vaddvq_p_s8(__a, __p)
#define vshlq_r_s8(__a, __b) __arm_vshlq_r_s8(__a, __b)
-#define vrshlq_s8(__a, __b) __arm_vrshlq_s8(__a, __b)
-#define vrshlq_n_s8(__a, __b) __arm_vrshlq_n_s8(__a, __b)
#define vqshlq_s8(__a, __b) __arm_vqshlq_s8(__a, __b)
#define vqshlq_r_s8(__a, __b) __arm_vqshlq_r_s8(__a, __b)
-#define vqrshlq_s8(__a, __b) __arm_vqrshlq_s8(__a, __b)
-#define vqrshlq_n_s8(__a, __b) __arm_vqrshlq_n_s8(__a, __b)
#define vqrdmulhq_s8(__a, __b) __arm_vqrdmulhq_s8(__a, __b)
#define vqrdmulhq_n_s8(__a, __b) __arm_vqrdmulhq_n_s8(__a, __b)
#define vornq_s8(__a, __b) __arm_vornq_s8(__a, __b)
@@ -743,12 +728,8 @@
#define vaddvaq_u16(__a, __b) __arm_vaddvaq_u16(__a, __b)
#define vabdq_u16(__a, __b) __arm_vabdq_u16(__a, __b)
#define vshlq_r_u16(__a, __b) __arm_vshlq_r_u16(__a, __b)
-#define vrshlq_u16(__a, __b) __arm_vrshlq_u16(__a, __b)
-#define vrshlq_n_u16(__a, __b) __arm_vrshlq_n_u16(__a, __b)
#define vqshlq_u16(__a, __b) __arm_vqshlq_u16(__a, __b)
#define vqshlq_r_u16(__a, __b) __arm_vqshlq_r_u16(__a, __b)
-#define vqrshlq_u16(__a, __b) __arm_vqrshlq_u16(__a, __b)
-#define vqrshlq_n_u16(__a, __b) __arm_vqrshlq_n_u16(__a, __b)
#define vminavq_s16(__a, __b) __arm_vminavq_s16(__a, __b)
#define vminaq_s16(__a, __b) __arm_vminaq_s16(__a, __b)
#define vmaxavq_s16(__a, __b) __arm_vmaxavq_s16(__a, __b)
@@ -771,12 +752,8 @@
#define vqshluq_n_s16(__a, __imm) __arm_vqshluq_n_s16(__a, __imm)
#define vaddvq_p_s16(__a, __p) __arm_vaddvq_p_s16(__a, __p)
#define vshlq_r_s16(__a, __b) __arm_vshlq_r_s16(__a, __b)
-#define vrshlq_s16(__a, __b) __arm_vrshlq_s16(__a, __b)
-#define vrshlq_n_s16(__a, __b) __arm_vrshlq_n_s16(__a, __b)
#define vqshlq_s16(__a, __b) __arm_vqshlq_s16(__a, __b)
#define vqshlq_r_s16(__a, __b) __arm_vqshlq_r_s16(__a, __b)
-#define vqrshlq_s16(__a, __b) __arm_vqrshlq_s16(__a, __b)
-#define vqrshlq_n_s16(__a, __b) __arm_vqrshlq_n_s16(__a, __b)
#define vqrdmulhq_s16(__a, __b) __arm_vqrdmulhq_s16(__a, __b)
#define vqrdmulhq_n_s16(__a, __b) __arm_vqrdmulhq_n_s16(__a, __b)
#define vornq_s16(__a, __b) __arm_vornq_s16(__a, __b)
@@ -823,12 +800,8 @@
#define vaddvaq_u32(__a, __b) __arm_vaddvaq_u32(__a, __b)
#define vabdq_u32(__a, __b) __arm_vabdq_u32(__a, __b)
#define vshlq_r_u32(__a, __b) __arm_vshlq_r_u32(__a, __b)
-#define vrshlq_u32(__a, __b) __arm_vrshlq_u32(__a, __b)
-#define vrshlq_n_u32(__a, __b) __arm_vrshlq_n_u32(__a, __b)
#define vqshlq_u32(__a, __b) __arm_vqshlq_u32(__a, __b)
#define vqshlq_r_u32(__a, __b) __arm_vqshlq_r_u32(__a, __b)
-#define vqrshlq_u32(__a, __b) __arm_vqrshlq_u32(__a, __b)
-#define vqrshlq_n_u32(__a, __b) __arm_vqrshlq_n_u32(__a, __b)
#define vminavq_s32(__a, __b) __arm_vminavq_s32(__a, __b)
#define vminaq_s32(__a, __b) __arm_vminaq_s32(__a, __b)
#define vmaxavq_s32(__a, __b) __arm_vmaxavq_s32(__a, __b)
@@ -851,12 +824,8 @@
#define vqshluq_n_s32(__a, __imm) __arm_vqshluq_n_s32(__a, __imm)
#define vaddvq_p_s32(__a, __p) __arm_vaddvq_p_s32(__a, __p)
#define vshlq_r_s32(__a, __b) __arm_vshlq_r_s32(__a, __b)
-#define vrshlq_s32(__a, __b) __arm_vrshlq_s32(__a, __b)
-#define vrshlq_n_s32(__a, __b) __arm_vrshlq_n_s32(__a, __b)
#define vqshlq_s32(__a, __b) __arm_vqshlq_s32(__a, __b)
#define vqshlq_r_s32(__a, __b) __arm_vqshlq_r_s32(__a, __b)
-#define vqrshlq_s32(__a, __b) __arm_vqrshlq_s32(__a, __b)
-#define vqrshlq_n_s32(__a, __b) __arm_vqrshlq_n_s32(__a, __b)
#define vqrdmulhq_s32(__a, __b) __arm_vqrdmulhq_s32(__a, __b)
#define vqrdmulhq_n_s32(__a, __b) __arm_vqrdmulhq_n_s32(__a, __b)
#define vornq_s32(__a, __b) __arm_vornq_s32(__a, __b)
@@ -1064,9 +1033,7 @@
#define vsriq_n_u8(__a, __b, __imm) __arm_vsriq_n_u8(__a, __b, __imm)
#define vsliq_n_u8(__a, __b, __imm) __arm_vsliq_n_u8(__a, __b, __imm)
#define vshlq_m_r_u8(__a, __b, __p) __arm_vshlq_m_r_u8(__a, __b, __p)
-#define vrshlq_m_n_u8(__a, __b, __p) __arm_vrshlq_m_n_u8(__a, __b, __p)
#define vqshlq_m_r_u8(__a, __b, __p) __arm_vqshlq_m_r_u8(__a, __b, __p)
-#define vqrshlq_m_n_u8(__a, __b, __p) __arm_vqrshlq_m_n_u8(__a, __b, __p)
#define vminavq_p_s8(__a, __b, __p) __arm_vminavq_p_s8(__a, __b, __p)
#define vminaq_m_s8(__a, __b, __p) __arm_vminaq_m_s8(__a, __b, __p)
#define vmaxavq_p_s8(__a, __b, __p) __arm_vmaxavq_p_s8(__a, __b, __p)
@@ -1084,10 +1051,8 @@
#define vcmpeqq_m_s8(__a, __b, __p) __arm_vcmpeqq_m_s8(__a, __b, __p)
#define vcmpeqq_m_n_s8(__a, __b, __p) __arm_vcmpeqq_m_n_s8(__a, __b, __p)
#define vshlq_m_r_s8(__a, __b, __p) __arm_vshlq_m_r_s8(__a, __b, __p)
-#define vrshlq_m_n_s8(__a, __b, __p) __arm_vrshlq_m_n_s8(__a, __b, __p)
#define vrev64q_m_s8(__inactive, __a, __p) __arm_vrev64q_m_s8(__inactive, __a, __p)
#define vqshlq_m_r_s8(__a, __b, __p) __arm_vqshlq_m_r_s8(__a, __b, __p)
-#define vqrshlq_m_n_s8(__a, __b, __p) __arm_vqrshlq_m_n_s8(__a, __b, __p)
#define vqnegq_m_s8(__inactive, __a, __p) __arm_vqnegq_m_s8(__inactive, __a, __p)
#define vqabsq_m_s8(__inactive, __a, __p) __arm_vqabsq_m_s8(__inactive, __a, __p)
#define vnegq_m_s8(__inactive, __a, __p) __arm_vnegq_m_s8(__inactive, __a, __p)
@@ -1147,9 +1112,7 @@
#define vsriq_n_u16(__a, __b, __imm) __arm_vsriq_n_u16(__a, __b, __imm)
#define vsliq_n_u16(__a, __b, __imm) __arm_vsliq_n_u16(__a, __b, __imm)
#define vshlq_m_r_u16(__a, __b, __p) __arm_vshlq_m_r_u16(__a, __b, __p)
-#define vrshlq_m_n_u16(__a, __b, __p) __arm_vrshlq_m_n_u16(__a, __b, __p)
#define vqshlq_m_r_u16(__a, __b, __p) __arm_vqshlq_m_r_u16(__a, __b, __p)
-#define vqrshlq_m_n_u16(__a, __b, __p) __arm_vqrshlq_m_n_u16(__a, __b, __p)
#define vminavq_p_s16(__a, __b, __p) __arm_vminavq_p_s16(__a, __b, __p)
#define vminaq_m_s16(__a, __b, __p) __arm_vminaq_m_s16(__a, __b, __p)
#define vmaxavq_p_s16(__a, __b, __p) __arm_vmaxavq_p_s16(__a, __b, __p)
@@ -1167,10 +1130,8 @@
#define vcmpeqq_m_s16(__a, __b, __p) __arm_vcmpeqq_m_s16(__a, __b, __p)
#define vcmpeqq_m_n_s16(__a, __b, __p) __arm_vcmpeqq_m_n_s16(__a, __b, __p)
#define vshlq_m_r_s16(__a, __b, __p) __arm_vshlq_m_r_s16(__a, __b, __p)
-#define vrshlq_m_n_s16(__a, __b, __p) __arm_vrshlq_m_n_s16(__a, __b, __p)
#define vrev64q_m_s16(__inactive, __a, __p) __arm_vrev64q_m_s16(__inactive, __a, __p)
#define vqshlq_m_r_s16(__a, __b, __p) __arm_vqshlq_m_r_s16(__a, __b, __p)
-#define vqrshlq_m_n_s16(__a, __b, __p) __arm_vqrshlq_m_n_s16(__a, __b, __p)
#define vqnegq_m_s16(__inactive, __a, __p) __arm_vqnegq_m_s16(__inactive, __a, __p)
#define vqabsq_m_s16(__inactive, __a, __p) __arm_vqabsq_m_s16(__inactive, __a, __p)
#define vnegq_m_s16(__inactive, __a, __p) __arm_vnegq_m_s16(__inactive, __a, __p)
@@ -1230,9 +1191,7 @@
#define vsriq_n_u32(__a, __b, __imm) __arm_vsriq_n_u32(__a, __b, __imm)
#define vsliq_n_u32(__a, __b, __imm) __arm_vsliq_n_u32(__a, __b, __imm)
#define vshlq_m_r_u32(__a, __b, __p) __arm_vshlq_m_r_u32(__a, __b, __p)
-#define vrshlq_m_n_u32(__a, __b, __p) __arm_vrshlq_m_n_u32(__a, __b, __p)
#define vqshlq_m_r_u32(__a, __b, __p) __arm_vqshlq_m_r_u32(__a, __b, __p)
-#define vqrshlq_m_n_u32(__a, __b, __p) __arm_vqrshlq_m_n_u32(__a, __b, __p)
#define vminavq_p_s32(__a, __b, __p) __arm_vminavq_p_s32(__a, __b, __p)
#define vminaq_m_s32(__a, __b, __p) __arm_vminaq_m_s32(__a, __b, __p)
#define vmaxavq_p_s32(__a, __b, __p) __arm_vmaxavq_p_s32(__a, __b, __p)
@@ -1250,10 +1209,8 @@
#define vcmpeqq_m_s32(__a, __b, __p) __arm_vcmpeqq_m_s32(__a, __b, __p)
#define vcmpeqq_m_n_s32(__a, __b, __p) __arm_vcmpeqq_m_n_s32(__a, __b, __p)
#define vshlq_m_r_s32(__a, __b, __p) __arm_vshlq_m_r_s32(__a, __b, __p)
-#define vrshlq_m_n_s32(__a, __b, __p) __arm_vrshlq_m_n_s32(__a, __b, __p)
#define vrev64q_m_s32(__inactive, __a, __p) __arm_vrev64q_m_s32(__inactive, __a, __p)
#define vqshlq_m_r_s32(__a, __b, __p) __arm_vqshlq_m_r_s32(__a, __b, __p)
-#define vqrshlq_m_n_s32(__a, __b, __p) __arm_vqrshlq_m_n_s32(__a, __b, __p)
#define vqnegq_m_s32(__inactive, __a, __p) __arm_vqnegq_m_s32(__inactive, __a, __p)
#define vqabsq_m_s32(__inactive, __a, __p) __arm_vqabsq_m_s32(__inactive, __a, __p)
#define vnegq_m_s32(__inactive, __a, __p) __arm_vnegq_m_s32(__inactive, __a, __p)
@@ -1646,12 +1603,6 @@
#define vqrdmulhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s8(__inactive, __a, __b, __p)
#define vqrdmulhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s32(__inactive, __a, __b, __p)
#define vqrdmulhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s16(__inactive, __a, __b, __p)
-#define vqrshlq_m_s8(__inactive, __a, __b, __p) __arm_vqrshlq_m_s8(__inactive, __a, __b, __p)
-#define vqrshlq_m_s32(__inactive, __a, __b, __p) __arm_vqrshlq_m_s32(__inactive, __a, __b, __p)
-#define vqrshlq_m_s16(__inactive, __a, __b, __p) __arm_vqrshlq_m_s16(__inactive, __a, __b, __p)
-#define vqrshlq_m_u8(__inactive, __a, __b, __p) __arm_vqrshlq_m_u8(__inactive, __a, __b, __p)
-#define vqrshlq_m_u32(__inactive, __a, __b, __p) __arm_vqrshlq_m_u32(__inactive, __a, __b, __p)
-#define vqrshlq_m_u16(__inactive, __a, __b, __p) __arm_vqrshlq_m_u16(__inactive, __a, __b, __p)
#define vqshlq_m_n_s8(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s8(__inactive, __a, __imm, __p)
#define vqshlq_m_n_s32(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s32(__inactive, __a, __imm, __p)
#define vqshlq_m_n_s16(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s16(__inactive, __a, __imm, __p)
@@ -1664,12 +1615,6 @@
#define vqshlq_m_u8(__inactive, __a, __b, __p) __arm_vqshlq_m_u8(__inactive, __a, __b, __p)
#define vqshlq_m_u32(__inactive, __a, __b, __p) __arm_vqshlq_m_u32(__inactive, __a, __b, __p)
#define vqshlq_m_u16(__inactive, __a, __b, __p) __arm_vqshlq_m_u16(__inactive, __a, __b, __p)
-#define vrshlq_m_s8(__inactive, __a, __b, __p) __arm_vrshlq_m_s8(__inactive, __a, __b, __p)
-#define vrshlq_m_s32(__inactive, __a, __b, __p) __arm_vrshlq_m_s32(__inactive, __a, __b, __p)
-#define vrshlq_m_s16(__inactive, __a, __b, __p) __arm_vrshlq_m_s16(__inactive, __a, __b, __p)
-#define vrshlq_m_u8(__inactive, __a, __b, __p) __arm_vrshlq_m_u8(__inactive, __a, __b, __p)
-#define vrshlq_m_u32(__inactive, __a, __b, __p) __arm_vrshlq_m_u32(__inactive, __a, __b, __p)
-#define vrshlq_m_u16(__inactive, __a, __b, __p) __arm_vrshlq_m_u16(__inactive, __a, __b, __p)
#define vrshrq_m_n_s8(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s8(__inactive, __a, __imm, __p)
#define vrshrq_m_n_s32(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s32(__inactive, __a, __imm, __p)
#define vrshrq_m_n_s16(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s16(__inactive, __a, __imm, __p)
@@ -2232,12 +2177,6 @@
#define vrev64q_x_u8(__a, __p) __arm_vrev64q_x_u8(__a, __p)
#define vrev64q_x_u16(__a, __p) __arm_vrev64q_x_u16(__a, __p)
#define vrev64q_x_u32(__a, __p) __arm_vrev64q_x_u32(__a, __p)
-#define vrshlq_x_s8(__a, __b, __p) __arm_vrshlq_x_s8(__a, __b, __p)
-#define vrshlq_x_s16(__a, __b, __p) __arm_vrshlq_x_s16(__a, __b, __p)
-#define vrshlq_x_s32(__a, __b, __p) __arm_vrshlq_x_s32(__a, __b, __p)
-#define vrshlq_x_u8(__a, __b, __p) __arm_vrshlq_x_u8(__a, __b, __p)
-#define vrshlq_x_u16(__a, __b, __p) __arm_vrshlq_x_u16(__a, __b, __p)
-#define vrshlq_x_u32(__a, __b, __p) __arm_vrshlq_x_u32(__a, __b, __p)
#define vshllbq_x_n_s8(__a, __imm, __p) __arm_vshllbq_x_n_s8(__a, __imm, __p)
#define vshllbq_x_n_s16(__a, __imm, __p) __arm_vshllbq_x_n_s16(__a, __imm, __p)
#define vshllbq_x_n_u8(__a, __imm, __p) __arm_vshllbq_x_n_u8(__a, __imm, __p)
@@ -3300,20 +3239,6 @@ __arm_vshlq_r_u8 (uint8x16_t __a, int32_t __b)
return __builtin_mve_vshlq_r_uv16qi (__a, __b);
}
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
-{
- return __builtin_mve_vrshlq_uv16qi (__a, __b);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_n_u8 (uint8x16_t __a, int32_t __b)
-{
- return __builtin_mve_vrshlq_n_uv16qi (__a, __b);
-}
-
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
@@ -3328,20 +3253,6 @@ __arm_vqshlq_r_u8 (uint8x16_t __a, int32_t __b)
return __builtin_mve_vqshlq_r_uv16qi (__a, __b);
}
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
-{
- return __builtin_mve_vqrshlq_uv16qi (__a, __b);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_n_u8 (uint8x16_t __a, int32_t __b)
-{
- return __builtin_mve_vqrshlq_n_uv16qi (__a, __b);
-}
-
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_s8 (uint8_t __a, int8x16_t __b)
@@ -3496,20 +3407,6 @@ __arm_vshlq_r_s8 (int8x16_t __a, int32_t __b)
return __builtin_mve_vshlq_r_sv16qi (__a, __b);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __builtin_mve_vrshlq_sv16qi (__a, __b);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_n_s8 (int8x16_t __a, int32_t __b)
-{
- return __builtin_mve_vrshlq_n_sv16qi (__a, __b);
-}
-
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_s8 (int8x16_t __a, int8x16_t __b)
@@ -3524,20 +3421,6 @@ __arm_vqshlq_r_s8 (int8x16_t __a, int32_t __b)
return __builtin_mve_vqshlq_r_sv16qi (__a, __b);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __builtin_mve_vqrshlq_sv16qi (__a, __b);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_n_s8 (int8x16_t __a, int32_t __b)
-{
- return __builtin_mve_vqrshlq_n_sv16qi (__a, __b);
-}
-
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_s8 (int8x16_t __a, int8x16_t __b)
@@ -3862,20 +3745,6 @@ __arm_vshlq_r_u16 (uint16x8_t __a, int32_t __b)
return __builtin_mve_vshlq_r_uv8hi (__a, __b);
}
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
-{
- return __builtin_mve_vrshlq_uv8hi (__a, __b);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_n_u16 (uint16x8_t __a, int32_t __b)
-{
- return __builtin_mve_vrshlq_n_uv8hi (__a, __b);
-}
-
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
@@ -3890,20 +3759,6 @@ __arm_vqshlq_r_u16 (uint16x8_t __a, int32_t __b)
return __builtin_mve_vqshlq_r_uv8hi (__a, __b);
}
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
-{
- return __builtin_mve_vqrshlq_uv8hi (__a, __b);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_n_u16 (uint16x8_t __a, int32_t __b)
-{
- return __builtin_mve_vqrshlq_n_uv8hi (__a, __b);
-}
-
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_s16 (uint16_t __a, int16x8_t __b)
@@ -4058,20 +3913,6 @@ __arm_vshlq_r_s16 (int16x8_t __a, int32_t __b)
return __builtin_mve_vshlq_r_sv8hi (__a, __b);
}
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __builtin_mve_vrshlq_sv8hi (__a, __b);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_n_s16 (int16x8_t __a, int32_t __b)
-{
- return __builtin_mve_vrshlq_n_sv8hi (__a, __b);
-}
-
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_s16 (int16x8_t __a, int16x8_t __b)
@@ -4086,20 +3927,6 @@ __arm_vqshlq_r_s16 (int16x8_t __a, int32_t __b)
return __builtin_mve_vqshlq_r_sv8hi (__a, __b);
}
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __builtin_mve_vqrshlq_sv8hi (__a, __b);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_n_s16 (int16x8_t __a, int32_t __b)
-{
- return __builtin_mve_vqrshlq_n_sv8hi (__a, __b);
-}
-
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
@@ -4424,20 +4251,6 @@ __arm_vshlq_r_u32 (uint32x4_t __a, int32_t __b)
return __builtin_mve_vshlq_r_uv4si (__a, __b);
}
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
-{
- return __builtin_mve_vrshlq_uv4si (__a, __b);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_n_u32 (uint32x4_t __a, int32_t __b)
-{
- return __builtin_mve_vrshlq_n_uv4si (__a, __b);
-}
-
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
@@ -4452,20 +4265,6 @@ __arm_vqshlq_r_u32 (uint32x4_t __a, int32_t __b)
return __builtin_mve_vqshlq_r_uv4si (__a, __b);
}
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
-{
- return __builtin_mve_vqrshlq_uv4si (__a, __b);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_n_u32 (uint32x4_t __a, int32_t __b)
-{
- return __builtin_mve_vqrshlq_n_uv4si (__a, __b);
-}
-
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_s32 (uint32_t __a, int32x4_t __b)
@@ -4620,20 +4419,6 @@ __arm_vshlq_r_s32 (int32x4_t __a, int32_t __b)
return __builtin_mve_vshlq_r_sv4si (__a, __b);
}
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __builtin_mve_vrshlq_sv4si (__a, __b);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_n_s32 (int32x4_t __a, int32_t __b)
-{
- return __builtin_mve_vrshlq_n_sv4si (__a, __b);
-}
-
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_s32 (int32x4_t __a, int32x4_t __b)
@@ -4648,20 +4433,6 @@ __arm_vqshlq_r_s32 (int32x4_t __a, int32_t __b)
return __builtin_mve_vqshlq_r_sv4si (__a, __b);
}
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __builtin_mve_vqrshlq_sv4si (__a, __b);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_n_s32 (int32x4_t __a, int32_t __b)
-{
- return __builtin_mve_vqrshlq_n_sv4si (__a, __b);
-}
-
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
@@ -5633,13 +5404,6 @@ __arm_vshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
return __builtin_mve_vshlq_m_r_uv16qi (__a, __b, __p);
}
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_n_uv16qi (__a, __b, __p);
-}
-
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
@@ -5647,13 +5411,6 @@ __arm_vqshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
return __builtin_mve_vqshlq_m_r_uv16qi (__a, __b, __p);
}
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vqrshlq_m_n_uv16qi (__a, __b, __p);
-}
-
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
@@ -5773,13 +5530,6 @@ __arm_vshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
return __builtin_mve_vshlq_m_r_sv16qi (__a, __b, __p);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_n_sv16qi (__a, __b, __p);
-}
-
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
@@ -5794,13 +5544,6 @@ __arm_vqshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
return __builtin_mve_vqshlq_m_r_sv16qi (__a, __b, __p);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vqrshlq_m_n_sv16qi (__a, __b, __p);
-}
-
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
@@ -6215,13 +5958,6 @@ __arm_vshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
return __builtin_mve_vshlq_m_r_uv8hi (__a, __b, __p);
}
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_n_uv8hi (__a, __b, __p);
-}
-
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
@@ -6229,13 +5965,6 @@ __arm_vqshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
return __builtin_mve_vqshlq_m_r_uv8hi (__a, __b, __p);
}
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vqrshlq_m_n_uv8hi (__a, __b, __p);
-}
-
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
@@ -6355,13 +6084,6 @@ __arm_vshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
return __builtin_mve_vshlq_m_r_sv8hi (__a, __b, __p);
}
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_n_sv8hi (__a, __b, __p);
-}
-
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
@@ -6376,13 +6098,6 @@ __arm_vqshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
return __builtin_mve_vqshlq_m_r_sv8hi (__a, __b, __p);
}
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vqrshlq_m_n_sv8hi (__a, __b, __p);
-}
-
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
@@ -6796,13 +6511,6 @@ __arm_vshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
return __builtin_mve_vshlq_m_r_uv4si (__a, __b, __p);
}
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_n_uv4si (__a, __b, __p);
-}
-
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
@@ -6810,13 +6518,6 @@ __arm_vqshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
return __builtin_mve_vqshlq_m_r_uv4si (__a, __b, __p);
}
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vqrshlq_m_n_uv4si (__a, __b, __p);
-}
-
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
@@ -6936,13 +6637,6 @@ __arm_vshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
return __builtin_mve_vshlq_m_r_sv4si (__a, __b, __p);
}
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_n_sv4si (__a, __b, __p);
-}
-
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
@@ -6957,13 +6651,6 @@ __arm_vqshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
return __builtin_mve_vqshlq_m_r_sv4si (__a, __b, __p);
}
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vqrshlq_m_n_sv4si (__a, __b, __p);
-}
-
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
@@ -9029,48 +8716,6 @@ __arm_vqrdmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_p
return __builtin_mve_vqrdmulhq_m_sv8hi (__inactive, __a, __b, __p);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vqrshlq_m_sv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vqrshlq_m_sv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vqrshlq_m_sv8hi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vqrshlq_m_uv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vqrshlq_m_uv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vqrshlq_m_uv8hi (__inactive, __a, __b, __p);
-}
-
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
@@ -9155,48 +8800,6 @@ __arm_vqshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pr
return __builtin_mve_vqshlq_m_uv8hi (__inactive, __a, __b, __p);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_sv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_sv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_sv8hi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_uv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_uv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_uv8hi (__inactive, __a, __b, __p);
-}
-
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
@@ -12648,48 +12251,6 @@ __arm_vrev64q_x_u32 (uint32x4_t __a, mve_pred16_t __p)
return __builtin_mve_vrev64q_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __builtin_mve_vrshlq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
-}
-
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
@@ -17203,20 +16764,6 @@ __arm_vshlq_r (uint8x16_t __a, int32_t __b)
return __arm_vshlq_r_u8 (__a, __b);
}
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq (uint8x16_t __a, int8x16_t __b)
-{
- return __arm_vrshlq_u8 (__a, __b);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq (uint8x16_t __a, int32_t __b)
-{
- return __arm_vrshlq_n_u8 (__a, __b);
-}
-
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq (uint8x16_t __a, int8x16_t __b)
@@ -17231,20 +16778,6 @@ __arm_vqshlq_r (uint8x16_t __a, int32_t __b)
return __arm_vqshlq_r_u8 (__a, __b);
}
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq (uint8x16_t __a, int8x16_t __b)
-{
- return __arm_vqrshlq_u8 (__a, __b);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq (uint8x16_t __a, int32_t __b)
-{
- return __arm_vqrshlq_n_u8 (__a, __b);
-}
-
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq (uint8_t __a, int8x16_t __b)
@@ -17399,20 +16932,6 @@ __arm_vshlq_r (int8x16_t __a, int32_t __b)
return __arm_vshlq_r_s8 (__a, __b);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq (int8x16_t __a, int8x16_t __b)
-{
- return __arm_vrshlq_s8 (__a, __b);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq (int8x16_t __a, int32_t __b)
-{
- return __arm_vrshlq_n_s8 (__a, __b);
-}
-
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq (int8x16_t __a, int8x16_t __b)
@@ -17427,20 +16946,6 @@ __arm_vqshlq_r (int8x16_t __a, int32_t __b)
return __arm_vqshlq_r_s8 (__a, __b);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq (int8x16_t __a, int8x16_t __b)
-{
- return __arm_vqrshlq_s8 (__a, __b);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq (int8x16_t __a, int32_t __b)
-{
- return __arm_vqrshlq_n_s8 (__a, __b);
-}
-
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int8x16_t __a, int8x16_t __b)
@@ -17746,63 +17251,35 @@ __extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq (uint32_t __a, uint16x8_t __b)
{
- return __arm_vaddvaq_u16 (__a, __b);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vabdq (uint16x8_t __a, uint16x8_t __b)
-{
- return __arm_vabdq_u16 (__a, __b);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vshlq_r (uint16x8_t __a, int32_t __b)
-{
- return __arm_vshlq_r_u16 (__a, __b);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq (uint16x8_t __a, int16x8_t __b)
-{
- return __arm_vrshlq_u16 (__a, __b);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq (uint16x8_t __a, int32_t __b)
-{
- return __arm_vrshlq_n_u16 (__a, __b);
+ return __arm_vaddvaq_u16 (__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqshlq (uint16x8_t __a, int16x8_t __b)
+__arm_vabdq (uint16x8_t __a, uint16x8_t __b)
{
- return __arm_vqshlq_u16 (__a, __b);
+ return __arm_vabdq_u16 (__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqshlq_r (uint16x8_t __a, int32_t __b)
+__arm_vshlq_r (uint16x8_t __a, int32_t __b)
{
- return __arm_vqshlq_r_u16 (__a, __b);
+ return __arm_vshlq_r_u16 (__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq (uint16x8_t __a, int16x8_t __b)
+__arm_vqshlq (uint16x8_t __a, int16x8_t __b)
{
- return __arm_vqrshlq_u16 (__a, __b);
+ return __arm_vqshlq_u16 (__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq (uint16x8_t __a, int32_t __b)
+__arm_vqshlq_r (uint16x8_t __a, int32_t __b)
{
- return __arm_vqrshlq_n_u16 (__a, __b);
+ return __arm_vqshlq_r_u16 (__a, __b);
}
__extension__ extern __inline uint16_t
@@ -17959,20 +17436,6 @@ __arm_vshlq_r (int16x8_t __a, int32_t __b)
return __arm_vshlq_r_s16 (__a, __b);
}
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq (int16x8_t __a, int16x8_t __b)
-{
- return __arm_vrshlq_s16 (__a, __b);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq (int16x8_t __a, int32_t __b)
-{
- return __arm_vrshlq_n_s16 (__a, __b);
-}
-
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq (int16x8_t __a, int16x8_t __b)
@@ -17987,20 +17450,6 @@ __arm_vqshlq_r (int16x8_t __a, int32_t __b)
return __arm_vqshlq_r_s16 (__a, __b);
}
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq (int16x8_t __a, int16x8_t __b)
-{
- return __arm_vqrshlq_s16 (__a, __b);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq (int16x8_t __a, int32_t __b)
-{
- return __arm_vqrshlq_n_s16 (__a, __b);
-}
-
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int16x8_t __a, int16x8_t __b)
@@ -18323,20 +17772,6 @@ __arm_vshlq_r (uint32x4_t __a, int32_t __b)
return __arm_vshlq_r_u32 (__a, __b);
}
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq (uint32x4_t __a, int32x4_t __b)
-{
- return __arm_vrshlq_u32 (__a, __b);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq (uint32x4_t __a, int32_t __b)
-{
- return __arm_vrshlq_n_u32 (__a, __b);
-}
-
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq (uint32x4_t __a, int32x4_t __b)
@@ -18351,20 +17786,6 @@ __arm_vqshlq_r (uint32x4_t __a, int32_t __b)
return __arm_vqshlq_r_u32 (__a, __b);
}
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq (uint32x4_t __a, int32x4_t __b)
-{
- return __arm_vqrshlq_u32 (__a, __b);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq (uint32x4_t __a, int32_t __b)
-{
- return __arm_vqrshlq_n_u32 (__a, __b);
-}
-
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq (uint32_t __a, int32x4_t __b)
@@ -18519,20 +17940,6 @@ __arm_vshlq_r (int32x4_t __a, int32_t __b)
return __arm_vshlq_r_s32 (__a, __b);
}
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq (int32x4_t __a, int32x4_t __b)
-{
- return __arm_vrshlq_s32 (__a, __b);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq (int32x4_t __a, int32_t __b)
-{
- return __arm_vrshlq_n_s32 (__a, __b);
-}
-
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq (int32x4_t __a, int32x4_t __b)
@@ -18547,20 +17954,6 @@ __arm_vqshlq_r (int32x4_t __a, int32_t __b)
return __arm_vqshlq_r_s32 (__a, __b);
}
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq (int32x4_t __a, int32x4_t __b)
-{
- return __arm_vqrshlq_s32 (__a, __b);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq (int32x4_t __a, int32_t __b)
-{
- return __arm_vqrshlq_n_s32 (__a, __b);
-}
-
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int32x4_t __a, int32x4_t __b)
@@ -19492,13 +18885,6 @@ __arm_vshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
return __arm_vshlq_m_r_u8 (__a, __b, __p);
}
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_m_n_u8 (__a, __b, __p);
-}
-
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
@@ -19506,13 +18892,6 @@ __arm_vqshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
return __arm_vqshlq_m_r_u8 (__a, __b, __p);
}
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vqrshlq_m_n_u8 (__a, __b, __p);
-}
-
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
@@ -19632,13 +19011,6 @@ __arm_vshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p)
return __arm_vshlq_m_r_s8 (__a, __b, __p);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_m_n_s8 (__a, __b, __p);
-}
-
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
@@ -19653,13 +19025,6 @@ __arm_vqshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p)
return __arm_vqshlq_m_r_s8 (__a, __b, __p);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vqrshlq_m_n_s8 (__a, __b, __p);
-}
-
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
@@ -20073,13 +19438,6 @@ __arm_vshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
return __arm_vshlq_m_r_u16 (__a, __b, __p);
}
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_m_n_u16 (__a, __b, __p);
-}
-
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
@@ -20087,13 +19445,6 @@ __arm_vqshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
return __arm_vqshlq_m_r_u16 (__a, __b, __p);
}
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vqrshlq_m_n_u16 (__a, __b, __p);
-}
-
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
@@ -20213,13 +19564,6 @@ __arm_vshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p)
return __arm_vshlq_m_r_s16 (__a, __b, __p);
}
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_m_n_s16 (__a, __b, __p);
-}
-
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
@@ -20234,13 +19578,6 @@ __arm_vqshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p)
return __arm_vqshlq_m_r_s16 (__a, __b, __p);
}
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vqrshlq_m_n_s16 (__a, __b, __p);
-}
-
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
@@ -20654,13 +19991,6 @@ __arm_vshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
return __arm_vshlq_m_r_u32 (__a, __b, __p);
}
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_m_n_u32 (__a, __b, __p);
-}
-
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
@@ -20668,13 +19998,6 @@ __arm_vqshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
return __arm_vqshlq_m_r_u32 (__a, __b, __p);
}
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vqrshlq_m_n_u32 (__a, __b, __p);
-}
-
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
@@ -20794,13 +20117,6 @@ __arm_vshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p)
return __arm_vshlq_m_r_s32 (__a, __b, __p);
}
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_m_n_s32 (__a, __b, __p);
-}
-
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
@@ -20815,13 +20131,6 @@ __arm_vqshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p)
return __arm_vqshlq_m_r_s32 (__a, __b, __p);
}
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vqrshlq_m_n_s32 (__a, __b, __p);
-}
-
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
@@ -22887,48 +22196,6 @@ __arm_vqrdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred1
return __arm_vqrdmulhq_m_s16 (__inactive, __a, __b, __p);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vqrshlq_m_s8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vqrshlq_m_s32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vqrshlq_m_s16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vqrshlq_m_u8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vqrshlq_m_u32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vqrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vqrshlq_m_u16 (__inactive, __a, __b, __p);
-}
-
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
@@ -23013,48 +22280,6 @@ __arm_vqshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16
return __arm_vqshlq_m_u16 (__inactive, __a, __b, __p);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_m_s8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_m_s32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_m_s16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_m_u8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_m_u32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_m_u16 (__inactive, __a, __b, __p);
-}
-
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
@@ -26009,48 +25234,6 @@ __arm_vrev64q_x (uint32x4_t __a, mve_pred16_t __p)
return __arm_vrev64q_x_u32 (__a, __p);
}
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_x_s8 (__a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_x_s16 (__a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_x_s32 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_x (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_x_u8 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_x (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_x_u16 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vrshlq_x (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vrshlq_x_u32 (__a, __b, __p);
-}
-
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
@@ -29858,22 +29041,6 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
-#define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
- __typeof(p1) __p1 = (p1); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
-
#define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
_Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
@@ -29908,22 +29075,6 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
-#define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
- __typeof(p1) __p1 = (p1); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)));})
-
#define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
__typeof(p1) __p1 = (p1); \
_Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
@@ -30181,16 +29332,6 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
-#define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
- __typeof(p1) __p1 = (p1); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
- int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
- int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
- int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
- int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
- int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
- int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
-
#define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
_Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
@@ -30200,15 +29341,6 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
-#define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
- int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
- int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
- int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
- int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
- int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
- int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
-
#define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
__typeof(p1) __p1 = (p1); \
__typeof(p2) __p2 = (p2); \
@@ -31649,22 +30781,6 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
-#define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
- __typeof(p1) __p1 = (p1); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
-
#define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
__typeof(p1) __p1 = (p1); \
_Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
@@ -31717,22 +30833,6 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
-#define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
- __typeof(p1) __p1 = (p1); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)));})
-
#define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
__typeof(p1) __p1 = (p1); \
_Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
@@ -32100,15 +31200,6 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
-#define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
- int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
- int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
- int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
- int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
- int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
- int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
-
#define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
_Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
@@ -32128,16 +31219,6 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
-#define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
- __typeof(p1) __p1 = (p1); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
- int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
- int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
- int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
- int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
- int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
- int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
-
#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
_Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
@@ -33076,16 +32157,6 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
-#define __arm_vrshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
- __typeof(p2) __p2 = (p2); \
- _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
-
#define __arm_vrshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
_Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
@@ -33333,17 +32404,6 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3));})
-#define __arm_vqrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
- __typeof(p1) __p1 = (p1); \
- __typeof(p2) __p2 = (p2); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
-
#define __arm_vqshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
__typeof(p1) __p1 = (p1); \
_Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
@@ -33365,17 +32425,6 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
-#define __arm_vrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
- __typeof(p1) __p1 = (p1); \
- __typeof(p2) __p2 = (p2); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
-
#define __arm_vrshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
__typeof(p1) __p1 = (p1); \
_Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \