[08/22] arm: [MVE intrinsics] rework vaddq vmulq vsubq

Message ID 20230418134608.244751-9-christophe.lyon@arm.com
State: Accepted
Series: arm: New framework for MVE intrinsics

Checks

snail/gcc-patch-check: success (Github commit url)

Commit Message

Christophe Lyon April 18, 2023, 1:45 p.m. UTC
  Implement vaddq, vmulq, vsubq using the new MVE builtins framework.

2022-09-08  Christophe Lyon <christophe.lyon@arm.com>

	gcc/

	* config/arm/arm-mve-builtins-base.cc (FUNCTION_WITH_RTX_M_N):
	New.
	(vaddq, vmulq, vsubq): New.
	* config/arm/arm-mve-builtins-base.def (vaddq, vmulq, vsubq): New.
	* config/arm/arm-mve-builtins-base.h (vaddq, vmulq, vsubq): New.
	* config/arm/arm_mve.h (vaddq): Remove.
	(vaddq_m): Remove.
	(vaddq_x): Remove.
	(vaddq_n_u8): Remove.
	(vaddq_n_s8): Remove.
	(vaddq_n_u16): Remove.
	(vaddq_n_s16): Remove.
	(vaddq_n_u32): Remove.
	(vaddq_n_s32): Remove.
	(vaddq_n_f16): Remove.
	(vaddq_n_f32): Remove.
	(vaddq_m_n_s8): Remove.
	(vaddq_m_n_s32): Remove.
	(vaddq_m_n_s16): Remove.
	(vaddq_m_n_u8): Remove.
	(vaddq_m_n_u32): Remove.
	(vaddq_m_n_u16): Remove.
	(vaddq_m_s8): Remove.
	(vaddq_m_s32): Remove.
	(vaddq_m_s16): Remove.
	(vaddq_m_u8): Remove.
	(vaddq_m_u32): Remove.
	(vaddq_m_u16): Remove.
	(vaddq_m_f32): Remove.
	(vaddq_m_f16): Remove.
	(vaddq_m_n_f32): Remove.
	(vaddq_m_n_f16): Remove.
	(vaddq_s8): Remove.
	(vaddq_s16): Remove.
	(vaddq_s32): Remove.
	(vaddq_u8): Remove.
	(vaddq_u16): Remove.
	(vaddq_u32): Remove.
	(vaddq_f16): Remove.
	(vaddq_f32): Remove.
	(vaddq_x_s8): Remove.
	(vaddq_x_s16): Remove.
	(vaddq_x_s32): Remove.
	(vaddq_x_n_s8): Remove.
	(vaddq_x_n_s16): Remove.
	(vaddq_x_n_s32): Remove.
	(vaddq_x_u8): Remove.
	(vaddq_x_u16): Remove.
	(vaddq_x_u32): Remove.
	(vaddq_x_n_u8): Remove.
	(vaddq_x_n_u16): Remove.
	(vaddq_x_n_u32): Remove.
	(vaddq_x_f16): Remove.
	(vaddq_x_f32): Remove.
	(vaddq_x_n_f16): Remove.
	(vaddq_x_n_f32): Remove.
	(__arm_vaddq_n_u8): Remove.
	(__arm_vaddq_n_s8): Remove.
	(__arm_vaddq_n_u16): Remove.
	(__arm_vaddq_n_s16): Remove.
	(__arm_vaddq_n_u32): Remove.
	(__arm_vaddq_n_s32): Remove.
	(__arm_vaddq_m_n_s8): Remove.
	(__arm_vaddq_m_n_s32): Remove.
	(__arm_vaddq_m_n_s16): Remove.
	(__arm_vaddq_m_n_u8): Remove.
	(__arm_vaddq_m_n_u32): Remove.
	(__arm_vaddq_m_n_u16): Remove.
	(__arm_vaddq_m_s8): Remove.
	(__arm_vaddq_m_s32): Remove.
	(__arm_vaddq_m_s16): Remove.
	(__arm_vaddq_m_u8): Remove.
	(__arm_vaddq_m_u32): Remove.
	(__arm_vaddq_m_u16): Remove.
	(__arm_vaddq_s8): Remove.
	(__arm_vaddq_s16): Remove.
	(__arm_vaddq_s32): Remove.
	(__arm_vaddq_u8): Remove.
	(__arm_vaddq_u16): Remove.
	(__arm_vaddq_u32): Remove.
	(__arm_vaddq_x_s8): Remove.
	(__arm_vaddq_x_s16): Remove.
	(__arm_vaddq_x_s32): Remove.
	(__arm_vaddq_x_n_s8): Remove.
	(__arm_vaddq_x_n_s16): Remove.
	(__arm_vaddq_x_n_s32): Remove.
	(__arm_vaddq_x_u8): Remove.
	(__arm_vaddq_x_u16): Remove.
	(__arm_vaddq_x_u32): Remove.
	(__arm_vaddq_x_n_u8): Remove.
	(__arm_vaddq_x_n_u16): Remove.
	(__arm_vaddq_x_n_u32): Remove.
	(__arm_vaddq_n_f16): Remove.
	(__arm_vaddq_n_f32): Remove.
	(__arm_vaddq_m_f32): Remove.
	(__arm_vaddq_m_f16): Remove.
	(__arm_vaddq_m_n_f32): Remove.
	(__arm_vaddq_m_n_f16): Remove.
	(__arm_vaddq_f16): Remove.
	(__arm_vaddq_f32): Remove.
	(__arm_vaddq_x_f16): Remove.
	(__arm_vaddq_x_f32): Remove.
	(__arm_vaddq_x_n_f16): Remove.
	(__arm_vaddq_x_n_f32): Remove.
	(__arm_vaddq): Remove.
	(__arm_vaddq_m): Remove.
	(__arm_vaddq_x): Remove.
	(vmulq): Remove.
	(vmulq_m): Remove.
	(vmulq_x): Remove.
	(vmulq_u8): Remove.
	(vmulq_n_u8): Remove.
	(vmulq_s8): Remove.
	(vmulq_n_s8): Remove.
	(vmulq_u16): Remove.
	(vmulq_n_u16): Remove.
	(vmulq_s16): Remove.
	(vmulq_n_s16): Remove.
	(vmulq_u32): Remove.
	(vmulq_n_u32): Remove.
	(vmulq_s32): Remove.
	(vmulq_n_s32): Remove.
	(vmulq_n_f16): Remove.
	(vmulq_f16): Remove.
	(vmulq_n_f32): Remove.
	(vmulq_f32): Remove.
	(vmulq_m_n_s8): Remove.
	(vmulq_m_n_s32): Remove.
	(vmulq_m_n_s16): Remove.
	(vmulq_m_n_u8): Remove.
	(vmulq_m_n_u32): Remove.
	(vmulq_m_n_u16): Remove.
	(vmulq_m_s8): Remove.
	(vmulq_m_s32): Remove.
	(vmulq_m_s16): Remove.
	(vmulq_m_u8): Remove.
	(vmulq_m_u32): Remove.
	(vmulq_m_u16): Remove.
	(vmulq_m_f32): Remove.
	(vmulq_m_f16): Remove.
	(vmulq_m_n_f32): Remove.
	(vmulq_m_n_f16): Remove.
	(vmulq_x_s8): Remove.
	(vmulq_x_s16): Remove.
	(vmulq_x_s32): Remove.
	(vmulq_x_n_s8): Remove.
	(vmulq_x_n_s16): Remove.
	(vmulq_x_n_s32): Remove.
	(vmulq_x_u8): Remove.
	(vmulq_x_u16): Remove.
	(vmulq_x_u32): Remove.
	(vmulq_x_n_u8): Remove.
	(vmulq_x_n_u16): Remove.
	(vmulq_x_n_u32): Remove.
	(vmulq_x_f16): Remove.
	(vmulq_x_f32): Remove.
	(vmulq_x_n_f16): Remove.
	(vmulq_x_n_f32): Remove.
	(__arm_vmulq_u8): Remove.
	(__arm_vmulq_n_u8): Remove.
	(__arm_vmulq_s8): Remove.
	(__arm_vmulq_n_s8): Remove.
	(__arm_vmulq_u16): Remove.
	(__arm_vmulq_n_u16): Remove.
	(__arm_vmulq_s16): Remove.
	(__arm_vmulq_n_s16): Remove.
	(__arm_vmulq_u32): Remove.
	(__arm_vmulq_n_u32): Remove.
	(__arm_vmulq_s32): Remove.
	(__arm_vmulq_n_s32): Remove.
	(__arm_vmulq_m_n_s8): Remove.
	(__arm_vmulq_m_n_s32): Remove.
	(__arm_vmulq_m_n_s16): Remove.
	(__arm_vmulq_m_n_u8): Remove.
	(__arm_vmulq_m_n_u32): Remove.
	(__arm_vmulq_m_n_u16): Remove.
	(__arm_vmulq_m_s8): Remove.
	(__arm_vmulq_m_s32): Remove.
	(__arm_vmulq_m_s16): Remove.
	(__arm_vmulq_m_u8): Remove.
	(__arm_vmulq_m_u32): Remove.
	(__arm_vmulq_m_u16): Remove.
	(__arm_vmulq_x_s8): Remove.
	(__arm_vmulq_x_s16): Remove.
	(__arm_vmulq_x_s32): Remove.
	(__arm_vmulq_x_n_s8): Remove.
	(__arm_vmulq_x_n_s16): Remove.
	(__arm_vmulq_x_n_s32): Remove.
	(__arm_vmulq_x_u8): Remove.
	(__arm_vmulq_x_u16): Remove.
	(__arm_vmulq_x_u32): Remove.
	(__arm_vmulq_x_n_u8): Remove.
	(__arm_vmulq_x_n_u16): Remove.
	(__arm_vmulq_x_n_u32): Remove.
	(__arm_vmulq_n_f16): Remove.
	(__arm_vmulq_f16): Remove.
	(__arm_vmulq_n_f32): Remove.
	(__arm_vmulq_f32): Remove.
	(__arm_vmulq_m_f32): Remove.
	(__arm_vmulq_m_f16): Remove.
	(__arm_vmulq_m_n_f32): Remove.
	(__arm_vmulq_m_n_f16): Remove.
	(__arm_vmulq_x_f16): Remove.
	(__arm_vmulq_x_f32): Remove.
	(__arm_vmulq_x_n_f16): Remove.
	(__arm_vmulq_x_n_f32): Remove.
	(__arm_vmulq): Remove.
	(__arm_vmulq_m): Remove.
	(__arm_vmulq_x): Remove.
	(vsubq): Remove.
	(vsubq_m): Remove.
	(vsubq_x): Remove.
	(vsubq_n_f16): Remove.
	(vsubq_n_f32): Remove.
	(vsubq_u8): Remove.
	(vsubq_n_u8): Remove.
	(vsubq_s8): Remove.
	(vsubq_n_s8): Remove.
	(vsubq_u16): Remove.
	(vsubq_n_u16): Remove.
	(vsubq_s16): Remove.
	(vsubq_n_s16): Remove.
	(vsubq_u32): Remove.
	(vsubq_n_u32): Remove.
	(vsubq_s32): Remove.
	(vsubq_n_s32): Remove.
	(vsubq_f16): Remove.
	(vsubq_f32): Remove.
	(vsubq_m_s8): Remove.
	(vsubq_m_u8): Remove.
	(vsubq_m_s16): Remove.
	(vsubq_m_u16): Remove.
	(vsubq_m_s32): Remove.
	(vsubq_m_u32): Remove.
	(vsubq_m_n_s8): Remove.
	(vsubq_m_n_s32): Remove.
	(vsubq_m_n_s16): Remove.
	(vsubq_m_n_u8): Remove.
	(vsubq_m_n_u32): Remove.
	(vsubq_m_n_u16): Remove.
	(vsubq_m_f32): Remove.
	(vsubq_m_f16): Remove.
	(vsubq_m_n_f32): Remove.
	(vsubq_m_n_f16): Remove.
	(vsubq_x_s8): Remove.
	(vsubq_x_s16): Remove.
	(vsubq_x_s32): Remove.
	(vsubq_x_n_s8): Remove.
	(vsubq_x_n_s16): Remove.
	(vsubq_x_n_s32): Remove.
	(vsubq_x_u8): Remove.
	(vsubq_x_u16): Remove.
	(vsubq_x_u32): Remove.
	(vsubq_x_n_u8): Remove.
	(vsubq_x_n_u16): Remove.
	(vsubq_x_n_u32): Remove.
	(vsubq_x_f16): Remove.
	(vsubq_x_f32): Remove.
	(vsubq_x_n_f16): Remove.
	(vsubq_x_n_f32): Remove.
	(__arm_vsubq_u8): Remove.
	(__arm_vsubq_n_u8): Remove.
	(__arm_vsubq_s8): Remove.
	(__arm_vsubq_n_s8): Remove.
	(__arm_vsubq_u16): Remove.
	(__arm_vsubq_n_u16): Remove.
	(__arm_vsubq_s16): Remove.
	(__arm_vsubq_n_s16): Remove.
	(__arm_vsubq_u32): Remove.
	(__arm_vsubq_n_u32): Remove.
	(__arm_vsubq_s32): Remove.
	(__arm_vsubq_n_s32): Remove.
	(__arm_vsubq_m_s8): Remove.
	(__arm_vsubq_m_u8): Remove.
	(__arm_vsubq_m_s16): Remove.
	(__arm_vsubq_m_u16): Remove.
	(__arm_vsubq_m_s32): Remove.
	(__arm_vsubq_m_u32): Remove.
	(__arm_vsubq_m_n_s8): Remove.
	(__arm_vsubq_m_n_s32): Remove.
	(__arm_vsubq_m_n_s16): Remove.
	(__arm_vsubq_m_n_u8): Remove.
	(__arm_vsubq_m_n_u32): Remove.
	(__arm_vsubq_m_n_u16): Remove.
	(__arm_vsubq_x_s8): Remove.
	(__arm_vsubq_x_s16): Remove.
	(__arm_vsubq_x_s32): Remove.
	(__arm_vsubq_x_n_s8): Remove.
	(__arm_vsubq_x_n_s16): Remove.
	(__arm_vsubq_x_n_s32): Remove.
	(__arm_vsubq_x_u8): Remove.
	(__arm_vsubq_x_u16): Remove.
	(__arm_vsubq_x_u32): Remove.
	(__arm_vsubq_x_n_u8): Remove.
	(__arm_vsubq_x_n_u16): Remove.
	(__arm_vsubq_x_n_u32): Remove.
	(__arm_vsubq_n_f16): Remove.
	(__arm_vsubq_n_f32): Remove.
	(__arm_vsubq_f16): Remove.
	(__arm_vsubq_f32): Remove.
	(__arm_vsubq_m_f32): Remove.
	(__arm_vsubq_m_f16): Remove.
	(__arm_vsubq_m_n_f32): Remove.
	(__arm_vsubq_m_n_f16): Remove.
	(__arm_vsubq_x_f16): Remove.
	(__arm_vsubq_x_f32): Remove.
	(__arm_vsubq_x_n_f16): Remove.
	(__arm_vsubq_x_n_f32): Remove.
	(__arm_vsubq): Remove.
	(__arm_vsubq_m): Remove.
	(__arm_vsubq_x): Remove.
	* config/arm/arm_mve_builtins.def (vsubq_u, vsubq_s, vsubq_f):
	Remove.
	(vmulq_u, vmulq_s, vmulq_f): Remove.
	* config/arm/mve.md (mve_vsubq_<supf><mode>): Remove.
	(mve_vmulq_<supf><mode>): Remove.
---
 gcc/config/arm/arm-mve-builtins-base.cc  |   11 +
 gcc/config/arm/arm-mve-builtins-base.def |    6 +
 gcc/config/arm/arm-mve-builtins-base.h   |    3 +
 gcc/config/arm/arm_mve.h                 | 2498 ----------------------
 gcc/config/arm/arm_mve_builtins.def      |    6 -
 gcc/config/arm/mve.md                    |   27 -
 6 files changed, 20 insertions(+), 2531 deletions(-)
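
The user-facing intrinsics are unchanged by this rework: a call such as the
one below still compiles to a single vadd.i32 instruction, now expanded
through the new framework and the RTX-code-based patterns rather than the
removed unspec-based ones.  (Illustrative sketch, not part of the patch;
build with e.g. -O2 -march=armv8.1-m.main+mve -mfloat-abi=hard.)

#include <arm_mve.h>

int32x4_t
add_vectors (int32x4_t a, int32x4_t b)
{
  /* The explicit vaddq_s32 and the polymorphic vaddq (a, b) behave
     identically; both now resolve through the new framework.  */
  return vaddq_s32 (a, b);
}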
  

Comments

Kyrylo Tkachov May 2, 2023, 4:31 p.m. UTC | #1
> -----Original Message-----
> From: Christophe Lyon <christophe.lyon@arm.com>
> Sent: Tuesday, April 18, 2023 2:46 PM
> To: gcc-patches@gcc.gnu.org; Kyrylo Tkachov <Kyrylo.Tkachov@arm.com>;
> Richard Earnshaw <Richard.Earnshaw@arm.com>; Richard Sandiford
> <Richard.Sandiford@arm.com>
> Cc: Christophe Lyon <Christophe.Lyon@arm.com>
> Subject: [PATCH 08/22] arm: [MVE intrinsics] rework vaddq vmulq vsubq
> 
> Implement vaddq, vmulq, vsubq using the new MVE builtins framework.
> 
[snip]

> diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
> index 5167fbc6add..ccb3cf23304 100644
> --- a/gcc/config/arm/mve.md
> +++ b/gcc/config/arm/mve.md
> @@ -1353,18 +1353,6 @@ (define_insn "mve_vmulltq_int_<supf><mode>"
>  ;; [vmulq_u, vmulq_s])
>  ;; [vsubq_s, vsubq_u])
>  ;;
> -(define_insn "mve_vmulq_<supf><mode>"
> -  [
> -    (set (match_operand:MVE_2 0 "s_register_operand" "=w")
> -	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand"
> "w")
> -		       (match_operand:MVE_2 2 "s_register_operand" "w")]
> -	 VMULQ))
> -  ]
> -  "TARGET_HAVE_MVE"
> -  "vmul.i%#<V_sz_elem>\t%q0, %q1, %q2"
> -  [(set_attr "type" "mve_move")
> -])
> -
>  (define_insn "mve_<mve_addsubmul>q<mode>"
>    [
>     (set (match_operand:MVE_2 0 "s_register_operand" "=w")
> @@ -1742,21 +1730,6 @@ (define_insn "mve_vshlq_r_<supf><mode>"
>    [(set_attr "type" "mve_move")
>  ])
> 
> -;;
> -;; [vsubq_s, vsubq_u])
> -;;
> -(define_insn "mve_vsubq_<supf><mode>"
> -  [
> -   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
> -	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand"
> "w")
> -		       (match_operand:MVE_2 2 "s_register_operand" "w")]
> -	 VSUBQ))
> -  ]
> -  "TARGET_HAVE_MVE"
> -  "vsub.i%#<V_sz_elem>\t%q0, %q1, %q2"
> -  [(set_attr "type" "mve_move")
> -])
> -

Just to make sure I understand correctly, are these patterns being removed because the new builtins are wired through the factored patterns in patch [07/22]?
If so, ok.
Thanks,
Kyrill

>  ;;
>  ;; [vabdq_f])
>  ;;
> --
> 2.34.1
  
Christophe Lyon May 3, 2023, 9:06 a.m. UTC | #2
On 5/2/23 18:31, Kyrylo Tkachov wrote:
> 
> 
>> -----Original Message-----
>> From: Christophe Lyon <christophe.lyon@arm.com>
>> Sent: Tuesday, April 18, 2023 2:46 PM
>> To: gcc-patches@gcc.gnu.org; Kyrylo Tkachov <Kyrylo.Tkachov@arm.com>;
>> Richard Earnshaw <Richard.Earnshaw@arm.com>; Richard Sandiford
>> <Richard.Sandiford@arm.com>
>> Cc: Christophe Lyon <Christophe.Lyon@arm.com>
>> Subject: [PATCH 08/22] arm: [MVE intrinsics] rework vaddq vmulq vsubq
>>
>> Implement vaddq, vmulq, vsubq using the new MVE builtins framework.
>>
> 
> [snip]
> 
>> diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
>> index 5167fbc6add..ccb3cf23304 100644
>> --- a/gcc/config/arm/mve.md
>> +++ b/gcc/config/arm/mve.md
>> @@ -1353,18 +1353,6 @@ (define_insn "mve_vmulltq_int_<supf><mode>"
>>   ;; [vmulq_u, vmulq_s])
>>   ;; [vsubq_s, vsubq_u])
>>   ;;
>> -(define_insn "mve_vmulq_<supf><mode>"
>> -  [
>> -    (set (match_operand:MVE_2 0 "s_register_operand" "=w")
>> -	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand"
>> "w")
>> -		       (match_operand:MVE_2 2 "s_register_operand" "w")]
>> -	 VMULQ))
>> -  ]
>> -  "TARGET_HAVE_MVE"
>> -  "vmul.i%#<V_sz_elem>\t%q0, %q1, %q2"
>> -  [(set_attr "type" "mve_move")
>> -])
>> -
>>   (define_insn "mve_<mve_addsubmul>q<mode>"
>>     [
>>      (set (match_operand:MVE_2 0 "s_register_operand" "=w")
>> @@ -1742,21 +1730,6 @@ (define_insn "mve_vshlq_r_<supf><mode>"
>>     [(set_attr "type" "mve_move")
>>   ])
>>
>> -;;
>> -;; [vsubq_s, vsubq_u])
>> -;;
>> -(define_insn "mve_vsubq_<supf><mode>"
>> -  [
>> -   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
>> -	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand"
>> "w")
>> -		       (match_operand:MVE_2 2 "s_register_operand" "w")]
>> -	 VSUBQ))
>> -  ]
>> -  "TARGET_HAVE_MVE"
>> -  "vsub.i%#<V_sz_elem>\t%q0, %q1, %q2"
>> -  [(set_attr "type" "mve_move")
>> -])
>> -
> 
> Just to make sure I understand correctly, are these patterns being removed because the new builtins are wired through the factored patterns in patch [07/22]?
> If so, ok.

Yes. In patch 07/22, we introduce mve_<mve_addsubmul>q<mode>, which uses
standard RTX codes instead of unspecs.
With this patch 08/22, the builtins now rely on the RTX codes, so the
unspec-based patterns are useless (as was already the case for "add").

Christophe

> Thanks,
> Kyrill
> 
>>   ;;
>>   ;; [vabdq_f])
>>   ;;
>> --
>> 2.34.1
>
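
For reference, the factored pattern from patch 07/22 that now covers these
operations looks roughly as follows.  This is a sketch reconstructed from the
hunks quoted above: the pattern name and the operand lines appear in the
diff, while the MVE_INT_BINARY_RTX code-iterator and mve_addsubmul
code-attribute names are assumptions.

(define_insn "mve_<mve_addsubmul>q<mode>"
  [
   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
	(MVE_INT_BINARY_RTX:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
				  (match_operand:MVE_2 2 "s_register_operand" "w")))
  ]
  "TARGET_HAVE_MVE"
  "<mve_addsubmul>.i%#<V_sz_elem>\t%q0, %q1, %q2"
  [(set_attr "type" "mve_move")
])

Because the body uses the plus, minus, and mult RTX codes directly, a single
pattern handles vadd, vsub, and vmul, and the RTL optimizers can reason about
the arithmetic, which opaque unspecs prevent.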
  

Patch

diff --git a/gcc/config/arm/arm-mve-builtins-base.cc b/gcc/config/arm/arm-mve-builtins-base.cc
index 02a3b23865c..48b09bffd0c 100644
--- a/gcc/config/arm/arm-mve-builtins-base.cc
+++ b/gcc/config/arm/arm-mve-builtins-base.cc
@@ -82,7 +82,18 @@  class vuninitializedq_impl : public quiet<function_base>
 
 namespace arm_mve {
 
+  /* Helper for builtins with RTX codes, _m predicated and _n overrides.  */
+#define FUNCTION_WITH_RTX_M_N(NAME, RTX, UNSPEC) FUNCTION		\
+  (NAME, unspec_based_mve_function_exact_insn,				\
+   (RTX, RTX, RTX,							\
+    UNSPEC##_N_S, UNSPEC##_N_U, UNSPEC##_N_F,				\
+    UNSPEC##_M_S, UNSPEC##_M_U, UNSPEC##_M_F,				\
+    UNSPEC##_M_N_S, UNSPEC##_M_N_U, UNSPEC##_M_N_F))
+
+FUNCTION_WITH_RTX_M_N (vaddq, PLUS, VADDQ)
+FUNCTION_WITH_RTX_M_N (vmulq, MULT, VMULQ)
 FUNCTION (vreinterpretq, vreinterpretq_impl,)
+FUNCTION_WITH_RTX_M_N (vsubq, MINUS, VSUBQ)
 FUNCTION (vuninitializedq, vuninitializedq_impl,)
 
 } /* end namespace arm_mve */
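
As a reading aid (not part of the patch): given the macro definition above,
FUNCTION_WITH_RTX_M_N (vaddq, PLUS, VADDQ) expands mechanically to

FUNCTION (vaddq, unspec_based_mve_function_exact_insn,
	  (PLUS, PLUS, PLUS,
	   VADDQ_N_S, VADDQ_N_U, VADDQ_N_F,
	   VADDQ_M_S, VADDQ_M_U, VADDQ_M_F,
	   VADDQ_M_N_S, VADDQ_M_N_U, VADDQ_M_N_F))

that is, the unpredicated vector-vector forms map to the PLUS RTX code for
the signed, unsigned, and float variants, while the _n and predicated
_m/_m_n variants still map to unspecs.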
diff --git a/gcc/config/arm/arm-mve-builtins-base.def b/gcc/config/arm/arm-mve-builtins-base.def
index f669642a259..624558c08b2 100644
--- a/gcc/config/arm/arm-mve-builtins-base.def
+++ b/gcc/config/arm/arm-mve-builtins-base.def
@@ -18,11 +18,17 @@ 
    <http://www.gnu.org/licenses/>.  */
 
 #define REQUIRES_FLOAT false
+DEF_MVE_FUNCTION (vaddq, binary_opt_n, all_integer, mx_or_none)
+DEF_MVE_FUNCTION (vmulq, binary_opt_n, all_integer, mx_or_none)
 DEF_MVE_FUNCTION (vreinterpretq, unary_convert, reinterpret_integer, none)
+DEF_MVE_FUNCTION (vsubq, binary_opt_n, all_integer, mx_or_none)
 DEF_MVE_FUNCTION (vuninitializedq, inherent, all_integer_with_64, none)
 #undef REQUIRES_FLOAT
 
 #define REQUIRES_FLOAT true
+DEF_MVE_FUNCTION (vaddq, binary_opt_n, all_float, mx_or_none)
+DEF_MVE_FUNCTION (vmulq, binary_opt_n, all_float, mx_or_none)
 DEF_MVE_FUNCTION (vreinterpretq, unary_convert, reinterpret_float, none)
+DEF_MVE_FUNCTION (vsubq, binary_opt_n, all_float, mx_or_none)
 DEF_MVE_FUNCTION (vuninitializedq, inherent, all_float, none)
 #undef REQUIRES_FLOAT
diff --git a/gcc/config/arm/arm-mve-builtins-base.h b/gcc/config/arm/arm-mve-builtins-base.h
index ec309cbe572..30f8549c495 100644
--- a/gcc/config/arm/arm-mve-builtins-base.h
+++ b/gcc/config/arm/arm-mve-builtins-base.h
@@ -23,7 +23,10 @@ 
 namespace arm_mve {
 namespace functions {
 
+extern const function_base *const vaddq;
+extern const function_base *const vmulq;
 extern const function_base *const vreinterpretq;
+extern const function_base *const vsubq;
 extern const function_base *const vuninitializedq;
 
 } /* end namespace arm_mve::functions */
diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
index 5dc5ecef134..42a1af2ae15 100644
--- a/gcc/config/arm/arm_mve.h
+++ b/gcc/config/arm/arm_mve.h
@@ -61,14 +61,12 @@ 
 #define vaddlvq_p(__a, __p) __arm_vaddlvq_p(__a, __p)
 #define vcmpneq(__a, __b) __arm_vcmpneq(__a, __b)
 #define vshlq(__a, __b) __arm_vshlq(__a, __b)
-#define vsubq(__a, __b) __arm_vsubq(__a, __b)
 #define vrmulhq(__a, __b) __arm_vrmulhq(__a, __b)
 #define vrhaddq(__a, __b) __arm_vrhaddq(__a, __b)
 #define vqsubq(__a, __b) __arm_vqsubq(__a, __b)
 #define vqaddq(__a, __b) __arm_vqaddq(__a, __b)
 #define vorrq(__a, __b) __arm_vorrq(__a, __b)
 #define vornq(__a, __b) __arm_vornq(__a, __b)
-#define vmulq(__a, __b) __arm_vmulq(__a, __b)
 #define vmulltq_int(__a, __b) __arm_vmulltq_int(__a, __b)
 #define vmullbq_int(__a, __b) __arm_vmullbq_int(__a, __b)
 #define vmulhq(__a, __b) __arm_vmulhq(__a, __b)
@@ -89,7 +87,6 @@ 
 #define vandq(__a, __b) __arm_vandq(__a, __b)
 #define vaddvq_p(__a, __p) __arm_vaddvq_p(__a, __p)
 #define vaddvaq(__a, __b) __arm_vaddvaq(__a, __b)
-#define vaddq(__a, __b) __arm_vaddq(__a, __b)
 #define vabdq(__a, __b) __arm_vabdq(__a, __b)
 #define vshlq_r(__a, __b) __arm_vshlq_r(__a, __b)
 #define vrshlq(__a, __b) __arm_vrshlq(__a, __b)
@@ -235,12 +232,10 @@ 
 #define vqmovunbq_m(__a, __b, __p) __arm_vqmovunbq_m(__a, __b, __p)
 #define vqmovuntq_m(__a, __b, __p) __arm_vqmovuntq_m(__a, __b, __p)
 #define vsriq_m(__a, __b, __imm, __p) __arm_vsriq_m(__a, __b, __imm, __p)
-#define vsubq_m(__inactive, __a, __b, __p) __arm_vsubq_m(__inactive, __a, __b, __p)
 #define vqshluq_m(__inactive, __a, __imm, __p) __arm_vqshluq_m(__inactive, __a, __imm, __p)
 #define vabavq_p(__a, __b, __c, __p) __arm_vabavq_p(__a, __b, __c, __p)
 #define vshlq_m(__inactive, __a, __b, __p) __arm_vshlq_m(__inactive, __a, __b, __p)
 #define vabdq_m(__inactive, __a, __b, __p) __arm_vabdq_m(__inactive, __a, __b, __p)
-#define vaddq_m(__inactive, __a, __b, __p) __arm_vaddq_m(__inactive, __a, __b, __p)
 #define vandq_m(__inactive, __a, __b, __p) __arm_vandq_m(__inactive, __a, __b, __p)
 #define vbicq_m(__inactive, __a, __b, __p) __arm_vbicq_m(__inactive, __a, __b, __p)
 #define vbrsrq_m(__inactive, __a, __b, __p) __arm_vbrsrq_m(__inactive, __a, __b, __p)
@@ -262,7 +257,6 @@ 
 #define vmulhq_m(__inactive, __a, __b, __p) __arm_vmulhq_m(__inactive, __a, __b, __p)
 #define vmullbq_int_m(__inactive, __a, __b, __p) __arm_vmullbq_int_m(__inactive, __a, __b, __p)
 #define vmulltq_int_m(__inactive, __a, __b, __p) __arm_vmulltq_int_m(__inactive, __a, __b, __p)
-#define vmulq_m(__inactive, __a, __b, __p) __arm_vmulq_m(__inactive, __a, __b, __p)
 #define vornq_m(__inactive, __a, __b, __p) __arm_vornq_m(__inactive, __a, __b, __p)
 #define vorrq_m(__inactive, __a, __b, __p) __arm_vorrq_m(__inactive, __a, __b, __p)
 #define vqaddq_m(__inactive, __a, __b, __p) __arm_vqaddq_m(__inactive, __a, __b, __p)
@@ -394,7 +388,6 @@ 
 #define vmaxq_x(__a, __b, __p) __arm_vmaxq_x(__a, __b, __p)
 #define vabdq_x(__a, __b, __p) __arm_vabdq_x(__a, __b, __p)
 #define vabsq_x(__a, __p) __arm_vabsq_x(__a, __p)
-#define vaddq_x(__a, __b, __p) __arm_vaddq_x(__a, __b, __p)
 #define vclsq_x(__a, __p) __arm_vclsq_x(__a, __p)
 #define vclzq_x(__a, __p) __arm_vclzq_x(__a, __p)
 #define vnegq_x(__a, __p) __arm_vnegq_x(__a, __p)
@@ -403,8 +396,6 @@ 
 #define vmullbq_int_x(__a, __b, __p) __arm_vmullbq_int_x(__a, __b, __p)
 #define vmulltq_poly_x(__a, __b, __p) __arm_vmulltq_poly_x(__a, __b, __p)
 #define vmulltq_int_x(__a, __b, __p) __arm_vmulltq_int_x(__a, __b, __p)
-#define vmulq_x(__a, __b, __p) __arm_vmulq_x(__a, __b, __p)
-#define vsubq_x(__a, __b, __p) __arm_vsubq_x(__a, __b, __p)
 #define vcaddq_rot90_x(__a, __b, __p) __arm_vcaddq_rot90_x(__a, __b, __p)
 #define vcaddq_rot270_x(__a, __b, __p) __arm_vcaddq_rot270_x(__a, __b, __p)
 #define vhaddq_x(__a, __b, __p) __arm_vhaddq_x(__a, __b, __p)
@@ -651,8 +642,6 @@ 
 #define vctp64q(__a) __arm_vctp64q(__a)
 #define vctp8q(__a) __arm_vctp8q(__a)
 #define vpnot(__a) __arm_vpnot(__a)
-#define vsubq_n_f16(__a, __b) __arm_vsubq_n_f16(__a, __b)
-#define vsubq_n_f32(__a, __b) __arm_vsubq_n_f32(__a, __b)
 #define vbrsrq_n_f16(__a, __b) __arm_vbrsrq_n_f16(__a, __b)
 #define vbrsrq_n_f32(__a, __b) __arm_vbrsrq_n_f32(__a, __b)
 #define vcvtq_n_f16_s16(__a,  __imm6) __arm_vcvtq_n_f16_s16(__a,  __imm6)
@@ -693,8 +682,6 @@ 
 #define vshlq_u8(__a, __b) __arm_vshlq_u8(__a, __b)
 #define vshlq_u16(__a, __b) __arm_vshlq_u16(__a, __b)
 #define vshlq_u32(__a, __b) __arm_vshlq_u32(__a, __b)
-#define vsubq_u8(__a, __b) __arm_vsubq_u8(__a, __b)
-#define vsubq_n_u8(__a, __b) __arm_vsubq_n_u8(__a, __b)
 #define vrmulhq_u8(__a, __b) __arm_vrmulhq_u8(__a, __b)
 #define vrhaddq_u8(__a, __b) __arm_vrhaddq_u8(__a, __b)
 #define vqsubq_u8(__a, __b) __arm_vqsubq_u8(__a, __b)
@@ -703,8 +690,6 @@ 
 #define vqaddq_n_u8(__a, __b) __arm_vqaddq_n_u8(__a, __b)
 #define vorrq_u8(__a, __b) __arm_vorrq_u8(__a, __b)
 #define vornq_u8(__a, __b) __arm_vornq_u8(__a, __b)
-#define vmulq_u8(__a, __b) __arm_vmulq_u8(__a, __b)
-#define vmulq_n_u8(__a, __b) __arm_vmulq_n_u8(__a, __b)
 #define vmulltq_int_u8(__a, __b) __arm_vmulltq_int_u8(__a, __b)
 #define vmullbq_int_u8(__a, __b) __arm_vmullbq_int_u8(__a, __b)
 #define vmulhq_u8(__a, __b) __arm_vmulhq_u8(__a, __b)
@@ -731,7 +716,6 @@ 
 #define vandq_u8(__a, __b) __arm_vandq_u8(__a, __b)
 #define vaddvq_p_u8(__a, __p) __arm_vaddvq_p_u8(__a, __p)
 #define vaddvaq_u8(__a, __b) __arm_vaddvaq_u8(__a, __b)
-#define vaddq_n_u8(__a, __b) __arm_vaddq_n_u8(__a, __b)
 #define vabdq_u8(__a, __b) __arm_vabdq_u8(__a, __b)
 #define vshlq_r_u8(__a, __b) __arm_vshlq_r_u8(__a, __b)
 #define vrshlq_u8(__a, __b) __arm_vrshlq_u8(__a, __b)
@@ -761,8 +745,6 @@ 
 #define vcmpeqq_n_s8(__a, __b) __arm_vcmpeqq_n_s8(__a, __b)
 #define vqshluq_n_s8(__a,  __imm) __arm_vqshluq_n_s8(__a,  __imm)
 #define vaddvq_p_s8(__a, __p) __arm_vaddvq_p_s8(__a, __p)
-#define vsubq_s8(__a, __b) __arm_vsubq_s8(__a, __b)
-#define vsubq_n_s8(__a, __b) __arm_vsubq_n_s8(__a, __b)
 #define vshlq_r_s8(__a, __b) __arm_vshlq_r_s8(__a, __b)
 #define vrshlq_s8(__a, __b) __arm_vrshlq_s8(__a, __b)
 #define vrshlq_n_s8(__a, __b) __arm_vrshlq_n_s8(__a, __b)
@@ -782,8 +764,6 @@ 
 #define vqaddq_n_s8(__a, __b) __arm_vqaddq_n_s8(__a, __b)
 #define vorrq_s8(__a, __b) __arm_vorrq_s8(__a, __b)
 #define vornq_s8(__a, __b) __arm_vornq_s8(__a, __b)
-#define vmulq_s8(__a, __b) __arm_vmulq_s8(__a, __b)
-#define vmulq_n_s8(__a, __b) __arm_vmulq_n_s8(__a, __b)
 #define vmulltq_int_s8(__a, __b) __arm_vmulltq_int_s8(__a, __b)
 #define vmullbq_int_s8(__a, __b) __arm_vmullbq_int_s8(__a, __b)
 #define vmulhq_s8(__a, __b) __arm_vmulhq_s8(__a, __b)
@@ -808,13 +788,10 @@ 
 #define vbicq_s8(__a, __b) __arm_vbicq_s8(__a, __b)
 #define vandq_s8(__a, __b) __arm_vandq_s8(__a, __b)
 #define vaddvaq_s8(__a, __b) __arm_vaddvaq_s8(__a, __b)
-#define vaddq_n_s8(__a, __b) __arm_vaddq_n_s8(__a, __b)
 #define vabdq_s8(__a, __b) __arm_vabdq_s8(__a, __b)
 #define vshlq_n_s8(__a,  __imm) __arm_vshlq_n_s8(__a,  __imm)
 #define vrshrq_n_s8(__a,  __imm) __arm_vrshrq_n_s8(__a,  __imm)
 #define vqshlq_n_s8(__a,  __imm) __arm_vqshlq_n_s8(__a,  __imm)
-#define vsubq_u16(__a, __b) __arm_vsubq_u16(__a, __b)
-#define vsubq_n_u16(__a, __b) __arm_vsubq_n_u16(__a, __b)
 #define vrmulhq_u16(__a, __b) __arm_vrmulhq_u16(__a, __b)
 #define vrhaddq_u16(__a, __b) __arm_vrhaddq_u16(__a, __b)
 #define vqsubq_u16(__a, __b) __arm_vqsubq_u16(__a, __b)
@@ -823,8 +800,6 @@ 
 #define vqaddq_n_u16(__a, __b) __arm_vqaddq_n_u16(__a, __b)
 #define vorrq_u16(__a, __b) __arm_vorrq_u16(__a, __b)
 #define vornq_u16(__a, __b) __arm_vornq_u16(__a, __b)
-#define vmulq_u16(__a, __b) __arm_vmulq_u16(__a, __b)
-#define vmulq_n_u16(__a, __b) __arm_vmulq_n_u16(__a, __b)
 #define vmulltq_int_u16(__a, __b) __arm_vmulltq_int_u16(__a, __b)
 #define vmullbq_int_u16(__a, __b) __arm_vmullbq_int_u16(__a, __b)
 #define vmulhq_u16(__a, __b) __arm_vmulhq_u16(__a, __b)
@@ -851,7 +826,6 @@ 
 #define vandq_u16(__a, __b) __arm_vandq_u16(__a, __b)
 #define vaddvq_p_u16(__a, __p) __arm_vaddvq_p_u16(__a, __p)
 #define vaddvaq_u16(__a, __b) __arm_vaddvaq_u16(__a, __b)
-#define vaddq_n_u16(__a, __b) __arm_vaddq_n_u16(__a, __b)
 #define vabdq_u16(__a, __b) __arm_vabdq_u16(__a, __b)
 #define vshlq_r_u16(__a, __b) __arm_vshlq_r_u16(__a, __b)
 #define vrshlq_u16(__a, __b) __arm_vrshlq_u16(__a, __b)
@@ -881,8 +855,6 @@ 
 #define vcmpeqq_n_s16(__a, __b) __arm_vcmpeqq_n_s16(__a, __b)
 #define vqshluq_n_s16(__a,  __imm) __arm_vqshluq_n_s16(__a,  __imm)
 #define vaddvq_p_s16(__a, __p) __arm_vaddvq_p_s16(__a, __p)
-#define vsubq_s16(__a, __b) __arm_vsubq_s16(__a, __b)
-#define vsubq_n_s16(__a, __b) __arm_vsubq_n_s16(__a, __b)
 #define vshlq_r_s16(__a, __b) __arm_vshlq_r_s16(__a, __b)
 #define vrshlq_s16(__a, __b) __arm_vrshlq_s16(__a, __b)
 #define vrshlq_n_s16(__a, __b) __arm_vrshlq_n_s16(__a, __b)
@@ -902,8 +874,6 @@ 
 #define vqaddq_n_s16(__a, __b) __arm_vqaddq_n_s16(__a, __b)
 #define vorrq_s16(__a, __b) __arm_vorrq_s16(__a, __b)
 #define vornq_s16(__a, __b) __arm_vornq_s16(__a, __b)
-#define vmulq_s16(__a, __b) __arm_vmulq_s16(__a, __b)
-#define vmulq_n_s16(__a, __b) __arm_vmulq_n_s16(__a, __b)
 #define vmulltq_int_s16(__a, __b) __arm_vmulltq_int_s16(__a, __b)
 #define vmullbq_int_s16(__a, __b) __arm_vmullbq_int_s16(__a, __b)
 #define vmulhq_s16(__a, __b) __arm_vmulhq_s16(__a, __b)
@@ -928,13 +898,10 @@ 
 #define vbicq_s16(__a, __b) __arm_vbicq_s16(__a, __b)
 #define vandq_s16(__a, __b) __arm_vandq_s16(__a, __b)
 #define vaddvaq_s16(__a, __b) __arm_vaddvaq_s16(__a, __b)
-#define vaddq_n_s16(__a, __b) __arm_vaddq_n_s16(__a, __b)
 #define vabdq_s16(__a, __b) __arm_vabdq_s16(__a, __b)
 #define vshlq_n_s16(__a,  __imm) __arm_vshlq_n_s16(__a,  __imm)
 #define vrshrq_n_s16(__a,  __imm) __arm_vrshrq_n_s16(__a,  __imm)
 #define vqshlq_n_s16(__a,  __imm) __arm_vqshlq_n_s16(__a,  __imm)
-#define vsubq_u32(__a, __b) __arm_vsubq_u32(__a, __b)
-#define vsubq_n_u32(__a, __b) __arm_vsubq_n_u32(__a, __b)
 #define vrmulhq_u32(__a, __b) __arm_vrmulhq_u32(__a, __b)
 #define vrhaddq_u32(__a, __b) __arm_vrhaddq_u32(__a, __b)
 #define vqsubq_u32(__a, __b) __arm_vqsubq_u32(__a, __b)
@@ -943,8 +910,6 @@ 
 #define vqaddq_n_u32(__a, __b) __arm_vqaddq_n_u32(__a, __b)
 #define vorrq_u32(__a, __b) __arm_vorrq_u32(__a, __b)
 #define vornq_u32(__a, __b) __arm_vornq_u32(__a, __b)
-#define vmulq_u32(__a, __b) __arm_vmulq_u32(__a, __b)
-#define vmulq_n_u32(__a, __b) __arm_vmulq_n_u32(__a, __b)
 #define vmulltq_int_u32(__a, __b) __arm_vmulltq_int_u32(__a, __b)
 #define vmullbq_int_u32(__a, __b) __arm_vmullbq_int_u32(__a, __b)
 #define vmulhq_u32(__a, __b) __arm_vmulhq_u32(__a, __b)
@@ -971,7 +936,6 @@ 
 #define vandq_u32(__a, __b) __arm_vandq_u32(__a, __b)
 #define vaddvq_p_u32(__a, __p) __arm_vaddvq_p_u32(__a, __p)
 #define vaddvaq_u32(__a, __b) __arm_vaddvaq_u32(__a, __b)
-#define vaddq_n_u32(__a, __b) __arm_vaddq_n_u32(__a, __b)
 #define vabdq_u32(__a, __b) __arm_vabdq_u32(__a, __b)
 #define vshlq_r_u32(__a, __b) __arm_vshlq_r_u32(__a, __b)
 #define vrshlq_u32(__a, __b) __arm_vrshlq_u32(__a, __b)
@@ -1001,8 +965,6 @@ 
 #define vcmpeqq_n_s32(__a, __b) __arm_vcmpeqq_n_s32(__a, __b)
 #define vqshluq_n_s32(__a,  __imm) __arm_vqshluq_n_s32(__a,  __imm)
 #define vaddvq_p_s32(__a, __p) __arm_vaddvq_p_s32(__a, __p)
-#define vsubq_s32(__a, __b) __arm_vsubq_s32(__a, __b)
-#define vsubq_n_s32(__a, __b) __arm_vsubq_n_s32(__a, __b)
 #define vshlq_r_s32(__a, __b) __arm_vshlq_r_s32(__a, __b)
 #define vrshlq_s32(__a, __b) __arm_vrshlq_s32(__a, __b)
 #define vrshlq_n_s32(__a, __b) __arm_vrshlq_n_s32(__a, __b)
@@ -1022,8 +984,6 @@ 
 #define vqaddq_n_s32(__a, __b) __arm_vqaddq_n_s32(__a, __b)
 #define vorrq_s32(__a, __b) __arm_vorrq_s32(__a, __b)
 #define vornq_s32(__a, __b) __arm_vornq_s32(__a, __b)
-#define vmulq_s32(__a, __b) __arm_vmulq_s32(__a, __b)
-#define vmulq_n_s32(__a, __b) __arm_vmulq_n_s32(__a, __b)
 #define vmulltq_int_s32(__a, __b) __arm_vmulltq_int_s32(__a, __b)
 #define vmullbq_int_s32(__a, __b) __arm_vmullbq_int_s32(__a, __b)
 #define vmulhq_s32(__a, __b) __arm_vmulhq_s32(__a, __b)
@@ -1048,7 +1008,6 @@ 
 #define vbicq_s32(__a, __b) __arm_vbicq_s32(__a, __b)
 #define vandq_s32(__a, __b) __arm_vandq_s32(__a, __b)
 #define vaddvaq_s32(__a, __b) __arm_vaddvaq_s32(__a, __b)
-#define vaddq_n_s32(__a, __b) __arm_vaddq_n_s32(__a, __b)
 #define vabdq_s32(__a, __b) __arm_vabdq_s32(__a, __b)
 #define vshlq_n_s32(__a,  __imm) __arm_vshlq_n_s32(__a,  __imm)
 #define vrshrq_n_s32(__a,  __imm) __arm_vrshrq_n_s32(__a,  __imm)
@@ -1078,7 +1037,6 @@ 
 #define vcmpgeq_f16(__a, __b) __arm_vcmpgeq_f16(__a, __b)
 #define vcmpeqq_n_f16(__a, __b) __arm_vcmpeqq_n_f16(__a, __b)
 #define vcmpeqq_f16(__a, __b) __arm_vcmpeqq_f16(__a, __b)
-#define vsubq_f16(__a, __b) __arm_vsubq_f16(__a, __b)
 #define vqmovntq_s16(__a, __b) __arm_vqmovntq_s16(__a, __b)
 #define vqmovnbq_s16(__a, __b) __arm_vqmovnbq_s16(__a, __b)
 #define vqdmulltq_s16(__a, __b) __arm_vqdmulltq_s16(__a, __b)
@@ -1087,8 +1045,6 @@ 
 #define vqdmullbq_n_s16(__a, __b) __arm_vqdmullbq_n_s16(__a, __b)
 #define vorrq_f16(__a, __b) __arm_vorrq_f16(__a, __b)
 #define vornq_f16(__a, __b) __arm_vornq_f16(__a, __b)
-#define vmulq_n_f16(__a, __b) __arm_vmulq_n_f16(__a, __b)
-#define vmulq_f16(__a, __b) __arm_vmulq_f16(__a, __b)
 #define vmovntq_s16(__a, __b) __arm_vmovntq_s16(__a, __b)
 #define vmovnbq_s16(__a, __b) __arm_vmovnbq_s16(__a, __b)
 #define vmlsldavxq_s16(__a, __b) __arm_vmlsldavxq_s16(__a, __b)
@@ -1112,7 +1068,6 @@ 
 #define vcaddq_rot270_f16(__a, __b) __arm_vcaddq_rot270_f16(__a, __b)
 #define vbicq_f16(__a, __b) __arm_vbicq_f16(__a, __b)
 #define vandq_f16(__a, __b) __arm_vandq_f16(__a, __b)
-#define vaddq_n_f16(__a, __b) __arm_vaddq_n_f16(__a, __b)
 #define vabdq_f16(__a, __b) __arm_vabdq_f16(__a, __b)
 #define vshlltq_n_s8(__a,  __imm) __arm_vshlltq_n_s8(__a,  __imm)
 #define vshllbq_n_s8(__a,  __imm) __arm_vshllbq_n_s8(__a,  __imm)
@@ -1143,7 +1098,6 @@ 
 #define vcmpgeq_f32(__a, __b) __arm_vcmpgeq_f32(__a, __b)
 #define vcmpeqq_n_f32(__a, __b) __arm_vcmpeqq_n_f32(__a, __b)
 #define vcmpeqq_f32(__a, __b) __arm_vcmpeqq_f32(__a, __b)
-#define vsubq_f32(__a, __b) __arm_vsubq_f32(__a, __b)
 #define vqmovntq_s32(__a, __b) __arm_vqmovntq_s32(__a, __b)
 #define vqmovnbq_s32(__a, __b) __arm_vqmovnbq_s32(__a, __b)
 #define vqdmulltq_s32(__a, __b) __arm_vqdmulltq_s32(__a, __b)
@@ -1152,8 +1106,6 @@ 
 #define vqdmullbq_n_s32(__a, __b) __arm_vqdmullbq_n_s32(__a, __b)
 #define vorrq_f32(__a, __b) __arm_vorrq_f32(__a, __b)
 #define vornq_f32(__a, __b) __arm_vornq_f32(__a, __b)
-#define vmulq_n_f32(__a, __b) __arm_vmulq_n_f32(__a, __b)
-#define vmulq_f32(__a, __b) __arm_vmulq_f32(__a, __b)
 #define vmovntq_s32(__a, __b) __arm_vmovntq_s32(__a, __b)
 #define vmovnbq_s32(__a, __b) __arm_vmovnbq_s32(__a, __b)
 #define vmlsldavxq_s32(__a, __b) __arm_vmlsldavxq_s32(__a, __b)
@@ -1177,7 +1129,6 @@ 
 #define vcaddq_rot270_f32(__a, __b) __arm_vcaddq_rot270_f32(__a, __b)
 #define vbicq_f32(__a, __b) __arm_vbicq_f32(__a, __b)
 #define vandq_f32(__a, __b) __arm_vandq_f32(__a, __b)
-#define vaddq_n_f32(__a, __b) __arm_vaddq_n_f32(__a, __b)
 #define vabdq_f32(__a, __b) __arm_vabdq_f32(__a, __b)
 #define vshlltq_n_s16(__a,  __imm) __arm_vshlltq_n_s16(__a,  __imm)
 #define vshllbq_n_s16(__a,  __imm) __arm_vshllbq_n_s16(__a,  __imm)
@@ -1681,34 +1632,28 @@ 
 #define vqmovntq_m_u32(__a, __b, __p) __arm_vqmovntq_m_u32(__a, __b, __p)
 #define vrev32q_m_u16(__inactive, __a, __p) __arm_vrev32q_m_u16(__inactive, __a, __p)
 #define vsriq_m_n_s8(__a, __b,  __imm, __p) __arm_vsriq_m_n_s8(__a, __b,  __imm, __p)
-#define vsubq_m_s8(__inactive, __a, __b, __p) __arm_vsubq_m_s8(__inactive, __a, __b, __p)
 #define vcvtq_m_n_f16_u16(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_f16_u16(__inactive, __a,  __imm6, __p)
 #define vqshluq_m_n_s8(__inactive, __a,  __imm, __p) __arm_vqshluq_m_n_s8(__inactive, __a,  __imm, __p)
 #define vabavq_p_s8(__a, __b, __c, __p) __arm_vabavq_p_s8(__a, __b, __c, __p)
 #define vsriq_m_n_u8(__a, __b,  __imm, __p) __arm_vsriq_m_n_u8(__a, __b,  __imm, __p)
 #define vshlq_m_u8(__inactive, __a, __b, __p) __arm_vshlq_m_u8(__inactive, __a, __b, __p)
-#define vsubq_m_u8(__inactive, __a, __b, __p) __arm_vsubq_m_u8(__inactive, __a, __b, __p)
 #define vabavq_p_u8(__a, __b, __c, __p) __arm_vabavq_p_u8(__a, __b, __c, __p)
 #define vshlq_m_s8(__inactive, __a, __b, __p) __arm_vshlq_m_s8(__inactive, __a, __b, __p)
 #define vcvtq_m_n_f16_s16(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_f16_s16(__inactive, __a,  __imm6, __p)
 #define vsriq_m_n_s16(__a, __b,  __imm, __p) __arm_vsriq_m_n_s16(__a, __b,  __imm, __p)
-#define vsubq_m_s16(__inactive, __a, __b, __p) __arm_vsubq_m_s16(__inactive, __a, __b, __p)
 #define vcvtq_m_n_f32_u32(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_f32_u32(__inactive, __a,  __imm6, __p)
 #define vqshluq_m_n_s16(__inactive, __a,  __imm, __p) __arm_vqshluq_m_n_s16(__inactive, __a,  __imm, __p)
 #define vabavq_p_s16(__a, __b, __c, __p) __arm_vabavq_p_s16(__a, __b, __c, __p)
 #define vsriq_m_n_u16(__a, __b,  __imm, __p) __arm_vsriq_m_n_u16(__a, __b,  __imm, __p)
 #define vshlq_m_u16(__inactive, __a, __b, __p) __arm_vshlq_m_u16(__inactive, __a, __b, __p)
-#define vsubq_m_u16(__inactive, __a, __b, __p) __arm_vsubq_m_u16(__inactive, __a, __b, __p)
 #define vabavq_p_u16(__a, __b, __c, __p) __arm_vabavq_p_u16(__a, __b, __c, __p)
 #define vshlq_m_s16(__inactive, __a, __b, __p) __arm_vshlq_m_s16(__inactive, __a, __b, __p)
 #define vcvtq_m_n_f32_s32(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_f32_s32(__inactive, __a,  __imm6, __p)
 #define vsriq_m_n_s32(__a, __b,  __imm, __p) __arm_vsriq_m_n_s32(__a, __b,  __imm, __p)
-#define vsubq_m_s32(__inactive, __a, __b, __p) __arm_vsubq_m_s32(__inactive, __a, __b, __p)
 #define vqshluq_m_n_s32(__inactive, __a,  __imm, __p) __arm_vqshluq_m_n_s32(__inactive, __a,  __imm, __p)
 #define vabavq_p_s32(__a, __b, __c, __p) __arm_vabavq_p_s32(__a, __b, __c, __p)
 #define vsriq_m_n_u32(__a, __b,  __imm, __p) __arm_vsriq_m_n_u32(__a, __b,  __imm, __p)
 #define vshlq_m_u32(__inactive, __a, __b, __p) __arm_vshlq_m_u32(__inactive, __a, __b, __p)
-#define vsubq_m_u32(__inactive, __a, __b, __p) __arm_vsubq_m_u32(__inactive, __a, __b, __p)
 #define vabavq_p_u32(__a, __b, __c, __p) __arm_vabavq_p_u32(__a, __b, __c, __p)
 #define vshlq_m_s32(__inactive, __a, __b, __p) __arm_vshlq_m_s32(__inactive, __a, __b, __p)
 #define vabdq_m_s8(__inactive, __a, __b, __p) __arm_vabdq_m_s8(__inactive, __a, __b, __p)
@@ -1717,18 +1662,6 @@ 
 #define vabdq_m_u8(__inactive, __a, __b, __p) __arm_vabdq_m_u8(__inactive, __a, __b, __p)
 #define vabdq_m_u32(__inactive, __a, __b, __p) __arm_vabdq_m_u32(__inactive, __a, __b, __p)
 #define vabdq_m_u16(__inactive, __a, __b, __p) __arm_vabdq_m_u16(__inactive, __a, __b, __p)
-#define vaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vaddq_m_n_s8(__inactive, __a, __b, __p)
-#define vaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vaddq_m_n_s32(__inactive, __a, __b, __p)
-#define vaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vaddq_m_n_s16(__inactive, __a, __b, __p)
-#define vaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vaddq_m_n_u8(__inactive, __a, __b, __p)
-#define vaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vaddq_m_n_u32(__inactive, __a, __b, __p)
-#define vaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vaddq_m_n_u16(__inactive, __a, __b, __p)
-#define vaddq_m_s8(__inactive, __a, __b, __p) __arm_vaddq_m_s8(__inactive, __a, __b, __p)
-#define vaddq_m_s32(__inactive, __a, __b, __p) __arm_vaddq_m_s32(__inactive, __a, __b, __p)
-#define vaddq_m_s16(__inactive, __a, __b, __p) __arm_vaddq_m_s16(__inactive, __a, __b, __p)
-#define vaddq_m_u8(__inactive, __a, __b, __p) __arm_vaddq_m_u8(__inactive, __a, __b, __p)
-#define vaddq_m_u32(__inactive, __a, __b, __p) __arm_vaddq_m_u32(__inactive, __a, __b, __p)
-#define vaddq_m_u16(__inactive, __a, __b, __p) __arm_vaddq_m_u16(__inactive, __a, __b, __p)
 #define vandq_m_s8(__inactive, __a, __b, __p) __arm_vandq_m_s8(__inactive, __a, __b, __p)
 #define vandq_m_s32(__inactive, __a, __b, __p) __arm_vandq_m_s32(__inactive, __a, __b, __p)
 #define vandq_m_s16(__inactive, __a, __b, __p) __arm_vandq_m_s16(__inactive, __a, __b, __p)
@@ -1852,18 +1785,6 @@ 
 #define vmulltq_int_m_u8(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u8(__inactive, __a, __b, __p)
 #define vmulltq_int_m_u32(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u32(__inactive, __a, __b, __p)
 #define vmulltq_int_m_u16(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u16(__inactive, __a, __b, __p)
-#define vmulq_m_n_s8(__inactive, __a, __b, __p) __arm_vmulq_m_n_s8(__inactive, __a, __b, __p)
-#define vmulq_m_n_s32(__inactive, __a, __b, __p) __arm_vmulq_m_n_s32(__inactive, __a, __b, __p)
-#define vmulq_m_n_s16(__inactive, __a, __b, __p) __arm_vmulq_m_n_s16(__inactive, __a, __b, __p)
-#define vmulq_m_n_u8(__inactive, __a, __b, __p) __arm_vmulq_m_n_u8(__inactive, __a, __b, __p)
-#define vmulq_m_n_u32(__inactive, __a, __b, __p) __arm_vmulq_m_n_u32(__inactive, __a, __b, __p)
-#define vmulq_m_n_u16(__inactive, __a, __b, __p) __arm_vmulq_m_n_u16(__inactive, __a, __b, __p)
-#define vmulq_m_s8(__inactive, __a, __b, __p) __arm_vmulq_m_s8(__inactive, __a, __b, __p)
-#define vmulq_m_s32(__inactive, __a, __b, __p) __arm_vmulq_m_s32(__inactive, __a, __b, __p)
-#define vmulq_m_s16(__inactive, __a, __b, __p) __arm_vmulq_m_s16(__inactive, __a, __b, __p)
-#define vmulq_m_u8(__inactive, __a, __b, __p) __arm_vmulq_m_u8(__inactive, __a, __b, __p)
-#define vmulq_m_u32(__inactive, __a, __b, __p) __arm_vmulq_m_u32(__inactive, __a, __b, __p)
-#define vmulq_m_u16(__inactive, __a, __b, __p) __arm_vmulq_m_u16(__inactive, __a, __b, __p)
 #define vornq_m_s8(__inactive, __a, __b, __p) __arm_vornq_m_s8(__inactive, __a, __b, __p)
 #define vornq_m_s32(__inactive, __a, __b, __p) __arm_vornq_m_s32(__inactive, __a, __b, __p)
 #define vornq_m_s16(__inactive, __a, __b, __p) __arm_vornq_m_s16(__inactive, __a, __b, __p)
@@ -2008,12 +1929,6 @@ 
 #define vsliq_m_n_u8(__a, __b,  __imm, __p) __arm_vsliq_m_n_u8(__a, __b,  __imm, __p)
 #define vsliq_m_n_u32(__a, __b,  __imm, __p) __arm_vsliq_m_n_u32(__a, __b,  __imm, __p)
 #define vsliq_m_n_u16(__a, __b,  __imm, __p) __arm_vsliq_m_n_u16(__a, __b,  __imm, __p)
-#define vsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vsubq_m_n_s8(__inactive, __a, __b, __p)
-#define vsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vsubq_m_n_s32(__inactive, __a, __b, __p)
-#define vsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vsubq_m_n_s16(__inactive, __a, __b, __p)
-#define vsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vsubq_m_n_u8(__inactive, __a, __b, __p)
-#define vsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vsubq_m_n_u32(__inactive, __a, __b, __p)
-#define vsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vsubq_m_n_u16(__inactive, __a, __b, __p)
 #define vmlaldavaq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaq_p_s32(__a, __b, __c, __p)
 #define vmlaldavaq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaq_p_s16(__a, __b, __c, __p)
 #define vmlaldavaq_p_u32(__a, __b, __c, __p) __arm_vmlaldavaq_p_u32(__a, __b, __c, __p)
@@ -2091,10 +2006,6 @@ 
 #define vshrntq_m_n_u16(__a, __b,  __imm, __p) __arm_vshrntq_m_n_u16(__a, __b,  __imm, __p)
 #define vabdq_m_f32(__inactive, __a, __b, __p) __arm_vabdq_m_f32(__inactive, __a, __b, __p)
 #define vabdq_m_f16(__inactive, __a, __b, __p) __arm_vabdq_m_f16(__inactive, __a, __b, __p)
-#define vaddq_m_f32(__inactive, __a, __b, __p) __arm_vaddq_m_f32(__inactive, __a, __b, __p)
-#define vaddq_m_f16(__inactive, __a, __b, __p) __arm_vaddq_m_f16(__inactive, __a, __b, __p)
-#define vaddq_m_n_f32(__inactive, __a, __b, __p) __arm_vaddq_m_n_f32(__inactive, __a, __b, __p)
-#define vaddq_m_n_f16(__inactive, __a, __b, __p) __arm_vaddq_m_n_f16(__inactive, __a, __b, __p)
 #define vandq_m_f32(__inactive, __a, __b, __p) __arm_vandq_m_f32(__inactive, __a, __b, __p)
 #define vandq_m_f16(__inactive, __a, __b, __p) __arm_vandq_m_f16(__inactive, __a, __b, __p)
 #define vbicq_m_f32(__inactive, __a, __b, __p) __arm_vbicq_m_f32(__inactive, __a, __b, __p)
@@ -2139,18 +2050,10 @@ 
 #define vmaxnmq_m_f16(__inactive, __a, __b, __p) __arm_vmaxnmq_m_f16(__inactive, __a, __b, __p)
 #define vminnmq_m_f32(__inactive, __a, __b, __p) __arm_vminnmq_m_f32(__inactive, __a, __b, __p)
 #define vminnmq_m_f16(__inactive, __a, __b, __p) __arm_vminnmq_m_f16(__inactive, __a, __b, __p)
-#define vmulq_m_f32(__inactive, __a, __b, __p) __arm_vmulq_m_f32(__inactive, __a, __b, __p)
-#define vmulq_m_f16(__inactive, __a, __b, __p) __arm_vmulq_m_f16(__inactive, __a, __b, __p)
-#define vmulq_m_n_f32(__inactive, __a, __b, __p) __arm_vmulq_m_n_f32(__inactive, __a, __b, __p)
-#define vmulq_m_n_f16(__inactive, __a, __b, __p) __arm_vmulq_m_n_f16(__inactive, __a, __b, __p)
 #define vornq_m_f32(__inactive, __a, __b, __p) __arm_vornq_m_f32(__inactive, __a, __b, __p)
 #define vornq_m_f16(__inactive, __a, __b, __p) __arm_vornq_m_f16(__inactive, __a, __b, __p)
 #define vorrq_m_f32(__inactive, __a, __b, __p) __arm_vorrq_m_f32(__inactive, __a, __b, __p)
 #define vorrq_m_f16(__inactive, __a, __b, __p) __arm_vorrq_m_f16(__inactive, __a, __b, __p)
-#define vsubq_m_f32(__inactive, __a, __b, __p) __arm_vsubq_m_f32(__inactive, __a, __b, __p)
-#define vsubq_m_f16(__inactive, __a, __b, __p) __arm_vsubq_m_f16(__inactive, __a, __b, __p)
-#define vsubq_m_n_f32(__inactive, __a, __b, __p) __arm_vsubq_m_n_f32(__inactive, __a, __b, __p)
-#define vsubq_m_n_f16(__inactive, __a, __b, __p) __arm_vsubq_m_n_f16(__inactive, __a, __b, __p)
 #define vstrbq_s8( __addr, __value) __arm_vstrbq_s8( __addr, __value)
 #define vstrbq_u8( __addr, __value) __arm_vstrbq_u8( __addr, __value)
 #define vstrbq_u16( __addr, __value) __arm_vstrbq_u16( __addr, __value)
@@ -2347,14 +2250,6 @@ 
 #define vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p)
 #define vstrwq_scatter_shifted_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_s32(__base, __offset, __value)
 #define vstrwq_scatter_shifted_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_u32(__base, __offset, __value)
-#define vaddq_s8(__a, __b) __arm_vaddq_s8(__a, __b)
-#define vaddq_s16(__a, __b) __arm_vaddq_s16(__a, __b)
-#define vaddq_s32(__a, __b) __arm_vaddq_s32(__a, __b)
-#define vaddq_u8(__a, __b) __arm_vaddq_u8(__a, __b)
-#define vaddq_u16(__a, __b) __arm_vaddq_u16(__a, __b)
-#define vaddq_u32(__a, __b) __arm_vaddq_u32(__a, __b)
-#define vaddq_f16(__a, __b) __arm_vaddq_f16(__a, __b)
-#define vaddq_f32(__a, __b) __arm_vaddq_f32(__a, __b)
 #define vuninitializedq_u8(void) __arm_vuninitializedq_u8(void)
 #define vuninitializedq_u16(void) __arm_vuninitializedq_u16(void)
 #define vuninitializedq_u32(void) __arm_vuninitializedq_u32(void)
@@ -2484,18 +2379,6 @@ 
 #define vabsq_x_s8(__a, __p) __arm_vabsq_x_s8(__a, __p)
 #define vabsq_x_s16(__a, __p) __arm_vabsq_x_s16(__a, __p)
 #define vabsq_x_s32(__a, __p) __arm_vabsq_x_s32(__a, __p)
-#define vaddq_x_s8(__a, __b, __p) __arm_vaddq_x_s8(__a, __b, __p)
-#define vaddq_x_s16(__a, __b, __p) __arm_vaddq_x_s16(__a, __b, __p)
-#define vaddq_x_s32(__a, __b, __p) __arm_vaddq_x_s32(__a, __b, __p)
-#define vaddq_x_n_s8(__a, __b, __p) __arm_vaddq_x_n_s8(__a, __b, __p)
-#define vaddq_x_n_s16(__a, __b, __p) __arm_vaddq_x_n_s16(__a, __b, __p)
-#define vaddq_x_n_s32(__a, __b, __p) __arm_vaddq_x_n_s32(__a, __b, __p)
-#define vaddq_x_u8(__a, __b, __p) __arm_vaddq_x_u8(__a, __b, __p)
-#define vaddq_x_u16(__a, __b, __p) __arm_vaddq_x_u16(__a, __b, __p)
-#define vaddq_x_u32(__a, __b, __p) __arm_vaddq_x_u32(__a, __b, __p)
-#define vaddq_x_n_u8(__a, __b, __p) __arm_vaddq_x_n_u8(__a, __b, __p)
-#define vaddq_x_n_u16(__a, __b, __p) __arm_vaddq_x_n_u16(__a, __b, __p)
-#define vaddq_x_n_u32(__a, __b, __p) __arm_vaddq_x_n_u32(__a, __b, __p)
 #define vclsq_x_s8(__a, __p) __arm_vclsq_x_s8(__a, __p)
 #define vclsq_x_s16(__a, __p) __arm_vclsq_x_s16(__a, __p)
 #define vclsq_x_s32(__a, __p) __arm_vclsq_x_s32(__a, __p)
@@ -2530,30 +2413,6 @@ 
 #define vmulltq_int_x_u8(__a, __b, __p) __arm_vmulltq_int_x_u8(__a, __b, __p)
 #define vmulltq_int_x_u16(__a, __b, __p) __arm_vmulltq_int_x_u16(__a, __b, __p)
 #define vmulltq_int_x_u32(__a, __b, __p) __arm_vmulltq_int_x_u32(__a, __b, __p)
-#define vmulq_x_s8(__a, __b, __p) __arm_vmulq_x_s8(__a, __b, __p)
-#define vmulq_x_s16(__a, __b, __p) __arm_vmulq_x_s16(__a, __b, __p)
-#define vmulq_x_s32(__a, __b, __p) __arm_vmulq_x_s32(__a, __b, __p)
-#define vmulq_x_n_s8(__a, __b, __p) __arm_vmulq_x_n_s8(__a, __b, __p)
-#define vmulq_x_n_s16(__a, __b, __p) __arm_vmulq_x_n_s16(__a, __b, __p)
-#define vmulq_x_n_s32(__a, __b, __p) __arm_vmulq_x_n_s32(__a, __b, __p)
-#define vmulq_x_u8(__a, __b, __p) __arm_vmulq_x_u8(__a, __b, __p)
-#define vmulq_x_u16(__a, __b, __p) __arm_vmulq_x_u16(__a, __b, __p)
-#define vmulq_x_u32(__a, __b, __p) __arm_vmulq_x_u32(__a, __b, __p)
-#define vmulq_x_n_u8(__a, __b, __p) __arm_vmulq_x_n_u8(__a, __b, __p)
-#define vmulq_x_n_u16(__a, __b, __p) __arm_vmulq_x_n_u16(__a, __b, __p)
-#define vmulq_x_n_u32(__a, __b, __p) __arm_vmulq_x_n_u32(__a, __b, __p)
-#define vsubq_x_s8(__a, __b, __p) __arm_vsubq_x_s8(__a, __b, __p)
-#define vsubq_x_s16(__a, __b, __p) __arm_vsubq_x_s16(__a, __b, __p)
-#define vsubq_x_s32(__a, __b, __p) __arm_vsubq_x_s32(__a, __b, __p)
-#define vsubq_x_n_s8(__a, __b, __p) __arm_vsubq_x_n_s8(__a, __b, __p)
-#define vsubq_x_n_s16(__a, __b, __p) __arm_vsubq_x_n_s16(__a, __b, __p)
-#define vsubq_x_n_s32(__a, __b, __p) __arm_vsubq_x_n_s32(__a, __b, __p)
-#define vsubq_x_u8(__a, __b, __p) __arm_vsubq_x_u8(__a, __b, __p)
-#define vsubq_x_u16(__a, __b, __p) __arm_vsubq_x_u16(__a, __b, __p)
-#define vsubq_x_u32(__a, __b, __p) __arm_vsubq_x_u32(__a, __b, __p)
-#define vsubq_x_n_u8(__a, __b, __p) __arm_vsubq_x_n_u8(__a, __b, __p)
-#define vsubq_x_n_u16(__a, __b, __p) __arm_vsubq_x_n_u16(__a, __b, __p)
-#define vsubq_x_n_u32(__a, __b, __p) __arm_vsubq_x_n_u32(__a, __b, __p)
 #define vcaddq_rot90_x_s8(__a, __b, __p) __arm_vcaddq_rot90_x_s8(__a, __b, __p)
 #define vcaddq_rot90_x_s16(__a, __b, __p) __arm_vcaddq_rot90_x_s16(__a, __b, __p)
 #define vcaddq_rot90_x_s32(__a, __b, __p) __arm_vcaddq_rot90_x_s32(__a, __b, __p)
@@ -2722,20 +2581,8 @@ 
 #define vabdq_x_f32(__a, __b, __p) __arm_vabdq_x_f32(__a, __b, __p)
 #define vabsq_x_f16(__a, __p) __arm_vabsq_x_f16(__a, __p)
 #define vabsq_x_f32(__a, __p) __arm_vabsq_x_f32(__a, __p)
-#define vaddq_x_f16(__a, __b, __p) __arm_vaddq_x_f16(__a, __b, __p)
-#define vaddq_x_f32(__a, __b, __p) __arm_vaddq_x_f32(__a, __b, __p)
-#define vaddq_x_n_f16(__a, __b, __p) __arm_vaddq_x_n_f16(__a, __b, __p)
-#define vaddq_x_n_f32(__a, __b, __p) __arm_vaddq_x_n_f32(__a, __b, __p)
 #define vnegq_x_f16(__a, __p) __arm_vnegq_x_f16(__a, __p)
 #define vnegq_x_f32(__a, __p) __arm_vnegq_x_f32(__a, __p)
-#define vmulq_x_f16(__a, __b, __p) __arm_vmulq_x_f16(__a, __b, __p)
-#define vmulq_x_f32(__a, __b, __p) __arm_vmulq_x_f32(__a, __b, __p)
-#define vmulq_x_n_f16(__a, __b, __p) __arm_vmulq_x_n_f16(__a, __b, __p)
-#define vmulq_x_n_f32(__a, __b, __p) __arm_vmulq_x_n_f32(__a, __b, __p)
-#define vsubq_x_f16(__a, __b, __p) __arm_vsubq_x_f16(__a, __b, __p)
-#define vsubq_x_f32(__a, __b, __p) __arm_vsubq_x_f32(__a, __b, __p)
-#define vsubq_x_n_f16(__a, __b, __p) __arm_vsubq_x_n_f16(__a, __b, __p)
-#define vsubq_x_n_f32(__a, __b, __p) __arm_vsubq_x_n_f32(__a, __b, __p)
 #define vcaddq_rot90_x_f16(__a, __b, __p) __arm_vcaddq_rot90_x_f16(__a, __b, __p)
 #define vcaddq_rot90_x_f32(__a, __b, __p) __arm_vcaddq_rot90_x_f32(__a, __b, __p)
 #define vcaddq_rot270_x_f16(__a, __b, __p) __arm_vcaddq_rot270_x_f16(__a, __b, __p)
@@ -3659,19 +3506,6 @@  __arm_vshlq_u32 (uint32x4_t __a, int32x4_t __b)
 {
   return __builtin_mve_vshlq_uv4si (__a, __b);
 }
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
-  return __builtin_mve_vsubq_uv16qi (__a, __b);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_n_u8 (uint8x16_t __a, uint8_t __b)
-{
-  return __builtin_mve_vsubq_n_uv16qi (__a, __b);
-}
 
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@@ -3729,20 +3563,6 @@  __arm_vornq_u8 (uint8x16_t __a, uint8x16_t __b)
   return __builtin_mve_vornq_uv16qi (__a, __b);
 }
 
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
-  return __builtin_mve_vmulq_uv16qi (__a, __b);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_n_u8 (uint8x16_t __a, uint8_t __b)
-{
-  return __builtin_mve_vmulq_n_uv16qi (__a, __b);
-}
-
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmulltq_int_u8 (uint8x16_t __a, uint8x16_t __b)
@@ -3927,13 +3747,6 @@  __arm_vaddvaq_u8 (uint32_t __a, uint8x16_t __b)
   return __builtin_mve_vaddvaq_uv16qi (__a, __b);
 }
 
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_n_u8 (uint8x16_t __a, uint8_t __b)
-{
-  return __builtin_mve_vaddq_n_uv16qi (__a, __b);
-}
-
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq_u8 (uint8x16_t __a, uint8x16_t __b)
@@ -4137,20 +3950,6 @@  __arm_vaddvq_p_s8 (int8x16_t __a, mve_pred16_t __p)
   return __builtin_mve_vaddvq_p_sv16qi (__a, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_s8 (int8x16_t __a, int8x16_t __b)
-{
-  return __builtin_mve_vsubq_sv16qi (__a, __b);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_n_s8 (int8x16_t __a, int8_t __b)
-{
-  return __builtin_mve_vsubq_n_sv16qi (__a, __b);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vshlq_r_s8 (int8x16_t __a, int32_t __b)
@@ -4284,20 +4083,6 @@  __arm_vornq_s8 (int8x16_t __a, int8x16_t __b)
   return __builtin_mve_vornq_sv16qi (__a, __b);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_s8 (int8x16_t __a, int8x16_t __b)
-{
-  return __builtin_mve_vmulq_sv16qi (__a, __b);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_n_s8 (int8x16_t __a, int8_t __b)
-{
-  return __builtin_mve_vmulq_n_sv16qi (__a, __b);
-}
-
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmulltq_int_s8 (int8x16_t __a, int8x16_t __b)
@@ -4466,13 +4251,6 @@  __arm_vaddvaq_s8 (int32_t __a, int8x16_t __b)
   return __builtin_mve_vaddvaq_sv16qi (__a, __b);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_n_s8 (int8x16_t __a, int8_t __b)
-{
-  return __builtin_mve_vaddq_n_sv16qi (__a, __b);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq_s8 (int8x16_t __a, int8x16_t __b)
@@ -4501,20 +4279,6 @@  __arm_vqshlq_n_s8 (int8x16_t __a, const int __imm)
   return __builtin_mve_vqshlq_n_sv16qi (__a, __imm);
 }
 
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
-  return __builtin_mve_vsubq_uv8hi (__a, __b);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_n_u16 (uint16x8_t __a, uint16_t __b)
-{
-  return __builtin_mve_vsubq_n_uv8hi (__a, __b);
-}
-
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vrmulhq_u16 (uint16x8_t __a, uint16x8_t __b)
@@ -4571,20 +4335,6 @@  __arm_vornq_u16 (uint16x8_t __a, uint16x8_t __b)
   return __builtin_mve_vornq_uv8hi (__a, __b);
 }
 
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
-  return __builtin_mve_vmulq_uv8hi (__a, __b);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
-{
-  return __builtin_mve_vmulq_n_uv8hi (__a, __b);
-}
-
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmulltq_int_u16 (uint16x8_t __a, uint16x8_t __b)
@@ -4769,13 +4519,6 @@  __arm_vaddvaq_u16 (uint32_t __a, uint16x8_t __b)
   return __builtin_mve_vaddvaq_uv8hi (__a, __b);
 }
 
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_n_u16 (uint16x8_t __a, uint16_t __b)
-{
-  return __builtin_mve_vaddq_n_uv8hi (__a, __b);
-}
-
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq_u16 (uint16x8_t __a, uint16x8_t __b)
@@ -4979,20 +4722,6 @@  __arm_vaddvq_p_s16 (int16x8_t __a, mve_pred16_t __p)
   return __builtin_mve_vaddvq_p_sv8hi (__a, __p);
 }
 
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_s16 (int16x8_t __a, int16x8_t __b)
-{
-  return __builtin_mve_vsubq_sv8hi (__a, __b);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_n_s16 (int16x8_t __a, int16_t __b)
-{
-  return __builtin_mve_vsubq_n_sv8hi (__a, __b);
-}
-
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vshlq_r_s16 (int16x8_t __a, int32_t __b)
@@ -5126,20 +4855,6 @@  __arm_vornq_s16 (int16x8_t __a, int16x8_t __b)
   return __builtin_mve_vornq_sv8hi (__a, __b);
 }
 
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_s16 (int16x8_t __a, int16x8_t __b)
-{
-  return __builtin_mve_vmulq_sv8hi (__a, __b);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_n_s16 (int16x8_t __a, int16_t __b)
-{
-  return __builtin_mve_vmulq_n_sv8hi (__a, __b);
-}
-
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmulltq_int_s16 (int16x8_t __a, int16x8_t __b)
@@ -5308,13 +5023,6 @@  __arm_vaddvaq_s16 (int32_t __a, int16x8_t __b)
   return __builtin_mve_vaddvaq_sv8hi (__a, __b);
 }
 
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_n_s16 (int16x8_t __a, int16_t __b)
-{
-  return __builtin_mve_vaddq_n_sv8hi (__a, __b);
-}
-
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq_s16 (int16x8_t __a, int16x8_t __b)
@@ -5343,20 +5051,6 @@  __arm_vqshlq_n_s16 (int16x8_t __a, const int __imm)
   return __builtin_mve_vqshlq_n_sv8hi (__a, __imm);
 }
 
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
-  return __builtin_mve_vsubq_uv4si (__a, __b);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_n_u32 (uint32x4_t __a, uint32_t __b)
-{
-  return __builtin_mve_vsubq_n_uv4si (__a, __b);
-}
-
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vrmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
@@ -5413,20 +5107,6 @@  __arm_vornq_u32 (uint32x4_t __a, uint32x4_t __b)
   return __builtin_mve_vornq_uv4si (__a, __b);
 }
 
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
-  return __builtin_mve_vmulq_uv4si (__a, __b);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
-{
-  return __builtin_mve_vmulq_n_uv4si (__a, __b);
-}
-
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmulltq_int_u32 (uint32x4_t __a, uint32x4_t __b)
@@ -5611,13 +5291,6 @@  __arm_vaddvaq_u32 (uint32_t __a, uint32x4_t __b)
   return __builtin_mve_vaddvaq_uv4si (__a, __b);
 }
 
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_n_u32 (uint32x4_t __a, uint32_t __b)
-{
-  return __builtin_mve_vaddq_n_uv4si (__a, __b);
-}
-
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
@@ -5821,20 +5494,6 @@  __arm_vaddvq_p_s32 (int32x4_t __a, mve_pred16_t __p)
   return __builtin_mve_vaddvq_p_sv4si (__a, __p);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_s32 (int32x4_t __a, int32x4_t __b)
-{
-  return __builtin_mve_vsubq_sv4si (__a, __b);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_n_s32 (int32x4_t __a, int32_t __b)
-{
-  return __builtin_mve_vsubq_n_sv4si (__a, __b);
-}
-
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vshlq_r_s32 (int32x4_t __a, int32_t __b)
@@ -5968,20 +5627,6 @@  __arm_vornq_s32 (int32x4_t __a, int32x4_t __b)
   return __builtin_mve_vornq_sv4si (__a, __b);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_s32 (int32x4_t __a, int32x4_t __b)
-{
-  return __builtin_mve_vmulq_sv4si (__a, __b);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_n_s32 (int32x4_t __a, int32_t __b)
-{
-  return __builtin_mve_vmulq_n_sv4si (__a, __b);
-}
-
 __extension__ extern __inline int64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmulltq_int_s32 (int32x4_t __a, int32x4_t __b)
@@ -6150,13 +5795,6 @@  __arm_vaddvaq_s32 (int32_t __a, int32x4_t __b)
   return __builtin_mve_vaddvaq_sv4si (__a, __b);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_n_s32 (int32x4_t __a, int32_t __b)
-{
-  return __builtin_mve_vaddq_n_sv4si (__a, __b);
-}
-
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq_s32 (int32x4_t __a, int32x4_t __b)
@@ -9355,13 +8993,6 @@  __arm_vsriq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t
   return __builtin_mve_vsriq_m_n_sv16qi (__a, __b, __imm, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_sv16qi (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vqshluq_m_n_s8 (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
@@ -9390,13 +9021,6 @@  __arm_vshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred
   return __builtin_mve_vshlq_m_uv16qi (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_uv16qi (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline uint32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabavq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
@@ -9418,13 +9042,6 @@  __arm_vsriq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t
   return __builtin_mve_vsriq_m_n_sv8hi (__a, __b, __imm, __p);
 }
 
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_sv8hi (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vqshluq_m_n_s16 (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
@@ -9453,13 +9070,6 @@  __arm_vshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pre
   return __builtin_mve_vshlq_m_uv8hi (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_uv8hi (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline uint32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabavq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
@@ -9481,13 +9091,6 @@  __arm_vsriq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t
   return __builtin_mve_vsriq_m_n_sv4si (__a, __b, __imm, __p);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_sv4si (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vqshluq_m_n_s32 (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
@@ -9516,13 +9119,6 @@  __arm_vshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pre
   return __builtin_mve_vshlq_m_uv4si (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_uv4si (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline uint32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabavq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
@@ -9579,90 +9175,6 @@  __arm_vabdq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pr
   return __builtin_mve_vabdq_m_uv8hi (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_sv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_sv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_sv8hi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_uv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_uv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_uv8hi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_sv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_sv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_sv8hi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_uv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_uv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_uv8hi (__inactive, __a, __b, __p);
-}
-
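
The removed _m (merging) definitions above all share one contract: result
lanes whose predicate bits are clear take their value from the __inactive
argument.  A minimal usage sketch of that behaviour (not part of the patch;
it assumes only the standard ACLE polymorphic intrinsic from arm_mve.h):

  #include <arm_mve.h>

  int32x4_t
  add_m_use (int32x4_t fallback, int32x4_t a, int32x4_t b, mve_pred16_t p)
  {
    /* Lanes predicated off by p are taken from fallback.  */
    return vaddq_m (fallback, a, b, p);
  }
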
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vandq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
@@ -10524,90 +10036,6 @@  __arm_vmulltq_int_m_u16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b,
   return __builtin_mve_vmulltq_int_m_uv8hi (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_sv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_sv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_sv8hi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_uv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_uv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_uv8hi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_sv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_sv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_sv8hi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_uv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_uv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_uv8hi (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vornq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
@@ -11616,48 +11044,6 @@  __arm_vsliq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16
   return __builtin_mve_vsliq_m_n_uv8hi (__a, __b, __imm, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_sv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_sv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_sv8hi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_uv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_uv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_uv8hi (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline int64_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmlaldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
@@ -13333,48 +12719,6 @@  __arm_vstrwq_scatter_shifted_offset_u32 (uint32_t * __base, uint32x4_t __offset,
   __builtin_mve_vstrwq_scatter_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_s8 (int8x16_t __a, int8x16_t __b)
-{
-  return __a + __b;
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_s16 (int16x8_t __a, int16x8_t __b)
-{
-  return __a + __b;
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_s32 (int32x4_t __a, int32x4_t __b)
-{
-  return __a + __b;
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
-  return __a + __b;
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
-  return __a + __b;
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
-  return __a + __b;
-}
-
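
As the removed definitions above show, the unpredicated integer vaddq forms
are plain lane-wise C vector addition, so the rework is expected to leave
user code unchanged.  A minimal sketch (not part of the patch; it uses only
the standard ACLE polymorphic intrinsic):

  #include <arm_mve.h>

  int32x4_t
  add_vectors (int32x4_t a, int32x4_t b)
  {
    /* Lane-wise a + b, matching the removed __arm_vaddq_s32 body.  */
    return vaddq (a, b);
  }
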
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vddupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
@@ -14325,90 +13669,6 @@  __arm_vabsq_x_s32 (int32x4_t __a, mve_pred16_t __p)
   return __builtin_mve_vabsq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
-}
-
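
The removed _x definitions above make the lowering explicit: each one is the
corresponding _m form applied to an uninitialized inactive vector.  A minimal
sketch of that equivalence (not part of the patch; standard ACLE intrinsics
only):

  #include <arm_mve.h>

  int32x4_t
  add_x_equiv (int32x4_t a, int32x4_t b, mve_pred16_t p)
  {
    /* vaddq_x (a, b, p) behaves like the _m form below.  */
    return vaddq_m (vuninitializedq_s32 (), a, b, p);
  }
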
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vclsq_x_s8 (int8x16_t __a, mve_pred16_t __p)
@@ -14647,174 +13907,6 @@  __arm_vmulltq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
   return __builtin_mve_vmulltq_int_m_uv4si (__arm_vuninitializedq_u64 (), __a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
@@ -16970,20 +16062,6 @@  __arm_vcvtmq_s32_f32 (float32x4_t __a)
   return __builtin_mve_vcvtmq_sv4si (__a);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_n_f16 (float16x8_t __a, float16_t __b)
-{
-  return __builtin_mve_vsubq_n_fv8hf (__a, __b);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_n_f32 (float32x4_t __a, float32_t __b)
-{
-  return __builtin_mve_vsubq_n_fv4sf (__a, __b);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vbrsrq_n_f16 (float16x8_t __a, int32_t __b)
@@ -17152,13 +16230,6 @@  __arm_vcmpeqq_f16 (float16x8_t __a, float16x8_t __b)
   return __builtin_mve_vcmpeqq_fv8hf (__a, __b);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_f16 (float16x8_t __a, float16x8_t __b)
-{
-  return __builtin_mve_vsubq_fv8hf (__a, __b);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vorrq_f16 (float16x8_t __a, float16x8_t __b)
@@ -17173,20 +16244,6 @@  __arm_vornq_f16 (float16x8_t __a, float16x8_t __b)
   return __builtin_mve_vornq_fv8hf (__a, __b);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_n_f16 (float16x8_t __a, float16_t __b)
-{
-  return __builtin_mve_vmulq_n_fv8hf (__a, __b);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_f16 (float16x8_t __a, float16x8_t __b)
-{
-  return __builtin_mve_vmulq_fv8hf (__a, __b);
-}
-
 __extension__ extern __inline float16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vminnmvq_f16 (float16_t __a, float16x8_t __b)
@@ -17306,13 +16363,6 @@  __arm_vandq_f16 (float16x8_t __a, float16x8_t __b)
   return __builtin_mve_vandq_fv8hf (__a, __b);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_n_f16 (float16x8_t __a, float16_t __b)
-{
-  return __builtin_mve_vaddq_n_fv8hf (__a, __b);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq_f16 (float16x8_t __a, float16x8_t __b)
@@ -17404,13 +16454,6 @@  __arm_vcmpeqq_f32 (float32x4_t __a, float32x4_t __b)
   return __builtin_mve_vcmpeqq_fv4sf (__a, __b);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_f32 (float32x4_t __a, float32x4_t __b)
-{
-  return __builtin_mve_vsubq_fv4sf (__a, __b);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vorrq_f32 (float32x4_t __a, float32x4_t __b)
@@ -17425,20 +16468,6 @@  __arm_vornq_f32 (float32x4_t __a, float32x4_t __b)
   return __builtin_mve_vornq_fv4sf (__a, __b);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_n_f32 (float32x4_t __a, float32_t __b)
-{
-  return __builtin_mve_vmulq_n_fv4sf (__a, __b);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_f32 (float32x4_t __a, float32x4_t __b)
-{
-  return __builtin_mve_vmulq_fv4sf (__a, __b);
-}
-
 __extension__ extern __inline float32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vminnmvq_f32 (float32_t __a, float32x4_t __b)
@@ -17558,13 +16587,6 @@  __arm_vandq_f32 (float32x4_t __a, float32x4_t __b)
   return __builtin_mve_vandq_fv4sf (__a, __b);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_n_f32 (float32x4_t __a, float32_t __b)
-{
-  return __builtin_mve_vaddq_n_fv4sf (__a, __b);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq_f32 (float32x4_t __a, float32x4_t __b)
@@ -18350,34 +17372,6 @@  __arm_vabdq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve
   return __builtin_mve_vabdq_m_fv8hf (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_fv4sf (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_fv8hf (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_fv4sf (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_fv8hf (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vandq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
@@ -18686,34 +17680,6 @@  __arm_vminnmq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, m
   return __builtin_mve_vminnmq_m_fv8hf (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_fv4sf (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_fv8hf (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_fv4sf (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_fv8hf (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vornq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
@@ -18742,34 +17708,6 @@  __arm_vorrq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve
   return __builtin_mve_vorrq_m_fv8hf (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_fv4sf (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_fv8hf (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_fv4sf (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_fv8hf (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vld1q_f32 (float32_t const * __base)
@@ -18994,20 +17932,6 @@  __arm_vstrwq_scatter_shifted_offset_p_f32 (float32_t * __base, uint32x4_t __offs
   __builtin_mve_vstrwq_scatter_shifted_offset_p_fv4sf ((__builtin_neon_si *) __base, __offset, __value, __p);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_f16 (float16x8_t __a, float16x8_t __b)
-{
-  return __a + __b;
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_f32 (float32x4_t __a, float32x4_t __b)
-{
-  return __a + __b;
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vldrwq_gather_base_wb_f32 (uint32x4_t * __addr, const int __offset)
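
[ For reference: the non-predicated float vaddq forms removed above were
  defined as plain vector additions (__a + __b).  A minimal sketch,
  assuming a target built with -march=armv8.1-m.main+mve.fp, showing
  that user code keeps both equivalent spellings after this patch
  (function names here are illustrative only):

    #include <arm_mve.h>

    float32x4_t add_intrinsic (float32x4_t a, float32x4_t b)
    {
      return vaddq (a, b);  /* formerly __arm_vaddq_f32, now resolved
                               by the new builtins framework */
    }

    float32x4_t add_operator (float32x4_t a, float32x4_t b)
    {
      return a + b;         /* GNU C vector extension on MVE types */
    }

  Since the removed wrapper was literally __a + __b, both should lower
  to the same VADD.F32. ]
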
@@ -19112,34 +18036,6 @@  __arm_vabsq_x_f32 (float32x4_t __a, mve_pred16_t __p)
   return __builtin_mve_vabsq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vaddq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vnegq_x_f16 (float16x8_t __a, mve_pred16_t __p)
@@ -19154,62 +18050,6 @@  __arm_vnegq_x_f32 (float32x4_t __a, mve_pred16_t __p)
   return __builtin_mve_vnegq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vmulq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vsubq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
@@ -20448,20 +19288,6 @@  __arm_vshlq (uint32x4_t __a, int32x4_t __b)
  return __arm_vshlq_u32 (__a, __b);
 }
 
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (uint8x16_t __a, uint8x16_t __b)
-{
- return __arm_vsubq_u8 (__a, __b);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (uint8x16_t __a, uint8_t __b)
-{
- return __arm_vsubq_n_u8 (__a, __b);
-}
-
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vrmulhq (uint8x16_t __a, uint8x16_t __b)
@@ -20518,20 +19344,6 @@  __arm_vornq (uint8x16_t __a, uint8x16_t __b)
  return __arm_vornq_u8 (__a, __b);
 }
 
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (uint8x16_t __a, uint8x16_t __b)
-{
- return __arm_vmulq_u8 (__a, __b);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (uint8x16_t __a, uint8_t __b)
-{
- return __arm_vmulq_n_u8 (__a, __b);
-}
-
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmulltq_int (uint8x16_t __a, uint8x16_t __b)
@@ -20714,13 +19526,6 @@  __arm_vaddvaq (uint32_t __a, uint8x16_t __b)
  return __arm_vaddvaq_u8 (__a, __b);
 }
 
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (uint8x16_t __a, uint8_t __b)
-{
- return __arm_vaddq_n_u8 (__a, __b);
-}
-
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq (uint8x16_t __a, uint8x16_t __b)
@@ -20924,20 +19729,6 @@  __arm_vaddvq_p (int8x16_t __a, mve_pred16_t __p)
  return __arm_vaddvq_p_s8 (__a, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (int8x16_t __a, int8x16_t __b)
-{
- return __arm_vsubq_s8 (__a, __b);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (int8x16_t __a, int8_t __b)
-{
- return __arm_vsubq_n_s8 (__a, __b);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vshlq_r (int8x16_t __a, int32_t __b)
@@ -21071,20 +19862,6 @@  __arm_vornq (int8x16_t __a, int8x16_t __b)
  return __arm_vornq_s8 (__a, __b);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (int8x16_t __a, int8x16_t __b)
-{
- return __arm_vmulq_s8 (__a, __b);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (int8x16_t __a, int8_t __b)
-{
- return __arm_vmulq_n_s8 (__a, __b);
-}
-
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmulltq_int (int8x16_t __a, int8x16_t __b)
@@ -21253,13 +20030,6 @@  __arm_vaddvaq (int32_t __a, int8x16_t __b)
  return __arm_vaddvaq_s8 (__a, __b);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (int8x16_t __a, int8_t __b)
-{
- return __arm_vaddq_n_s8 (__a, __b);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq (int8x16_t __a, int8x16_t __b)
@@ -21288,20 +20058,6 @@  __arm_vqshlq_n (int8x16_t __a, const int __imm)
  return __arm_vqshlq_n_s8 (__a, __imm);
 }
 
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (uint16x8_t __a, uint16x8_t __b)
-{
- return __arm_vsubq_u16 (__a, __b);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (uint16x8_t __a, uint16_t __b)
-{
- return __arm_vsubq_n_u16 (__a, __b);
-}
-
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vrmulhq (uint16x8_t __a, uint16x8_t __b)
@@ -21358,20 +20114,6 @@  __arm_vornq (uint16x8_t __a, uint16x8_t __b)
  return __arm_vornq_u16 (__a, __b);
 }
 
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (uint16x8_t __a, uint16x8_t __b)
-{
- return __arm_vmulq_u16 (__a, __b);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (uint16x8_t __a, uint16_t __b)
-{
- return __arm_vmulq_n_u16 (__a, __b);
-}
-
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmulltq_int (uint16x8_t __a, uint16x8_t __b)
@@ -21554,13 +20296,6 @@  __arm_vaddvaq (uint32_t __a, uint16x8_t __b)
  return __arm_vaddvaq_u16 (__a, __b);
 }
 
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (uint16x8_t __a, uint16_t __b)
-{
- return __arm_vaddq_n_u16 (__a, __b);
-}
-
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq (uint16x8_t __a, uint16x8_t __b)
@@ -21764,20 +20499,6 @@  __arm_vaddvq_p (int16x8_t __a, mve_pred16_t __p)
  return __arm_vaddvq_p_s16 (__a, __p);
 }
 
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (int16x8_t __a, int16x8_t __b)
-{
- return __arm_vsubq_s16 (__a, __b);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (int16x8_t __a, int16_t __b)
-{
- return __arm_vsubq_n_s16 (__a, __b);
-}
-
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vshlq_r (int16x8_t __a, int32_t __b)
@@ -21911,20 +20632,6 @@  __arm_vornq (int16x8_t __a, int16x8_t __b)
  return __arm_vornq_s16 (__a, __b);
 }
 
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (int16x8_t __a, int16x8_t __b)
-{
- return __arm_vmulq_s16 (__a, __b);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (int16x8_t __a, int16_t __b)
-{
- return __arm_vmulq_n_s16 (__a, __b);
-}
-
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmulltq_int (int16x8_t __a, int16x8_t __b)
@@ -22093,13 +20800,6 @@  __arm_vaddvaq (int32_t __a, int16x8_t __b)
  return __arm_vaddvaq_s16 (__a, __b);
 }
 
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (int16x8_t __a, int16_t __b)
-{
- return __arm_vaddq_n_s16 (__a, __b);
-}
-
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq (int16x8_t __a, int16x8_t __b)
@@ -22128,20 +20828,6 @@  __arm_vqshlq_n (int16x8_t __a, const int __imm)
  return __arm_vqshlq_n_s16 (__a, __imm);
 }
 
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (uint32x4_t __a, uint32x4_t __b)
-{
- return __arm_vsubq_u32 (__a, __b);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (uint32x4_t __a, uint32_t __b)
-{
- return __arm_vsubq_n_u32 (__a, __b);
-}
-
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vrmulhq (uint32x4_t __a, uint32x4_t __b)
@@ -22198,20 +20884,6 @@  __arm_vornq (uint32x4_t __a, uint32x4_t __b)
  return __arm_vornq_u32 (__a, __b);
 }
 
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (uint32x4_t __a, uint32x4_t __b)
-{
- return __arm_vmulq_u32 (__a, __b);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (uint32x4_t __a, uint32_t __b)
-{
- return __arm_vmulq_n_u32 (__a, __b);
-}
-
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmulltq_int (uint32x4_t __a, uint32x4_t __b)
@@ -22394,13 +21066,6 @@  __arm_vaddvaq (uint32_t __a, uint32x4_t __b)
  return __arm_vaddvaq_u32 (__a, __b);
 }
 
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (uint32x4_t __a, uint32_t __b)
-{
- return __arm_vaddq_n_u32 (__a, __b);
-}
-
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq (uint32x4_t __a, uint32x4_t __b)
@@ -22604,20 +21269,6 @@  __arm_vaddvq_p (int32x4_t __a, mve_pred16_t __p)
  return __arm_vaddvq_p_s32 (__a, __p);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (int32x4_t __a, int32x4_t __b)
-{
- return __arm_vsubq_s32 (__a, __b);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (int32x4_t __a, int32_t __b)
-{
- return __arm_vsubq_n_s32 (__a, __b);
-}
-
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vshlq_r (int32x4_t __a, int32_t __b)
@@ -22751,20 +21402,6 @@  __arm_vornq (int32x4_t __a, int32x4_t __b)
  return __arm_vornq_s32 (__a, __b);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (int32x4_t __a, int32x4_t __b)
-{
- return __arm_vmulq_s32 (__a, __b);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (int32x4_t __a, int32_t __b)
-{
- return __arm_vmulq_n_s32 (__a, __b);
-}
-
 __extension__ extern __inline int64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmulltq_int (int32x4_t __a, int32x4_t __b)
@@ -22933,13 +21570,6 @@  __arm_vaddvaq (int32_t __a, int32x4_t __b)
  return __arm_vaddvaq_s32 (__a, __b);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (int32x4_t __a, int32_t __b)
-{
- return __arm_vaddq_n_s32 (__a, __b);
-}
-
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq (int32x4_t __a, int32x4_t __b)
@@ -26097,13 +24727,6 @@  __arm_vsriq_m (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
  return __arm_vsriq_m_n_s8 (__a, __b, __imm, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_s8 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vqshluq_m (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
@@ -26132,13 +24755,6 @@  __arm_vshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_
  return __arm_vshlq_m_u8 (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_u8 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline uint32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabavq_p (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
@@ -26160,13 +24776,6 @@  __arm_vsriq_m (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
  return __arm_vsriq_m_n_s16 (__a, __b, __imm, __p);
 }
 
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_s16 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vqshluq_m (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
@@ -26195,13 +24804,6 @@  __arm_vshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_
  return __arm_vshlq_m_u16 (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_u16 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline uint32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabavq_p (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
@@ -26223,13 +24825,6 @@  __arm_vsriq_m (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
  return __arm_vsriq_m_n_s32 (__a, __b, __imm, __p);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_s32 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vqshluq_m (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
@@ -26258,13 +24853,6 @@  __arm_vshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_
  return __arm_vshlq_m_u32 (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_u32 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline uint32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabavq_p (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
@@ -26321,90 +24909,6 @@  __arm_vabdq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16
  return __arm_vabdq_m_u16 (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_n_s8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_n_s32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_n_s16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_n_u8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_n_u32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_n_u16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_s8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_s32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_s16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_u8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_u32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_u16 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vandq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
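
[ The removed vaddq_m overloads above dispatch on the type of the last
  data operand: a scalar selects the _n_ (vector-plus-scalar) builtin,
  a vector the element-wise one, with inactive lanes taken from the
  first argument.  A minimal sketch, assuming an MVE-enabled target
  (the function name is illustrative):

    #include <arm_mve.h>

    int16x8_t add_masked (int16x8_t inactive, int16x8_t a, int16x8_t b,
                          int16_t s, mve_pred16_t p)
    {
      int16x8_t v = vaddq_m (inactive, a, b, p);  /* -> vaddq_m_s16 */
      return vaddq_m (inactive, v, s, p);         /* -> vaddq_m_n_s16 */
    }

  The same pattern applies to the vmulq_m and vsubq_m overloads removed
  further down. ]
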
@@ -27266,90 +25770,6 @@  __arm_vmulltq_int_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_
  return __arm_vmulltq_int_m_u16 (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_n_s8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_n_s32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_n_s16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_n_u8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_n_u32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_n_u16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_s8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_s32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_s16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_u8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_u32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_u16 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vornq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
@@ -28358,48 +26778,6 @@  __arm_vsliq_m (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p
  return __arm_vsliq_m_n_u16 (__a, __b, __imm, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_n_s8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_n_s32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_n_s16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_n_u8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_n_u32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_n_u16 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline int64_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmlaldavaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
@@ -29849,48 +28227,6 @@  __arm_vstrwq_scatter_shifted_offset (uint32_t * __base, uint32x4_t __offset, uin
  __arm_vstrwq_scatter_shifted_offset_u32 (__base, __offset, __value);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (int8x16_t __a, int8x16_t __b)
-{
- return __arm_vaddq_s8 (__a, __b);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (int16x8_t __a, int16x8_t __b)
-{
- return __arm_vaddq_s16 (__a, __b);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (int32x4_t __a, int32x4_t __b)
-{
- return __arm_vaddq_s32 (__a, __b);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (uint8x16_t __a, uint8x16_t __b)
-{
- return __arm_vaddq_u8 (__a, __b);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (uint16x8_t __a, uint16x8_t __b)
-{
- return __arm_vaddq_u16 (__a, __b);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (uint32x4_t __a, uint32x4_t __b)
-{
- return __arm_vaddq_u32 (__a, __b);
-}
-
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vddupq_m (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
@@ -30598,90 +28934,6 @@  __arm_vabsq_x (int32x4_t __a, mve_pred16_t __p)
  return __arm_vabsq_x_s32 (__a, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_s8 (__a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_s16 (__a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_s32 (__a, __b, __p);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_n_s8 (__a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_n_s16 (__a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_n_s32 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_u8 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_u16 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_u32 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_n_u8 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_n_u16 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_n_u32 (__a, __b, __p);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vclsq_x (int8x16_t __a, mve_pred16_t __p)
@@ -30920,174 +29172,6 @@  __arm_vmulltq_int_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
  return __arm_vmulltq_int_x_u32 (__a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_s8 (__a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_s16 (__a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_s32 (__a, __b, __p);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_n_s8 (__a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_n_s16 (__a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_n_s32 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_u8 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_u16 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_u32 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_n_u8 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_n_u16 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_n_u32 (__a, __b, __p);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_s8 (__a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_s16 (__a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_s32 (__a, __b, __p);
-}
-
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_n_s8 (__a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_n_s16 (__a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_n_s32 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_u8 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_u16 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_u32 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_n_u8 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_n_u16 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_n_u32 (__a, __b, __p);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
@@ -32847,20 +30931,6 @@  __arm_vcvtq (uint32x4_t __a)
  return __arm_vcvtq_f32_u32 (__a);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (float16x8_t __a, float16_t __b)
-{
- return __arm_vsubq_n_f16 (__a, __b);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (float32x4_t __a, float32_t __b)
-{
- return __arm_vsubq_n_f32 (__a, __b);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vbrsrq (float16x8_t __a, int32_t __b)
@@ -32987,13 +31057,6 @@  __arm_vcmpeqq (float16x8_t __a, float16x8_t __b)
  return __arm_vcmpeqq_f16 (__a, __b);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (float16x8_t __a, float16x8_t __b)
-{
- return __arm_vsubq_f16 (__a, __b);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vorrq (float16x8_t __a, float16x8_t __b)
@@ -33008,20 +31071,6 @@  __arm_vornq (float16x8_t __a, float16x8_t __b)
  return __arm_vornq_f16 (__a, __b);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (float16x8_t __a, float16_t __b)
-{
- return __arm_vmulq_n_f16 (__a, __b);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (float16x8_t __a, float16x8_t __b)
-{
- return __arm_vmulq_f16 (__a, __b);
-}
-
 __extension__ extern __inline float16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vminnmvq (float16_t __a, float16x8_t __b)
@@ -33141,13 +31190,6 @@  __arm_vandq (float16x8_t __a, float16x8_t __b)
  return __arm_vandq_f16 (__a, __b);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (float16x8_t __a, float16_t __b)
-{
- return __arm_vaddq_n_f16 (__a, __b);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq (float16x8_t __a, float16x8_t __b)
@@ -33239,13 +31281,6 @@  __arm_vcmpeqq (float32x4_t __a, float32x4_t __b)
  return __arm_vcmpeqq_f32 (__a, __b);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq (float32x4_t __a, float32x4_t __b)
-{
- return __arm_vsubq_f32 (__a, __b);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vorrq (float32x4_t __a, float32x4_t __b)
@@ -33260,20 +31295,6 @@  __arm_vornq (float32x4_t __a, float32x4_t __b)
  return __arm_vornq_f32 (__a, __b);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (float32x4_t __a, float32_t __b)
-{
- return __arm_vmulq_n_f32 (__a, __b);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq (float32x4_t __a, float32x4_t __b)
-{
- return __arm_vmulq_f32 (__a, __b);
-}
-
 __extension__ extern __inline float32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vminnmvq (float32_t __a, float32x4_t __b)
@@ -33393,13 +31414,6 @@  __arm_vandq (float32x4_t __a, float32x4_t __b)
  return __arm_vandq_f32 (__a, __b);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (float32x4_t __a, float32_t __b)
-{
- return __arm_vaddq_n_f32 (__a, __b);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq (float32x4_t __a, float32x4_t __b)
@@ -34170,34 +32184,6 @@  __arm_vabdq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pre
  return __arm_vabdq_m_f16 (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_f32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_f16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_n_f32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_m_n_f16 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vandq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
@@ -34506,34 +32492,6 @@  __arm_vminnmq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_p
  return __arm_vminnmq_m_f16 (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_f32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_f16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_n_f32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_m_n_f16 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vornq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
@@ -34562,34 +32520,6 @@  __arm_vorrq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pre
  return __arm_vorrq_m_f16 (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_f32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_f16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_n_f32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_m_n_f16 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vld1q (float32_t const * __base)
@@ -34772,20 +32702,6 @@  __arm_vstrwq_scatter_shifted_offset_p (float32_t * __base, uint32x4_t __offset,
  __arm_vstrwq_scatter_shifted_offset_p_f32 (__base, __offset, __value, __p);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (float16x8_t __a, float16x8_t __b)
-{
- return __arm_vaddq_f16 (__a, __b);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq (float32x4_t __a, float32x4_t __b)
-{
- return __arm_vaddq_f32 (__a, __b);
-}
-
 __extension__ extern __inline void
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, float32x4_t __value)
@@ -34856,34 +32772,6 @@  __arm_vabsq_x (float32x4_t __a, mve_pred16_t __p)
  return __arm_vabsq_x_f32 (__a, __p);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_f16 (__a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_f32 (__a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_n_f16 (__a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vaddq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
-{
- return __arm_vaddq_x_n_f32 (__a, __b, __p);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vnegq_x (float16x8_t __a, mve_pred16_t __p)
@@ -34898,62 +32786,6 @@  __arm_vnegq_x (float32x4_t __a, mve_pred16_t __p)
  return __arm_vnegq_x_f32 (__a, __p);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_f16 (__a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_f32 (__a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_n_f16 (__a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vmulq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
-{
- return __arm_vmulq_x_n_f32 (__a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_f16 (__a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_f32 (__a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_n_f16 (__a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vsubq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
-{
- return __arm_vsubq_x_n_f32 (__a, __b, __p);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
@@ -35846,26 +33678,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
 
-#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double)), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double)));})
-
 #define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
@@ -35906,26 +33718,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
 
-#define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double)), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double)), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
-
 #define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
@@ -36147,26 +33939,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
 
-#define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double)), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double)), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
-
 #define __arm_vminnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
@@ -37288,27 +35060,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
 
-#define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));})
-
 #define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
@@ -37479,27 +35230,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
 
-#define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));})
-
 #define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
@@ -37513,27 +35243,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
 
-#define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));})
-
 #define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
@@ -37879,26 +35588,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
   int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
 
-#define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));})
-
 #define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
@@ -38014,26 +35703,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
 
-#define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));})
-
 #define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
   int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
@@ -38115,26 +35784,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
 
-#define __arm_vsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));})
-
 #define __arm_vcmulq_rot90_x(p1,p2,p3)  ({ __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
@@ -38307,22 +35956,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
 
-#define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)));})
-
 #define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
@@ -38508,22 +36141,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
 
-#define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
-
 #define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
@@ -38687,22 +36304,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
 
-#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)));})
-
 #define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
@@ -39375,23 +36976,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
 
-#define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
-
 #define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
@@ -39513,40 +37097,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
 
-#define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
-
-#define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
-
 #define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
   int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
@@ -39790,22 +37340,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
   int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
 
-#define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3));})
-
 #define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
@@ -39892,22 +37426,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
 
-#define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3));})
-
 #define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
   int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
@@ -40014,22 +37532,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce1(p0, uint32_t *))))
 
-#define __arm_vsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3));})
-
 #define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \
   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
   int (*)[__ARM_mve_type_int8x16_t]: __arm_vgetq_lane_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
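
All of the overloaded forms removed above rely on the same C11 _Generic device: _Generic selects on a single controlling expression, so the macros fold the __ARM_mve_typeid of every operand into the dimensions of a pointer-to-array type, which turns each combination of operand types into a distinct association. A minimal self-contained sketch of the pattern, using illustrative type ids rather than the real arm_mve.h definitions:

#include <stdio.h>

/* Toy stand-ins for the __ARM_mve_typeid values.  */
enum { ID_INT = 1, ID_FLOAT = 2 };

#define TYPEID(x) _Generic ((x), int: ID_INT, float: ID_FLOAT)

/* Encode both ids as array bounds so one _Generic can dispatch on
   the (type0, type1) pair, in the same way the removed macros use
   int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)].  */
#define DISPATCH(a, b) \
  _Generic ((int (*)[TYPEID (a)][TYPEID (b)]) 0, \
            int (*)[ID_INT][ID_INT]:     "int,int", \
            int (*)[ID_INT][ID_FLOAT]:   "int,float", \
            int (*)[ID_FLOAT][ID_INT]:   "float,int", \
            int (*)[ID_FLOAT][ID_FLOAT]: "float,float")

int main (void)
{
  int i = 0;
  float f = 0.0f;
  puts (DISPATCH (i, f));  /* prints "int,float" */
  return 0;
}

Under the new framework none of this preprocessor machinery is needed: the compiler resolves the overloaded vaddq/vmulq/vsubq names itself, which is why the macros can be deleted wholesale rather than rewritten.
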
diff --git a/gcc/config/arm/arm_mve_builtins.def b/gcc/config/arm/arm_mve_builtins.def
index 5e5510f6e37..8de765de3b0 100644
--- a/gcc/config/arm/arm_mve_builtins.def
+++ b/gcc/config/arm/arm_mve_builtins.def
@@ -92,7 +92,6 @@  VAR1 (BINOP_UNONE_UNONE_PRED, vaddlvq_p_u, v4si)
 VAR3 (BINOP_PRED_NONE_NONE, vcmpneq_, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vshlq_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_NONE, vshlq_u, v16qi, v8hi, v4si)
-VAR3 (BINOP_UNONE_UNONE_UNONE, vsubq_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vsubq_n_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vrmulhq_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vrhaddq_u, v16qi, v8hi, v4si)
@@ -102,7 +101,6 @@  VAR3 (BINOP_UNONE_UNONE_UNONE, vqaddq_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vqaddq_n_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vorrq_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vornq_u, v16qi, v8hi, v4si)
-VAR3 (BINOP_UNONE_UNONE_UNONE, vmulq_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vmulq_n_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vmulltq_int_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vmullbq_int_u, v16qi, v8hi, v4si)
@@ -155,7 +153,6 @@  VAR3 (BINOP_PRED_NONE_NONE, vcmpeqq_, v16qi, v8hi, v4si)
 VAR3 (BINOP_PRED_NONE_NONE, vcmpeqq_n_, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_NONE_IMM, vqshluq_n_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_PRED, vaddvq_p_s, v16qi, v8hi, v4si)
-VAR3 (BINOP_NONE_NONE_NONE, vsubq_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vsubq_n_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vshlq_r_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vrshlq_s, v16qi, v8hi, v4si)
@@ -176,7 +173,6 @@  VAR3 (BINOP_NONE_NONE_NONE, vqaddq_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vqaddq_n_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vorrq_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vornq_s, v16qi, v8hi, v4si)
-VAR3 (BINOP_NONE_NONE_NONE, vmulq_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vmulq_n_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vmulltq_int_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vmullbq_int_s, v16qi, v8hi, v4si)
@@ -230,7 +226,6 @@  VAR2 (BINOP_PRED_NONE_NONE, vcmpgeq_n_f, v8hf, v4sf)
 VAR2 (BINOP_PRED_NONE_NONE, vcmpgeq_f, v8hf, v4sf)
 VAR2 (BINOP_PRED_NONE_NONE, vcmpeqq_n_f, v8hf, v4sf)
 VAR2 (BINOP_PRED_NONE_NONE, vcmpeqq_f, v8hf, v4sf)
-VAR2 (BINOP_NONE_NONE_NONE, vsubq_f, v8hf, v4sf)
 VAR2 (BINOP_NONE_NONE_NONE, vqmovntq_s, v8hi, v4si)
 VAR2 (BINOP_NONE_NONE_NONE, vqmovnbq_s, v8hi, v4si)
 VAR2 (BINOP_NONE_NONE_NONE, vqdmulltq_s, v8hi, v4si)
@@ -240,7 +235,6 @@  VAR2 (BINOP_NONE_NONE_NONE, vqdmullbq_n_s, v8hi, v4si)
 VAR2 (BINOP_NONE_NONE_NONE, vorrq_f, v8hf, v4sf)
 VAR2 (BINOP_NONE_NONE_NONE, vornq_f, v8hf, v4sf)
 VAR2 (BINOP_NONE_NONE_NONE, vmulq_n_f, v8hf, v4sf)
-VAR2 (BINOP_NONE_NONE_NONE, vmulq_f, v8hf, v4sf)
 VAR2 (BINOP_NONE_NONE_NONE, vmovntq_s, v8hi, v4si)
 VAR2 (BINOP_NONE_NONE_NONE, vmovnbq_s, v8hi, v4si)
 VAR2 (BINOP_NONE_NONE_NONE, vmlsldavxq_s, v8hi, v4si)
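
The VARn entries deleted above are X-macro lines: arm_mve_builtins.def is re-included with varying VARn definitions (the real expansions live in gcc/config/arm/arm-builtins.cc), and each line instantiates one legacy __builtin_mve_* entry point per listed vector mode. Note that only the vector-by-vector forms go away in these hunks; the _n_ (vector-by-scalar) lines are untouched here. A rough sketch of the fan-out, with a simplified DEF standing in for the real registration logic:

#include <stdio.h>

/* Stand-in for the real registration: just print the builtin name
   that a VARn line would declare for each mode.  */
#define DEF(shape, name, mode)  puts ("__builtin_mve_" #name #mode);

#define VAR1(T, N, A)        DEF (T, N, A)
#define VAR2(T, N, A, B)     VAR1 (T, N, A) VAR1 (T, N, B)
#define VAR3(T, N, A, B, C)  VAR2 (T, N, A, B) VAR1 (T, N, C)

int main (void)
{
  /* One deleted .def line corresponds to three per-mode builtins:  */
  VAR3 (BINOP_UNONE_UNONE_UNONE, vsubq_u, v16qi, v8hi, v4si)
  return 0;
}

So removing the four VAR3 and two VAR2 lines retires all sixteen per-mode vector-vector builtins for vmulq and vsubq in one go; the matching mve_vmulq_<supf><mode> and mve_vsubq_<supf><mode> unspec patterns disappear from mve.md below, superseded by the unified mve_<mve_addsubmul>q<mode> pattern visible in the surrounding context.
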
diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index 5167fbc6add..ccb3cf23304 100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -1353,18 +1353,6 @@  (define_insn "mve_vmulltq_int_<supf><mode>"
 ;; [vmulq_u, vmulq_s])
 ;; [vsubq_s, vsubq_u])
 ;;
-(define_insn "mve_vmulq_<supf><mode>"
-  [
-    (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
-		       (match_operand:MVE_2 2 "s_register_operand" "w")]
-	 VMULQ))
-  ]
-  "TARGET_HAVE_MVE"
-  "vmul.i%#<V_sz_elem>\t%q0, %q1, %q2"
-  [(set_attr "type" "mve_move")
-])
-
 (define_insn "mve_<mve_addsubmul>q<mode>"
   [
    (set (match_operand:MVE_2 0 "s_register_operand" "=w")
@@ -1742,21 +1730,6 @@  (define_insn "mve_vshlq_r_<supf><mode>"
   [(set_attr "type" "mve_move")
 ])
 
-;;
-;; [vsubq_s, vsubq_u])
-;;
-(define_insn "mve_vsubq_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
-		       (match_operand:MVE_2 2 "s_register_operand" "w")]
-	 VSUBQ))
-  ]
-  "TARGET_HAVE_MVE"
-  "vsub.i%#<V_sz_elem>\t%q0, %q1, %q2"
-  [(set_attr "type" "mve_move")
-])
-
 ;;
 ;; [vabdq_f])
 ;;