@@ -10403,6 +10403,15 @@ vld1q_p64 (const poly64_t * __a)
return (poly64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
}
+__extension__ extern __inline poly64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p64_x2 (const poly64_t * __a)
+{
+ union { poly64x2x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
#pragma GCC pop_options
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@@ -10432,6 +10441,42 @@ vld1q_s64 (const int64_t * __a)
return (int64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
}
+__extension__ extern __inline int8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s8_x2 (const int8_t * __a)
+{
+ union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s16_x2 (const int16_t * __a)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s32_x2 (const int32_t * __a)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s64_x2 (const int64_t * __a)
+{
+ union { int64x2x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@@ -10448,6 +10493,26 @@ vld1q_f32 (const float32_t * __a)
return (float32x4_t)__builtin_neon_vld1v4sf ((const __builtin_neon_sf *) __a);
}
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_f16_x2 (const float16_t * __a)
+{
+ union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v8hf (__a);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_f32_x2 (const float32_t * __a)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v4sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u8 (const uint8_t * __a)
@@ -10476,6 +10541,42 @@ vld1q_u64 (const uint64_t * __a)
return (uint64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
}
+__extension__ extern __inline uint8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u8_x2 (const uint8_t * __a)
+{
+ union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u16_x2 (const uint16_t * __a)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u32_x2 (const uint32_t * __a)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u64_x2 (const uint64_t * __a)
+{
+ union { uint64x2x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_p8 (const poly8_t * __a)
@@ -10490,6 +10591,24 @@ vld1q_p16 (const poly16_t * __a)
return (poly16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a);
}
+__extension__ extern __inline poly8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p8_x2 (const poly8_t * __a)
+{
+ union { poly8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p16_x2 (const poly16_t * __a)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_s8 (const int8_t * __a, int8x8_t __b, const int __c)
@@ -19782,6 +19901,15 @@ vld1q_bf16 (const bfloat16_t * __ptr)
return __builtin_neon_vld1v8bf (__ptr);
}
+__extension__ extern __inline bfloat16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_bf16_x2 (const bfloat16_t * __ptr)
+{
+ union { bfloat16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x2v8bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
__extension__ extern __inline bfloat16x4x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_bf16 (bfloat16_t const * __ptr)
@@ -301,6 +301,7 @@ VAR1 (TERNOP, vtbx4, v8qi)
VAR13 (LOAD1, vld1,
v8qi, v4hi, v4hf, v2si, v2sf, v16qi, v8hi, v8hf, v4si, v4sf, v2di,
v4bf, v8bf)
+VAR7 (LOAD1, vld1_x2, v16qi, v8hi, v4si, v2di, v8hf, v4sf, v8bf)
VAR12 (LOAD1LANE, vld1_lane,
v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di, v4bf, v8bf)
VAR10 (LOAD1, vld1_dup,
@@ -4957,6 +4957,18 @@ if (BYTES_BIG_ENDIAN)
[(set_attr "type" "neon_load1_1reg<q>")]
)
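+;; Load two consecutive quad vectors (four d-registers) with a single VLD1;
+;; the opaque OI-mode result is unpacked by the vld1q_*_x2 intrinsics.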
+(define_insn "neon_vld1_x2<mode>"
+ [(set (match_operand:OI 0 "s_register_operand" "=w")
+ (unspec:OI [(match_operand:OI 1 "neon_struct_operand" "Um")
+ (unspec:VQXBF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD1))]
+ "TARGET_NEON"
+ "vld1.<V_sz_elem>\t%h0, %A1"
+ [(set_attr "type" "neon_load1_2reg<q>")]
+)
+
;; The lane numbers in the RTL are in GCC lane order, having been flipped
;; in arm_expand_neon_args. The lane numbers are restored to architectural
;; lane order here.
new file mode 100644
@@ -0,0 +1,68 @@
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O2" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+
+uint8x16x2_t test_vld1q_u8_x2 (uint8_t * a)
+{
+ return vld1q_u8_x2 (a);
+}
+
+uint16x8x2_t test_vld1q_u16_x2 (uint16_t * a)
+{
+ return vld1q_u16_x2 (a);
+}
+
+uint32x4x2_t test_vld1q_u32_x2 (uint32_t * a)
+{
+ return vld1q_u32_x2 (a);
+}
+
+uint64x2x2_t test_vld1q_u64_x2 (uint64_t * a)
+{
+ return vld1q_u64_x2 (a);
+}
+
+int8x16x2_t test_vld1q_s8_x2 (int8_t * a)
+{
+ return vld1q_s8_x2 (a);
+}
+
+int16x8x2_t test_vld1q_s16_x2 (int16_t * a)
+{
+ return vld1q_s16_x2 (a);
+}
+
+int32x4x2_t test_vld1q_s32_x2 (int32_t * a)
+{
+ return vld1q_s32_x2 (a);
+}
+
+int64x2x2_t test_vld1q_s64_x2 (int64_t * a)
+{
+ return vld1q_s64_x2 (a);
+}
+
+float32x4x2_t test_vld1q_f32_x2 (float32_t * a)
+{
+ return vld1q_f32_x2 (a);
+}
+
+poly8x16x2_t test_vld1q_p8_x2 (poly8_t * a)
+{
+ return vld1q_p8_x2 (a);
+}
+
+poly16x8x2_t test_vld1q_p16_x2 (poly16_t * a)
+{
+ return vld1q_p16_x2 (a);
+}
+
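+/* The 64-bit loads carry a :64 alignment hint because uint64_t and
+   int64_t are 8-byte aligned.  */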
+/* { dg-final { scan-assembler-times {vld1.8\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {vld1.32\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+:64\]\n} 2 } } */
new file mode 100644
@@ -0,0 +1,13 @@
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_2a_bf16_neon_ok } */
+/* { dg-options "-save-temps -O2" } */
+/* { dg-add-options arm_v8_2a_bf16_neon } */
+
+#include "arm_neon.h"
+
+bfloat16x8x2_t test_vld1q_bf16_x2 (bfloat16_t * a)
+{
+ return vld1q_bf16_x2 (a);
+}
+
+/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 1 } } */
new file mode 100644
@@ -0,0 +1,13 @@
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_fp16_ok } */
+/* { dg-options "-save-temps -O2" } */
+/* { dg-add-options arm_neon_fp16 } */
+
+#include "arm_neon.h"
+
+float16x8x2_t test_vld1q_f16_x2 (float16_t * a)
+{
+ return vld1q_f16_x2 (a);
+}
+
+/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 1 } } */
new file mode 100644
@@ -0,0 +1,13 @@
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_crypto_ok } */
+/* { dg-options "-save-temps -O2" } */
+/* { dg-add-options arm_crypto } */
+
+#include "arm_neon.h"
+
+poly64x2x2_t test_vld1q_p64_x2 (poly64_t * a)
+{
+ return vld1q_p64_x2 (a);
+}
+
+/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+:64\]\n} 1 } } */