@@ -10421,6 +10421,15 @@ vld1q_p64_x3 (const poly64_t * __a)
return __rv.__i;
}
+__extension__ extern __inline poly64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p64_x4 (const poly64_t * __a)
+{
+ union { poly64x2x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
#pragma GCC pop_options
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@@ -10522,6 +10531,42 @@ vld1q_s64_x3 (const int64_t * __a)
return __rv.__i;
}
+__extension__ extern __inline int8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s8_x4 (const int8_t * __a)
+{
+ union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s16_x4 (const int16_t * __a)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s32_x4 (const int32_t * __a)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s64_x4 (const int64_t * __a)
+{
+ union { int64x2x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@@ -10578,6 +10623,26 @@ vld1q_f32_x3 (const float32_t * __a)
return __rv.__i;
}
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_f16_x4 (const float16_t * __a)
+{
+ union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v8hf (__a);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_f32_x4 (const float32_t * __a)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v4sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u8 (const uint8_t * __a)
@@ -10678,6 +10743,42 @@ vld1q_u64_x3 (const uint64_t * __a)
return __rv.__i;
}
+__extension__ extern __inline uint8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u8_x4 (const uint8_t * __a)
+{
+ union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u16_x4 (const uint16_t * __a)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u32_x4 (const uint32_t * __a)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u64_x4 (const uint64_t * __a)
+{
+ union { uint64x2x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_p8 (const poly8_t * __a)
@@ -10728,6 +10829,24 @@ vld1q_p16_x3 (const poly16_t * __a)
return __rv.__i;
}
+__extension__ extern __inline poly8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p8_x4 (const poly8_t * __a)
+{
+ union { poly8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p16_x4 (const poly16_t * __a)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_s8 (const int8_t * __a, int8x8_t __b, const int __c)
@@ -20038,6 +20157,15 @@ vld1q_bf16_x3 (const bfloat16_t * __ptr)
return __rv.__i;
}
+__extension__ extern __inline bfloat16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_bf16_x4 (const bfloat16_t * __ptr)
+{
+ union { bfloat16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld1_x4v8bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
__extension__ extern __inline bfloat16x4x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_bf16 (bfloat16_t const * __ptr)
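
For reference, a minimal usage sketch of the new intrinsics (illustrative only, not part of the patch; the function name is hypothetical and a buffer of at least 64 readable bytes is assumed):

    #include <arm_neon.h>

    /* Sum four q-registers' worth of bytes fetched by the new
       vld1q_u8_x4 intrinsic (one contiguous 64-byte load).  */
    uint8x16_t
    sum_block (const uint8_t *p)
    {
      uint8x16x4_t v = vld1q_u8_x4 (p);
      uint8x16_t s = vaddq_u8 (v.val[0], v.val[1]);
      s = vaddq_u8 (s, v.val[2]);
      return vaddq_u8 (s, v.val[3]);
    }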
@@ -300,6 +300,7 @@ VAR1 (TERNOP, vtbx3, v8qi)
VAR1 (TERNOP, vtbx4, v8qi)
VAR7 (LOAD1, vld1_x2, v16qi, v8hi, v4si, v2di, v8hf, v4sf, v8bf)
VAR7 (LOAD1, vld1_x3, v16qi, v8hi, v4si, v2di, v8hf, v4sf, v8bf)
+VAR7 (LOAD1, vld1_x4, v16qi, v8hi, v4si, v2di, v8hf, v4sf, v8bf)
VAR13 (LOAD1, vld1,
v8qi, v4hi, v4hf, v2si, v2sf, v16qi, v8hi, v8hf, v4si, v4sf, v2di,
v4bf, v8bf)
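
Each VAR7 entry declares one builtin per listed vector mode, so the new line provides exactly the builtins that the arm_neon.h wrappers above call (names taken from the header hunk):

    /* __builtin_neon_vld1_x4v16qi -> vld1q_{s,u,p}8_x4
       __builtin_neon_vld1_x4v8hi  -> vld1q_{s,u,p}16_x4
       __builtin_neon_vld1_x4v4si  -> vld1q_{s,u}32_x4
       __builtin_neon_vld1_x4v2di  -> vld1q_{s,u}64_x4, vld1q_p64_x4
       __builtin_neon_vld1_x4v8hf  -> vld1q_f16_x4
       __builtin_neon_vld1_x4v4sf  -> vld1q_f32_x4
       __builtin_neon_vld1_x4v8bf  -> vld1q_bf16_x4  */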
@@ -4994,6 +4994,36 @@ if (BYTES_BIG_ENDIAN)
[(set_attr "type" "neon_load1_3reg<q>")]
)
+(define_insn "neon_vld1_x4<mode>"
+ [(set (match_operand:XI 0 "s_register_operand" "=w")
+ (unspec:XI [(match_operand:OI 1 "neon_struct_operand" "Um")
+ (unspec:VQXBF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD4A))]
+ "TARGET_NEON"
+{
+ int regno = REGNO (operands[0]);
+ rtx ops[5];
+ ops[0] = gen_rtx_REG (DImode, regno);
+ ops[1] = gen_rtx_REG (DImode, regno + 2);
+ ops[2] = gen_rtx_REG (DImode, regno + 4);
+ ops[3] = gen_rtx_REG (DImode, regno + 6);
+ ops[4] = operands[1];
+
+ output_asm_insn ("vld1.<V_sz_elem>\t{%P0, %P1, %P2, %P3}, %A4", ops);
+
+ ops[0] = gen_rtx_REG (DImode, regno + 8);
+ ops[1] = gen_rtx_REG (DImode, regno + 10);
+ ops[2] = gen_rtx_REG (DImode, regno + 12);
+ ops[3] = gen_rtx_REG (DImode, regno + 14);
+ ops[4] = operands[1];
+
+ output_asm_insn ("vld1.<V_sz_elem>\t{%P0, %P1, %P2, %P3}, %A4", ops);
+
+ return "";
+}
+ [(set_attr "type" "neon_load1_3reg<q>")]
+)
+
;; The lane numbers in the RTL are in GCC lane order, having been flipped
;; in arm_expand_neon_args. The lane numbers are restored to architectural
;; lane order here.
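
As a rough sketch, the code expected for one of the new intrinsics (register numbers are illustrative; vld1 fills at most four d-registers per instruction, so the 64-byte load splits into two instructions, the second covering bytes 32..63):

    @ vld1q_u8_x4, loading q8-q11 starting at [r0]:
    vld1.8  {d16, d17, d18, d19}, [r0]
    @ second half from r0 + 32 (the offset address is set up separately)
    vld1.8  {d20, d21, d22, d23}, [r1]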
@@ -115,14 +115,73 @@ poly16x8x3_t test_vld1q_p16_x3 (poly16_t * a)
return vld1q_p16_x3 (a);
}
+uint8x16x4_t test_vld1q_u8_x4 (uint8_t * a)
+{
+ return vld1q_u8_x4 (a);
+}
+
+uint16x8x4_t test_vld1q_u16_x4 (uint16_t * a)
+{
+ return vld1q_u16_x4 (a);
+}
+
+uint32x4x4_t test_vld1q_u32_x4 (uint32_t * a)
+{
+ return vld1q_u32_x4 (a);
+}
+
+uint64x2x4_t test_vld1q_u64_x4 (uint64_t * a)
+{
+ return vld1q_u64_x4 (a);
+}
+
+int8x16x4_t test_vld1q_s8_x4 (int8_t * a)
+{
+ return vld1q_s8_x4 (a);
+}
+
+int16x8x4_t test_vld1q_s16_x4 (int16_t * a)
+{
+ return vld1q_s16_x4 (a);
+}
+
+int32x4x4_t test_vld1q_s32_x4 (int32_t * a)
+{
+ return vld1q_s32_x4 (a);
+}
+
+int64x2x4_t test_vld1q_s64_x4 (int64_t * a)
+{
+ return vld1q_s64_x4 (a);
+}
+
+float32x4x4_t test_vld1q_f32_x4 (float32_t * a)
+{
+ return vld1q_f32_x4 (a);
+}
+
+poly8x16x4_t test_vld1q_p8_x4 (poly8_t * a)
+{
+ return vld1q_p8_x4 (a);
+}
+
+poly16x8x4_t test_vld1q_p16_x4 (poly16_t * a)
+{
+ return vld1q_p16_x4 (a);
+}
+
/* { dg-final { scan-assembler-times {vld1.8\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 3 } } */
/* { dg-final { scan-assembler-times {vld1.8\t\{d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+\]\n} 6 } } */
+/* { dg-final { scan-assembler-times {vld1.8\t\{d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+\]\n} 6 } } */
/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 3 } } */
/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+\]\n} 6 } } */
+/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+\]\n} 6 } } */
/* { dg-final { scan-assembler-times {vld1.32\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 3 } } */
/* { dg-final { scan-assembler-times {vld1.32\t\{d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+\]\n} 6 } } */
+/* { dg-final { scan-assembler-times {vld1.32\t\{d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+\]\n} 6 } } */
/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+:64\]\n} 2 } } */
/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+:64\]\n} 4 } } */
+/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+:64\]\n} 4 } } */
@@ -15,5 +15,11 @@ bfloat16x8x3_t test_vld1q_bf16_x3 (bfloat16_t * a)
return vld1q_bf16_x3 (a);
}
+bfloat16x8x4_t test_vld1q_bf16_x4 (bfloat16_t * a)
+{
+ return vld1q_bf16_x4 (a);
+}
+
/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 1 } } */
/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+\]\n} 2 } } */
@@ -15,5 +15,11 @@ float16x8x3_t test_vld1q_f16_x3 (float16_t * a)
return vld1q_f16_x3 (a);
}
+float16x8x4_t test_vld1q_f16_x4 (float16_t * a)
+{
+ return vld1q_f16_x4 (a);
+}
+
/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 1 } } */
/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+\]\n} 2 } } */
@@ -15,5 +15,11 @@ poly64x2x3_t test_vld1q_p64_x3 (poly64_t * a)
return vld1q_p64_x3 (a);
}
+poly64x2x4_t test_vld1q_p64_x4 (poly64_t * a)
+{
+ return vld1q_p64_x4 (a);
+}
+
/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+:64\]\n} 1 } } */
/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+:64\]\n} 2 } } */
+/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+\}, \[r[0-9]+:64\]\n} 2 } } */