@@ -4198,6 +4198,8 @@ ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
break;
case E_V8QImode:
case E_V4HImode:
+ case E_V4HFmode:
+ case E_V4BFmode:
case E_V2SImode:
if (TARGET_SSE4_1)
{
@@ -4207,6 +4209,8 @@ ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
break;
case E_V4QImode:
case E_V2HImode:
+ case E_V2HFmode:
+ case E_V2BFmode:
if (TARGET_SSE4_1)
{
gen = gen_mmx_pblendvb_v4qi;
@@ -24360,7 +24360,11 @@ ix86_get_mask_mode (machine_mode data_mode)
/* Scalar mask case. */
if ((TARGET_AVX512F && TARGET_EVEX512 && vector_size == 64)
- || (TARGET_AVX512VL && (vector_size == 32 || vector_size == 16)))
+ || (TARGET_AVX512VL && (vector_size == 32 || vector_size == 16))
+ /* For _Float16, AVX512FP16 only supports vector
+ comparisons that produce a kmask.  */
+ || (TARGET_AVX512VL && TARGET_AVX512FP16
+ && GET_MODE_INNER (data_mode) == E_HFmode))
{
if (elem_size == 4
|| elem_size == 8
@@ -61,6 +61,9 @@ (define_mode_iterator MMXMODE248 [V4HI V2SI V1DI])
(define_mode_iterator V_32 [V4QI V2HI V1SI V2HF V2BF])
(define_mode_iterator V2FI_32 [V2HF V2BF V2HI])
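+;; 8-byte HF/BF/HI vector modes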
+(define_mode_iterator V4FI_64 [V4HF V4BF V4HI])
+(define_mode_iterator V4F_64 [V4HF V4BF])
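+;; 4-byte HF/BF vector modes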
+(define_mode_iterator V2F_32 [V2HF V2BF])
;; 4-byte integer vector modes
(define_mode_iterator VI_32 [V4QI V2HI])
@@ -1972,10 +1975,12 @@ (define_mode_attr mov_to_sse_suffix
[(V2HF "d") (V4HF "q") (V2HI "d") (V4HI "q")])
(define_mode_attr mmxxmmmode
- [(V2HF "V8HF") (V2HI "V8HI") (V2BF "V8BF")])
+ [(V2HF "V8HF") (V2HI "V8HI") (V2BF "V8BF")
+ (V4HF "V8HF") (V4HI "V8HI") (V4BF "V8BF")])
(define_mode_attr mmxxmmmodelower
- [(V2HF "v8hf") (V2HI "v8hi") (V2BF "v8bf")])
+ [(V2HF "v8hf") (V2HI "v8hi") (V2BF "v8bf")
+ (V4HF "v8hf") (V4HI "v8hi") (V4BF "v8bf")])
(define_expand "movd_<mode>_to_sse"
[(set (match_operand:<mmxxmmmode> 0 "register_operand")
@@ -2114,6 +2119,110 @@ (define_insn_and_split "*mmx_nabs<mode>2"
[(set (match_dup 0)
(ior:<MODE> (match_dup 1) (match_dup 2)))])
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel half-precision floating point comparisons and conditional moves
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
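+;; vec_cmp on partial HF vectors: move both operands into the low part
+;; of a V8HF register and use the full 128-bit vcmpph.  The kmask bits
+;; for the undefined upper elements are assumed don't-care, since
+;; consumers of a partial compare only use the low mask bits.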
+(define_expand "vec_cmpv4hfqi"
+ [(set (match_operand:QI 0 "register_operand")
+ (match_operator:QI 1 ""
+ [(match_operand:V4HF 2 "nonimmediate_operand")
+ (match_operand:V4HF 3 "nonimmediate_operand")]))]
+ "TARGET_MMX_WITH_SSE && TARGET_AVX512FP16 && TARGET_AVX512VL
+ && ix86_partial_vec_fp_math"
+{
+ rtx ops[4];
+ ops[3] = gen_reg_rtx (V8HFmode);
+ ops[2] = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_movq_v4hf_to_sse (ops[3], operands[3]));
+ emit_insn (gen_movq_v4hf_to_sse (ops[2], operands[2]));
+ emit_insn (gen_vec_cmpv8hfqi (operands[0], operands[1], ops[2], ops[3]));
+ DONE;
+})
+
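+;; Conditional move under a vector mask; ix86_expand_sse_movcc emits a
+;; pblendvb-based sequence for these modes under SSE4.1 (see the new
+;; V4HF/V4BF cases in ix86_expand_sse_movcc).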
+(define_expand "vcond_mask_<mode>v4hi"
+ [(set (match_operand:V4F_64 0 "register_operand")
+ (vec_merge:V4F_64
+ (match_operand:V4F_64 1 "register_operand")
+ (match_operand:V4F_64 2 "register_operand")
+ (match_operand:V4HI 3 "register_operand")))]
+ "TARGET_MMX_WITH_SSE && TARGET_SSE4_1"
+{
+ ix86_expand_sse_movcc (operands[0], operands[3],
+ operands[1], operands[2]);
+ DONE;
+})
+
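+;; Conditional move under a QImode kmask: widen the data operands to
+;; 128 bits, reuse the V8HF/V8BF/V8HI vcond_mask, and take the lowpart.
+;; Mask bits beyond the real elements only affect lanes that the final
+;; lowpart subreg discards.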
+(define_expand "vcond_mask_<mode>qi"
+ [(set (match_operand:V4FI_64 0 "register_operand")
+ (vec_merge:V4FI_64
+ (match_operand:V4FI_64 1 "register_operand")
+ (match_operand:V4FI_64 2 "register_operand")
+ (match_operand:QI 3 "register_operand")))]
+ "TARGET_MMX_WITH_SSE && TARGET_AVX512BW && TARGET_AVX512VL"
+{
+ rtx op0 = gen_reg_rtx (<mmxxmmmode>mode);
+ operands[1] = lowpart_subreg (<mmxxmmmode>mode, operands[1], <MODE>mode);
+ operands[2] = lowpart_subreg (<mmxxmmmode>mode, operands[2], <MODE>mode);
+ emit_insn (gen_vcond_mask_<mmxxmmmodelower>qi (op0, operands[1],
+ operands[2], operands[3]));
+ emit_move_insn (operands[0],
+ lowpart_subreg (<MODE>mode, op0, <mmxxmmmode>mode));
+ DONE;
+})
+
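+;; As for V4HF above, but 4-byte vectors enter the SSE register with
+;; movd instead of movq.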
+(define_expand "vec_cmpv2hfqi"
+ [(set (match_operand:QI 0 "register_operand")
+ (match_operator:QI 1 ""
+ [(match_operand:V2HF 2 "nonimmediate_operand")
+ (match_operand:V2HF 3 "nonimmediate_operand")]))]
+ "TARGET_AVX512FP16 && TARGET_AVX512VL
+ && ix86_partial_vec_fp_math"
+{
+ rtx ops[4];
+ ops[3] = gen_reg_rtx (V8HFmode);
+ ops[2] = gen_reg_rtx (V8HFmode);
+
+ emit_insn (gen_movd_v2hf_to_sse (ops[3], operands[3]));
+ emit_insn (gen_movd_v2hf_to_sse (ops[2], operands[2]));
+ emit_insn (gen_vec_cmpv8hfqi (operands[0], operands[1], ops[2], ops[3]));
+ DONE;
+})
+
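+;; 4-byte analogue of the V4HI-masked expander above.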
+(define_expand "vcond_mask_<mode>v2hi"
+ [(set (match_operand:V2F_32 0 "register_operand")
+ (vec_merge:V2F_32
+ (match_operand:V2F_32 1 "register_operand")
+ (match_operand:V2F_32 2 "register_operand")
+ (match_operand:V2HI 3 "register_operand")))]
+ "TARGET_SSE4_1"
+{
+ ix86_expand_sse_movcc (operands[0], operands[3],
+ operands[1], operands[2]);
+ DONE;
+})
+
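+;; 4-byte analogue of the QImode-kmask expander above.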
+(define_expand "vcond_mask_<mode>qi"
+ [(set (match_operand:V2FI_32 0 "register_operand")
+ (vec_merge:V2FI_32
+ (match_operand:V2FI_32 1 "register_operand")
+ (match_operand:V2FI_32 2 "register_operand")
+ (match_operand:QI 3 "register_operand")))]
+ "TARGET_AVX512BW && TARGET_AVX512VL"
+{
+ rtx op0 = gen_reg_rtx (<mmxxmmmode>mode);
+ operands[1] = lowpart_subreg (<mmxxmmmode>mode, operands[1], <MODE>mode);
+ operands[2] = lowpart_subreg (<mmxxmmmode>mode, operands[2], <MODE>mode);
+ emit_insn (gen_vcond_mask_<mmxxmmmodelower>qi (op0, operands[1],
+ operands[2], operands[3]));
+ emit_move_insn (operands[0],
+ lowpart_subreg (<MODE>mode, op0, <mmxxmmmode>mode));
+ DONE;
+})
+
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel half-precision floating point rounding operations.
@@ -3251,6 +3360,21 @@ (define_insn "<code><mode>3"
(set_attr "prefix" "orig,orig,vex")
(set_attr "mode" "TI")])
+(define_split
+ [(set (match_operand:V4HI 0 "register_operand")
+ (eq:V4HI
+ (eq:V4HI
+ (us_minus:V4HI
+ (match_operand:V4HI 1 "register_operand")
+ (match_operand:V4HI 2 "register_operand"))
+ (match_operand:V4HI 3 "const0_operand"))
+ (match_operand:V4HI 4 "const0_operand")))]
+ "TARGET_SSE4_1 && TARGET_MMX_WITH_SSE"
+ [(set (match_dup 0)
+ (umin:V4HI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (eq:V4HI (match_dup 0) (match_dup 2)))])
+
(define_expand "mmx_<code>v8qi3"
[(set (match_operand:V8QI 0 "register_operand")
(umaxmin:V8QI
@@ -3284,6 +3408,21 @@ (define_expand "<code>v8qi3"
(match_operand:V8QI 2 "register_operand")))]
"TARGET_MMX_WITH_SSE")
+(define_split
+ [(set (match_operand:V8QI 0 "register_operand")
+ (eq:V8QI
+ (eq:V8QI
+ (us_minus:V8QI
+ (match_operand:V8QI 1 "register_operand")
+ (match_operand:V8QI 2 "register_operand"))
+ (match_operand:V8QI 3 "const0_operand"))
+ (match_operand:V8QI 4 "const0_operand")))]
+ "TARGET_MMX_WITH_SSE"
+ [(set (match_dup 0)
+ (umin:V8QI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (eq:V8QI (match_dup 0) (match_dup 2)))])
+
(define_insn "<code><mode>3"
[(set (match_operand:VI1_16_32 0 "register_operand" "=x,Yw")
(umaxmin:VI1_16_32
@@ -3297,6 +3436,21 @@ (define_insn "<code><mode>3"
(set_attr "type" "sseiadd")
(set_attr "mode" "TI")])
+(define_split
+ [(set (match_operand:V4QI 0 "register_operand")
+ (eq:V4QI
+ (eq:V4QI
+ (us_minus:V4QI
+ (match_operand:V4QI 1 "register_operand")
+ (match_operand:V4QI 2 "register_operand"))
+ (match_operand:V4QI 3 "const0_operand"))
+ (match_operand:V4QI 4 "const0_operand")))]
+ "TARGET_SSE2"
+ [(set (match_dup 0)
+ (umin:V4QI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (eq:V4QI (match_dup 0) (match_dup 2)))])
+
(define_insn "<code>v2hi3"
[(set (match_operand:V2HI 0 "register_operand" "=Yr,*x,Yv")
(umaxmin:V2HI
@@ -3313,6 +3467,21 @@ (define_insn "<code>v2hi3"
(set_attr "prefix" "orig,orig,vex")
(set_attr "mode" "TI")])
+(define_split
+ [(set (match_operand:V2HI 0 "register_operand")
+ (eq:V2HI
+ (eq:V2HI
+ (us_minus:V2HI
+ (match_operand:V2HI 1 "register_operand")
+ (match_operand:V2HI 2 "register_operand"))
+ (match_operand:V2HI 3 "const0_operand"))
+ (match_operand:V2HI 4 "const0_operand")))]
+ "TARGET_SSE4_1"
+ [(set (match_dup 0)
+ (umin:V2HI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (eq:V2HI (match_dup 0) (match_dup 2)))])
+
(define_insn "ssse3_abs<mode>2"
[(set (match_operand:MMXMODEI 0 "register_operand" "=y,Yv")
(abs:MMXMODEI
@@ -3785,6 +3954,54 @@ (define_insn "mmx_pblendvb_v8qi"
(set_attr "btver2_decode" "vector")
(set_attr "mode" "TI")])
+(define_split
+ [(set (match_operand:V8QI 0 "register_operand")
+ (unspec:V8QI
+ [(match_operand:V8QI 1 "register_operand")
+ (match_operand:V8QI 2 "register_operand")
+ (eq:V8QI
+ (eq:V8QI
+ (match_operand:V8QI 3 "register_operand")
+ (match_operand:V8QI 4 "register_operand"))
+ (match_operand:V8QI 5 "const0_operand"))]
+ UNSPEC_BLENDV))]
+ "TARGET_MMX_WITH_SSE"
+ [(set (match_dup 6)
+ (eq:V8QI (match_dup 3) (match_dup 4)))
+ (set (match_dup 0)
+ (unspec:V8QI
+ [(match_dup 2)
+ (match_dup 1)
+ (match_dup 6)]
+ UNSPEC_BLENDV))]
+ "operands[6] = gen_reg_rtx (V8QImode);")
+
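+;; Same transform when the negated comparison is wrapped in a
+;; same-size lowpart subreg from a mode with wider elements.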
+(define_split
+ [(set (match_operand:V8QI 0 "register_operand")
+ (unspec:V8QI
+ [(match_operand:V8QI 1 "register_operand")
+ (match_operand:V8QI 2 "register_operand")
+ (subreg:V8QI
+ (eq:MMXMODE24
+ (eq:MMXMODE24
+ (match_operand:MMXMODE24 3 "register_operand")
+ (match_operand:MMXMODE24 4 "register_operand"))
+ (match_operand:MMXMODE24 5 "const0_operand")) 0)]
+ UNSPEC_BLENDV))]
+ "TARGET_MMX_WITH_SSE"
+ [(set (match_dup 6)
+ (eq:MMXMODE24 (match_dup 3) (match_dup 4)))
+ (set (match_dup 0)
+ (unspec:V8QI
+ [(match_dup 2)
+ (match_dup 1)
+ (match_dup 7)]
+ UNSPEC_BLENDV))]
+{
+ operands[6] = gen_reg_rtx (<MODE>mode);
+ operands[7] = lowpart_subreg (V8QImode, operands[6], <MODE>mode);
+})
+
(define_insn "mmx_pblendvb_<mode>"
[(set (match_operand:VI_16_32 0 "register_operand" "=Yr,*x,x")
(unspec:VI_16_32
@@ -3805,6 +4022,54 @@ (define_insn "mmx_pblendvb_<mode>"
(set_attr "btver2_decode" "vector")
(set_attr "mode" "TI")])
+(define_split
+ [(set (match_operand:VI_16_32 0 "register_operand")
+ (unspec:VI_16_32
+ [(match_operand:VI_16_32 1 "register_operand")
+ (match_operand:VI_16_32 2 "register_operand")
+ (eq:VI_16_32
+ (eq:VI_16_32
+ (match_operand:VI_16_32 3 "register_operand")
+ (match_operand:VI_16_32 4 "register_operand"))
+ (match_operand:VI_16_32 5 "const0_operand"))]
+ UNSPEC_BLENDV))]
+ "TARGET_SSE2"
+ [(set (match_dup 6)
+ (eq:VI_16_32 (match_dup 3) (match_dup 4)))
+ (set (match_dup 0)
+ (unspec:VI_16_32
+ [(match_dup 2)
+ (match_dup 1)
+ (match_dup 6)]
+ UNSPEC_BLENDV))]
+ "operands[6] = gen_reg_rtx (<MODE>mode);")
+
+(define_split
+ [(set (match_operand:V4QI 0 "register_operand")
+ (unspec:V4QI
+ [(match_operand:V4QI 1 "register_operand")
+ (match_operand:V4QI 2 "register_operand")
+ (subreg:V4QI
+ (eq:V2HI
+ (eq:V2HI
+ (match_operand:V2HI 3 "register_operand")
+ (match_operand:V2HI 4 "register_operand"))
+ (match_operand:V2HI 5 "const0_operand")) 0)]
+ UNSPEC_BLENDV))]
+ "TARGET_SSE2"
+ [(set (match_dup 6)
+ (eq:V2HI (match_dup 3) (match_dup 4)))
+ (set (match_dup 0)
+ (unspec:V4QI
+ [(match_dup 2)
+ (match_dup 1)
+ (match_dup 7)]
+ UNSPEC_BLENDV))]
+{
+ operands[6] = gen_reg_rtx (V2HImode);
+ operands[7] = lowpart_subreg (V4QImode, operands[6], V2HImode);
+})
+
;; XOP parallel XMM conditional moves
(define_insn "*xop_pcmov_<mode>"
[(set (match_operand:MMXMODE124 0 "register_operand" "=x")
@@ -4644,29 +4644,14 @@ (define_expand "vcond<V_128:mode><VF_128:mode>"
DONE;
})
-(define_expand "vcond<mode><mode>"
- [(set (match_operand:VHF_AVX512VL 0 "register_operand")
- (if_then_else:VHF_AVX512VL
- (match_operator 3 ""
- [(match_operand:VHF_AVX512VL 4 "vector_operand")
- (match_operand:VHF_AVX512VL 5 "vector_operand")])
- (match_operand:VHF_AVX512VL 1 "general_operand")
- (match_operand:VHF_AVX512VL 2 "general_operand")))]
- "TARGET_AVX512FP16"
-{
- bool ok = ix86_expand_fp_vcond (operands);
- gcc_assert (ok);
- DONE;
-})
-
-(define_expand "vcond<sseintvecmodelower><mode>"
- [(set (match_operand:<sseintvecmode> 0 "register_operand")
- (if_then_else:<sseintvecmode>
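+;; vcond with HF comparison operands; the destination may be any
+;; 16-bit-element vector mode of the same size (HI, HF or BF).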
+(define_expand "vcond<VI2HFBF_AVX512VL:mode><VHF_AVX512VL:mode>"
+ [(set (match_operand:VI2HFBF_AVX512VL 0 "register_operand")
+ (if_then_else:VI2HFBF_AVX512VL
(match_operator 3 ""
[(match_operand:VHF_AVX512VL 4 "vector_operand")
(match_operand:VHF_AVX512VL 5 "vector_operand")])
- (match_operand:<sseintvecmode> 1 "general_operand")
- (match_operand:<sseintvecmode> 2 "general_operand")))]
+ (match_operand:VI2HFBF_AVX512VL 1 "general_operand")
+ (match_operand:VI2HFBF_AVX512VL 2 "general_operand")))]
"TARGET_AVX512FP16"
{
bool ok = ix86_expand_fp_vcond (operands);
new file mode 100644
@@ -0,0 +1,45 @@
+/* PR target/103861 */
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -mavx512fp16 -mavx512vl" } */
+/* { dg-final { scan-assembler-times "vpcmpeqw" 6 } } */
+/* { dg-final { scan-assembler-times "vpcmpgtw" 2 } } */
+/* { dg-final { scan-assembler-times "vpminuw" 2 } } */
+/* { dg-final { scan-assembler-times "vcmpph" 8 } } */
+/* { dg-final { scan-assembler-times "vpblendvb" 8 } } */
+typedef unsigned short __attribute__((__vector_size__ (4))) __v2hu;
+typedef short __attribute__((__vector_size__ (4))) __v2hi;
+
+typedef unsigned short __attribute__((__vector_size__ (8))) __v4hu;
+typedef short __attribute__((__vector_size__ (8))) __v4hi;
+
+typedef _Float16 __attribute__((__vector_size__ (4))) __v2hf;
+typedef _Float16 __attribute__((__vector_size__ (8))) __v4hf;
+
+
+__v2hu au, bu;
+__v2hi as, bs;
+__v2hf af, bf;
+
+__v4hu cu, du;
+__v4hi cs, ds;
+__v4hf cf, df;
+
+__v2hf auf (__v2hu a, __v2hu b) { return (a > b) ? af : bf; }
+__v2hf asf (__v2hi a, __v2hi b) { return (a > b) ? af : bf; }
+__v2hu afu (__v2hf a, __v2hf b) { return (a > b) ? au : bu; }
+__v2hi afs (__v2hf a, __v2hf b) { return (a > b) ? as : bs; }
+
+__v4hf cuf (__v4hu c, __v4hu d) { return (c > d) ? cf : df; }
+__v4hf csf (__v4hi c, __v4hi d) { return (c > d) ? cf : df; }
+__v4hu cfu (__v4hf c, __v4hf d) { return (c > d) ? cu : du; }
+__v4hi cfs (__v4hf c, __v4hf d) { return (c > d) ? cs : ds; }
+
+__v2hf auf_ne (__v2hu a, __v2hu b) { return (a != b) ? af : bf; }
+__v2hf asf_ne (__v2hi a, __v2hi b) { return (a != b) ? af : bf; }
+__v2hu afu_ne (__v2hf a, __v2hf b) { return (a != b) ? au : bu; }
+__v2hi afs_ne (__v2hf a, __v2hf b) { return (a != b) ? as : bs; }
+
+__v4hf cuf_ne (__v4hu c, __v4hu d) { return (c != d) ? cf : df; }
+__v4hf csf_ne (__v4hi c, __v4hi d) { return (c != d) ? cf : df; }
+__v4hu cfu_ne (__v4hf c, __v4hf d) { return (c != d) ? cu : du; }
+__v4hi cfs_ne (__v4hf c, __v4hf d) { return (c != d) ? cs : ds; }
new file mode 100644
@@ -0,0 +1,26 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -mavx512fp16 -mavx512vl" } */
+/* { dg-final { scan-assembler-times "vcmpph" 10 } } */
+
+typedef _Float16 __attribute__((__vector_size__ (4))) v2hf;
+typedef _Float16 __attribute__((__vector_size__ (8))) v4hf;
+
+
+#define VCMPMN(type, op, name) \
+type \
+__attribute__ ((noinline, noclone)) \
+vec_cmp_##type##type##name (type a, type b) \
+{ \
+ return a op b; \
+}
+
+VCMPMN (v4hf, <, lt)
+VCMPMN (v2hf, <, lt)
+VCMPMN (v4hf, <=, le)
+VCMPMN (v2hf, <=, le)
+VCMPMN (v4hf, >, gt)
+VCMPMN (v2hf, >, gt)
+VCMPMN (v4hf, >=, ge)
+VCMPMN (v2hf, >=, ge)
+VCMPMN (v4hf, ==, eq)
+VCMPMN (v2hf, ==, eq)