@@ -908,6 +908,7 @@ DEF_GCC_BUILTIN (BUILT_IN_ISLESS,
DEF_GCC_BUILTIN (BUILT_IN_ISLESSEQUAL, "islessequal", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
DEF_GCC_BUILTIN (BUILT_IN_ISLESSGREATER, "islessgreater", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
DEF_GCC_BUILTIN (BUILT_IN_ISUNORDERED, "isunordered", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_ISSIGNALING, "issignaling", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
DEF_LIB_BUILTIN (BUILT_IN_LABS, "labs", BT_FN_LONG_LONG, ATTR_CONST_NOTHROW_LEAF_LIST)
DEF_C99_BUILTIN (BUILT_IN_LLABS, "llabs", BT_FN_LONGLONG_LONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
DEF_GCC_BUILTIN (BUILT_IN_LONGJMP, "longjmp", BT_FN_VOID_PTR_INT, ATTR_NORETURN_NOTHROW_LIST)
@@ -123,6 +123,7 @@ static rtx expand_builtin_fegetround (tr
static rtx expand_builtin_feclear_feraise_except (tree, rtx, machine_mode,
optab);
static rtx expand_builtin_cexpi (tree, rtx);
+static rtx expand_builtin_issignaling (tree, rtx);
static rtx expand_builtin_int_roundingfn (tree, rtx);
static rtx expand_builtin_int_roundingfn_2 (tree, rtx);
static rtx expand_builtin_next_arg (void);
@@ -2747,6 +2748,300 @@ build_call_nofold_loc (location_t loc, t
return fn;
}
+/* Expand the __builtin_issignaling builtin. This needs to handle
+ all floating point formats that do support NaNs (for those that
+ don't it just sets target to 0). */
+
+static rtx
+expand_builtin_issignaling (tree exp, rtx target)
+{
+ if (!validate_arglist (exp, REAL_TYPE, VOID_TYPE))
+ return NULL_RTX;
+
+ tree arg = CALL_EXPR_ARG (exp, 0);
+ scalar_float_mode fmode = SCALAR_FLOAT_TYPE_MODE (TREE_TYPE (arg));
+ const struct real_format *fmt = REAL_MODE_FORMAT (fmode);
+
+ /* Expand the argument yielding a RTX expression. */
+ rtx temp = expand_normal (arg);
+
+ /* If mode doesn't support NaN, always return 0.
+ Don't use !HONOR_SNANS (fmode) here, so there is some possibility of
+ __builtin_issignaling working without -fsignaling-nans. Especially
+ when -fno-signaling-nans is the default.
+     On the other hand, MODE_HAS_NANS (fmode) is unnecessary: with
+ -ffinite-math-only even __builtin_isnan or __builtin_fpclassify
+ fold to 0 or non-NaN/Inf classification. */
+ if (!HONOR_NANS (fmode))
+ {
+ emit_move_insn (target, const0_rtx);
+ return target;
+ }
+
+ /* Check if the back end provides an insn that handles issignaling for the
+ argument's mode. */
+ enum insn_code icode = optab_handler (issignaling_optab, fmode);
+ if (icode != CODE_FOR_nothing)
+ {
+ rtx_insn *last = get_last_insn ();
+ rtx this_target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
+ if (maybe_emit_unop_insn (icode, this_target, temp, UNKNOWN))
+ return this_target;
+ delete_insns_since (last);
+ }
+
+ if (DECIMAL_FLOAT_MODE_P (fmode))
+ {
+ scalar_int_mode imode;
+ rtx hi;
+ switch (fmt->ieee_bits)
+ {
+ case 32:
+ case 64:
+ imode = int_mode_for_mode (fmode).require ();
+ temp = gen_lowpart (imode, temp);
+ break;
+ case 128:
+ imode = int_mode_for_size (64, 1).require ();
+ hi = NULL_RTX;
+ /* For decimal128, TImode support isn't always there and even when
+ it is, working on the DImode high part is usually better. */
+ if (!MEM_P (temp))
+ {
+ if (rtx t = simplify_gen_subreg (imode, temp, fmode,
+ subreg_highpart_offset (imode,
+ fmode)))
+ hi = t;
+ else
+ {
+ scalar_int_mode imode2;
+ if (int_mode_for_mode (fmode).exists (&imode2))
+ {
+ rtx temp2 = gen_lowpart (imode2, temp);
+ poly_uint64 off = subreg_highpart_offset (imode, imode2);
+ if (rtx t = simplify_gen_subreg (imode, temp2,
+ imode2, off))
+ hi = t;
+ }
+ }
+ if (!hi)
+ {
+ rtx mem = assign_stack_temp (fmode, GET_MODE_SIZE (fmode));
+ emit_move_insn (mem, temp);
+ temp = mem;
+ }
+ }
+ if (!hi)
+ {
+ poly_int64 offset
+ = subreg_highpart_offset (imode, GET_MODE (temp));
+ hi = adjust_address (temp, imode, offset);
+ }
+ temp = hi;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+  /* In all of decimal{32,64,128}, the MSB is the sign bit and sNaNs
+     have the 6 bits below it all set.  */
+ rtx val
+ = GEN_INT (HOST_WIDE_INT_C (0x3f) << (GET_MODE_BITSIZE (imode) - 7));
+ temp = expand_binop (imode, and_optab, temp, val,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp = emit_store_flag_force (target, EQ, temp, val, imode, 1, 1);
+ return temp;
+ }
+
+ /* Only PDP11 has these defined differently but doesn't support NaNs. */
+ gcc_assert (FLOAT_WORDS_BIG_ENDIAN == WORDS_BIG_ENDIAN);
+ gcc_assert (fmt->signbit_ro > 0 && fmt->b == 2);
+ gcc_assert (MODE_COMPOSITE_P (fmode)
+ || (fmt->pnan == fmt->p
+ && fmt->signbit_ro == fmt->signbit_rw));
+
+ switch (fmt->p)
+ {
+ case 106: /* IBM double double */
+ /* For IBM double double, recurse on the most significant double. */
+ gcc_assert (MODE_COMPOSITE_P (fmode));
+ temp = convert_modes (DFmode, fmode, temp, 0);
+ fmode = DFmode;
+ fmt = REAL_MODE_FORMAT (DFmode);
+ /* FALLTHRU */
+ case 8: /* bfloat */
+ case 11: /* IEEE half */
+ case 24: /* IEEE single */
+ case 53: /* IEEE double or Intel extended with rounding to double */
+ if (fmt->p == 53 && fmt->signbit_ro == 79)
+ goto extended;
+ {
+ scalar_int_mode imode = int_mode_for_mode (fmode).require ();
+ temp = gen_lowpart (imode, temp);
+ rtx val = GEN_INT ((HOST_WIDE_INT_M1U << (fmt->p - 2))
+ & ~(HOST_WIDE_INT_M1U << fmt->signbit_ro));
+ if (fmt->qnan_msb_set)
+ {
+ rtx mask = GEN_INT (~(HOST_WIDE_INT_M1U << fmt->signbit_ro));
+ rtx bit = GEN_INT (HOST_WIDE_INT_1U << (fmt->p - 2));
+ /* For non-MIPS/PA IEEE single/double/half or bfloat, expand to:
+ ((temp ^ bit) & mask) > val. */
+ temp = expand_binop (imode, xor_optab, temp, bit,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp = expand_binop (imode, and_optab, temp, mask,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp = emit_store_flag_force (target, GTU, temp, val, imode,
+ 1, 1);
+ }
+ else
+ {
+ /* For MIPS/PA IEEE single/double, expand to:
+ (temp & val) == val. */
+ temp = expand_binop (imode, and_optab, temp, val,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp = emit_store_flag_force (target, EQ, temp, val, imode,
+ 1, 1);
+ }
+ }
+ break;
+ case 113: /* IEEE quad */
+ {
+ rtx hi = NULL_RTX, lo = NULL_RTX;
+ scalar_int_mode imode = int_mode_for_size (64, 1).require ();
+ /* For IEEE quad, TImode support isn't always there and even when
+ it is, working on DImode parts is usually better. */
+ if (!MEM_P (temp))
+ {
+ hi = simplify_gen_subreg (imode, temp, fmode,
+ subreg_highpart_offset (imode, fmode));
+ lo = simplify_gen_subreg (imode, temp, fmode,
+ subreg_lowpart_offset (imode, fmode));
+ if (!hi || !lo)
+ {
+ scalar_int_mode imode2;
+ if (int_mode_for_mode (fmode).exists (&imode2))
+ {
+ rtx temp2 = gen_lowpart (imode2, temp);
+ hi = simplify_gen_subreg (imode, temp2, imode2,
+ subreg_highpart_offset (imode,
+ imode2));
+ lo = simplify_gen_subreg (imode, temp2, imode2,
+ subreg_lowpart_offset (imode,
+ imode2));
+ }
+ }
+ if (!hi || !lo)
+ {
+ rtx mem = assign_stack_temp (fmode, GET_MODE_SIZE (fmode));
+ emit_move_insn (mem, temp);
+ temp = mem;
+ }
+ }
+ if (!hi || !lo)
+ {
+ poly_int64 offset
+ = subreg_highpart_offset (imode, GET_MODE (temp));
+ hi = adjust_address (temp, imode, offset);
+ offset = subreg_lowpart_offset (imode, GET_MODE (temp));
+ lo = adjust_address (temp, imode, offset);
+ }
+ rtx val = GEN_INT ((HOST_WIDE_INT_M1U << (fmt->p - 2 - 64))
+ & ~(HOST_WIDE_INT_M1U << (fmt->signbit_ro - 64)));
+ if (fmt->qnan_msb_set)
+ {
+ rtx mask = GEN_INT (~(HOST_WIDE_INT_M1U << (fmt->signbit_ro
+ - 64)));
+ rtx bit = GEN_INT (HOST_WIDE_INT_1U << (fmt->p - 2 - 64));
+ /* For non-MIPS/PA IEEE quad, expand to:
+ (((hi ^ bit) | ((lo | -lo) >> 63)) & mask) > val. */
+ rtx nlo = expand_unop (imode, neg_optab, lo, NULL_RTX, 0);
+ lo = expand_binop (imode, ior_optab, lo, nlo,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ lo = expand_shift (RSHIFT_EXPR, imode, lo, 63, NULL_RTX, 1);
+ temp = expand_binop (imode, xor_optab, hi, bit,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp = expand_binop (imode, ior_optab, temp, lo,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp = expand_binop (imode, and_optab, temp, mask,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp = emit_store_flag_force (target, GTU, temp, val, imode,
+ 1, 1);
+ }
+ else
+ {
+ /* For MIPS/PA IEEE quad, expand to:
+ (hi & val) == val. */
+ temp = expand_binop (imode, and_optab, hi, val,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp = emit_store_flag_force (target, EQ, temp, val, imode,
+ 1, 1);
+ }
+ }
+ break;
+ case 64: /* Intel or Motorola extended */
+ extended:
+ {
+ rtx ex, hi, lo;
+ scalar_int_mode imode = int_mode_for_size (32, 1).require ();
+ scalar_int_mode iemode = int_mode_for_size (16, 1).require ();
+ if (!MEM_P (temp))
+ {
+ rtx mem = assign_stack_temp (fmode, GET_MODE_SIZE (fmode));
+ emit_move_insn (mem, temp);
+ temp = mem;
+ }
+ if (fmt->signbit_ro == 95)
+ {
+ /* Motorola, always big endian, with 16-bit gap in between
+ 16-bit sign+exponent and 64-bit mantissa. */
+ ex = adjust_address (temp, iemode, 0);
+ hi = adjust_address (temp, imode, 4);
+ lo = adjust_address (temp, imode, 8);
+ }
+ else if (!WORDS_BIG_ENDIAN)
+ {
+ /* Intel little endian, 64-bit mantissa followed by 16-bit
+ sign+exponent and then either 16 or 48 bits of gap. */
+ ex = adjust_address (temp, iemode, 8);
+ hi = adjust_address (temp, imode, 4);
+ lo = adjust_address (temp, imode, 0);
+ }
+ else
+ {
+ /* Big endian Itanium. */
+ ex = adjust_address (temp, iemode, 0);
+ hi = adjust_address (temp, imode, 2);
+ lo = adjust_address (temp, imode, 6);
+ }
+ rtx val = GEN_INT (HOST_WIDE_INT_M1U << 30);
+ gcc_assert (fmt->qnan_msb_set);
+ rtx mask = GEN_INT (0x7fff);
+ rtx bit = GEN_INT (HOST_WIDE_INT_1U << 30);
+ /* For Intel/Motorola extended format, expand to:
+ (ex & mask) == mask && ((hi ^ bit) | ((lo | -lo) >> 31)) > val. */
+ rtx nlo = expand_unop (imode, neg_optab, lo, NULL_RTX, 0);
+ lo = expand_binop (imode, ior_optab, lo, nlo,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ lo = expand_shift (RSHIFT_EXPR, imode, lo, 31, NULL_RTX, 1);
+ temp = expand_binop (imode, xor_optab, hi, bit,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp = expand_binop (imode, ior_optab, temp, lo,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp = emit_store_flag_force (target, GTU, temp, val, imode, 1, 1);
+ ex = expand_binop (iemode, and_optab, ex, mask,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ ex = emit_store_flag_force (gen_reg_rtx (GET_MODE (temp)), EQ,
+ ex, mask, iemode, 1, 1);
+ temp = expand_binop (GET_MODE (temp), and_optab, temp, ex,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ }
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ return temp;
+}
+
/* Expand a call to one of the builtin rounding functions gcc defines
as an extension (lfloor and lceil). As these are gcc extensions we
do not need to worry about setting errno to EDOM.
@@ -5508,9 +5803,9 @@ expand_builtin_signbit (tree exp, rtx ta
if (icode != CODE_FOR_nothing)
{
rtx_insn *last = get_last_insn ();
- target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
- if (maybe_emit_unop_insn (icode, target, temp, UNKNOWN))
- return target;
+ rtx this_target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
+ if (maybe_emit_unop_insn (icode, this_target, temp, UNKNOWN))
+ return this_target;
delete_insns_since (last);
}
@@ -7120,6 +7415,12 @@ expand_builtin (tree exp, rtx target, rt
return target;
break;
+ case BUILT_IN_ISSIGNALING:
+ target = expand_builtin_issignaling (exp, target);
+ if (target)
+ return target;
+ break;
+
CASE_FLT_FN (BUILT_IN_ICEIL):
CASE_FLT_FN (BUILT_IN_LCEIL):
CASE_FLT_FN (BUILT_IN_LLCEIL):
@@ -8963,6 +9264,17 @@ fold_builtin_classify (location_t loc, t
arg = builtin_save_expr (arg);
return fold_build2_loc (loc, UNORDERED_EXPR, type, arg, arg);
+ case BUILT_IN_ISSIGNALING:
+ /* Folding to true for REAL_CST is done in fold_const_call_ss.
+ Don't use tree_expr_signaling_nan_p (arg) -> integer_one_node
+ and !tree_expr_maybe_signaling_nan_p (arg) -> integer_zero_node
+ here, so there is some possibility of __builtin_issignaling working
+ without -fsignaling-nans. Especially when -fno-signaling-nans is
+ the default. */
+ if (!tree_expr_maybe_nan_p (arg))
+ return omit_one_operand_loc (loc, type, integer_zero_node, arg);
+ return NULL_TREE;
+
default:
gcc_unreachable ();
}
@@ -9399,6 +9711,9 @@ fold_builtin_1 (location_t loc, tree exp
case BUILT_IN_ISNAND128:
return fold_builtin_classify (loc, fndecl, arg0, BUILT_IN_ISNAN);
+ case BUILT_IN_ISSIGNALING:
+ return fold_builtin_classify (loc, fndecl, arg0, BUILT_IN_ISSIGNALING);
+
case BUILT_IN_FREE:
if (integer_zerop (arg0))
return build_empty_stmt (loc);
@@ -313,6 +313,7 @@ OPTAB_D (fmod_optab, "fmod$a3")
OPTAB_D (hypot_optab, "hypot$a3")
OPTAB_D (ilogb_optab, "ilogb$a2")
OPTAB_D (isinf_optab, "isinf$a2")
+OPTAB_D (issignaling_optab, "issignaling$a2")
OPTAB_D (ldexp_optab, "ldexp$a3")
OPTAB_D (log10_optab, "log10$a2")
OPTAB_D (log1p_optab, "log1p$a2")
@@ -952,6 +952,10 @@ fold_const_call_ss (wide_int *result, co
*result = wi::shwi (real_isfinite (arg) ? 1 : 0, precision);
return true;
+ case CFN_BUILT_IN_ISSIGNALING:
+ *result = wi::shwi (real_issignaling_nan (arg) ? 1 : 0, precision);
+ return true;
+
CASE_CFN_ISINF:
case CFN_BUILT_IN_ISINFD32:
case CFN_BUILT_IN_ISINFD64:
@@ -24732,6 +24732,58 @@ (define_expand "spaceshipxf3"
DONE;
})
+;; Defined because the generic expand_builtin_issignaling for XFmode
+;; only tests for sNaNs, but i387 also treats pseudo numbers as always
+;; signaling.
+(define_expand "issignalingxf2"
+ [(match_operand:SI 0 "register_operand")
+ (match_operand:XF 1 "general_operand")]
+ ""
+{
+ rtx temp = operands[1];
+ if (!MEM_P (temp))
+ {
+ rtx mem = assign_stack_temp (XFmode, GET_MODE_SIZE (XFmode));
+ emit_move_insn (mem, temp);
+ temp = mem;
+ }
+ rtx ex = adjust_address (temp, HImode, 8);
+ rtx hi = adjust_address (temp, SImode, 4);
+ rtx lo = adjust_address (temp, SImode, 0);
+ rtx val = GEN_INT (HOST_WIDE_INT_M1U << 30);
+ rtx mask = GEN_INT (0x7fff);
+ rtx bit = GEN_INT (HOST_WIDE_INT_1U << 30);
+ /* Expand to:
+ ((ex & mask) && (int) hi >= 0)
+ || ((ex & mask) == mask && ((hi ^ bit) | ((lo | -lo) >> 31)) > val). */
+ rtx nlo = expand_unop (SImode, neg_optab, lo, NULL_RTX, 0);
+ lo = expand_binop (SImode, ior_optab, lo, nlo,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ lo = expand_shift (RSHIFT_EXPR, SImode, lo, 31, NULL_RTX, 1);
+ temp = expand_binop (SImode, xor_optab, hi, bit,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp = expand_binop (SImode, ior_optab, temp, lo,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp = emit_store_flag_force (gen_reg_rtx (SImode), GTU, temp, val,
+ SImode, 1, 1);
+ ex = expand_binop (HImode, and_optab, ex, mask,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ rtx temp2 = emit_store_flag_force (gen_reg_rtx (SImode), NE,
+ ex, const0_rtx, SImode, 1, 1);
+ ex = emit_store_flag_force (gen_reg_rtx (SImode), EQ,
+ ex, mask, HImode, 1, 1);
+ temp = expand_binop (SImode, and_optab, temp, ex,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ rtx temp3 = emit_store_flag_force (gen_reg_rtx (SImode), GE,
+ hi, const0_rtx, SImode, 0, 1);
+ temp2 = expand_binop (SImode, and_optab, temp2, temp3,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp = expand_binop (SImode, ior_optab, temp, temp2,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ emit_move_insn (operands[0], temp);
+ DONE;
+})
+
(include "mmx.md")
(include "sse.md")
(include "sync.md")
@@ -13001,6 +13001,7 @@ is called and the @var{flag} argument pa
@findex __builtin_isless
@findex __builtin_islessequal
@findex __builtin_islessgreater
+@findex __builtin_issignaling
@findex __builtin_isunordered
@findex __builtin_object_size
@findex __builtin_powi
@@ -13556,6 +13557,8 @@ In the same fashion, GCC provides @code{
@code{isinf_sign}, @code{isnormal} and @code{signbit} built-ins used with
@code{__builtin_} prefixed. The @code{isinf} and @code{isnan}
built-in functions appear both with and without the @code{__builtin_} prefix.
+With the @code{-ffinite-math-only} option, the @code{isinf} and @code{isnan}
+built-in functions will always return 0.
GCC provides built-in versions of the ISO C99 floating-point rounding and
exceptions handling functions @code{fegetround}, @code{feclearexcept} and
@@ -14489,6 +14492,20 @@ Similar to @code{__builtin_nans}, except
@code{_Float@var{n}x}.
@end deftypefn
+@deftypefn {Built-in Function} int __builtin_issignaling (...)
+Return non-zero if the argument is a signaling NaN and zero otherwise.
+Note that while the parameter list is an
+ellipsis, this function only accepts exactly one floating-point
+argument. GCC treats this parameter as type-generic, which means it
+does not do default promotion from float to double.
+This built-in function can work even without the non-default
+@code{-fsignaling-nans} option, although if a signaling NaN is computed,
+stored or passed as argument to some function other than this built-in
+in the current translation unit, it is safer to use @code{-fsignaling-nans}.
+With the @code{-ffinite-math-only} option, this built-in function will always
+return 0.
+@end deftypefn
+
@deftypefn {Built-in Function} int __builtin_ffs (int x)
Returns one plus the index of the least significant 1-bit of @var{x}, or
if @var{x} is zero, returns zero.
@@ -6184,6 +6184,10 @@ floating-point mode.
This pattern is not allowed to @code{FAIL}.
+@cindex @code{issignaling@var{m}2} instruction pattern
+@item @samp{issignaling@var{m}2}
+Set operand 0 to 1 if operand 1 is a signaling NaN and to 0 otherwise.
+
@cindex @code{cadd90@var{m}3} instruction pattern
@item @samp{cadd90@var{m}3}
Perform vector add and subtract on even/odd number pairs. The operation being
@@ -6294,6 +6294,7 @@ check_builtin_function_arguments (locati
case BUILT_IN_ISINF_SIGN:
case BUILT_IN_ISNAN:
case BUILT_IN_ISNORMAL:
+ case BUILT_IN_ISSIGNALING:
case BUILT_IN_SIGNBIT:
if (builtin_function_validate_nargs (loc, fndecl, nargs, 1))
{
@@ -3546,6 +3546,7 @@ convert_arguments (location_t loc, vec<l
case BUILT_IN_ISINF_SIGN:
case BUILT_IN_ISNAN:
case BUILT_IN_ISNORMAL:
+ case BUILT_IN_ISSIGNALING:
case BUILT_IN_FPCLASSIFY:
type_generic_remove_excess_precision = true;
break;
@@ -1013,6 +1013,8 @@ gfc_init_builtin_functions (void)
"__builtin_isnan", ATTR_CONST_NOTHROW_LEAF_LIST);
gfc_define_builtin ("__builtin_isnormal", ftype, BUILT_IN_ISNORMAL,
"__builtin_isnormal", ATTR_CONST_NOTHROW_LEAF_LIST);
+ gfc_define_builtin ("__builtin_issignaling", ftype, BUILT_IN_ISSIGNALING,
+ "__builtin_issignaling", ATTR_CONST_NOTHROW_LEAF_LIST);
gfc_define_builtin ("__builtin_signbit", ftype, BUILT_IN_SIGNBIT,
"__builtin_signbit", ATTR_CONST_NOTHROW_LEAF_LIST);
@@ -0,0 +1,130 @@
+/* { dg-do run } */
+/* { dg-add-options ieee } */
+/* { dg-additional-options "-fsignaling-nans" } */
+/* Workaround for PR57484 on ia32: */
+/* { dg-additional-options "-msse2 -mfpmath=sse" { target { ia32 && sse2_runtime } } } */
+
+#ifndef EXT
+int
+f1 (void)
+{
+ return __builtin_issignaling (__builtin_nansf (""));
+}
+
+int
+f2 (void)
+{
+ return __builtin_issignaling (__builtin_nan (""));
+}
+
+int
+f3 (void)
+{
+ return __builtin_issignaling (0.0L);
+}
+
+int
+f4 (float x)
+{
+ return __builtin_issignaling (x);
+}
+
+int
+f5 (double x)
+{
+ return __builtin_issignaling (x);
+}
+
+int
+f6 (long double x)
+{
+ return __builtin_issignaling (x);
+}
+#else
+#define CONCATX(X, Y) X ## Y
+#define CONCAT(X, Y) CONCATX (X, Y)
+#define CONCAT3(X, Y, Z) CONCAT (CONCAT (X, Y), Z)
+#define CONCAT4(W, X, Y, Z) CONCAT (CONCAT (CONCAT (W, X), Y), Z)
+
+#if EXT
+# define TYPE CONCAT3 (_Float, WIDTH, x)
+# define CST(C) CONCAT4 (C, f, WIDTH, x)
+# define FN(F) CONCAT4 (F, f, WIDTH, x)
+#else
+# define TYPE CONCAT (_Float, WIDTH)
+# define CST(C) CONCAT3 (C, f, WIDTH)
+# define FN(F) CONCAT3 (F, f, WIDTH)
+#endif
+
+int
+f1 (void)
+{
+ return __builtin_issignaling (FN (__builtin_nans) (""));
+}
+
+int
+f2 (void)
+{
+ return __builtin_issignaling (FN (__builtin_nan) (""));
+}
+
+int
+f3 (void)
+{
+ return __builtin_issignaling (CST (0.0));
+}
+
+int
+f4 (TYPE x)
+{
+ return __builtin_issignaling (x);
+}
+#endif
+
+#ifndef EXT
+float x;
+double y;
+long double z;
+#else
+TYPE w;
+#endif
+
+int
+main ()
+{
+ if (!f1 () || f2 () || f3 ())
+ __builtin_abort ();
+ asm volatile ("" : : : "memory");
+#ifndef EXT
+ if (f4 (x) || !f4 (__builtin_nansf ("0x123")) || f4 (42.0f) || f4 (__builtin_nanf ("0x234"))
+ || f4 (__builtin_inff ()) || f4 (-__builtin_inff ()) || f4 (-42.0f) || f4 (-0.0f) || f4 (0.0f))
+ __builtin_abort ();
+ x = __builtin_nansf ("");
+ asm volatile ("" : : : "memory");
+ if (!f4 (x))
+ __builtin_abort ();
+ if (f5 (y) || !f5 (__builtin_nans ("0x123")) || f5 (42.0) || f5 (__builtin_nan ("0x234"))
+ || f5 (__builtin_inf ()) || f5 (-__builtin_inf ()) || f5 (-42.0) || f5 (-0.0) || f5 (0.0))
+ __builtin_abort ();
+ y = __builtin_nans ("");
+ asm volatile ("" : : : "memory");
+ if (!f5 (y))
+ __builtin_abort ();
+ if (f6 (z) || !f6 (__builtin_nansl ("0x123")) || f6 (42.0L) || f6 (__builtin_nanl ("0x234"))
+ || f6 (__builtin_infl ()) || f6 (-__builtin_infl ()) || f6 (-42.0L) || f6 (-0.0L) || f6 (0.0L))
+ __builtin_abort ();
+ z = __builtin_nansl ("");
+ asm volatile ("" : : : "memory");
+ if (!f6 (z))
+ __builtin_abort ();
+#else
+ if (f4 (w) || !f4 (FN (__builtin_nans) ("0x123")) || f4 (CST (42.0)) || f4 (FN (__builtin_nan) ("0x234"))
+ || f4 (FN (__builtin_inf) ()) || f4 (-FN (__builtin_inf) ()) || f4 (CST (-42.0)) || f4 (CST (-0.0)) || f4 (CST (0.0)))
+ __builtin_abort ();
+ w = FN (__builtin_nans) ("");
+ asm volatile ("" : : : "memory");
+ if (!f4 (w))
+ __builtin_abort ();
+#endif
+ return 0;
+}
@@ -0,0 +1,73 @@
+/* { dg-do run } */
+/* { dg-require-effective-target dfp } */
+/* { dg-additional-options "-fsignaling-nans" } */
+
+int
+f1 (void)
+{
+ return __builtin_issignaling (__builtin_nansd32 (""));
+}
+
+int
+f2 (void)
+{
+ return __builtin_issignaling (__builtin_nand64 (""));
+}
+
+int
+f3 (void)
+{
+ return __builtin_issignaling (0.0DD);
+}
+
+int
+f4 (_Decimal32 x)
+{
+ return __builtin_issignaling (x);
+}
+
+int
+f5 (_Decimal64 x)
+{
+ return __builtin_issignaling (x);
+}
+
+int
+f6 (_Decimal128 x)
+{
+ return __builtin_issignaling (x);
+}
+
+_Decimal32 x;
+_Decimal64 y;
+_Decimal128 z;
+
+int
+main ()
+{
+ if (!f1 () || f2 () || f3 ())
+ __builtin_abort ();
+ asm volatile ("" : : : "memory");
+ if (f4 (x) || !f4 (__builtin_nansd32 ("0x123")) || f4 (42.0DF) || f4 (__builtin_nand32 ("0x234"))
+ || f4 (__builtin_infd32 ()) || f4 (-__builtin_infd32 ()) || f4 (-42.0DF) || f4 (-0.0DF) || f4 (0.0DF))
+ __builtin_abort ();
+ x = __builtin_nansd32 ("");
+ asm volatile ("" : : : "memory");
+ if (!f4 (x))
+ __builtin_abort ();
+ if (f5 (y) || !f5 (__builtin_nansd64 ("0x123")) || f5 (42.0DD) || f5 (__builtin_nand64 ("0x234"))
+ || f5 (__builtin_infd64 ()) || f5 (-__builtin_infd64 ()) || f5 (-42.0DD) || f5 (-0.0DD) || f5 (0.0DD))
+ __builtin_abort ();
+ y = __builtin_nansd64 ("");
+ asm volatile ("" : : : "memory");
+ if (!f5 (y))
+ __builtin_abort ();
+ if (f6 (z) || !f6 (__builtin_nansd128 ("0x123")) || f6 (42.0DL) || f6 (__builtin_nand128 ("0x234"))
+ || f6 (__builtin_infd128 ()) || f6 (-__builtin_infd128 ()) || f6 (-42.0DL) || f6 (-0.0DL) || f6 (0.0DL))
+ __builtin_abort ();
+ z = __builtin_nansd128 ("");
+ asm volatile ("" : : : "memory");
+ if (!f6 (z))
+ __builtin_abort ();
+ return 0;
+}
@@ -0,0 +1,13 @@
+/* Test _Float16 __builtin_issignaling. */
+/* { dg-do run } */
+/* { dg-options "" } */
+/* { dg-add-options float16 } */
+/* { dg-add-options ieee } */
+/* { dg-require-effective-target float16_runtime } */
+/* { dg-additional-options "-fsignaling-nans" } */
+/* Workaround for PR57484 on ia32: */
+/* { dg-additional-options "-msse2 -mfpmath=sse" { target { ia32 && sse2_runtime } } } */
+
+#define WIDTH 16
+#define EXT 0
+#include "builtin-issignaling-1.c"
@@ -0,0 +1,13 @@
+/* Test _Float32 __builtin_issignaling. */
+/* { dg-do run } */
+/* { dg-options "" } */
+/* { dg-add-options float32 } */
+/* { dg-add-options ieee } */
+/* { dg-require-effective-target float32_runtime } */
+/* { dg-additional-options "-fsignaling-nans" } */
+/* Workaround for PR57484 on ia32: */
+/* { dg-additional-options "-msse2 -mfpmath=sse" { target { ia32 && sse2_runtime } } } */
+
+#define WIDTH 32
+#define EXT 0
+#include "builtin-issignaling-1.c"
@@ -0,0 +1,13 @@
+/* Test _Float32x __builtin_issignaling. */
+/* { dg-do run } */
+/* { dg-options "" } */
+/* { dg-add-options float32x } */
+/* { dg-add-options ieee } */
+/* { dg-require-effective-target float32x_runtime } */
+/* { dg-additional-options "-fsignaling-nans" } */
+/* Workaround for PR57484 on ia32: */
+/* { dg-additional-options "-msse2 -mfpmath=sse" { target { ia32 && sse2_runtime } } } */
+
+#define WIDTH 32
+#define EXT 1
+#include "builtin-issignaling-1.c"
@@ -0,0 +1,13 @@
+/* Test _Float64 __builtin_issignaling. */
+/* { dg-do run } */
+/* { dg-options "" } */
+/* { dg-add-options float64 } */
+/* { dg-add-options ieee } */
+/* { dg-require-effective-target float64_runtime } */
+/* { dg-additional-options "-fsignaling-nans" } */
+/* Workaround for PR57484 on ia32: */
+/* { dg-additional-options "-msse2 -mfpmath=sse" { target { ia32 && sse2_runtime } } } */
+
+#define WIDTH 64
+#define EXT 0
+#include "builtin-issignaling-1.c"
@@ -0,0 +1,13 @@
+/* Test _Float64x __builtin_issignaling. */
+/* { dg-do run } */
+/* { dg-options "" } */
+/* { dg-add-options float64x } */
+/* { dg-add-options ieee } */
+/* { dg-require-effective-target float64x_runtime } */
+/* { dg-additional-options "-fsignaling-nans" } */
+/* Workaround for PR57484 on ia32: */
+/* { dg-additional-options "-msse2 -mfpmath=sse" { target { ia32 && sse2_runtime } } } */
+
+#define WIDTH 64
+#define EXT 1
+#include "builtin-issignaling-1.c"
@@ -0,0 +1,13 @@
+/* Test _Float128 __builtin_issignaling. */
+/* { dg-do run } */
+/* { dg-options "" } */
+/* { dg-add-options float128 } */
+/* { dg-add-options ieee } */
+/* { dg-require-effective-target float128_runtime } */
+/* { dg-additional-options "-fsignaling-nans" } */
+/* Workaround for PR57484 on ia32: */
+/* { dg-additional-options "-msse2 -mfpmath=sse" { target { ia32 && sse2_runtime } } } */
+
+#define WIDTH 128
+#define EXT 0
+#include "builtin-issignaling-1.c"
@@ -0,0 +1,13 @@
+/* Test _Float128x __builtin_issignaling. */
+/* { dg-do run } */
+/* { dg-options "" } */
+/* { dg-add-options float128x } */
+/* { dg-add-options ieee } */
+/* { dg-require-effective-target float128x_runtime } */
+/* { dg-additional-options "-fsignaling-nans" } */
+/* Workaround for PR57484 on ia32: */
+/* { dg-additional-options "-msse2 -mfpmath=sse" { target { ia32 && sse2_runtime } } } */
+
+#define WIDTH 128
+#define EXT 1
+#include "builtin-issignaling-1.c"
@@ -0,0 +1,80 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -fsignaling-nans" } */
+
+#if __LDBL_MANT_DIG__ == 64
+union U { struct { unsigned long long m; unsigned short e; } p; long double l; };
+union U zero = { { 0, 0 } };
+union U mzero = { { 0, 0x8000 } };
+union U denorm = { { 42, 0 } };
+union U mdenorm = { { 42, 0x8000 } };
+union U pseudodenorm = { { 0x8000000000000000ULL, 0 } };
+union U mpseudodenorm = { { 0x8000000000000000ULL, 0x8000 } };
+union U pseudodenorm1 = { { 0x8000000000000042ULL, 0 } };
+union U mpseudodenorm1 = { { 0x8000000000000042ULL, 0x8000 } };
+union U pseudoinf = { { 0, 0x7fff } };
+union U mpseudoinf = { { 0, 0xffff } };
+union U pseudonan = { { 42, 0x7fff } };
+union U mpseudonan = { { 42, 0xffff } };
+union U pseudonan1 = { { 0x4000000000000000ULL, 0x7fff } };
+union U mpseudonan1 = { { 0x4000000000000000ULL, 0xffff } };
+union U pseudonan2 = { { 0x4000000000000042ULL, 0x7fff } };
+union U mpseudonan2 = { { 0x4000000000000042ULL, 0xffff } };
+union U inf = { { 0x8000000000000000ULL, 0x7fff } };
+union U minf = { { 0x8000000000000000ULL, 0xffff } };
+union U snan = { { 0x8000000000000042ULL, 0x7fff } };
+union U msnan = { { 0x8000000000000042ULL, 0xffff } };
+union U indefinite = { { 0xc000000000000000ULL, 0x7fff } };
+union U mindefinite = { { 0xc000000000000000ULL, 0xffff } };
+union U qnan = { { 0xc000000000000042ULL, 0x7fff } };
+union U mqnan = { { 0xc000000000000042ULL, 0xffff } };
+union U unnormal = { { 0, 0x42 } };
+union U munnormal = { { 0, 0x8042 } };
+union U unnormal1 = { { 42, 0x42 } };
+union U munnormal1 = { { 42, 0x8042 } };
+union U normal = { { 0x8000000000000000ULL, 0x42 } };
+union U mnormal = { { 0x8000000000000000ULL, 0x8042 } };
+union U normal1 = { { 0x8000000000000042ULL, 0x42 } };
+union U mnormal1 = { { 0x8000000000000042ULL, 0x8042 } };
+#endif
+
+int
+main ()
+{
+#if __LDBL_MANT_DIG__ == 64
+ asm volatile ("" : : : "memory");
+ if (__builtin_issignaling (zero.l)
+ || __builtin_issignaling (mzero.l)
+ || __builtin_issignaling (denorm.l)
+ || __builtin_issignaling (mdenorm.l)
+ || __builtin_issignaling (pseudodenorm.l)
+ || __builtin_issignaling (mpseudodenorm.l)
+ || __builtin_issignaling (pseudodenorm1.l)
+ || __builtin_issignaling (mpseudodenorm1.l)
+ || !__builtin_issignaling (pseudoinf.l)
+ || !__builtin_issignaling (mpseudoinf.l)
+ || !__builtin_issignaling (pseudonan.l)
+ || !__builtin_issignaling (mpseudonan.l)
+ || !__builtin_issignaling (pseudonan1.l)
+ || !__builtin_issignaling (mpseudonan1.l)
+ || !__builtin_issignaling (pseudonan2.l)
+ || !__builtin_issignaling (mpseudonan2.l)
+ || __builtin_issignaling (inf.l)
+ || __builtin_issignaling (minf.l)
+ || !__builtin_issignaling (snan.l)
+ || !__builtin_issignaling (msnan.l)
+ || __builtin_issignaling (indefinite.l)
+ || __builtin_issignaling (mindefinite.l)
+ || __builtin_issignaling (qnan.l)
+ || __builtin_issignaling (mqnan.l)
+ || !__builtin_issignaling (unnormal.l)
+ || !__builtin_issignaling (munnormal.l)
+ || !__builtin_issignaling (unnormal1.l)
+ || !__builtin_issignaling (munnormal1.l)
+ || __builtin_issignaling (normal.l)
+ || __builtin_issignaling (mnormal.l)
+ || __builtin_issignaling (normal1.l)
+ || __builtin_issignaling (mnormal1.l))
+ __builtin_abort ();
+#endif
+ return 0;
+}