@@ -3,7 +3,7 @@ softfp_int_modes := si di ti
softfp_extensions := sftf dftf hftf bfsf
softfp_truncations := tfsf tfdf tfhf tfbf dfbf sfbf hfbf
softfp_exclude_libgcc2 := n
-softfp_extras := fixhfti fixunshfti floattihf floatuntihf \
+softfp_extras += fixhfti fixunshfti floattihf floatuntihf \
floatdibf floatundibf floattibf floatuntibf
TARGET_LIBGCC2_CFLAGS += -Wno-missing-prototypes
@@ -1,4 +1,4 @@
-softfp_extras := fixhfti fixunshfti floattihf floatuntihf \
+softfp_extras += fixhfti fixunshfti floattihf floatuntihf \
floattibf floatuntibf
CFLAGS-fixhfti.c += -msse2
@@ -226,3 +226,13 @@ GCC_13.0.0 {
__truncxfbf2
__trunchfbf2
}
+
+%inherit GCC_14.0.0 GCC_13.0.0
+GCC_14.0.0 {
+ __PFX__fixxfbitint
+ __PFX__fixtfbitint
+ __PFX__floatbitintbf
+ __PFX__floatbitinthf
+ __PFX__floatbitintxf
+ __PFX__floatbitinttf
+}
@@ -10,7 +10,7 @@ softfp_extensions := hfsf hfdf hftf hfxf
softfp_truncations := tfhf xfhf dfhf sfhf tfsf dfsf tfdf tfxf \
tfbf xfbf dfbf sfbf hfbf
-softfp_extras += eqhf2
+softfp_extras += eqhf2 fixxfbitint $(foreach m,hf bf xf,floatbitint$(m))
CFLAGS-extendhfsf2.c += -msse2
CFLAGS-extendhfdf2.c += -msse2
@@ -28,6 +28,9 @@ CFLAGS-truncxfbf2.c += -msse2
CFLAGS-trunctfbf2.c += -msse2
CFLAGS-trunchfbf2.c += -msse2
+CFLAGS-floatbitintbf.c += -msse2
+CFLAGS-floatbitinthf.c += -msse2
+
CFLAGS-eqhf2.c += -msse2
CFLAGS-_divhc3.c += -msse2
CFLAGS-_mulhc3.c += -msse2
@@ -13,7 +13,7 @@ softfp_extensions := sftf dftf
softfp_truncations := tfsf tfdf
# Enable divide routines to make -mno-fdiv work.
-softfp_extras := divsf3 divdf3
+softfp_extras += divsf3 divdf3
else
# !ABI_DOUBLE
@@ -28,7 +28,7 @@ else
# ABI_SINGLE
# Enable divide routines to make -mno-fdiv work.
-softfp_extras := divsf3
+softfp_extras += divsf3
endif
@@ -38,7 +38,7 @@ else
# ABI_QUAD
# Enable divide routines to make -mno-fdiv work.
-softfp_extras := divsf3 divdf3 divtf3
+softfp_extras += divsf3 divdf3 divtf3
endif
@@ -29,4 +29,4 @@ softfp_int_modes := si di
softfp_extensions := sfdf
softfp_truncations := dfsf
softfp_exclude_libgcc2 := n
-softfp_extras := unordsf2
+softfp_extras += unordsf2
@@ -23,4 +23,4 @@ softfp_int_modes :=
softfp_extensions :=
softfp_truncations :=
softfp_exclude_libgcc2 := n
-softfp_extras := unordsf2 unorddf2
+softfp_extras += unordsf2 unorddf2
@@ -64,12 +64,14 @@ softfp_float_funcs = add$(m)3 div$(m)3 e
neg$(m)2 sub$(m)3 unord$(m)2
softfp_floatint_funcs = fix$(m)$(i) fixuns$(m)$(i) \
float$(i)$(m) floatun$(i)$(m)
+softfp_floatbitint_funcs = fix$(m)bitint floatbitint$(m)
softfp_func_list := \
$(foreach m,$(softfp_float_modes), \
$(softfp_float_funcs) \
$(foreach i,$(softfp_int_modes), \
$(softfp_floatint_funcs))) \
+ $(foreach m,sf df,$(softfp_floatbitint_funcs)) \
$(foreach e,$(softfp_extensions),extend$(e)2) \
$(foreach t,$(softfp_truncations),trunc$(t)2) \
$(softfp_extras)
@@ -2,4 +2,5 @@ softfp_float_modes := sf df tf
softfp_int_modes := si di
softfp_extensions := sfdf sftf dftf xftf
softfp_truncations := dfsf tfsf tfdf tfxf
+softfp_extras += fixtfbitint floatbitinttf
softfp_exclude_libgcc2 := n
@@ -2,4 +2,5 @@ softfp_float_modes := tf
softfp_int_modes := si di ti
softfp_extensions := sftf dftf xftf
softfp_truncations := tfsf tfdf tfxf
+softfp_extras += fixtfbitint floatbitinttf
softfp_exclude_libgcc2 := n
@@ -1301,6 +1301,687 @@ __udivdi3 (UDWtype n, UDWtype d)
}
#endif
+#if (defined(__BITINT_MAXWIDTH__) \
+ && (defined(L_mulbitint3) || defined(L_divmodbitint4)))
+/* _BitInt support. */
+
+/* If *P is zero- or sign-extended (the latter only for PREC < 0) from
+ some narrower _BitInt value, reduce the precision. */
+
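+/* Note (summarizing the code below): the reduced precision is returned with
+ the same convention as PREC, i.e. positive for a non-negative value
+ (1 if the value is zero) and negative for a negative value (-2 if the
+ value is -1), so callers can test the sign of the result to choose
+ between the unsigned and the sign-extending paths. */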
+static inline __attribute__((__always_inline__)) SItype
+bitint_reduce_prec (const UWtype **p, SItype prec)
+{
+ UWtype mslimb;
+ SItype i;
+ if (prec < 0)
+ {
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ i = 0;
+#else
+ i = ((USItype) -1 - prec) / W_TYPE_SIZE;
+#endif
+ mslimb = (*p)[i];
+ if (mslimb & ((UWtype) 1 << (((USItype) -1 - prec) % W_TYPE_SIZE)))
+ {
+ SItype n = ((USItype) -prec) % W_TYPE_SIZE;
+ if (n)
+ {
+ mslimb |= ((UWtype) -1 << (((USItype) -1 - prec) % W_TYPE_SIZE));
+ if (mslimb == (UWtype) -1)
+ {
+ prec += n;
+ if (prec >= -1)
+ return -2;
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ ++p;
+#else
+ --i;
+#endif
+ mslimb = (*p)[i];
+ n = 0;
+ }
+ }
+ while (mslimb == (UWtype) -1)
+ {
+ prec += W_TYPE_SIZE;
+ if (prec >= -1)
+ return -2;
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ ++p;
+#else
+ --i;
+#endif
+ mslimb = (*p)[i];
+ }
+ if (n == 0)
+ {
+ if ((Wtype) mslimb >= 0)
+ {
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ --p;
+#endif
+ return prec - 1;
+ }
+ }
+ return prec;
+ }
+ else
+ prec = -prec;
+ }
+ else
+ {
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ i = 0;
+#else
+ i = ((USItype) prec - 1) / W_TYPE_SIZE;
+#endif
+ mslimb = (*p)[i];
+ }
+ SItype n = ((USItype) prec) % W_TYPE_SIZE;
+ if (n)
+ {
+ mslimb &= ((UWtype) 1 << (((USItype) prec) % W_TYPE_SIZE)) - 1;
+ if (mslimb == 0)
+ {
+ prec -= n;
+ if (prec == 0)
+ return 1;
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ ++p;
+#else
+ --i;
+#endif
+ mslimb = (*p)[i];
+ }
+ }
+ while (mslimb == 0)
+ {
+ prec -= W_TYPE_SIZE;
+ if (prec == 0)
+ return 1;
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ ++p;
+#else
+ --i;
+#endif
+ mslimb = (*p)[i];
+ }
+ return prec;
+}
+
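+/* BITINT_INC is the index increment that moves from one limb to the next
+ more significant one, and BITINT_END (BE, LE) picks its first or second
+ argument depending on whether the limb order is big- or little-endian. */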
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+# define BITINT_INC -1
+# define BITINT_END(be, le) (be)
+#else
+# define BITINT_INC 1
+# define BITINT_END(be, le) (le)
+#endif
+
+#ifdef L_mulbitint3
+/* D = S * L. */
+
+static UWtype
+bitint_mul_1 (UWtype *d, const UWtype *s, UWtype l, SItype n)
+{
+ UWtype sv, hi, lo, c = 0;
+ do
+ {
+ sv = *s;
+ s += BITINT_INC;
+ umul_ppmm (hi, lo, sv, l);
+ c = __builtin_add_overflow (lo, c, &lo) + hi;
+ *d = lo;
+ d += BITINT_INC;
+ }
+ while (--n);
+ return c;
+}
+
+/* D += S * L. */
+
+static UWtype
+bitint_addmul_1 (UWtype *d, const UWtype *s, UWtype l, SItype n)
+{
+ UWtype sv, hi, lo, c = 0;
+ do
+ {
+ sv = *s;
+ s += BITINT_INC;
+ umul_ppmm (hi, lo, sv, l);
+ hi += __builtin_add_overflow (lo, *d, &lo);
+ c = __builtin_add_overflow (lo, c, &lo) + hi;
+ *d = lo;
+ d += BITINT_INC;
+ }
+ while (--n);
+ return c;
+}
+
+/* If XPREC is positive, it is the precision in bits
+ of an unsigned _BitInt operand (which has XPREC / W_TYPE_SIZE
+ full limbs and, if XPREC % W_TYPE_SIZE is non-zero, one partial limb).
+ If XPREC is negative, -XPREC is the precision in bits
+ of a signed _BitInt operand. RETPREC should always be
+ positive. */
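+/* For example, multiplying a signed _BitInt(135) by an unsigned _BitInt(64)
+ into a signed _BitInt(270) result would be lowered to roughly
+ __mulbitint3 (ret_limbs, 270, u_limbs, -135, v_limbs, 64);
+ (a hypothetical illustration of the calling convention, not actual
+ compiler output). */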
+
+void
+__mulbitint3 (UWtype *ret, SItype retprec,
+ const UWtype *u, SItype uprec,
+ const UWtype *v, SItype vprec)
+{
+ uprec = bitint_reduce_prec (&u, uprec);
+ vprec = bitint_reduce_prec (&v, vprec);
+ USItype auprec = uprec < 0 ? -uprec : uprec;
+ USItype avprec = vprec < 0 ? -vprec : vprec;
+
+ /* Prefer non-negative U.
+ Otherwise make sure V doesn't have higher precision than U. */
+ if ((uprec < 0 && vprec >= 0)
+ || (avprec > auprec && !(uprec >= 0 && vprec < 0)))
+ {
+ SItype p;
+ const UWtype *t;
+ p = uprec; uprec = vprec; vprec = p;
+ p = auprec; auprec = avprec; avprec = p;
+ t = u; u = v; v = t;
+ }
+
+ USItype un = auprec / W_TYPE_SIZE;
+ USItype un2 = (auprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
+ USItype vn = avprec / W_TYPE_SIZE;
+ USItype vn2 = (avprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
+ USItype retn = ((USItype) retprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
+ USItype retidx, uidx, vidx;
+ UWtype vv;
+ /* Indexes of least significant limb. */
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ retidx = retn - 1;
+ uidx = un2 - 1;
+ vidx = vn2 - 1;
+#else
+ retidx = 0;
+ uidx = 0;
+ vidx = 0;
+#endif
+ if (__builtin_expect (auprec <= W_TYPE_SIZE, 0) && vprec < 0)
+ {
+ UWtype uu = u[uidx];
+ if (__builtin_expect (auprec < W_TYPE_SIZE, 0))
+ uu &= ((UWtype) 1 << (auprec % W_TYPE_SIZE)) - 1;
+ if (uu == 0)
+ {
+ /* 0 * negative would otherwise be mishandled below, so
+ handle it specially. */
+ __builtin_memset (ret, 0, retn * sizeof (UWtype));
+ return;
+ }
+ }
+ vv = v[vidx];
+ if (__builtin_expect (avprec < W_TYPE_SIZE, 0))
+ {
+ if (vprec > 0)
+ vv &= ((UWtype) 1 << (avprec % W_TYPE_SIZE)) - 1;
+ else
+ vv |= (UWtype) -1 << (avprec % W_TYPE_SIZE);
+ }
+
+ USItype n = un > retn ? retn : un;
+ USItype n2 = n;
+ USItype retidx2 = retidx + n * BITINT_INC;
+ UWtype c = 0, uv = 0;
+ if (n)
+ c = bitint_mul_1 (ret + retidx, u + uidx, vv, n);
+ if (retn > un && un2 != un)
+ {
+ UWtype hi, lo;
+ uv = u[uidx + n * BITINT_INC];
+ if (uprec > 0)
+ uv &= ((UWtype) 1 << (auprec % W_TYPE_SIZE)) - 1;
+ else
+ uv |= (UWtype) -1 << (auprec % W_TYPE_SIZE);
+ umul_ppmm (hi, lo, uv, vv);
+ c = __builtin_add_overflow (lo, c, &lo) + hi;
+ ret[retidx2] = lo;
+ retidx2 += BITINT_INC;
+ ++n2;
+ }
+ if (retn > un2)
+ {
+ if (uprec < 0)
+ {
+ while (n2 < retn)
+ {
+ if (n2 >= un2 + vn2)
+ break;
+ UWtype hi, lo;
+ umul_ppmm (hi, lo, (UWtype) -1, vv);
+ c = __builtin_add_overflow (lo, c, &lo) + hi;
+ ret[retidx2] = lo;
+ retidx2 += BITINT_INC;
+ ++n2;
+ }
+ }
+ else
+ {
+ ret[retidx2] = c;
+ retidx2 += BITINT_INC;
+ ++n2;
+ }
+ /* If RET has more limbs than U after precision reduction,
+ fill in the remaining limbs. */
+ while (n2 < retn)
+ {
+ if (n2 < un2 + vn2 || (uprec ^ vprec) >= 0)
+ c = 0;
+ else
+ c = (UWtype) -1;
+ ret[retidx2] = c;
+ retidx2 += BITINT_INC;
+ ++n2;
+ }
+ }
+ /* N is now the number of possibly non-zero limbs in RET (ignoring
+ limbs above UN2 + VN2 which, if any, have already been finalized). */
+ USItype end = vprec < 0 ? un2 + vn2 : vn2;
+ if (retn > un2 + vn2)
+ retn = un2 + vn2;
+ if (end > retn)
+ end = retn;
+ for (USItype m = 1; m < end; ++m)
+ {
+ retidx += BITINT_INC;
+ vidx += BITINT_INC;
+ if (m < vn2)
+ {
+ vv = v[vidx];
+ if (__builtin_expect (m == vn, 0))
+ {
+ if (vprec > 0)
+ vv &= ((UWtype) 1 << (avprec % W_TYPE_SIZE)) - 1;
+ else
+ vv |= (UWtype) -1 << (avprec % W_TYPE_SIZE);
+ }
+ }
+ else
+ vv = (UWtype) -1;
+ if (m + n > retn)
+ n = retn - m;
+ c = 0;
+ if (n)
+ c = bitint_addmul_1 (ret + retidx, u + uidx, vv, n);
+ n2 = m + n;
+ retidx2 = retidx + n * BITINT_INC;
+ if (n2 < retn && un2 != un)
+ {
+ UWtype hi, lo;
+ umul_ppmm (hi, lo, uv, vv);
+ hi += __builtin_add_overflow (lo, ret[retidx2], &lo);
+ c = __builtin_add_overflow (lo, c, &lo) + hi;
+ ret[retidx2] = lo;
+ retidx2 += BITINT_INC;
+ ++n2;
+ }
+ if (uprec < 0)
+ while (n2 < retn)
+ {
+ UWtype hi, lo;
+ umul_ppmm (hi, lo, (UWtype) -1, vv);
+ hi += __builtin_add_overflow (lo, ret[retidx2], &lo);
+ c = __builtin_add_overflow (lo, c, &lo) + hi;
+ ret[retidx2] = lo;
+ retidx2 += BITINT_INC;
+ ++n2;
+ }
+ else if (n2 < retn)
+ {
+ ret[retidx2] = c;
+ retidx2 += BITINT_INC;
+ }
+ }
+}
+#endif
+
+#ifdef L_divmodbitint4
+static void
+bitint_negate (UWtype *d, const UWtype *s, SItype n)
+{
+ UWtype c = 1;
+ do
+ {
+ UWtype sv = *s, lo;
+ s += BITINT_INC;
+ c = __builtin_add_overflow (~sv, c, &lo);
+ *d = lo;
+ d += BITINT_INC;
+ }
+ while (--n);
+}
+
+/* D -= S * L. */
+
+static UWtype
+bitint_submul_1 (UWtype *d, const UWtype *s, UWtype l, SItype n)
+{
+ UWtype sv, hi, lo, c = 0;
+ do
+ {
+ sv = *s;
+ s += BITINT_INC;
+ umul_ppmm (hi, lo, sv, l);
+ hi += __builtin_sub_overflow (*d, lo, &lo);
+ c = __builtin_sub_overflow (lo, c, &lo) + hi;
+ *d = lo;
+ d += BITINT_INC;
+ }
+ while (--n);
+ return c;
+}
+
+/* If XPREC is positive, it is the precision in bits
+ of an unsigned _BitInt operand (which has XPREC / W_TYPE_SIZE
+ full limbs and, if XPREC % W_TYPE_SIZE is non-zero, one partial limb).
+ If XPREC is negative, -XPREC is the precision in bits
+ of a signed _BitInt operand. QPREC and RPREC should always
+ be non-negative. If either Q or R is NULL (at least one of them
+ should be non-NULL), then the corresponding QPREC or RPREC
+ should be 0. */
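+/* For example, a division of a signed _BitInt(192) by a signed _BitInt(128)
+ requesting both quotient and remainder would be lowered to roughly
+ __divmodbitint4 (q_limbs, 192, r_limbs, 128, u_limbs, -192, v_limbs, -128);
+ (a hypothetical illustration of the calling convention, not actual
+ compiler output). */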
+
+void
+__divmodbitint4 (UWtype *q, SItype qprec,
+ UWtype *r, SItype rprec,
+ const UWtype *u, SItype uprec,
+ const UWtype *v, SItype vprec)
+{
+ uprec = bitint_reduce_prec (&u, uprec);
+ vprec = bitint_reduce_prec (&v, vprec);
+ USItype auprec = uprec < 0 ? -uprec : uprec;
+ USItype avprec = vprec < 0 ? -vprec : vprec;
+ USItype un = (auprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
+ USItype vn = (avprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
+ USItype qn = ((USItype) qprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
+ USItype rn = ((USItype) rprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
+ USItype up = auprec % W_TYPE_SIZE;
+ USItype vp = avprec % W_TYPE_SIZE;
+ if (__builtin_expect (un < vn, 0))
+ {
+ /* If abs(v) > abs(u), then q is 0 and r is u. */
+ if (q)
+ __builtin_memset (q, 0, qn * sizeof (UWtype));
+ if (r == NULL)
+ return;
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ r += rn - 1;
+ u += un - 1;
+#endif
+ if (up)
+ --un;
+ if (rn < un)
+ un = rn;
+ for (rn -= un; un; --un)
+ {
+ *r = *u;
+ r += BITINT_INC;
+ u += BITINT_INC;
+ }
+ if (!rn)
+ return;
+ if (up)
+ {
+ if (uprec > 0)
+ *r = *u & (((UWtype) 1 << up) - 1);
+ else
+ *r = *u | ((UWtype) -1 << up);
+ r += BITINT_INC;
+ if (!--rn)
+ return;
+ }
+ UWtype c = uprec < 0 ? (UWtype) -1 : (UWtype) 0;
+ for (; rn; --rn)
+ {
+ *r = c;
+ r += BITINT_INC;
+ }
+ return;
+ }
+ USItype qn2 = un - vn + 1;
+ if (qn >= qn2)
+ qn2 = 0;
+ USItype sz = un + 1 + vn + qn2;
+ UWtype *buf = __builtin_alloca (sz * sizeof (UWtype));
+ USItype uidx, vidx;
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ uidx = un - 1;
+ vidx = vn - 1;
+#else
+ uidx = 0;
+ vidx = 0;
+#endif
+ if (uprec < 0)
+ bitint_negate (buf + BITINT_END (uidx + 1, 0), u + uidx, un);
+ else
+ __builtin_memcpy (buf + BITINT_END (1, 0), u, un * sizeof (UWtype));
+ if (up)
+ buf[BITINT_END (1, un - 1)] &= (((UWtype) 1 << up) - 1);
+ if (vprec < 0)
+ bitint_negate (buf + un + 1 + vidx, v + vidx, vn);
+ else
+ __builtin_memcpy (buf + un + 1, v, vn * sizeof (UWtype));
+ if (vp)
+ buf[un + 1 + BITINT_END (0, vn - 1)] &= (((UWtype) 1 << vp) - 1);
+ UWtype *u2 = buf;
+ UWtype *v2 = u2 + un + 1;
+ UWtype *q2 = v2 + vn;
+ if (!qn2)
+ q2 = q + BITINT_END (qn - (un - vn + 1), 0);
+
+ /* Knuth's algorithm. See also ../gcc/wide-int.cc (divmod_internal_2). */
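+ /* In outline: a single-limb divisor is handled directly with udiv_qrnnd;
+ otherwise V2 is normalized so its most significant limb has the top
+ bit set, each quotient limb QHAT is estimated from the two most
+ significant remaining limbs of U2, a multiply-subtract is performed,
+ and QHAT is corrected by adding V2 back at most once. */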
+
+#ifndef UDIV_NEEDS_NORMALIZATION
+ /* Handle single limb divisor first. */
+ if (vn == 1)
+ {
+ UWtype vv = v2[0];
+ if (vv == 0)
+ vv = 1 / vv; /* Divide intentionally by zero. */
+ UWtype k = 0;
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ for (SItype i = 0; i <= un - 1; ++i)
+#else
+ for (SItype i = un - 1; i >= 0; --i)
+#endif
+ udiv_qrnnd (q2[i], k, k, u2[BITINT_END (i + 1, i)], vv);
+ if (r != NULL)
+ r[BITINT_END (rn - 1, 0)] = k;
+ }
+ else
+#endif
+ {
+ SItype s;
+#ifdef UDIV_NEEDS_NORMALIZATION
+ if (vn == 1 && v2[0] == 0)
+ s = 0;
+ else
+#endif
+ if (sizeof (0U) == sizeof (UWtype))
+ s = __builtin_clz (v2[BITINT_END (0, vn - 1)]);
+ else if (sizeof (0UL) == sizeof (UWtype))
+ s = __builtin_clzl (v2[BITINT_END (0, vn - 1)]);
+ else
+ s = __builtin_clzll (v2[BITINT_END (0, vn - 1)]);
+ if (s)
+ {
+ /* Normalize by shifting v2 left so that it has msb set. */
+ const SItype n = sizeof (UWtype) * __CHAR_BIT__;
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ for (SItype i = 0; i < vn - 1; ++i)
+#else
+ for (SItype i = vn - 1; i > 0; --i)
+#endif
+ v2[i] = (v2[i] << s) | (v2[i - BITINT_INC] >> (n - s));
+ v2[vidx] = v2[vidx] << s;
+ /* And shift u2 left by the same amount. */
+ u2[BITINT_END (0, un)] = u2[BITINT_END (1, un - 1)] >> (n - s);
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ for (SItype i = 1; i < un; ++i)
+#else
+ for (SItype i = un - 1; i > 0; --i)
+#endif
+ u2[i] = (u2[i] << s) | (u2[i - BITINT_INC] >> (n - s));
+ u2[BITINT_END (un, 0)] = u2[BITINT_END (un, 0)] << s;
+ }
+ else
+ u2[BITINT_END (0, un)] = 0;
+#ifdef UDIV_NEEDS_NORMALIZATION
+ /* Handle single limb divisor first. */
+ if (vn == 1)
+ {
+ UWtype vv = v2[0];
+ if (vv == 0)
+ vv = 1 / vv; /* Divide intentionally by zero. */
+ UWtype k = u2[BITINT_END (0, un)];
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ for (SItype i = 0; i <= un - 1; ++i)
+#else
+ for (SItype i = un - 1; i >= 0; --i)
+#endif
+ udiv_qrnnd (q2[i], k, k, u2[BITINT_END (i + 1, i)], vv);
+ if (r != NULL)
+ r[BITINT_END (rn - 1, 0)] = k >> s;
+ }
+ else
+#endif
+ {
+ UWtype vv1 = v2[BITINT_END (0, vn - 1)];
+ UWtype vv0 = v2[BITINT_END (1, vn - 2)];
+ /* Main loop. */
+ for (SItype j = un - vn; j >= 0; --j)
+ {
+ /* Compute estimate in qhat. */
+ UWtype uv1 = u2[BITINT_END (un - j - vn, j + vn)];
+ UWtype uv0 = u2[BITINT_END (un - j - vn + 1, j + vn - 1)];
+ UWtype qhat, rhat, hi, lo, c;
+ if (uv1 >= vv1)
+ {
+ /* udiv_qrnnd doesn't support quotients which don't
+ fit into UWtype, so subtract from uv1:uv0 vv1
+ first. */
+ uv1 -= vv1 + __builtin_sub_overflow (uv0, vv1, &uv0);
+ udiv_qrnnd (qhat, rhat, uv1, uv0, vv1);
+ if (!__builtin_add_overflow (rhat, vv1, &rhat))
+ goto again;
+ }
+ else
+ {
+ udiv_qrnnd (qhat, rhat, uv1, uv0, vv1);
+ again:
+ umul_ppmm (hi, lo, qhat, vv0);
+ if (hi > rhat
+ || (hi == rhat
+ && lo > u2[BITINT_END (un - j - vn + 2,
+ j + vn - 2)]))
+ {
+ --qhat;
+ if (!__builtin_add_overflow (rhat, vv1, &rhat))
+ goto again;
+ }
+ }
+
+ c = bitint_submul_1 (u2 + BITINT_END (un - j, j),
+ v2 + BITINT_END (vn - 1, 0), qhat, vn);
+ u2[BITINT_END (un - j - vn, j + vn)] -= c;
+ /* If we've subtracted too much, decrease qhat
+ and add back. */
+ if ((Wtype) u2[BITINT_END (un - j - vn, j + vn)] < 0)
+ {
+ --qhat;
+ c = 0;
+ for (USItype i = 0; i < vn; ++i)
+ {
+ UWtype s = v2[BITINT_END (vn - 1 - i, i)];
+ UWtype d = u2[BITINT_END (un - i - j, i + j)];
+ UWtype c1 = __builtin_add_overflow (d, s, &d);
+ UWtype c2 = __builtin_add_overflow (d, c, &d);
+ c = c1 + c2;
+ u2[BITINT_END (un - i - j, i + j)] = d;
+ }
+ u2[BITINT_END (un - j - vn, j + vn)] += c;
+ }
+ q2[BITINT_END (un - vn - j, j)] = qhat;
+ }
+ if (r != NULL)
+ {
+ if (s)
+ {
+ const SItype n = sizeof (UWtype) * __CHAR_BIT__;
+ /* Unnormalize remainder. */
+ USItype i;
+ for (i = 0; i < vn && i < rn; ++i)
+ r[BITINT_END (rn - 1 - i, i)]
+ = ((u2[BITINT_END (un - i, i)] >> s)
+ | (u2[BITINT_END (un - i - 1, i + 1)] << (n - s)));
+ if (i < rn)
+ r[BITINT_END (rn - vn, vn - 1)]
+ = u2[BITINT_END (un - vn + 1, vn - 1)] >> s;
+ }
+ else if (rn > vn)
+ __builtin_memcpy (&r[BITINT_END (rn - vn, 0)],
+ &u2[BITINT_END (un + 1 - vn, 0)],
+ vn * sizeof (UWtype));
+ else
+ __builtin_memcpy (&r[0], &u2[BITINT_END (un + 1 - rn, 0)],
+ rn * sizeof (UWtype));
+ }
+ }
+ }
+ if (q != NULL)
+ {
+ if ((uprec < 0) ^ (vprec < 0))
+ {
+ /* Negative quotient. */
+ USItype n;
+ if (un - vn + 1 > qn)
+ n = qn;
+ else
+ n = un - vn + 1;
+ bitint_negate (q + BITINT_END (qn - 1, 0),
+ q2 + BITINT_END (un - vn, 0), n);
+ if (qn > n)
+ __builtin_memset (q + BITINT_END (0, n), -1,
+ (qn - n) * sizeof (UWtype));
+ }
+ else
+ {
+ /* Positive quotient. */
+ if (qn2)
+ __builtin_memcpy (q, q2 + BITINT_END (un - vn + 1 - qn, 0),
+ qn * sizeof (UWtype));
+ else if (qn > un - vn + 1)
+ __builtin_memset (q + BITINT_END (0, un - vn + 1), 0,
+ (qn - (un - vn + 1)) * sizeof (UWtype));
+ }
+ }
+ if (r != NULL)
+ {
+ if (uprec < 0)
+ {
+ /* Negative remainder. */
+ bitint_negate (r + BITINT_END (rn - 1, 0),
+ r + BITINT_END (rn - 1, 0),
+ rn > vn ? vn : rn);
+ if (rn > vn)
+ __builtin_memset (r + BITINT_END (0, vn), -1,
+ (rn - vn) * sizeof (UWtype));
+ }
+ else
+ {
+ /* Positive remainder. */
+ if (rn > vn)
+ __builtin_memset (r + BITINT_END (0, vn), 0,
+ (rn - vn) * sizeof (UWtype));
+ }
+ }
+}
+#endif
+#endif
+
#ifdef L_cmpdi2
cmp_return_type
__cmpdi2 (DWtype a, DWtype b)
@@ -181,6 +181,12 @@ typedef int shift_count_type __attribute
#define float bogus_type
#define double bogus_type
+#if (defined(__BITINT_MAXWIDTH__) \
+ && (defined(L_mulbitint3) || defined(L_divmodbitint4)))
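+/* The _BitInt helpers operate on limbs of __LIBGCC_BITINT_LIMB_WIDTH__ bits,
+ so override the word size here so that UWtype matches the limb type. */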
+#undef LIBGCC2_UNITS_PER_WORD
+#define LIBGCC2_UNITS_PER_WORD (__LIBGCC_BITINT_LIMB_WIDTH__ / __CHAR_BIT__)
+#endif
+
/* Versions prior to 3.4.4 were not taking into account the word size for
the 5 trapping arithmetic functions absv, addv, subv, mulv and negv. As
a consequence, the si and di variants were always and the only ones emitted.
@@ -390,6 +396,15 @@ extern DWtype __divmoddi4 (DWtype, DWtyp
extern UDWtype __udivmoddi4 (UDWtype, UDWtype, UDWtype *);
#endif
+#if (defined(__BITINT_MAXWIDTH__) \
+ && (defined(L_mulbitint3) || defined(L_divmodbitint4)))
+/* _BitInt support. */
+extern void __mulbitint3 (UWtype *, SItype, const UWtype *, SItype,
+ const UWtype *, SItype);
+extern void __divmodbitint4 (UWtype *, SItype, UWtype *, SItype,
+ const UWtype *, SItype, const UWtype *, SItype);
+#endif
+
/* __negdi2 is static inline when building other libgcc2 portions. */
#if !defined(L_divdi3) && !defined(L_moddi3)
extern DWtype __negdi2 (DWtype);
@@ -1944,3 +1944,13 @@ GCC_7.0.0 {
__PFX__divmoddi4
__PFX__divmodti4
}
+
+%inherit GCC_14.0.0 GCC_7.0.0
+GCC_14.0.0 {
+ __PFX__mulbitint3
+ __PFX__divmodbitint4
+ __PFX__fixsfbitint
+ __PFX__fixdfbitint
+ __PFX__floatbitintsf
+ __PFX__floatbitintdf
+}
@@ -446,7 +446,7 @@ lib2funcs = _muldi3 _negdi2 _lshrdi3 _as
_paritysi2 _paritydi2 _powisf2 _powidf2 _powixf2 _powitf2 \
_mulhc3 _mulsc3 _muldc3 _mulxc3 _multc3 _divhc3 _divsc3 \
_divdc3 _divxc3 _divtc3 _bswapsi2 _bswapdi2 _clrsbsi2 \
- _clrsbdi2
+ _clrsbdi2 _mulbitint3
# The floating-point conversion routines that involve a single-word integer.
# XX stands for the integer mode.
@@ -466,7 +466,8 @@ endif
# These might cause a divide overflow trap and so are compiled with
# unwinder info.
LIB2_DIVMOD_FUNCS = _divdi3 _moddi3 _divmoddi4 \
- _udivdi3 _umoddi3 _udivmoddi4 _udiv_w_sdiv
+ _udivdi3 _umoddi3 _udivmoddi4 _udiv_w_sdiv \
+ _divmodbitint4
# Remove any objects from lib2funcs and LIB2_DIVMOD_FUNCS that are
# defined as optimized assembly code in LIB1ASMFUNCS or as C code
@@ -0,0 +1,306 @@
+/* Software floating-point emulation.
+ Definitions for _BitInt implementation details.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SOFT_FP_BITINT_H
+#define GCC_SOFT_FP_BITINT_H
+
+#ifdef __BITINT_MAXWIDTH__
+#define BIL_UNITS_PER_WORD (__LIBGCC_BITINT_LIMB_WIDTH__ / __CHAR_BIT__)
+
+#if BIL_UNITS_PER_WORD == 8
+#define BIL_TYPE_SIZE (8 * __CHAR_BIT__)
+#define BILtype DItype
+#define UBILtype UDItype
+#elif BIL_UNITS_PER_WORD == 4
+#define BIL_TYPE_SIZE (4 * __CHAR_BIT__)
+#define BILtype SItype
+#define UBILtype USItype
+#elif BIL_UNITS_PER_WORD == 2
+#define BIL_TYPE_SIZE (2 * __CHAR_BIT__)
+#define BILtype HItype
+#define UBILtype UHItype
+#else
+#define BIL_TYPE_SIZE __CHAR_BIT__
+#define BILtype QItype
+#define UBILtype UQItype
+#endif
+
+/* If *P is zero- or sign-extended (the latter only for PREC < 0) from
+ some narrower _BitInt value, reduce the precision. */
+
+static inline __attribute__((__always_inline__)) SItype
+bitint_reduce_prec (const UBILtype **p, SItype prec)
+{
+ UBILtype mslimb;
+ SItype i;
+ if (prec < 0)
+ {
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ i = 0;
+#else
+ i = ((USItype) -1 - prec) / BIL_TYPE_SIZE;
+#endif
+ mslimb = (*p)[i];
+ if (mslimb & ((UBILtype) 1 << (((USItype) -1 - prec) % BIL_TYPE_SIZE)))
+ {
+ SItype n = ((USItype) -prec) % BIL_TYPE_SIZE;
+ if (n)
+ {
+ mslimb |= ((UBILtype) -1 << (((USItype) -1 - prec) % BIL_TYPE_SIZE));
+ if (mslimb == (UBILtype) -1)
+ {
+ prec += n;
+ if (prec >= -1)
+ return -2;
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ ++p;
+#else
+ --i;
+#endif
+ mslimb = (*p)[i];
+ n = 0;
+ }
+ }
+ while (mslimb == (UBILtype) -1)
+ {
+ prec += BIL_TYPE_SIZE;
+ if (prec >= -1)
+ return -2;
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ ++p;
+#else
+ --i;
+#endif
+ mslimb = (*p)[i];
+ }
+ if (n == 0)
+ {
+ if ((BILtype) mslimb >= 0)
+ {
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ --p;
+#endif
+ return prec - 1;
+ }
+ }
+ return prec;
+ }
+ else
+ prec = -prec;
+ }
+ else
+ {
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ i = 0;
+#else
+ i = ((USItype) prec - 1) / BIL_TYPE_SIZE;
+#endif
+ mslimb = (*p)[i];
+ }
+ SItype n = ((USItype) prec) % BIL_TYPE_SIZE;
+ if (n)
+ {
+ mslimb &= ((UBILtype) 1 << (((USItype) prec) % BIL_TYPE_SIZE)) - 1;
+ if (mslimb == 0)
+ {
+ prec -= n;
+ if (prec == 0)
+ return 1;
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ ++p;
+#else
+ --i;
+#endif
+ mslimb = (*p)[i];
+ }
+ }
+ while (mslimb == 0)
+ {
+ prec -= BIL_TYPE_SIZE;
+ if (prec == 0)
+ return 1;
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+ ++p;
+#else
+ --i;
+#endif
+ mslimb = (*p)[i];
+ }
+ return prec;
+}
+
+#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
+# define BITINT_INC -1
+# define BITINT_END(be, le) (be)
+#else
+# define BITINT_INC 1
+# define BITINT_END(be, le) (le)
+#endif
+
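+/* Store the integer RV (RSIZE significant bits, treated as negative when
+ RSIGNED is set) shifted left by SHIFT bits into the RN-limb _BitInt
+ result R of ARPREC bits, zero- or sign-filling the remaining limbs.
+ If OVF is set, fill R with an out-of-range pattern derived from RV
+ instead. */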
+#define FP_TO_BITINT(r, rn, arprec, shift, rv, rsize, rsigned, ovf, DI) \
+ if (ovf) \
+ { \
+ if ((rv & 1) != 0) \
+ __builtin_memset (r, -1, rn * sizeof (UBILtype)); \
+ else \
+ __builtin_memset (r, 0, rn * sizeof (UBILtype)); \
+ if (rv & (((U##DI##type) 1) << (rsize - 1))) \
+ r[BITINT_END (0, rn - 1)] \
+ |= (UBILtype) -1 << ((arprec - 1) % BIL_TYPE_SIZE); \
+ else \
+ r[BITINT_END (0, rn - 1)] \
+ &= ~((UBILtype) -1 << ((arprec - 1) % BIL_TYPE_SIZE)); \
+ } \
+ else \
+ { \
+ USItype shiftl = shift / BIL_TYPE_SIZE; \
+ rsize = DI##_BITS; \
+ if (rsigned && (DI##type) rv >= 0) \
+ rsigned = 0; \
+ if (shift + DI##_BITS > arprec) \
+ rsize = arprec - shift; \
+ USItype shiftr = shift % BIL_TYPE_SIZE; \
+ if (shiftl) \
+ __builtin_memset (r + BITINT_END (rn - shiftl, 0), 0, \
+ shiftl * sizeof (UBILtype)); \
+ USItype idx = BITINT_END (rn - shiftl - 1, shiftl); \
+ DI##type rvs = rv; \
+ if (shiftr) \
+ { \
+ r[idx] = (rsigned ? (UBILtype) rvs : (UBILtype) rv) << shiftr;\
+ idx += BITINT_INC; \
+ if (rsize > BIL_TYPE_SIZE - shiftr) \
+ { \
+ rv >>= BIL_TYPE_SIZE - shiftr; \
+ rvs >>= BIL_TYPE_SIZE - shiftr; \
+ rsize -= BIL_TYPE_SIZE - shiftr; \
+ } \
+ else \
+ rsize = 0; \
+ } \
+ while (rsize) \
+ { \
+ r[idx] = rsigned ? (UBILtype) rvs : (UBILtype) rv; \
+ idx += BITINT_INC; \
+ if (rsize <= BIL_TYPE_SIZE) \
+ break; \
+ rv >>= (DI##_BITS > BIL_TYPE_SIZE ? BIL_TYPE_SIZE : 0); \
+ rvs >>= (DI##_BITS > BIL_TYPE_SIZE ? BIL_TYPE_SIZE : 0); \
+ rsize -= BIL_TYPE_SIZE; \
+ } \
+ if (idx < rn) \
+ __builtin_memset (r + BITINT_END (0, idx), rsigned ? -1 : 0, \
+ BITINT_END (idx + 1, rn - idx) \
+ * sizeof (UBILtype)); \
+ }
+
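+/* Reduce the precision of the _BitInt operand I (IPREC bits, negative for
+ signed), extract its most significant bits into IV, set SHIFT to the
+ number of bits dropped below IV and OR a sticky bit into IV if any of
+ the dropped bits are non-zero, so that the subsequent rounding is
+ correct. */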
+#define FP_FROM_BITINT(i, iprec, iv, shift, DI) \
+ do \
+ { \
+ iprec = bitint_reduce_prec (&i, iprec); \
+ USItype aiprec = iprec < 0 ? -iprec : iprec; \
+ USItype in = (aiprec + BIL_TYPE_SIZE - 1) / BIL_TYPE_SIZE; \
+ USItype idx = BITINT_END (0, in - 1); \
+ UBILtype msb = i[idx]; \
+ SItype n = 0; \
+ if (aiprec % BIL_TYPE_SIZE) \
+ { \
+ if (iprec > 0) \
+ msb &= ((UBILtype) 1 << (aiprec % BIL_TYPE_SIZE)) - 1; \
+ else \
+ msb |= (UBILtype) -1 << (aiprec % BIL_TYPE_SIZE); \
+ } \
+ if (iprec < 0) \
+ { \
+ n = sizeof (0ULL) * __CHAR_BIT__ + 1 - __builtin_clzll (~msb);\
+ if (BIL_TYPE_SIZE > DI##_BITS && n > DI##_BITS) \
+ { \
+ iv = msb >> (n - DI##_BITS); \
+ shift = n - DI##_BITS; \
+ n = 0; \
+ } \
+ else \
+ { \
+ iv = (BILtype) msb; \
+ n = DI##_BITS - n; \
+ } \
+ } \
+ /* bitint_reduce_prec guarantees that if msb is 0, then the \
+ whole of i must be zero; otherwise it would have reduced \
+ the precision. */ \
+ else if (msb == 0) \
+ iv = 0; \
+ else \
+ { \
+ n = sizeof (0ULL) * __CHAR_BIT__ - __builtin_clzll (msb); \
+ if (BIL_TYPE_SIZE >= DI##_BITS && n >= DI##_BITS) \
+ { \
+ iv = msb >> (n - DI##_BITS + 1); \
+ shift = n - DI##_BITS + 1; \
+ n = 0; \
+ } \
+ else \
+ { \
+ iv = msb; \
+ n = DI##_BITS - 1 - n; \
+ } \
+ } \
+ while (n && BITINT_END (idx < in - 1, idx)) \
+ { \
+ idx -= BITINT_INC; \
+ msb = i[idx]; \
+ if (BIL_TYPE_SIZE < DI##_BITS && n >= BIL_TYPE_SIZE) \
+ { \
+ iv = (U##DI##type) iv << (BIL_TYPE_SIZE < DI##_BITS \
+ ? BIL_TYPE_SIZE : 0); \
+ iv |= msb; \
+ n -= BIL_TYPE_SIZE; \
+ } \
+ else \
+ { \
+ iv = (U##DI##type) iv << n; \
+ iv |= msb >> (BIL_TYPE_SIZE - n); \
+ shift = BIL_TYPE_SIZE - n; \
+ break; \
+ } \
+ } \
+ \
+ UBILtype low_bits = 0; \
+ if (shift) \
+ low_bits = msb & (((UBILtype) 1 << shift) - 1); \
+ shift += BITINT_END (in - 1 - idx, idx) * BIL_TYPE_SIZE; \
+ while (!low_bits && BITINT_END (idx < in - 1, idx)) \
+ { \
+ idx -= BITINT_INC; \
+ low_bits |= i[idx]; \
+ } \
+ iv |= (low_bits != 0); \
+ } \
+ while (0)
+
+#endif /* __BITINT_MAXWIDTH__ */
+
+#endif /* GCC_SOFT_FP_BITINT_H */
@@ -0,0 +1,71 @@
+/* Software floating-point emulation.
+ Convert IEEE double to signed or unsigned _BitInt.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "soft-fp.h"
+#include "double.h"
+#include "bitint.h"
+
+#ifdef __BITINT_MAXWIDTH__
+void
+__fixdfbitint (UBILtype *r, SItype rprec, DFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_D (A);
+ USItype arprec = rprec < 0 ? -rprec : rprec;
+ USItype rn = ((USItype) arprec + BIL_TYPE_SIZE - 1) / BIL_TYPE_SIZE;
+ UDItype rv;
+ USItype rsize = arprec > DI_BITS ? DI_BITS : arprec;
+ USItype rsigned = rprec < 0;
+ USItype ovf = 0;
+ USItype shift = 0;
+
+ FP_INIT_EXCEPTIONS;
+ FP_UNPACK_RAW_D (A, a);
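+ /* When the result is wider than DItype, convert only the most
+ significant bits: lower the exponent by SHIFT so that FP_TO_INT_D
+ yields the top bits and FP_TO_BITINT can shift them back into place
+ (the dropped low bits of such a large value are all zero, as DFmode
+ has only 53 mantissa bits). */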
+ if (arprec > DI_BITS)
+ {
+ if (A_e < _FP_EXPBIAS_D || (A_s && !rsigned))
+ ovf = 1;
+ else if (A_e >= (_FP_EXPMAX_D < _FP_EXPBIAS_D + arprec
+ ? _FP_EXPMAX_D
+ : _FP_EXPBIAS_D + arprec - rsigned))
+ {
+ ovf = 1;
+ if (A_s
+ && A_e == _FP_EXPBIAS_D + arprec - 1
+ && A_e < _FP_EXPMAX_D)
+ A_e -= arprec - DI_BITS;
+ }
+ else if (A_e >= _FP_EXPBIAS_D + DI_BITS - rsigned)
+ {
+ shift = A_e - (_FP_EXPBIAS_D + DI_BITS - rsigned - 1);
+ A_e -= shift;
+ }
+ }
+ FP_TO_INT_D (rv, A, rsize, rsigned);
+ FP_HANDLE_EXCEPTIONS;
+ FP_TO_BITINT (r, rn, arprec, shift, rv, rsize, rsigned, ovf, DI);
+}
+#endif
@@ -0,0 +1,71 @@
+/* Software floating-point emulation.
+ Convert IEEE single to signed or unsigned _BitInt.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "soft-fp.h"
+#include "single.h"
+#include "bitint.h"
+
+#ifdef __BITINT_MAXWIDTH__
+void
+__fixsfbitint (UBILtype *r, SItype rprec, SFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_S (A);
+ USItype arprec = rprec < 0 ? -rprec : rprec;
+ USItype rn = ((USItype) arprec + BIL_TYPE_SIZE - 1) / BIL_TYPE_SIZE;
+ USItype rv;
+ USItype rsize = arprec > SI_BITS ? SI_BITS : arprec;
+ USItype rsigned = rprec < 0;
+ USItype ovf = 0;
+ USItype shift = 0;
+
+ FP_INIT_EXCEPTIONS;
+ FP_UNPACK_RAW_S (A, a);
+ if (arprec > SI_BITS)
+ {
+ if (A_e < _FP_EXPBIAS_S || (A_s && !rsigned))
+ ovf = 1;
+ else if (A_e >= (_FP_EXPMAX_S < _FP_EXPBIAS_S + arprec
+ ? _FP_EXPMAX_S
+ : _FP_EXPBIAS_S + arprec - rsigned))
+ {
+ ovf = 1;
+ if (A_s
+ && A_e == _FP_EXPBIAS_S + arprec - 1
+ && A_e < _FP_EXPMAX_S)
+ A_e -= arprec - SI_BITS;
+ }
+ else if (A_e >= _FP_EXPBIAS_S + SI_BITS - rsigned)
+ {
+ shift = A_e - (_FP_EXPBIAS_S + SI_BITS - rsigned - 1);
+ A_e -= shift;
+ }
+ }
+ FP_TO_INT_S (rv, A, rsize, rsigned);
+ FP_HANDLE_EXCEPTIONS;
+ FP_TO_BITINT (r, rn, arprec, shift, rv, rsize, rsigned, ovf, SI);
+}
+#endif
@@ -0,0 +1,81 @@
+/* Software floating-point emulation.
+ Convert IEEE quad to signed or unsigned _BitInt.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "soft-fp.h"
+#include "quad.h"
+#include "bitint.h"
+
+#ifdef __BITINT_MAXWIDTH__
+
+#ifndef TI_BITS
+/* As the mantissa is 112 bits plus 1 implicit bit, we need a 128-bit
+ type, but on most 32-bit architectures TImode isn't supported.
+ Use _BitInt(128) instead. */
+typedef _BitInt(128) TItype;
+typedef unsigned _BitInt(128) UTItype;
+#define TI_BITS 128
+#endif
+
+void
+__fixtfbitint (UBILtype *r, SItype rprec, TFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_Q (A);
+ USItype arprec = rprec < 0 ? -rprec : rprec;
+ USItype rn = ((USItype) arprec + BIL_TYPE_SIZE - 1) / BIL_TYPE_SIZE;
+ UTItype rv;
+ USItype rsize = arprec > TI_BITS ? TI_BITS : arprec;
+ USItype rsigned = rprec < 0;
+ USItype ovf = 0;
+ USItype shift = 0;
+
+ FP_INIT_EXCEPTIONS;
+ FP_UNPACK_RAW_Q (A, a);
+ if (arprec > TI_BITS)
+ {
+ if (A_e < _FP_EXPBIAS_Q || (A_s && !rsigned))
+ ovf = 1;
+ else if (A_e >= (_FP_EXPMAX_Q < _FP_EXPBIAS_Q + arprec
+ ? _FP_EXPMAX_Q
+ : _FP_EXPBIAS_Q + arprec - rsigned))
+ {
+ ovf = 1;
+ if (A_s
+ && A_e == _FP_EXPBIAS_Q + arprec - 1
+ && A_e < _FP_EXPMAX_Q)
+ A_e -= arprec - TI_BITS;
+ }
+ else if (A_e >= _FP_EXPBIAS_Q + TI_BITS - rsigned)
+ {
+ shift = A_e - (_FP_EXPBIAS_Q + TI_BITS - rsigned - 1);
+ A_e -= shift;
+ }
+ }
+ FP_TO_INT_Q (rv, A, rsize, rsigned);
+ FP_HANDLE_EXCEPTIONS;
+ FP_TO_BITINT (r, rn, arprec, shift, rv, rsize, rsigned, ovf, TI);
+}
+#endif
@@ -0,0 +1,82 @@
+/* Software floating-point emulation.
+ Convert IEEE extended to signed or unsigned _BitInt.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "soft-fp.h"
+#include "extended.h"
+#include "bitint.h"
+
+#ifdef __BITINT_MAXWIDTH__
+
+#ifndef TI_BITS
+/* While the mantissa is 64 bits (including 1 explicit bit), extended.h uses
+ op-2.h for W_TYPE_SIZE 64 and op-4.h for W_TYPE_SIZE 32, so we have
+ to use a 128-bit type here. On most 32-bit architectures TImode isn't
+ supported, so use _BitInt(128) instead. */
+typedef _BitInt(128) TItype;
+typedef unsigned _BitInt(128) UTItype;
+#define TI_BITS 128
+#endif
+
+void
+__fixxfbitint (UBILtype *r, SItype rprec, XFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_E (A);
+ USItype arprec = rprec < 0 ? -rprec : rprec;
+ USItype rn = ((USItype) arprec + BIL_TYPE_SIZE - 1) / BIL_TYPE_SIZE;
+ UTItype rv;
+ USItype rsize = arprec > TI_BITS ? TI_BITS : arprec;
+ USItype rsigned = rprec < 0;
+ USItype ovf = 0;
+ USItype shift = 0;
+
+ FP_INIT_EXCEPTIONS;
+ FP_UNPACK_RAW_E (A, a);
+ if (arprec > TI_BITS)
+ {
+ if (A_e < _FP_EXPBIAS_E || (A_s && !rsigned))
+ ovf = 1;
+ else if (A_e >= (_FP_EXPMAX_E < _FP_EXPBIAS_E + arprec
+ ? _FP_EXPMAX_E
+ : _FP_EXPBIAS_E + arprec - rsigned))
+ {
+ ovf = 1;
+ if (A_s
+ && A_e == _FP_EXPBIAS_E + arprec - 1
+ && A_e < _FP_EXPMAX_E)
+ A_e -= arprec - TI_BITS;
+ }
+ else if (A_e >= _FP_EXPBIAS_E + TI_BITS - rsigned)
+ {
+ shift = A_e - (_FP_EXPBIAS_E + TI_BITS - rsigned - 1);
+ A_e -= shift;
+ }
+ }
+ FP_TO_INT_E (rv, A, rsize, rsigned);
+ FP_HANDLE_EXCEPTIONS;
+ FP_TO_BITINT (r, rn, arprec, shift, rv, rsize, rsigned, ovf, TI);
+}
+#endif
@@ -0,0 +1,59 @@
+/* Software floating-point emulation.
+ Convert a _BitInt to bfloat16.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "soft-fp.h"
+#include "brain.h"
+#include "bitint.h"
+
+#ifdef __BITINT_MAXWIDTH__
+BFtype
+__floatbitintbf (const UBILtype *i, SItype iprec)
+{
+ SItype iv;
+ USItype shift = 0;
+ FP_DECL_EX;
+ FP_DECL_B (A);
+ BFtype a;
+
+ FP_FROM_BITINT (i, iprec, iv, shift, SI);
+ FP_INIT_ROUNDMODE;
+ FP_FROM_INT_B (A, iv, SI_BITS, USItype);
+ if (shift)
+ {
+ A_e += shift;
+ if (A_e >= _FP_EXPMAX_B)
+ {
+ /* Exponent too big; overflow to infinity. */
+ _FP_OVERFLOW_SEMIRAW (B, 1, A);
+ _FP_PACK_SEMIRAW (B, 1, A);
+ }
+ }
+ FP_PACK_RAW_B (a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
+#endif
@@ -0,0 +1,64 @@
+/* Software floating-point emulation.
+ Convert a _BitInt to IEEE double.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "soft-fp.h"
+#include "double.h"
+#include "bitint.h"
+
+#ifdef __BITINT_MAXWIDTH__
+DFtype
+__floatbitintdf (const UBILtype *i, SItype iprec)
+{
+ DItype iv;
+ USItype shift = 0;
+ FP_DECL_EX;
+ FP_DECL_D (A);
+ DFtype a;
+
+ FP_FROM_BITINT (i, iprec, iv, shift, DI);
+ FP_INIT_ROUNDMODE;
+ FP_FROM_INT_D (A, iv, DI_BITS, UDItype);
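+ /* IV holds only the most significant bits of I (plus a sticky bit) and
+ SHIFT is the number of bits that were dropped, so scale the result by
+ adding SHIFT to the exponent, watching for overflow to infinity. */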
+ if (shift)
+ {
+ A_e += shift;
+ if (A_e >= _FP_EXPMAX_D)
+ {
+ /* Exponent too big; overflow to infinity. */
+#if _FP_W_TYPE_SIZE < 64
+ _FP_OVERFLOW_SEMIRAW (D, 2, A);
+ _FP_PACK_SEMIRAW (D, 2, A);
+#else
+ _FP_OVERFLOW_SEMIRAW (D, 1, A);
+ _FP_PACK_SEMIRAW (D, 1, A);
+#endif
+ }
+ }
+ FP_PACK_RAW_D (a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
+#endif
@@ -0,0 +1,59 @@
+/* Software floating-point emulation.
+ Convert a _BitInt to IEEE half.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "soft-fp.h"
+#include "half.h"
+#include "bitint.h"
+
+#ifdef __BITINT_MAXWIDTH__
+HFtype
+__floatbitinthf (const UBILtype *i, SItype iprec)
+{
+ SItype iv;
+ USItype shift = 0;
+ FP_DECL_EX;
+ FP_DECL_H (A);
+ HFtype a;
+
+ FP_FROM_BITINT (i, iprec, iv, shift, SI);
+ FP_INIT_ROUNDMODE;
+ FP_FROM_INT_H (A, iv, SI_BITS, USItype);
+ if (shift)
+ {
+ A_e += shift;
+ if (A_e >= _FP_EXPMAX_H)
+ {
+ /* Exponent too big; overflow to infinity. */
+ _FP_OVERFLOW_SEMIRAW (H, 1, A);
+ _FP_PACK_SEMIRAW (H, 1, A);
+ }
+ }
+ FP_PACK_RAW_H (a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
+#endif
@@ -0,0 +1,59 @@
+/* Software floating-point emulation.
+ Convert a _BitInt to IEEE single.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "soft-fp.h"
+#include "single.h"
+#include "bitint.h"
+
+#ifdef __BITINT_MAXWIDTH__
+SFtype
+__floatbitintsf (const UBILtype *i, SItype iprec)
+{
+ SItype iv;
+ USItype shift = 0;
+ FP_DECL_EX;
+ FP_DECL_S (A);
+ SFtype a;
+
+ FP_FROM_BITINT (i, iprec, iv, shift, SI);
+ FP_INIT_ROUNDMODE;
+ FP_FROM_INT_S (A, iv, SI_BITS, USItype);
+ if (shift)
+ {
+ A_e += shift;
+ if (A_e >= _FP_EXPMAX_S)
+ {
+ /* Exponent too big; overflow to infinity. */
+ _FP_OVERFLOW_SEMIRAW (S, 1, A);
+ _FP_PACK_SEMIRAW (S, 1, A);
+ }
+ }
+ FP_PACK_RAW_S (a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
+#endif
@@ -0,0 +1,73 @@
+/* Software floating-point emulation.
+ Convert a _BitInt to IEEE quad.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "soft-fp.h"
+#include "quad.h"
+#include "bitint.h"
+
+#ifdef __BITINT_MAXWIDTH__
+#ifndef TI_BITS
+/* As the mantissa is 112 bits plus 1 implicit bit, we need a 128-bit
+ type, but on most 32-bit architectures TImode isn't supported.
+ Use _BitInt(128) instead. */
+typedef _BitInt(128) TItype;
+typedef unsigned _BitInt(128) UTItype;
+#define TI_BITS 128
+#endif
+
+TFtype
+__floatbitinttf (const UBILtype *i, SItype iprec)
+{
+ TItype iv;
+ USItype shift = 0;
+ FP_DECL_EX;
+ FP_DECL_Q (A);
+ TFtype a;
+
+ FP_FROM_BITINT (i, iprec, iv, shift, TI);
+ FP_INIT_ROUNDMODE;
+ FP_FROM_INT_Q (A, iv, TI_BITS, UTItype);
+ if (shift)
+ {
+ A_e += shift;
+ if (A_e >= _FP_EXPMAX_Q)
+ {
+ /* Exponent too big; overflow to infinity. */
+#if _FP_W_TYPE_SIZE < 64
+ _FP_OVERFLOW_SEMIRAW (Q, 4, A);
+ _FP_PACK_SEMIRAW (Q, 4, A);
+#else
+ _FP_OVERFLOW_SEMIRAW (Q, 2, A);
+ _FP_PACK_SEMIRAW (Q, 2, A);
+#endif
+ }
+ }
+ FP_PACK_RAW_Q (a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
+#endif
@@ -0,0 +1,74 @@
+/* Software floating-point emulation.
+ Convert a _BitInt to IEEE extended.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "soft-fp.h"
+#include "extended.h"
+#include "bitint.h"
+
+#ifdef __BITINT_MAXWIDTH__
+#ifndef TI_BITS
+/* While the mantissa is 64 bits (including 1 explicit bit), extended.h uses
+ op-2.h for W_TYPE_SIZE 64 and op-4.h for W_TYPE_SIZE 32, so we have
+ to use a 128-bit type here. On most 32-bit architectures TImode isn't
+ supported, so use _BitInt(128) instead. */
+typedef _BitInt(128) TItype;
+typedef unsigned _BitInt(128) UTItype;
+#define TI_BITS 128
+#endif
+
+XFtype
+__floatbitintxf (const UBILtype *i, SItype iprec)
+{
+ TItype iv;
+ USItype shift = 0;
+ FP_DECL_EX;
+ FP_DECL_E (A);
+ XFtype a;
+
+ FP_FROM_BITINT (i, iprec, iv, shift, TI);
+ FP_INIT_ROUNDMODE;
+ FP_FROM_INT_E (A, iv, TI_BITS, UTItype);
+ if (shift)
+ {
+ A_e += shift;
+ if (A_e >= _FP_EXPMAX_E)
+ {
+ /* Exponent too big; overflow to infinity. */
+#if _FP_W_TYPE_SIZE < 64
+ _FP_OVERFLOW_SEMIRAW (E, 4, A);
+ _FP_PACK_SEMIRAW (E, 4, A);
+#else
+ _FP_OVERFLOW_SEMIRAW (E, 2, A);
+ _FP_PACK_SEMIRAW (E, 2, A);
+#endif
+ }
+ }
+ FP_PACK_RAW_E (a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
+#endif
@@ -1800,7 +1800,7 @@
if ((X##_s = ((r) < 0))) \
_FP_FROM_INT_ur = -_FP_FROM_INT_ur; \
\
- _FP_STATIC_ASSERT ((rsize) <= 2 * _FP_W_TYPE_SIZE, \
+ _FP_STATIC_ASSERT ((rsize) <= 4 * _FP_W_TYPE_SIZE, \
"rsize too large"); \
(void) (((rsize) <= _FP_W_TYPE_SIZE) \
? ({ \
@@ -1810,13 +1810,38 @@
X##_e = (_FP_EXPBIAS_##fs + _FP_W_TYPE_SIZE - 1 \
- _FP_FROM_INT_lz); \
}) \
- : ({ \
+ : ((rsize) <= 2 * _FP_W_TYPE_SIZE) \
+ ? ({ \
int _FP_FROM_INT_lz; \
__FP_CLZ_2 (_FP_FROM_INT_lz, \
(_FP_W_TYPE) (_FP_FROM_INT_ur \
>> _FP_W_TYPE_SIZE), \
(_FP_W_TYPE) _FP_FROM_INT_ur); \
- X##_e = (_FP_EXPBIAS_##fs + 2 * _FP_W_TYPE_SIZE - 1 \
+ X##_e = (_FP_EXPBIAS_##fs \
+ + 2 * _FP_W_TYPE_SIZE - 1 \
+ - _FP_FROM_INT_lz); \
+ }) \
+ : ({ \
+ int _FP_FROM_INT_lz; \
+ if (_FP_FROM_INT_ur >> (2 * _FP_W_TYPE_SIZE)) \
+ { \
+ rtype _FP_FROM_INT_uru \
+ = _FP_FROM_INT_ur >> (2 * _FP_W_TYPE_SIZE); \
+ __FP_CLZ_2 (_FP_FROM_INT_lz, \
+ (_FP_W_TYPE) (_FP_FROM_INT_uru \
+ >> _FP_W_TYPE_SIZE),\
+ (_FP_W_TYPE) _FP_FROM_INT_uru); \
+ } \
+ else \
+ { \
+ __FP_CLZ_2 (_FP_FROM_INT_lz, \
+ (_FP_W_TYPE) (_FP_FROM_INT_ur \
+ >> _FP_W_TYPE_SIZE),\
+ (_FP_W_TYPE) _FP_FROM_INT_ur); \
+ _FP_FROM_INT_lz += 2 * _FP_W_TYPE_SIZE; \
+ } \
+ X##_e = (_FP_EXPBIAS_##fs \
+ + 4 * _FP_W_TYPE_SIZE - 1 \
- _FP_FROM_INT_lz); \
})); \
\