lower-bitint: Avoid sign-extending cast to unsigned types feeding div/mod/float [PR113614]

Message ID ZbS69KumgXsoLLlW@tucnak
State Unresolved

Commit Message

Jakub Jelinek Jan. 27, 2024, 8:12 a.m. UTC
  Hi!

The following testcase is miscompiled because a narrower value is
sign-extended to a wider unsigned _BitInt that is used as a division
operand.  handle_operand_addr for that case returns the narrower value
and precision -prec_of_narrower_value.  That works fine for
multiplication (at least for normal multiplication; we don't merge casts
with .MUL_OVERFLOW or the ubsan multiplication right now), because the
result is the same whether we treat the arguments as signed or unsigned.
But it is completely wrong for division/modulo or conversions to
floating point: if we pass a negative prec for an input operand of a
libgcc handler, the handler treats the operand as a negative number, not
as an unsigned one sign-extended from something smaller (and it doesn't
know to what precision it has been extended).

So, the following patch fixes it by making sure we don't merge such
sign-extensions to unsigned _BitInt types with division, modulo or
conversions to floating point.
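
To make the problematic shape concrete, here is a minimal sketch
(modeled on the new testcase below; the function name is made up, and it
assumes a target where __BITINT_MAXWIDTH__ >= 256, e.g. x86_64):

/* Minimal illustration (not part of the patch) of a sign-extending
   cast to a wide unsigned _BitInt feeding a division.  */
unsigned _BitInt(256)
div_by_extended (_BitInt(8) y, unsigned _BitInt(256) z)
{
  /* -y is a narrow signed value; converting it to unsigned _BitInt(256)
     sign-extends it, so e.g. -15 becomes 2**256 - 15.  Before this fix,
     the cast could be merged with the division and the divisor passed to
     the libgcc handler as the narrow value with a negative prec, which
     the handler then treated as the negative number -15 rather than the
     huge unsigned value, miscompiling z / d.  */
  unsigned _BitInt(256) d = -y;
  return z / d;
}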

Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk?

2024-01-27  Jakub Jelinek  <jakub@redhat.com>

	PR tree-optimization/113614
	* gimple-lower-bitint.cc (gimple_lower_bitint): Don't merge
	widening casts from signed to unsigned types with TRUNC_DIV_EXPR,
	TRUNC_MOD_EXPR or FLOAT_EXPR uses.

	* gcc.dg/torture/bitint-54.c: New test.


	Jakub
  

Comments

Richard Biener Jan. 27, 2024, 12:01 p.m. UTC | #1
> Am 27.01.2024 um 09:18 schrieb Jakub Jelinek <jakub@redhat.com>:
> 
> Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk?

Ok

Richard 


Patch

--- gcc/gimple-lower-bitint.cc.jj	2024-01-26 18:05:24.461891138 +0100
+++ gcc/gimple-lower-bitint.cc	2024-01-26 19:04:07.948780942 +0100
@@ -6102,17 +6102,27 @@  gimple_lower_bitint (void)
 		      && (TREE_CODE (rhs1) != SSA_NAME
 			  || !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1)))
 		    {
-		      if (TREE_CODE (TREE_TYPE (rhs1)) != BITINT_TYPE
-			  || (bitint_precision_kind (TREE_TYPE (rhs1))
-			      < bitint_prec_large))
-			continue;
 		      if (is_gimple_assign (use_stmt))
 			switch (gimple_assign_rhs_code (use_stmt))
 			  {
-			  case MULT_EXPR:
 			  case TRUNC_DIV_EXPR:
 			  case TRUNC_MOD_EXPR:
 			  case FLOAT_EXPR:
+			    /* For division, modulo and casts to floating
+			       point, avoid representing unsigned operands
+			       using negative prec if they were sign-extended
+			       from narrower precision.  */
+			    if (TYPE_UNSIGNED (TREE_TYPE (s))
+				&& !TYPE_UNSIGNED (TREE_TYPE (rhs1))
+				&& (TYPE_PRECISION (TREE_TYPE (s))
+				    > TYPE_PRECISION (TREE_TYPE (rhs1))))
+			      goto force_name;
+			    /* FALLTHRU */
+			  case MULT_EXPR:
+			    if (TREE_CODE (TREE_TYPE (rhs1)) != BITINT_TYPE
+				|| (bitint_precision_kind (TREE_TYPE (rhs1))
+				    < bitint_prec_large))
+			      continue;
 			    /* Uses which use handle_operand_addr can't
 			       deal with nested casts.  */
 			    if (TREE_CODE (rhs1) == SSA_NAME
@@ -6126,6 +6136,10 @@  gimple_lower_bitint (void)
 			  default:
 			    break;
 			}
+		      if (TREE_CODE (TREE_TYPE (rhs1)) != BITINT_TYPE
+			  || (bitint_precision_kind (TREE_TYPE (rhs1))
+			      < bitint_prec_large))
+			continue;
 		      if ((TYPE_PRECISION (TREE_TYPE (rhs1))
 			   >= TYPE_PRECISION (TREE_TYPE (s)))
 			  && mergeable_op (use_stmt))
--- gcc/testsuite/gcc.dg/torture/bitint-54.c.jj	2024-01-26 19:09:01.436688318 +0100
+++ gcc/testsuite/gcc.dg/torture/bitint-54.c	2024-01-26 19:16:24.908504368 +0100
@@ -0,0 +1,29 @@ 
+/* PR tree-optimization/113614 */
+/* { dg-do run { target bitint } } */
+/* { dg-options "-std=c23 -pedantic-errors" } */
+/* { dg-skip-if "" { ! run_expensive_tests }  { "*" } { "-O0" "-O2" } } */
+/* { dg-skip-if "" { ! run_expensive_tests } { "-flto" } { "" } } */
+
+_BitInt(8) a;
+_BitInt(8) b;
+_BitInt(8) c;
+
+#if __BITINT_MAXWIDTH__ >= 256
+_BitInt(256)
+foo (_BitInt(8) y, unsigned _BitInt(256) z)
+{
+  unsigned _BitInt(256) d = -y;
+  z /= d;
+  return z + a + b + c;
+}
+#endif
+
+int
+main ()
+{
+#if __BITINT_MAXWIDTH__ >= 256
+  if (foo (0xfwb, 0x24euwb))
+    __builtin_abort ();
+#endif
+  return 0;
+}
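
For reference, a quick sanity check of the testcase's expectation (my
own reading, not part of the patch): with y = 0xfwb, d = -y converts to
unsigned _BitInt(256) as 2**256 - 15, so z / d with z = 0x24euwb is 0,
and a, b and c are zero-initialized, so foo () must return 0 and the
test must not abort.  The same arithmetic with ordinary types:

/* Standalone sketch of the same sign-extend-then-divide arithmetic with
   ordinary 64-bit types instead of _BitInt(256); not part of the patch.  */
#include <stdio.h>

int
main (void)
{
  signed char y = 0xf;           /* stands in for _BitInt(8) y */
  unsigned long long z = 0x24e;  /* stands in for unsigned _BitInt(256) z */
  /* -y is sign-extended before the conversion, so d is 2**64 - 15 here
     (2**256 - 15 in the testcase), and z / d is 0.  */
  unsigned long long d = (unsigned long long) -y;
  printf ("%llu\n", z / d);      /* prints 0 */
  return 0;
}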