cfgexpand: Workaround CSE of ADDR_EXPRs in VAR_DECL partitioning [PR113372]
Checks
Commit Message
Hi!
The following patch adds a quick workaround to bugs in VAR_DECL
partitioning.
The problem is that there is no dependency between ADDR_EXPRs of local
decls and CLOBBERs of those vars, so VN can CSE uses of ADDR_EXPRs
(including ivopts integral variants thereof), which can break
add_scope_conflicts discovery of what variables are actually used
in certain region.
E.g. we can have
ivtmp.40_3 = (unsigned long) &MEM <unsigned long[100]> [(void *)&bitint.6 + 8B];
...
uses of ivtmp.40_3
...
bitint.6 ={v} {CLOBBER(eos)};
...
ivtmp.28_43 = (unsigned long) &MEM <unsigned long[100]> [(void *)&bitint.6 + 8B];
...
uses of ivtmp.28_43
before VN (such as dom3), which the add_scope_conflicts code identifies as 2
independent uses of bitint.6 variable (which is correct), but then VN
determines ivtmp.28_43 is the same as ivtmp.40_3 and just uses ivtmp.40_3
even in the second region; at that point add_scope_conflicts thinks the
bitint.6 variable is not used in that region anymore.
The following patch does a simple single def-stmt check for such ADDR_EXPRs
(rather than say trying to do a full propagation of what SSA_NAMEs can
contain ADDR_EXPRs of local variables), which seems to workaround all 4 PRs.
In addition to this patch I've used the attached one to gather statistics
on the total size of all variable partitions in a function, and it seems that besides
the new testcases nothing is really affected compared to no patch (I've
actually just modified the patch to == OMP_SCAN instead of == ADDR_EXPR, so
it looks the same except that it never triggers). The comparison wasn't
perfect because I've only gathered BITS_PER_WORD, main_input_filename (did
some replacement of build directories and /tmp/ccXXXXXX names of LTO to make
it more similar between the two bootstraps/regtests), current_function_name
and the total size of all variable partitions if any, because I didn't
record e.g. the optimization options and so e.g. torture tests which iterate
over options could have different partition sizes even in one compiler when
BITS_PER_WORD, main_input_filename and current_function_name are all equal.
So I had to write an awk script to check whether the first triple in the second
build appeared in the first one and whether the quadruple in the second build
appeared in the first one too, otherwise print the result, and that only
triggered in the new tests.
Also, the cc1plus binary according to objdump -dr is identical between the
two builds except for the ADDR_EXPR vs. OMP_SCAN constant in the two spots.
Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk?
Or should I just use INTEGRAL_TYPE_P instead of ptrofftype_p?
2024-01-15 Jakub Jelinek <jakub@redhat.com>
PR tree-optimization/113372
PR middle-end/90348
PR middle-end/110115
PR middle-end/111422
* cfgexpand.cc (add_scope_conflicts_2): New function.
(add_scope_conflicts_1): Use it.
* gcc.dg/torture/bitint-49.c: New test.
* gcc.c-torture/execute/pr90348.c: New test.
* gcc.c-torture/execute/pr110115.c: New test.
* gcc.c-torture/execute/pr111422.c: New test.
Jakub
--- gcc/cfgexpand.cc.jj 2024-01-15 16:08:00.686573778 +0100
+++ gcc/cfgexpand.cc 2024-01-15 16:19:20.452082787 +0100
@@ -2312,6 +2312,20 @@ expand_used_vars (bitmap forced_stack_va
partition_stack_vars ();
if (dump_file)
dump_stack_var_partition ();
+size_t si, i, n = stack_vars_num;
+unsigned long long sum = 0;
+for (si = 0; si < n; ++si)
+{
+unsigned HOST_WIDE_INT sz;
+i = stack_vars_sorted[si];
+if (stack_vars[i].representative != i)
+continue;
+if (stack_vars[i].size.is_constant (&sz))
+sum += sz;
+}
+FILE *f = fopen ("/tmp/stackvars", "a");
+fprintf (f, "%d %s %s %lld\n", (int) BITS_PER_WORD, main_input_filename ? main_input_filename : "-", current_function_name (), sum);
+fclose (f);
}
Comments
On Tue, 16 Jan 2024, Jakub Jelinek wrote:
> Hi!
>
> The following patch adds a quick workaround to bugs in VAR_DECL
> partitioning.
> The problem is that there is no dependency between ADDR_EXPRs of local
> decls and CLOBBERs of those vars, so VN can CSE uses of ADDR_EXPRs
> (including ivopts integral variants thereof), which can break
> add_scope_conflicts discovery of what variables are actually used
> in certain region.
> E.g. we can have
> ivtmp.40_3 = (unsigned long) &MEM <unsigned long[100]> [(void *)&bitint.6 + 8B];
> ...
> uses of ivtmp.40_3
> ...
> bitint.6 ={v} {CLOBBER(eos)};
> ...
> ivtmp.28_43 = (unsigned long) &MEM <unsigned long[100]> [(void *)&bitint.6 + 8B];
> ...
> uses of ivtmp.28_43
> before VN (such as dom3), which the add_scope_conflicts code identifies as 2
> independent uses of bitint.6 variable (which is correct), but then VN
> determines ivtmp.28_43 is the same as ivtmp.40_3 and just uses ivtmp.40_3
> even in the second region; at that point add_scope_conflict thinks the
> bitint.6 variable is not used in that region anymore.
>
> The following patch does a simple single def-stmt check for such ADDR_EXPRs
> (rather than say trying to do a full propagation of what SSA_NAMEs can
> contain ADDR_EXPRs of local variables), which seems to workaround all 4 PRs.
>
> In addition to this patch I've used the attached one to gather statistics
> on the total size of all variable partitions in a function and seems besides
> the new testcases nothing is really affected compared to no patch (I've
> actually just modified the patch to == OMP_SCAN instead of == ADDR_EXPR, so
> it looks the same except that it never triggers). The comparison wasn't
> perfect because I've only gathered BITS_PER_WORD, main_input_filename (did
> some replacement of build directories and /tmp/ccXXXXXX names of LTO to make
> it more similar between the two bootstraps/regtests), current_function_name
> and the total size of all variable partitions if any, because I didn't
> record e.g. the optimization options and so e.g. torture tests which iterate
> over options could have different partition sizes even in one compiler when
> BITS_PER_WORD, main_input_filename and current_function_name are all equal.
> So had to write an awk script to check if the first triple in the second
> build appeared in the first one and the quadruple in the second build
> appeared in the first one too, otherwise print result and that only
> triggered in the new tests.
> Also, the cc1plus binary according to objdump -dr is identical between the
> two builds except for the ADDR_EXPR vs. OMP_SCAN constant in the two spots.
>
> Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk?
>
> Or should I just use INTEGRAL_TYPE_P instead of ptrofftype_p?
On GIMPLE we allow
/* Allow conversions from pointer type to integral type only if
there is no sign or zero extension involved.
For targets were the precision of ptrofftype doesn't match that
of pointers we allow conversions to types where
POINTERS_EXTEND_UNSIGNED specifies how that works. */
if ((POINTER_TYPE_P (lhs_type)
&& INTEGRAL_TYPE_P (rhs1_type))
|| (POINTER_TYPE_P (rhs1_type)
&& INTEGRAL_TYPE_P (lhs_type)
&& (TYPE_PRECISION (rhs1_type) >= TYPE_PRECISION
(lhs_type)
#if defined(POINTERS_EXTEND_UNSIGNED)
|| (TYPE_MODE (rhs1_type) == ptr_mode
&& (TYPE_PRECISION (lhs_type)
== BITS_PER_WORD /* word_mode */
|| (TYPE_PRECISION (lhs_type)
== GET_MODE_PRECISION (Pmode))))
#endif
)))
so the "complicated" expression applies. Of course a truncation
would lose information and so not be interesting but the extension
case is what would matter here. Note while we have different
address-spaces there's no 'POINTERS_EXTEND_UNSIGNED' for them
so the above applies to all address-spaces and restricts their use
(ideally POINTERS_EXTEND_UNSIGNED would be made another
TARGET_ADDR_SPACE hook).
I'm not sure how fancy we need to get with this workaround, so
changing to INTEGRAL_TYPE_P works for me.
OK with or without changing ptrofftype_p at your discretion.
Thanks,
Richard.
> 2024-01-15 Jakub Jelinek <jakub@redhat.com>
>
> PR tree-optimization/113372
> PR middle-end/90348
> PR middle-end/110115
> PR middle-end/111422
> * cfgexpand.cc (add_scope_conflicts_2): New function.
> (add_scope_conflicts_1): Use it.
>
> * gcc.dg/torture/bitint-49.c: New test.
> * gcc.c-torture/execute/pr90348.c: New test.
> * gcc.c-torture/execute/pr110115.c: New test.
> * gcc.c-torture/execute/pr111422.c: New test.
>
> --- gcc/cfgexpand.cc.jj 2024-01-03 11:51:28.262776560 +0100
> +++ gcc/cfgexpand.cc 2024-01-15 16:08:00.686573778 +0100
> @@ -571,6 +571,25 @@ visit_conflict (gimple *, tree op, tree,
> return false;
> }
>
> +/* Helper function for add_scope_conflicts_1. For USE on
> + a stmt, if it is a SSA_NAME and in its SSA_NAME_DEF_STMT is known to be
> + based on some ADDR_EXPR, invoke VISIT on that ADDR_EXPR. */
> +
> +static inline void
> +add_scope_conflicts_2 (tree use, bitmap work,
> + walk_stmt_load_store_addr_fn visit)
> +{
> + if (TREE_CODE (use) == SSA_NAME
> + && (POINTER_TYPE_P (TREE_TYPE (use)) || ptrofftype_p (TREE_TYPE (use))))
> + {
> + gimple *g = SSA_NAME_DEF_STMT (use);
> + if (is_gimple_assign (g))
> + if (tree op = gimple_assign_rhs1 (g))
> + if (TREE_CODE (op) == ADDR_EXPR)
> + visit (g, TREE_OPERAND (op, 0), op, work);
> + }
> +}
> +
> /* Helper routine for add_scope_conflicts, calculating the active partitions
> at the end of BB, leaving the result in WORK. We're called to generate
> conflicts when FOR_CONFLICT is true, otherwise we're just tracking
> @@ -583,6 +602,8 @@ add_scope_conflicts_1 (basic_block bb, b
> edge_iterator ei;
> gimple_stmt_iterator gsi;
> walk_stmt_load_store_addr_fn visit;
> + use_operand_p use_p;
> + ssa_op_iter iter;
>
> bitmap_clear (work);
> FOR_EACH_EDGE (e, ei, bb->preds)
> @@ -593,7 +614,10 @@ add_scope_conflicts_1 (basic_block bb, b
> for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
> {
> gimple *stmt = gsi_stmt (gsi);
> + gphi *phi = as_a <gphi *> (stmt);
> walk_stmt_load_store_addr_ops (stmt, work, NULL, NULL, visit);
> + FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
> + add_scope_conflicts_2 (USE_FROM_PTR (use_p), work, visit);
> }
> for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
> {
> @@ -613,8 +637,7 @@ add_scope_conflicts_1 (basic_block bb, b
> }
> else if (!is_gimple_debug (stmt))
> {
> - if (for_conflict
> - && visit == visit_op)
> + if (for_conflict && visit == visit_op)
> {
> /* If this is the first real instruction in this BB we need
> to add conflicts for everything live at this point now.
> @@ -634,6 +657,8 @@ add_scope_conflicts_1 (basic_block bb, b
> visit = visit_conflict;
> }
> walk_stmt_load_store_addr_ops (stmt, work, visit, visit, visit);
> + FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
> + add_scope_conflicts_2 (USE_FROM_PTR (use_p), work, visit);
> }
> }
> }
> --- gcc/testsuite/gcc.dg/torture/bitint-49.c.jj 2024-01-15 15:13:54.590737604 +0100
> +++ gcc/testsuite/gcc.dg/torture/bitint-49.c 2024-01-15 15:14:19.864387438 +0100
> @@ -0,0 +1,28 @@
> +/* PR tree-optimization/113372 */
> +/* { dg-do run { target bitint } } */
> +/* { dg-options "-std=c23 -pedantic-errors" } */
> +/* { dg-skip-if "" { ! run_expensive_tests } { "*" } { "-O0" "-O1" "-O2" } } */
> +/* { dg-skip-if "" { ! run_expensive_tests } { "-flto" } { "" } } */
> +
> +_BitInt(8) a, b, c;
> +
> +#if __BITINT_MAXWIDTH__ >= 6384
> +_BitInt(8)
> +foo (_BitInt(6384) y)
> +{
> + _BitInt(4745) x = -(b % y) * b;
> + int i = __builtin_sub_overflow_p (-y, 0, 0);
> + c |= __builtin_add_overflow_p (i, 0, a);
> + return x;
> +}
> +#endif
> +
> +int
> +main ()
> +{
> +#if __BITINT_MAXWIDTH__ >= 6384
> + if (foo (4) != 0 || c != 0)
> + __builtin_abort ();
> +#endif
> + return 0;
> +}
> --- gcc/testsuite/gcc.c-torture/execute/pr90348.c.jj 2024-01-15 15:15:24.369493734 +0100
> +++ gcc/testsuite/gcc.c-torture/execute/pr90348.c 2019-05-07 13:02:47.155066415 +0200
> @@ -0,0 +1,38 @@
> +/* PR middle-end/90348 */
> +
> +void __attribute__ ((noipa))
> +set_one (unsigned char *ptr)
> +{
> + *ptr = 1;
> +}
> +
> +void __attribute__ ((noipa))
> +check_zero (unsigned char const *in, unsigned int len)
> +{
> + for (unsigned int i = 0; i < len; ++i)
> + if (in[i] != 0)
> + __builtin_abort ();
> +}
> +
> +static void
> +set_one_on_stack (void)
> +{
> + unsigned char buf[1];
> + set_one (buf);
> +}
> +
> +int
> +main ()
> +{
> + for (int i = 0; i <= 4; ++i)
> + {
> + unsigned char in[4];
> + for (int j = 0; j < i; ++j)
> + {
> + in[j] = 0;
> + set_one_on_stack ();
> + }
> + check_zero (in, i);
> + }
> + return 0;
> +}
> --- gcc/testsuite/gcc.c-torture/execute/pr110115.c.jj 2024-01-15 15:18:42.327751055 +0100
> +++ gcc/testsuite/gcc.c-torture/execute/pr110115.c 2024-01-15 15:18:33.094878974 +0100
> @@ -0,0 +1,45 @@
> +/* PR middle-end/110115 */
> +
> +int a;
> +signed char b;
> +
> +static int
> +foo (signed char *e, int f)
> +{
> + int d;
> + for (d = 0; d < f; d++)
> + e[d] = 0;
> + return d;
> +}
> +
> +int
> +bar (signed char e, int f)
> +{
> + signed char h[20];
> + int i = foo (h, f);
> + return i;
> +}
> +
> +int
> +baz ()
> +{
> + switch (a)
> + {
> + case 'f':
> + return 0;
> + default:
> + return ~0;
> + }
> +}
> +
> +int
> +main ()
> +{
> + {
> + signed char *k[3];
> + int d;
> + for (d = 0; bar (8, 15) - 15 + d < 1; d++)
> + k[baz () + 1] = &b;
> + *k[0] = -*k[0];
> + }
> +}
> --- gcc/testsuite/gcc.c-torture/execute/pr111422.c.jj 2024-01-15 15:21:35.205355850 +0100
> +++ gcc/testsuite/gcc.c-torture/execute/pr111422.c 2024-01-15 15:21:26.866471388 +0100
> @@ -0,0 +1,40 @@
> +/* PR middle-end/111422 */
> +
> +int a, b;
> +int *c = &b;
> +unsigned d;
> +signed char e;
> +int f = 1;
> +
> +int
> +foo (int k, signed char *l)
> +{
> + if (k < 6)
> + return a;
> + l[0] = l[1] = l[k - 1] = 8;
> + return 0;
> +}
> +
> +int
> +bar (int k)
> +{
> + signed char g[11];
> + int h = foo (k, g);
> + return h;
> +}
> +
> +int
> +main ()
> +{
> + for (; b < 8; b = b + 1)
> + ;
> + int j;
> + int *n[8];
> + j = 0;
> + for (;18446744073709551608ULL + bar (*c) + *c + j < 2; j++)
> + n[j] = &f;
> + for (; e <= 4; e++)
> + d = *n[0] == f;
> + if (d != 1)
> + __builtin_abort ();
> +}
>
> Jakub
>
On Tue, Jan 16, 2024 at 10:00:09AM +0100, Richard Biener wrote:
> I'm not sure how fancy we need to get with this workaround, so
> changing to INTEGRAL_TYPE_P works for me.
I'll go for it.
BTW, I've also built linux kernel allyesconfig, and in there
per the statistics gathering patch there are some small differences
in the sizes of all partitions in various functions, but nothing major
and the differences are actually in both directions, in some cases it is a
few bytes more like (- for vanilla, + for patched gcc):
-64 /usr/src/kernel/linux-2.6/fs/gfs2/file.c do_flock 296
+64 /usr/src/kernel/linux-2.6/fs/gfs2/file.c do_flock 304
or
-64 /usr/src/kernel/linux-2.6/fs/lockd/svcsubs.c nlm_unlock_files 288
+64 /usr/src/kernel/linux-2.6/fs/lockd/svcsubs.c nlm_unlock_files 296
but in other cases it is a few bytes less (presumably the added extra
conflicts result in different partitioning decisions and that sometimes
is actually smaller):
-64 /usr/src/kernel/linux-2.6/arch/x86/kernel/cpu/microcode/core.c load_late_stop_cpus 336
+64 /usr/src/kernel/linux-2.6/arch/x86/kernel/cpu/microcode/core.c load_late_stop_cpus 328
or
-64 /usr/src/kernel/linux-2.6/drivers/dma-buf/dma-resv.c dma_resv_lockdep 896
+64 /usr/src/kernel/linux-2.6/drivers/dma-buf/dma-resv.c dma_resv_lockdep 888
or
-64 /usr/src/kernel/linux-2.6/fs/ext4/mballoc.c test_new_blocks_simple 1380
+64 /usr/src/kernel/linux-2.6/fs/ext4/mballoc.c test_new_blocks_simple 1356
The sum of the partition size sums on same TU/same functions with differences is
11191 (vanilla)
vs.
10575 (patched)
Jakub
@@ -571,6 +571,25 @@ visit_conflict (gimple *, tree op, tree,
return false;
}
+/* Helper function for add_scope_conflicts_1. For USE on
+ a stmt, if it is a SSA_NAME and in its SSA_NAME_DEF_STMT is known to be
+ based on some ADDR_EXPR, invoke VISIT on that ADDR_EXPR. */
+
+static inline void
+add_scope_conflicts_2 (tree use, bitmap work,
+ walk_stmt_load_store_addr_fn visit)
+{
+ if (TREE_CODE (use) == SSA_NAME
+ && (POINTER_TYPE_P (TREE_TYPE (use)) || ptrofftype_p (TREE_TYPE (use))))
+ {
+ gimple *g = SSA_NAME_DEF_STMT (use);
+ if (is_gimple_assign (g))
+ if (tree op = gimple_assign_rhs1 (g))
+ if (TREE_CODE (op) == ADDR_EXPR)
+ visit (g, TREE_OPERAND (op, 0), op, work);
+ }
+}
+
/* Helper routine for add_scope_conflicts, calculating the active partitions
at the end of BB, leaving the result in WORK. We're called to generate
conflicts when FOR_CONFLICT is true, otherwise we're just tracking
@@ -583,6 +602,8 @@ add_scope_conflicts_1 (basic_block bb, b
edge_iterator ei;
gimple_stmt_iterator gsi;
walk_stmt_load_store_addr_fn visit;
+ use_operand_p use_p;
+ ssa_op_iter iter;
bitmap_clear (work);
FOR_EACH_EDGE (e, ei, bb->preds)
@@ -593,7 +614,10 @@ add_scope_conflicts_1 (basic_block bb, b
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple *stmt = gsi_stmt (gsi);
+ gphi *phi = as_a <gphi *> (stmt);
walk_stmt_load_store_addr_ops (stmt, work, NULL, NULL, visit);
+ FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
+ add_scope_conflicts_2 (USE_FROM_PTR (use_p), work, visit);
}
for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
@@ -613,8 +637,7 @@ add_scope_conflicts_1 (basic_block bb, b
}
else if (!is_gimple_debug (stmt))
{
- if (for_conflict
- && visit == visit_op)
+ if (for_conflict && visit == visit_op)
{
/* If this is the first real instruction in this BB we need
to add conflicts for everything live at this point now.
@@ -634,6 +657,8 @@ add_scope_conflicts_1 (basic_block bb, b
visit = visit_conflict;
}
walk_stmt_load_store_addr_ops (stmt, work, visit, visit, visit);
+ FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
+ add_scope_conflicts_2 (USE_FROM_PTR (use_p), work, visit);
}
}
}
@@ -0,0 +1,28 @@
+/* PR tree-optimization/113372 */
+/* { dg-do run { target bitint } } */
+/* { dg-options "-std=c23 -pedantic-errors" } */
+/* { dg-skip-if "" { ! run_expensive_tests } { "*" } { "-O0" "-O1" "-O2" } } */
+/* { dg-skip-if "" { ! run_expensive_tests } { "-flto" } { "" } } */
+
+_BitInt(8) a, b, c;
+
+#if __BITINT_MAXWIDTH__ >= 6384
+_BitInt(8)
+foo (_BitInt(6384) y)
+{
+ _BitInt(4745) x = -(b % y) * b;
+ int i = __builtin_sub_overflow_p (-y, 0, 0);
+ c |= __builtin_add_overflow_p (i, 0, a);
+ return x;
+}
+#endif
+
+int
+main ()
+{
+#if __BITINT_MAXWIDTH__ >= 6384
+ if (foo (4) != 0 || c != 0)
+ __builtin_abort ();
+#endif
+ return 0;
+}
@@ -0,0 +1,38 @@
+/* PR middle-end/90348 */
+
+void __attribute__ ((noipa))
+set_one (unsigned char *ptr)
+{
+ *ptr = 1;
+}
+
+void __attribute__ ((noipa))
+check_zero (unsigned char const *in, unsigned int len)
+{
+ for (unsigned int i = 0; i < len; ++i)
+ if (in[i] != 0)
+ __builtin_abort ();
+}
+
+static void
+set_one_on_stack (void)
+{
+ unsigned char buf[1];
+ set_one (buf);
+}
+
+int
+main ()
+{
+ for (int i = 0; i <= 4; ++i)
+ {
+ unsigned char in[4];
+ for (int j = 0; j < i; ++j)
+ {
+ in[j] = 0;
+ set_one_on_stack ();
+ }
+ check_zero (in, i);
+ }
+ return 0;
+}
@@ -0,0 +1,45 @@
+/* PR middle-end/110115 */
+
+int a;
+signed char b;
+
+static int
+foo (signed char *e, int f)
+{
+ int d;
+ for (d = 0; d < f; d++)
+ e[d] = 0;
+ return d;
+}
+
+int
+bar (signed char e, int f)
+{
+ signed char h[20];
+ int i = foo (h, f);
+ return i;
+}
+
+int
+baz ()
+{
+ switch (a)
+ {
+ case 'f':
+ return 0;
+ default:
+ return ~0;
+ }
+}
+
+int
+main ()
+{
+ {
+ signed char *k[3];
+ int d;
+ for (d = 0; bar (8, 15) - 15 + d < 1; d++)
+ k[baz () + 1] = &b;
+ *k[0] = -*k[0];
+ }
+}
@@ -0,0 +1,40 @@
+/* PR middle-end/111422 */
+
+int a, b;
+int *c = &b;
+unsigned d;
+signed char e;
+int f = 1;
+
+int
+foo (int k, signed char *l)
+{
+ if (k < 6)
+ return a;
+ l[0] = l[1] = l[k - 1] = 8;
+ return 0;
+}
+
+int
+bar (int k)
+{
+ signed char g[11];
+ int h = foo (k, g);
+ return h;
+}
+
+int
+main ()
+{
+ for (; b < 8; b = b + 1)
+ ;
+ int j;
+ int *n[8];
+ j = 0;
+ for (;18446744073709551608ULL + bar (*c) + *c + j < 2; j++)
+ n[j] = &f;
+ for (; e <= 4; e++)
+ d = *n[0] == f;
+ if (d != 1)
+ __builtin_abort ();
+}