lower-bitint, v3: Fix up -fnon-call-exceptions bit-field load lowering [PR112668]
Checks
Commit Message
On Thu, Nov 23, 2023 at 01:10:02PM +0100, Richard Biener wrote:
> Looks a bit better. As for constructing a gsi_end_p () iterator for a
> basic-block
> I'd simply add a new gsi_end_{bb,seq} ({basic_block,gimple_seq}).
Ok, here it is (I just used gsi_end without the _seq suffix for gimple_seq,
because it is then consistent with gsi_start/gsi_last etc.).
2023-11-23 Jakub Jelinek <jakub@redhat.com>
PR middle-end/112668
* gimple-iterator.h (gsi_end, gsi_end_bb): New inline functions.
* gimple-lower-bitint.cc (bitint_large_huge::handle_cast): After
temporarily adding statements after m_init_gsi, update m_init_gsi
such that later additions after it will be after the added statements.
(bitint_large_huge::handle_load): Likewise. When splitting
gsi_bb (m_init_gsi) basic block, update m_preheader_bb if needed
and update saved m_gsi as well if needed.
(bitint_large_huge::lower_mergeable_stmt,
bitint_large_huge::lower_comparison_stmt,
bitint_large_huge::lower_mul_overflow,
bitint_large_huge::lower_bit_query): Use gsi_end_bb.
* gcc.dg/bitint-40.c: New test.
Jakub
@@ -169,6 +169,41 @@ gsi_last_bb (basic_block bb)
return i;
}
+/* Return a new iterator pointing to before the first statement or after
+ last statement (depending on whether adding statements after it or before it)
+ in a GIMPLE_SEQ. */
+
+inline gimple_stmt_iterator
+gsi_end (gimple_seq &seq)
+{
+ gimple_stmt_iterator i;
+ gimple *g = gimple_seq_last (seq);
+
+ i.ptr = NULL;
+ i.seq = &seq;
+ i.bb = g ? gimple_bb (g) : NULL;
+
+ return i;
+}
+
+/* Return a new iterator pointing to before the first statement or after
+ last statement (depending on whether adding statements after it or before it)
+ in basic block BB. */
+
+inline gimple_stmt_iterator
+gsi_end_bb (basic_block bb)
+{
+ gimple_stmt_iterator i;
+ gimple_seq *seq;
+
+ seq = bb_seq_addr (bb);
+ i.ptr = NULL;
+ i.seq = seq;
+ i.bb = bb;
+
+ return i;
+}
+
/* Return true if I is at the end of its sequence. */
inline bool
@@ -1294,6 +1294,11 @@ bitint_large_huge::handle_cast (tree lhs
g = gimple_build_assign (n, RSHIFT_EXPR, t, lpm1);
insert_before (g);
m_data[save_data_cnt + 1] = add_cast (m_limb_type, n);
+ m_init_gsi = m_gsi;
+ if (gsi_end_p (m_init_gsi))
+ m_init_gsi = gsi_last_bb (gsi_bb (m_init_gsi));
+ else
+ gsi_prev (&m_init_gsi);
m_gsi = save_gsi;
}
else if (m_upwards_2limb * limb_prec < TYPE_PRECISION (rhs_type))
@@ -1523,6 +1528,11 @@ bitint_large_huge::handle_cast (tree lhs
insert_before (g);
rext = add_cast (m_limb_type, gimple_assign_lhs (g));
}
+ m_init_gsi = m_gsi;
+ if (gsi_end_p (m_init_gsi))
+ m_init_gsi = gsi_last_bb (gsi_bb (m_init_gsi));
+ else
+ gsi_prev (&m_init_gsi);
m_gsi = save_gsi;
}
tree t;
@@ -1687,9 +1697,23 @@ bitint_large_huge::handle_load (gimple *
edge e = split_block (gsi_bb (m_gsi), g);
make_edge (e->src, eh_edge->dest, EDGE_EH)->probability
= profile_probability::very_unlikely ();
- m_init_gsi.bb = e->dest;
+ m_gsi = gsi_after_labels (e->dest);
+ if (gsi_bb (save_gsi) == e->src)
+ {
+ if (gsi_end_p (save_gsi))
+ save_gsi = gsi_end_bb (e->dest);
+ else
+ save_gsi = gsi_for_stmt (gsi_stmt (save_gsi));
+ }
+ if (m_preheader_bb == e->src)
+ m_preheader_bb = e->dest;
}
}
+ m_init_gsi = m_gsi;
+ if (gsi_end_p (m_init_gsi))
+ m_init_gsi = gsi_last_bb (gsi_bb (m_init_gsi));
+ else
+ gsi_prev (&m_init_gsi);
m_gsi = save_gsi;
tree out;
prepare_data_in_out (iv, idx, &out);
@@ -2359,11 +2383,7 @@ bitint_large_huge::lower_mergeable_stmt
edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
edge_bb = e->src;
if (kind == bitint_prec_large)
- {
- m_gsi = gsi_last_bb (edge_bb);
- if (!gsi_end_p (m_gsi))
- gsi_next (&m_gsi);
- }
+ m_gsi = gsi_end_bb (edge_bb);
}
else
m_after_stmt = stmt;
@@ -2816,9 +2836,7 @@ bitint_large_huge::lower_comparison_stmt
gsi_prev (&gsi);
edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
edge_bb = e->src;
- m_gsi = gsi_last_bb (edge_bb);
- if (!gsi_end_p (m_gsi))
- gsi_next (&m_gsi);
+ m_gsi = gsi_end_bb (edge_bb);
edge *edges = XALLOCAVEC (edge, cnt * 2);
for (unsigned i = 0; i < cnt; i++)
@@ -4288,9 +4306,7 @@ bitint_large_huge::lower_mul_overflow (t
gsi_prev (&gsi);
edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
edge_bb = e->src;
- m_gsi = gsi_last_bb (edge_bb);
- if (!gsi_end_p (m_gsi))
- gsi_next (&m_gsi);
+ m_gsi = gsi_end_bb (edge_bb);
tree cmp = build_zero_cst (m_limb_type);
for (unsigned i = 0; i < cnt; i++)
@@ -4560,11 +4576,7 @@ bitint_large_huge::lower_bit_query (gimp
edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
edge_bb = e->src;
if (kind == bitint_prec_large)
- {
- m_gsi = gsi_last_bb (edge_bb);
- if (!gsi_end_p (m_gsi))
- gsi_next (&m_gsi);
- }
+ m_gsi = gsi_end_bb (edge_bb);
bqp = XALLOCAVEC (struct bq_details, cnt);
}
else
@@ -4717,9 +4729,7 @@ bitint_large_huge::lower_bit_query (gimp
gsi_prev (&gsi);
edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
edge_bb = e->src;
- m_gsi = gsi_last_bb (edge_bb);
- if (!gsi_end_p (m_gsi))
- gsi_next (&m_gsi);
+ m_gsi = gsi_end_bb (edge_bb);
if (ifn == IFN_CLZ)
bqp = XALLOCAVEC (struct bq_details, cnt);
@@ -0,0 +1,29 @@
+/* PR middle-end/112668 */
+/* { dg-do compile { target bitint } } */
+/* { dg-options "-std=c23 -fnon-call-exceptions" } */
+
+#if __BITINT_MAXWIDTH__ >= 156
+struct T156 { _BitInt(156) a : 2; unsigned _BitInt(156) b : 135; _BitInt(156) c : 2; };
+extern void foo156 (struct T156 *);
+
+unsigned _BitInt(156)
+bar156 (int i)
+{
+ struct T156 r156[12];
+ foo156 (&r156[0]);
+ return r156[i].b;
+}
+#endif
+
+#if __BITINT_MAXWIDTH__ >= 495
+struct T495 { _BitInt(495) a : 2; unsigned _BitInt(495) b : 471; _BitInt(495) c : 2; };
+extern void foo495 (struct T495 *r495);
+
+unsigned _BitInt(495)
+bar495 (int i)
+{
+ struct T495 r495[12];
+ foo495 (r495);
+ return r495[i].b;
+}
+#endif