From 0011c1d0498f25c231844f4a4ff75019b9a48d99 Mon Sep 17 00:00:00 2001
From: Aldy Hernandez <aldyh@redhat.com>
Date: Sat, 19 Nov 2022 11:11:25 +0100
Subject: [PATCH] Remove unused (legacy) VRP code.
---
 gcc/doc/invoke.texi |   10 -
 gcc/params.opt      |   21 -
 gcc/range-op.cc     |   28 +
 gcc/tree-vrp.cc     | 3838 +++----------------------------------------
 gcc/tree-vrp.h      |   25 -
 gcc/value-range.cc  |   28 -
 gcc/value-range.h   |    1 -
 gcc/vr-values.cc    | 1871 +--------------------
 gcc/vr-values.h     |  100 +-
 9 files changed, 253 insertions(+), 5669 deletions(-)
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -15781,19 +15781,9 @@ The maximum number of may-defs we analyze when looking for a must-def
specifying the dynamic type of an object that invokes a virtual call
we may be able to devirtualize speculatively.
-@item max-vrp-switch-assertions
-The maximum number of assertions to add along the default edge of a switch
-statement during VRP.
-
@item evrp-sparse-threshold
Maximum number of basic blocks before EVRP uses a sparse cache.
-@item vrp1-mode
-Specifies the mode VRP pass 1 should operate in.
-
-@item vrp2-mode
-Specifies the mode VRP pass 2 should operate in.
-
@item ranger-debug
Specifies the type of debug output to be issued for ranges.
diff --git a/gcc/params.opt b/gcc/params.opt
--- a/gcc/params.opt
+++ b/gcc/params.opt
@@ -746,10 +746,6 @@ Max. size of var tracking hash tables.
Common Joined UInteger Var(param_max_find_base_term_values) Init(200) Param Optimization
Maximum number of VALUEs handled during a single find_base_term call.
--param=max-vrp-switch-assertions=
-Common Joined UInteger Var(param_max_vrp_switch_assertions) Init(10) Param Optimization
-Maximum number of assertions to add along the default edge of a switch statement during VRP.
-
-param=min-crossjump-insns=
Common Joined UInteger Var(param_min_crossjump_insns) Init(5) IntegerRange(1, 65536) Param Optimization
The minimum number of matching instructions to consider for crossjumping.
@@ -1165,21 +1161,4 @@ The maximum factor which the loop vectorizer applies to the cost of statements i
Common Joined UInteger Var(param_vect_induction_float) Init(1) IntegerRage(0, 1) Param Optimization
Enable loop vectorization of floating point inductions.
--param=vrp1-mode=
-Common Joined Var(param_vrp1_mode) Enum(vrp_mode) Init(VRP_MODE_RANGER) Param Optimization
---param=vrp1-mode=[vrp|ranger] Specifies the mode VRP1 should operate in.
-
--param=vrp2-mode=
-Common Joined Var(param_vrp2_mode) Enum(vrp_mode) Init(VRP_MODE_RANGER) Param Optimization
---param=vrp2-mode=[vrp|ranger] Specifies the mode VRP2 should operate in.
-
-Enum
-Name(vrp_mode) Type(enum vrp_mode) UnknownError(unknown vrp mode %qs)
-
-EnumValue
-Enum(vrp_mode) String(vrp) Value(VRP_MODE_VRP)
-
-EnumValue
-Enum(vrp_mode) String(ranger) Value(VRP_MODE_RANGER)
-
; This comment is to ensure we retain the blank line above.
diff --git a/gcc/range-op.cc b/gcc/range-op.cc
--- a/gcc/range-op.cc
+++ b/gcc/range-op.cc
@@ -3082,6 +3082,34 @@ set_nonzero_range_from_mask (irange &r, tree type, const irange &lhs)
r.set_varying (type);
}
+/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
+ (otherwise return VAL). VAL and MASK must be zero-extended for
+ precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
+ (to transform signed values into unsigned) and at the end xor
+ SGNBIT back. */
+
+wide_int
+masked_increment (const wide_int &val_in, const wide_int &mask,
+ const wide_int &sgnbit, unsigned int prec)
+{
+ wide_int bit = wi::one (prec), res;
+ unsigned int i;
+
+ wide_int val = val_in ^ sgnbit;
+ for (i = 0; i < prec; i++, bit += bit)
+ {
+ res = mask;
+ if ((res & bit) == 0)
+ continue;
+ res = bit - 1;
+ res = wi::bit_and_not (val + bit, res);
+ res &= mask;
+ if (wi::gtu_p (res, val))
+ return res ^ sgnbit;
+ }
+ return val ^ sgnbit;
+}
+
// This was shamelessly stolen from register_edge_assert_for_2 and
// adjusted to work with iranges.
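For readers of the range-op.cc hunk above: masked_increment walks MASK's set bits from least to most significant and returns the smallest value greater than VAL whose set bits all lie within MASK (or VAL itself if no such value fits in PREC bits), with SGNBIT used to bias signed values into unsigned order. The following is a standalone sketch, not part of the patch: it mirrors the same search with plain uint64_t arithmetic instead of GCC's wide_int (masked_increment_u64 and the demo values are illustrative names chosen here, and PREC is assumed well below 64 so the VAL + BIT addition cannot wrap).

/* Standalone illustration only -- not part of the patch.  A plain
   uint64_t version of the search masked_increment performs, plus one
   worked example.  Assumes PREC < 64 and VAL, MASK, SGNBIT < 2**PREC.  */

#include <cstdint>
#include <cstdio>

static uint64_t
masked_increment_u64 (uint64_t val, uint64_t mask, uint64_t sgnbit,
                      unsigned int prec)
{
  val ^= sgnbit;
  for (unsigned int i = 0; i < prec; i++)
    {
      uint64_t bit = (uint64_t) 1 << i;
      if ((mask & bit) == 0)
        continue;
      /* Round VAL + BIT down to a multiple of BIT, keep only MASK bits.  */
      uint64_t res = (val + bit) & ~(bit - 1) & mask;
      if (res > val)
        return res ^ sgnbit;
    }
  return val ^ sgnbit;
}

int
main ()
{
  /* mask = 0b11100: the smallest value above 5 whose set bits all lie
     within the mask is 8 (0b01000), so this prints 8.  */
  printf ("%llu\n",
          (unsigned long long) masked_increment_u64 (5, 0x1c, 0, 8));
  return 0;
}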
diff --git a/gcc/tree-vrp.cc b/gcc/tree-vrp.cc
--- a/gcc/tree-vrp.cc
+++ b/gcc/tree-vrp.cc
@@ -229,109 +229,6 @@ remove_unreachable::remove_and_update_globals (bool final_p)
return change;
}
-/* Set of SSA names found live during the RPO traversal of the function
- for still active basic-blocks. */
-class live_names
-{
-public:
- live_names ();
- ~live_names ();
- void set (tree, basic_block);
- void clear (tree, basic_block);
- void merge (basic_block dest, basic_block src);
- bool live_on_block_p (tree, basic_block);
- bool live_on_edge_p (tree, edge);
- bool block_has_live_names_p (basic_block);
- void clear_block (basic_block);
-
-private:
- sbitmap *live;
- unsigned num_blocks;
- void init_bitmap_if_needed (basic_block);
-};
-
-void
-live_names::init_bitmap_if_needed (basic_block bb)
-{
- unsigned i = bb->index;
- if (!live[i])
- {
- live[i] = sbitmap_alloc (num_ssa_names);
- bitmap_clear (live[i]);
- }
-}
-
-bool
-live_names::block_has_live_names_p (basic_block bb)
-{
- unsigned i = bb->index;
- return live[i] && bitmap_empty_p (live[i]);
-}
-
-void
-live_names::clear_block (basic_block bb)
-{
- unsigned i = bb->index;
- if (live[i])
- {
- sbitmap_free (live[i]);
- live[i] = NULL;
- }
-}
-
-void
-live_names::merge (basic_block dest, basic_block src)
-{
- init_bitmap_if_needed (dest);
- init_bitmap_if_needed (src);
- bitmap_ior (live[dest->index], live[dest->index], live[src->index]);
-}
-
-void
-live_names::set (tree name, basic_block bb)
-{
- init_bitmap_if_needed (bb);
- bitmap_set_bit (live[bb->index], SSA_NAME_VERSION (name));
-}
-
-void
-live_names::clear (tree name, basic_block bb)
-{
- unsigned i = bb->index;
- if (live[i])
- bitmap_clear_bit (live[i], SSA_NAME_VERSION (name));
-}
-
-live_names::live_names ()
-{
- num_blocks = last_basic_block_for_fn (cfun);
- live = XCNEWVEC (sbitmap, num_blocks);
-}
-
-live_names::~live_names ()
-{
- for (unsigned i = 0; i < num_blocks; ++i)
- if (live[i])
- sbitmap_free (live[i]);
- XDELETEVEC (live);
-}
-
-bool
-live_names::live_on_block_p (tree name, basic_block bb)
-{
- return (live[bb->index]
- && bitmap_bit_p (live[bb->index], SSA_NAME_VERSION (name)));
-}
-
-/* Return true if the SSA name NAME is live on the edge E. */
-
-bool
-live_names::live_on_edge_p (tree name, edge e)
-{
- return live_on_block_p (name, e->dest);
-}
-
-
/* VR_TYPE describes a range with mininum value *MIN and maximum
value *MAX. Restrict the range to the set of values that have
no bits set outside NONZERO_BITS. Update *MIN and *MAX and
@@ -417,7 +314,7 @@ range_int_cst_p (const value_range *vr)
otherwise. We only handle additive operations and set NEG to true if the
symbol is negated and INV to the invariant part, if any. */
-tree
+static tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
bool neg_;
@@ -468,24 +365,6 @@ get_single_symbol (tree t, bool *neg, tree *inv)
return t;
}
-/* The reverse operation: build a symbolic expression with TYPE
- from symbol SYM, negated according to NEG, and invariant INV. */
-
-static tree
-build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
-{
- const bool pointer_p = POINTER_TYPE_P (type);
- tree t = sym;
-
- if (neg)
- t = build1 (NEGATE_EXPR, type, t);
-
- if (integer_zerop (inv))
- return t;
-
- return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
-}
-
/* Return
1 if VAL < VAL2
0 if !(VAL < VAL2)
@@ -697,411 +576,6 @@ compare_values (tree val1, tree val2)
return compare_values_warnv (val1, val2, &sop);
}
-/* If BOUND will include a symbolic bound, adjust it accordingly,
- otherwise leave it as is.
-
- CODE is the original operation that combined the bounds (PLUS_EXPR
- or MINUS_EXPR).
-
- TYPE is the type of the original operation.
-
- SYM_OPn is the symbolic for OPn if it has a symbolic.
-
- NEG_OPn is TRUE if the OPn was negated. */
-
-static void
-adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
- tree sym_op0, tree sym_op1,
- bool neg_op0, bool neg_op1)
-{
- bool minus_p = (code == MINUS_EXPR);
- /* If the result bound is constant, we're done; otherwise, build the
- symbolic lower bound. */
- if (sym_op0 == sym_op1)
- ;
- else if (sym_op0)
- bound = build_symbolic_expr (type, sym_op0,
- neg_op0, bound);
- else if (sym_op1)
- {
- /* We may not negate if that might introduce
- undefined overflow. */
- if (!minus_p
- || neg_op1
- || TYPE_OVERFLOW_WRAPS (type))
- bound = build_symbolic_expr (type, sym_op1,
- neg_op1 ^ minus_p, bound);
- else
- bound = NULL_TREE;
- }
-}
-
-/* Combine OP1 and OP1, which are two parts of a bound, into one wide
- int bound according to CODE. CODE is the operation combining the
- bound (either a PLUS_EXPR or a MINUS_EXPR).
-
- TYPE is the type of the combine operation.
-
- WI is the wide int to store the result.
-
- OVF is -1 if an underflow occurred, +1 if an overflow occurred or 0
- if over/underflow occurred. */
-
-static void
-combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
- tree type, tree op0, tree op1)
-{
- bool minus_p = (code == MINUS_EXPR);
- const signop sgn = TYPE_SIGN (type);
- const unsigned int prec = TYPE_PRECISION (type);
-
- /* Combine the bounds, if any. */
- if (op0 && op1)
- {
- if (minus_p)
- wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
- else
- wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
- }
- else if (op0)
- wi = wi::to_wide (op0);
- else if (op1)
- {
- if (minus_p)
- wi = wi::neg (wi::to_wide (op1), &ovf);
- else
- wi = wi::to_wide (op1);
- }
- else
- wi = wi::shwi (0, prec);
-}
-
-/* Given a range in [WMIN, WMAX], adjust it for possible overflow and
- put the result in VR.
-
- TYPE is the type of the range.
-
- MIN_OVF and MAX_OVF indicate what type of overflow, if any,
- occurred while originally calculating WMIN or WMAX. -1 indicates
- underflow. +1 indicates overflow. 0 indicates neither. */
-
-static void
-set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
- tree type,
- const wide_int &wmin, const wide_int &wmax,
- wi::overflow_type min_ovf,
- wi::overflow_type max_ovf)
-{
- const signop sgn = TYPE_SIGN (type);
- const unsigned int prec = TYPE_PRECISION (type);
-
- /* For one bit precision if max < min, then the swapped
- range covers all values. */
- if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
- {
- kind = VR_VARYING;
- return;
- }
-
- if (TYPE_OVERFLOW_WRAPS (type))
- {
- /* If overflow wraps, truncate the values and adjust the
- range kind and bounds appropriately. */
- wide_int tmin = wide_int::from (wmin, prec, sgn);
- wide_int tmax = wide_int::from (wmax, prec, sgn);
- if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
- {
- /* If the limits are swapped, we wrapped around and cover
- the entire range. */
- if (wi::gt_p (tmin, tmax, sgn))
- kind = VR_VARYING;
- else
- {
- kind = VR_RANGE;
- /* No overflow or both overflow or underflow. The
- range kind stays VR_RANGE. */
- min = wide_int_to_tree (type, tmin);
- max = wide_int_to_tree (type, tmax);
- }
- return;
- }
- else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
- || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
- {
- /* Min underflow or max overflow. The range kind
- changes to VR_ANTI_RANGE. */
- bool covers = false;
- wide_int tem = tmin;
- tmin = tmax + 1;
- if (wi::cmp (tmin, tmax, sgn) < 0)
- covers = true;
- tmax = tem - 1;
- if (wi::cmp (tmax, tem, sgn) > 0)
- covers = true;
- /* If the anti-range would cover nothing, drop to varying.
- Likewise if the anti-range bounds are outside of the
- types values. */
- if (covers || wi::cmp (tmin, tmax, sgn) > 0)
- {
- kind = VR_VARYING;
- return;
- }
- kind = VR_ANTI_RANGE;
- min = wide_int_to_tree (type, tmin);
- max = wide_int_to_tree (type, tmax);
- return;
- }
- else
- {
- /* Other underflow and/or overflow, drop to VR_VARYING. */
- kind = VR_VARYING;
- return;
- }
- }
- else
- {
- /* If overflow does not wrap, saturate to the types min/max
- value. */
- wide_int type_min = wi::min_value (prec, sgn);
- wide_int type_max = wi::max_value (prec, sgn);
- kind = VR_RANGE;
- if (min_ovf == wi::OVF_UNDERFLOW)
- min = wide_int_to_tree (type, type_min);
- else if (min_ovf == wi::OVF_OVERFLOW)
- min = wide_int_to_tree (type, type_max);
- else
- min = wide_int_to_tree (type, wmin);
-
- if (max_ovf == wi::OVF_UNDERFLOW)
- max = wide_int_to_tree (type, type_min);
- else if (max_ovf == wi::OVF_OVERFLOW)
- max = wide_int_to_tree (type, type_max);
- else
- max = wide_int_to_tree (type, wmax);
- }
-}
-
-/* Fold two value range's of a POINTER_PLUS_EXPR into VR. */
-
-static void
-extract_range_from_pointer_plus_expr (value_range *vr,
- enum tree_code code,
- tree expr_type,
- const value_range *vr0,
- const value_range *vr1)
-{
- gcc_checking_assert (POINTER_TYPE_P (expr_type)
- && code == POINTER_PLUS_EXPR);
- /* For pointer types, we are really only interested in asserting
- whether the expression evaluates to non-NULL.
- With -fno-delete-null-pointer-checks we need to be more
- conservative. As some object might reside at address 0,
- then some offset could be added to it and the same offset
- subtracted again and the result would be NULL.
- E.g.
- static int a[12]; where &a[0] is NULL and
- ptr = &a[6];
- ptr -= 6;
- ptr will be NULL here, even when there is POINTER_PLUS_EXPR
- where the first range doesn't include zero and the second one
- doesn't either. As the second operand is sizetype (unsigned),
- consider all ranges where the MSB could be set as possible
- subtractions where the result might be NULL. */
- if ((!range_includes_zero_p (vr0)
- || !range_includes_zero_p (vr1))
- && !TYPE_OVERFLOW_WRAPS (expr_type)
- && (flag_delete_null_pointer_checks
- || (range_int_cst_p (vr1)
- && !tree_int_cst_sign_bit (vr1->max ()))))
- vr->set_nonzero (expr_type);
- else if (vr0->zero_p () && vr1->zero_p ())
- vr->set_zero (expr_type);
- else
- vr->set_varying (expr_type);
-}
-
-/* Extract range information from a PLUS/MINUS_EXPR and store the
- result in *VR. */
-
-static void
-extract_range_from_plus_minus_expr (value_range *vr,
- enum tree_code code,
- tree expr_type,
- const value_range *vr0_,
- const value_range *vr1_)
-{
- gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
-
- value_range vr0 = *vr0_, vr1 = *vr1_;
- value_range vrtem0, vrtem1;
-
- /* Now canonicalize anti-ranges to ranges when they are not symbolic
- and express ~[] op X as ([]' op X) U ([]'' op X). */
- if (vr0.kind () == VR_ANTI_RANGE
- && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
- {
- extract_range_from_plus_minus_expr (vr, code, expr_type, &vrtem0, vr1_);
- if (!vrtem1.undefined_p ())
- {
- value_range vrres;
- extract_range_from_plus_minus_expr (&vrres, code, expr_type,
- &vrtem1, vr1_);
- vr->union_ (vrres);
- }
- return;
- }
- /* Likewise for X op ~[]. */
- if (vr1.kind () == VR_ANTI_RANGE
- && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
- {
- extract_range_from_plus_minus_expr (vr, code, expr_type, vr0_, &vrtem0);
- if (!vrtem1.undefined_p ())
- {
- value_range vrres;
- extract_range_from_plus_minus_expr (&vrres, code, expr_type,
- vr0_, &vrtem1);
- vr->union_ (vrres);
- }
- return;
- }
-
- value_range_kind kind;
- value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
- tree vr0_min = vr0.min (), vr0_max = vr0.max ();
- tree vr1_min = vr1.min (), vr1_max = vr1.max ();
- tree min = NULL_TREE, max = NULL_TREE;
-
- /* This will normalize things such that calculating
- [0,0] - VR_VARYING is not dropped to varying, but is
- calculated as [MIN+1, MAX]. */
- if (vr0.varying_p ())
- {
- vr0_kind = VR_RANGE;
- vr0_min = vrp_val_min (expr_type);
- vr0_max = vrp_val_max (expr_type);
- }
- if (vr1.varying_p ())
- {
- vr1_kind = VR_RANGE;
- vr1_min = vrp_val_min (expr_type);
- vr1_max = vrp_val_max (expr_type);
- }
-
- const bool minus_p = (code == MINUS_EXPR);
- tree min_op0 = vr0_min;
- tree min_op1 = minus_p ? vr1_max : vr1_min;
- tree max_op0 = vr0_max;
- tree max_op1 = minus_p ? vr1_min : vr1_max;
- tree sym_min_op0 = NULL_TREE;
- tree sym_min_op1 = NULL_TREE;
- tree sym_max_op0 = NULL_TREE;
- tree sym_max_op1 = NULL_TREE;
- bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
-
- neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
-
- /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
- single-symbolic ranges, try to compute the precise resulting range,
- but only if we know that this resulting range will also be constant
- or single-symbolic. */
- if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
- && (TREE_CODE (min_op0) == INTEGER_CST
- || (sym_min_op0
- = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
- && (TREE_CODE (min_op1) == INTEGER_CST
- || (sym_min_op1
- = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
- && (!(sym_min_op0 && sym_min_op1)
- || (sym_min_op0 == sym_min_op1
- && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
- && (TREE_CODE (max_op0) == INTEGER_CST
- || (sym_max_op0
- = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
- && (TREE_CODE (max_op1) == INTEGER_CST
- || (sym_max_op1
- = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
- && (!(sym_max_op0 && sym_max_op1)
- || (sym_max_op0 == sym_max_op1
- && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
- {
- wide_int wmin, wmax;
- wi::overflow_type min_ovf = wi::OVF_NONE;
- wi::overflow_type max_ovf = wi::OVF_NONE;
-
- /* Build the bounds. */
- combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
- combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
-
- /* If the resulting range will be symbolic, we need to eliminate any
- explicit or implicit overflow introduced in the above computation
- because compare_values could make an incorrect use of it. That's
- why we require one of the ranges to be a singleton. */
- if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1)
- && ((bool)min_ovf || (bool)max_ovf
- || (min_op0 != max_op0 && min_op1 != max_op1)))
- {
- vr->set_varying (expr_type);
- return;
- }
-
- /* Adjust the range for possible overflow. */
- set_value_range_with_overflow (kind, min, max, expr_type,
- wmin, wmax, min_ovf, max_ovf);
- if (kind == VR_VARYING)
- {
- vr->set_varying (expr_type);
- return;
- }
-
- /* Build the symbolic bounds if needed. */
- adjust_symbolic_bound (min, code, expr_type,
- sym_min_op0, sym_min_op1,
- neg_min_op0, neg_min_op1);
- adjust_symbolic_bound (max, code, expr_type,
- sym_max_op0, sym_max_op1,
- neg_max_op0, neg_max_op1);
- }
- else
- {
- /* For other cases, for example if we have a PLUS_EXPR with two
- VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
- to compute a precise range for such a case.
- ??? General even mixed range kind operations can be expressed
- by for example transforming ~[3, 5] + [1, 2] to range-only
- operations and a union primitive:
- [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
- [-INF+1, 4] U [6, +INF(OVF)]
- though usually the union is not exactly representable with
- a single range or anti-range as the above is
- [-INF+1, +INF(OVF)] intersected with ~[5, 5]
- but one could use a scheme similar to equivalences for this. */
- vr->set_varying (expr_type);
- return;
- }
-
- /* If either MIN or MAX overflowed, then set the resulting range to
- VARYING. */
- if (min == NULL_TREE
- || TREE_OVERFLOW_P (min)
- || max == NULL_TREE
- || TREE_OVERFLOW_P (max))
- {
- vr->set_varying (expr_type);
- return;
- }
-
- int cmp = compare_values (min, max);
- if (cmp == -2 || cmp == 1)
- {
- /* If the new range has its limits swapped around (MIN > MAX),
- then the operation caused one of them to wrap around, mark
- the new range VARYING. */
- vr->set_varying (expr_type);
- }
- else
- vr->set (min, max, kind);
-}
-
/* If the types passed are supported, return TRUE, otherwise set VR to
VARYING and return FALSE. */
@@ -1134,89 +608,6 @@ defined_ranges_p (value_range *vr,
return true;
}
-static value_range
-drop_undefines_to_varying (const value_range *vr, tree expr_type)
-{
- if (vr->undefined_p ())
- return value_range (expr_type);
- else
- return *vr;
-}
-
-/* If any operand is symbolic, perform a binary operation on them and
- return TRUE, otherwise return FALSE. */
-
-static bool
-range_fold_binary_symbolics_p (value_range *vr,
- tree_code code,
- tree expr_type,
- const value_range *vr0_,
- const value_range *vr1_)
-{
- if (vr0_->symbolic_p () || vr1_->symbolic_p ())
- {
- value_range vr0 = drop_undefines_to_varying (vr0_, expr_type);
- value_range vr1 = drop_undefines_to_varying (vr1_, expr_type);
- if ((code == PLUS_EXPR || code == MINUS_EXPR))
- {
- extract_range_from_plus_minus_expr (vr, code, expr_type,
- &vr0, &vr1);
- return true;
- }
- if (POINTER_TYPE_P (expr_type) && code == POINTER_PLUS_EXPR)
- {
- extract_range_from_pointer_plus_expr (vr, code, expr_type,
- &vr0, &vr1);
- return true;
- }
- range_op_handler op (code, expr_type);
- if (!op)
- vr->set_varying (expr_type);
- vr0.normalize_symbolics ();
- vr1.normalize_symbolics ();
- return op.fold_range (*vr, expr_type, vr0, vr1);
- }
- return false;
-}
-
-/* If operand is symbolic, perform a unary operation on it and return
- TRUE, otherwise return FALSE. */
-
-static bool
-range_fold_unary_symbolics_p (value_range *vr,
- tree_code code,
- tree expr_type,
- const value_range *vr0)
-{
- if (vr0->symbolic_p ())
- {
- if (code == NEGATE_EXPR)
- {
- /* -X is simply 0 - X. */
- value_range zero;
- zero.set_zero (vr0->type ());
- range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &zero, vr0);
- return true;
- }
- if (code == BIT_NOT_EXPR)
- {
- /* ~X is simply -1 - X. */
- value_range minusone;
- tree t = build_int_cst (vr0->type (), -1);
- minusone.set (t, t);
- range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &minusone, vr0);
- return true;
- }
- range_op_handler op (code, expr_type);
- if (!op)
- vr->set_varying (expr_type);
- value_range vr0_cst (*vr0);
- vr0_cst.normalize_symbolics ();
- return op.fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
- }
- return false;
-}
-
/* Perform a binary operation on a pair of ranges. */
void
@@ -1236,9 +627,6 @@ range_fold_binary_expr (value_range *vr,
return;
}
- if (range_fold_binary_symbolics_p (vr, code, expr_type, vr0_, vr1_))
- return;
-
value_range vr0 (*vr0_);
value_range vr1 (*vr1_);
if (vr0.undefined_p ())
@@ -1269,245 +657,42 @@ range_fold_unary_expr (value_range *vr,
return;
}
- if (range_fold_unary_symbolics_p (vr, code, expr_type, vr0))
- return;
-
value_range vr0_cst (*vr0);
vr0_cst.normalize_addresses ();
if (!op.fold_range (*vr, expr_type, vr0_cst, value_range (expr_type)))
vr->set_varying (expr_type);
}
-/* If the range of values taken by OP can be inferred after STMT executes,
- return the comparison code (COMP_CODE_P) and value (VAL_P) that
- describes the inferred range. Return true if a range could be
- inferred. */
-
-bool
-infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
-{
- *val_p = NULL_TREE;
- *comp_code_p = ERROR_MARK;
-
- /* Do not attempt to infer anything in names that flow through
- abnormal edges. */
- if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
- return false;
-
- /* If STMT is the last statement of a basic block with no normal
- successors, there is no point inferring anything about any of its
- operands. We would not be able to find a proper insertion point
- for the assertion, anyway. */
- if (stmt_ends_bb_p (stmt))
- {
- edge_iterator ei;
- edge e;
-
- FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
- if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
- break;
- if (e == NULL)
- return false;
- }
-
- if (infer_nonnull_range (stmt, op))
- {
- *val_p = build_int_cst (TREE_TYPE (op), 0);
- *comp_code_p = NE_EXPR;
- return true;
- }
+/* Helper for overflow_comparison_p
- return false;
-}
+ OP0 CODE OP1 is a comparison. Examine the comparison and potentially
+ OP1's defining statement to see if it ultimately has the form
+ OP0 CODE (OP0 PLUS INTEGER_CST)
-/* Dump assert_info structure. */
+ If so, return TRUE indicating this is an overflow test and store into
+ *NEW_CST an updated constant that can be used in a narrowed range test.
-void
-dump_assert_info (FILE *file, const assert_info &assert)
-{
- fprintf (file, "Assert for: ");
- print_generic_expr (file, assert.name);
- fprintf (file, "\n\tPREDICATE: expr=[");
- print_generic_expr (file, assert.expr);
- fprintf (file, "] %s ", get_tree_code_name (assert.comp_code));
- fprintf (file, "val=[");
- print_generic_expr (file, assert.val);
- fprintf (file, "]\n\n");
-}
+ REVERSED indicates if the comparison was originally:
-DEBUG_FUNCTION void
-debug (const assert_info &assert)
-{
- dump_assert_info (stderr, assert);
-}
+ OP1 CODE' OP0.
-/* Dump a vector of assert_info's. */
+ This affects how we build the updated constant. */
-void
-dump_asserts_info (FILE *file, const vec<assert_info> &asserts)
+static bool
+overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
+ bool follow_assert_exprs, bool reversed, tree *new_cst)
{
- for (unsigned i = 0; i < asserts.length (); ++i)
+ /* See if this is a relational operation between two SSA_NAMES with
+ unsigned, overflow wrapping values. If so, check it more deeply. */
+ if ((code == LT_EXPR || code == LE_EXPR
+ || code == GE_EXPR || code == GT_EXPR)
+ && TREE_CODE (op0) == SSA_NAME
+ && TREE_CODE (op1) == SSA_NAME
+ && INTEGRAL_TYPE_P (TREE_TYPE (op0))
+ && TYPE_UNSIGNED (TREE_TYPE (op0))
+ && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
{
- dump_assert_info (file, asserts[i]);
- fprintf (file, "\n");
- }
-}
-
-DEBUG_FUNCTION void
-debug (const vec<assert_info> &asserts)
-{
- dump_asserts_info (stderr, asserts);
-}
-
-/* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
-
-static void
-add_assert_info (vec<assert_info> &asserts,
- tree name, tree expr, enum tree_code comp_code, tree val)
-{
- assert_info info;
- info.comp_code = comp_code;
- info.name = name;
- if (TREE_OVERFLOW_P (val))
- val = drop_tree_overflow (val);
- info.val = val;
- info.expr = expr;
- asserts.safe_push (info);
- if (dump_enabled_p ())
- dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
- "Adding assert for %T from %T %s %T\n",
- name, expr, op_symbol_code (comp_code), val);
-}
-
-/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
- Extract a suitable test code and value and store them into *CODE_P and
- *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
-
- If no extraction was possible, return FALSE, otherwise return TRUE.
-
- If INVERT is true, then we invert the result stored into *CODE_P. */
-
-static bool
-extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
- tree cond_op0, tree cond_op1,
- bool invert, enum tree_code *code_p,
- tree *val_p)
-{
- enum tree_code comp_code;
- tree val;
-
- /* Otherwise, we have a comparison of the form NAME COMP VAL
- or VAL COMP NAME. */
- if (name == cond_op1)
- {
- /* If the predicate is of the form VAL COMP NAME, flip
- COMP around because we need to register NAME as the
- first operand in the predicate. */
- comp_code = swap_tree_comparison (cond_code);
- val = cond_op0;
- }
- else if (name == cond_op0)
- {
- /* The comparison is of the form NAME COMP VAL, so the
- comparison code remains unchanged. */
- comp_code = cond_code;
- val = cond_op1;
- }
- else
- gcc_unreachable ();
-
- /* Invert the comparison code as necessary. */
- if (invert)
- comp_code = invert_tree_comparison (comp_code, 0);
-
- /* VRP only handles integral and pointer types. */
- if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
- && ! POINTER_TYPE_P (TREE_TYPE (val)))
- return false;
-
- /* Do not register always-false predicates.
- FIXME: this works around a limitation in fold() when dealing with
- enumerations. Given 'enum { N1, N2 } x;', fold will not
- fold 'if (x > N2)' to 'if (0)'. */
- if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
- && INTEGRAL_TYPE_P (TREE_TYPE (val)))
- {
- tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
- tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
-
- if (comp_code == GT_EXPR
- && (!max
- || compare_values (val, max) == 0))
- return false;
-
- if (comp_code == LT_EXPR
- && (!min
- || compare_values (val, min) == 0))
- return false;
- }
- *code_p = comp_code;
- *val_p = val;
- return true;
-}
-
-/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
- (otherwise return VAL). VAL and MASK must be zero-extended for
- precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
- (to transform signed values into unsigned) and at the end xor
- SGNBIT back. */
-
-wide_int
-masked_increment (const wide_int &val_in, const wide_int &mask,
- const wide_int &sgnbit, unsigned int prec)
-{
- wide_int bit = wi::one (prec), res;
- unsigned int i;
-
- wide_int val = val_in ^ sgnbit;
- for (i = 0; i < prec; i++, bit += bit)
- {
- res = mask;
- if ((res & bit) == 0)
- continue;
- res = bit - 1;
- res = wi::bit_and_not (val + bit, res);
- res &= mask;
- if (wi::gtu_p (res, val))
- return res ^ sgnbit;
- }
- return val ^ sgnbit;
-}
-
-/* Helper for overflow_comparison_p
-
- OP0 CODE OP1 is a comparison. Examine the comparison and potentially
- OP1's defining statement to see if it ultimately has the form
- OP0 CODE (OP0 PLUS INTEGER_CST)
-
- If so, return TRUE indicating this is an overflow test and store into
- *NEW_CST an updated constant that can be used in a narrowed range test.
-
- REVERSED indicates if the comparison was originally:
-
- OP1 CODE' OP0.
-
- This affects how we build the updated constant. */
-
-static bool
-overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
- bool follow_assert_exprs, bool reversed, tree *new_cst)
-{
- /* See if this is a relational operation between two SSA_NAMES with
- unsigned, overflow wrapping values. If so, check it more deeply. */
- if ((code == LT_EXPR || code == LE_EXPR
- || code == GE_EXPR || code == GT_EXPR)
- && TREE_CODE (op0) == SSA_NAME
- && TREE_CODE (op1) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (op0))
- && TYPE_UNSIGNED (TREE_TYPE (op0))
- && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
- {
- gimple *op1_def = SSA_NAME_DEF_STMT (op1);
+ gimple *op1_def = SSA_NAME_DEF_STMT (op1);
/* If requested, follow any ASSERT_EXPRs backwards for OP1. */
if (follow_assert_exprs)
@@ -1589,2844 +774,236 @@ overflow_comparison_p (tree_code code, tree name, tree val,
use_equiv_p, true, new_cst);
}
+/* Handle
+ _4 = x_3 & 31;
+ if (_4 != 0)
+ goto <bb 6>;
+ else
+ goto <bb 7>;
+ <bb 6>:
+ __builtin_unreachable ();
+ <bb 7>:
+ x_5 = ASSERT_EXPR <x_3, ...>;
+ If x_3 has no other immediate uses (checked by caller),
+ var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
+ from the non-zero bitmask. */
-/* Try to register an edge assertion for SSA name NAME on edge E for
- the condition COND contributing to the conditional jump pointed to by BSI.
- Invert the condition COND if INVERT is true. */
-
-static void
-register_edge_assert_for_2 (tree name, edge e,
- enum tree_code cond_code,
- tree cond_op0, tree cond_op1, bool invert,
- vec<assert_info> &asserts)
+void
+maybe_set_nonzero_bits (edge e, tree var)
{
- tree val;
- enum tree_code comp_code;
+ basic_block cond_bb = e->src;
+ gimple *stmt = last_stmt (cond_bb);
+ tree cst;
- if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
- cond_op0,
- cond_op1,
- invert, &comp_code, &val))
+ if (stmt == NULL
+ || gimple_code (stmt) != GIMPLE_COND
+ || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
+ ? EQ_EXPR : NE_EXPR)
+ || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
+ || !integer_zerop (gimple_cond_rhs (stmt)))
return;
- /* Queue the assert. */
- tree x;
- if (overflow_comparison_p (comp_code, name, val, false, &x))
- {
- enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
- ? GT_EXPR : LE_EXPR);
- add_assert_info (asserts, name, name, new_code, x);
- }
- add_assert_info (asserts, name, name, comp_code, val);
-
- /* In the case of NAME <= CST and NAME being defined as
- NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
- and NAME2 <= CST - CST2. We can do the same for NAME > CST.
- This catches range and anti-range tests. */
- if ((comp_code == LE_EXPR
- || comp_code == GT_EXPR)
- && TREE_CODE (val) == INTEGER_CST
- && TYPE_UNSIGNED (TREE_TYPE (val)))
+ stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
+ if (!is_gimple_assign (stmt)
+ || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
+ || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
+ return;
+ if (gimple_assign_rhs1 (stmt) != var)
{
- gimple *def_stmt = SSA_NAME_DEF_STMT (name);
- tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
+ gimple *stmt2;
- /* Extract CST2 from the (optional) addition. */
- if (is_gimple_assign (def_stmt)
- && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
- {
- name2 = gimple_assign_rhs1 (def_stmt);
- cst2 = gimple_assign_rhs2 (def_stmt);
- if (TREE_CODE (name2) == SSA_NAME
- && TREE_CODE (cst2) == INTEGER_CST)
- def_stmt = SSA_NAME_DEF_STMT (name2);
- }
+ if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
+ return;
+ stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
+ if (!gimple_assign_cast_p (stmt2)
+ || gimple_assign_rhs1 (stmt2) != var
+ || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
+ || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
+ != TYPE_PRECISION (TREE_TYPE (var))))
+ return;
+ }
+ cst = gimple_assign_rhs2 (stmt);
+ set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
+ wi::to_wide (cst)));
+}
- /* Extract NAME2 from the (optional) sign-changing cast. */
- if (gassign *ass = dyn_cast <gassign *> (def_stmt))
- {
- if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (ass))
- && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (ass)))
- && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (ass)))
- == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (ass)))))
- name3 = gimple_assign_rhs1 (ass);
- }
+/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
+ that includes the value VAL. The search is restricted to the range
+ [START_IDX, n - 1] where n is the size of VEC.
- /* If name3 is used later, create an ASSERT_EXPR for it. */
- if (name3 != NULL_TREE
- && TREE_CODE (name3) == SSA_NAME
- && (cst2 == NULL_TREE
- || TREE_CODE (cst2) == INTEGER_CST)
- && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
- {
- tree tmp;
+ If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
+ returned.
- /* Build an expression for the range test. */
- tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
- if (cst2 != NULL_TREE)
- tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
- add_assert_info (asserts, name3, tmp, comp_code, val);
- }
+ If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
+ it is placed in IDX and false is returned.
- /* If name2 is used later, create an ASSERT_EXPR for it. */
- if (name2 != NULL_TREE
- && TREE_CODE (name2) == SSA_NAME
- && TREE_CODE (cst2) == INTEGER_CST
- && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
- {
- tree tmp;
-
- /* Build an expression for the range test. */
- tmp = name2;
- if (TREE_TYPE (name) != TREE_TYPE (name2))
- tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
- if (cst2 != NULL_TREE)
- tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
- add_assert_info (asserts, name2, tmp, comp_code, val);
- }
- }
+ If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
+ returned. */
- /* In the case of post-in/decrement tests like if (i++) ... and uses
- of the in/decremented value on the edge the extra name we want to
- assert for is not on the def chain of the name compared. Instead
- it is in the set of use stmts.
- Similar cases happen for conversions that were simplified through
- fold_{sign_changed,widened}_comparison. */
- if ((comp_code == NE_EXPR
- || comp_code == EQ_EXPR)
- && TREE_CODE (val) == INTEGER_CST)
- {
- imm_use_iterator ui;
- gimple *use_stmt;
- FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
- {
- if (!is_gimple_assign (use_stmt))
- continue;
+bool
+find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
+{
+ size_t n = gimple_switch_num_labels (stmt);
+ size_t low, high;
- /* Cut off to use-stmts that are dominating the predecessor. */
- if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
- continue;
+ /* Find case label for minimum of the value range or the next one.
+ At each iteration we are searching in [low, high - 1]. */
- tree name2 = gimple_assign_lhs (use_stmt);
- if (TREE_CODE (name2) != SSA_NAME)
- continue;
+ for (low = start_idx, high = n; high != low; )
+ {
+ tree t;
+ int cmp;
+ /* Note that i != high, so we never ask for n. */
+ size_t i = (high + low) / 2;
+ t = gimple_switch_label (stmt, i);
- enum tree_code code = gimple_assign_rhs_code (use_stmt);
- tree cst;
- if (code == PLUS_EXPR
- || code == MINUS_EXPR)
- {
- cst = gimple_assign_rhs2 (use_stmt);
- if (TREE_CODE (cst) != INTEGER_CST)
- continue;
- cst = int_const_binop (code, val, cst);
- }
- else if (CONVERT_EXPR_CODE_P (code))
- {
- /* For truncating conversions we cannot record
- an inequality. */
- if (comp_code == NE_EXPR
- && (TYPE_PRECISION (TREE_TYPE (name2))
- < TYPE_PRECISION (TREE_TYPE (name))))
- continue;
- cst = fold_convert (TREE_TYPE (name2), val);
- }
- else
- continue;
+ /* Cache the result of comparing CASE_LOW and val. */
+ cmp = tree_int_cst_compare (CASE_LOW (t), val);
- if (TREE_OVERFLOW_P (cst))
- cst = drop_tree_overflow (cst);
- add_assert_info (asserts, name2, name2, comp_code, cst);
- }
- }
-
- if (TREE_CODE_CLASS (comp_code) == tcc_comparison
- && TREE_CODE (val) == INTEGER_CST)
- {
- gimple *def_stmt = SSA_NAME_DEF_STMT (name);
- tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
- tree val2 = NULL_TREE;
- unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
- wide_int mask = wi::zero (prec);
- unsigned int nprec = prec;
- enum tree_code rhs_code = ERROR_MARK;
-
- if (is_gimple_assign (def_stmt))
- rhs_code = gimple_assign_rhs_code (def_stmt);
-
- /* In the case of NAME != CST1 where NAME = A +- CST2 we can
- assert that A != CST1 -+ CST2. */
- if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
- && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
+ if (cmp == 0)
{
- tree op0 = gimple_assign_rhs1 (def_stmt);
- tree op1 = gimple_assign_rhs2 (def_stmt);
- if (TREE_CODE (op0) == SSA_NAME
- && TREE_CODE (op1) == INTEGER_CST)
- {
- enum tree_code reverse_op = (rhs_code == PLUS_EXPR
- ? MINUS_EXPR : PLUS_EXPR);
- op1 = int_const_binop (reverse_op, val, op1);
- if (TREE_OVERFLOW (op1))
- op1 = drop_tree_overflow (op1);
- add_assert_info (asserts, op0, op0, comp_code, op1);
- }
+ /* Ranges cannot be empty. */
+ *idx = i;
+ return true;
}
-
- /* Add asserts for NAME cmp CST and NAME being defined
- as NAME = (int) NAME2. */
- if (!TYPE_UNSIGNED (TREE_TYPE (val))
- && (comp_code == LE_EXPR || comp_code == LT_EXPR
- || comp_code == GT_EXPR || comp_code == GE_EXPR)
- && gimple_assign_cast_p (def_stmt))
+ else if (cmp > 0)
+ high = i;
+ else
{
- name2 = gimple_assign_rhs1 (def_stmt);
- if (CONVERT_EXPR_CODE_P (rhs_code)
- && TREE_CODE (name2) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (name2))
- && TYPE_UNSIGNED (TREE_TYPE (name2))
- && prec == TYPE_PRECISION (TREE_TYPE (name2))
- && (comp_code == LE_EXPR || comp_code == GT_EXPR
- || !tree_int_cst_equal (val,
- TYPE_MIN_VALUE (TREE_TYPE (val)))))
+ low = i + 1;
+ if (CASE_HIGH (t) != NULL
+ && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
{
- tree tmp, cst;
- enum tree_code new_comp_code = comp_code;
-
- cst = fold_convert (TREE_TYPE (name2),
- TYPE_MIN_VALUE (TREE_TYPE (val)));
- /* Build an expression for the range test. */
- tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
- cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
- fold_convert (TREE_TYPE (name2), val));
- if (comp_code == LT_EXPR || comp_code == GE_EXPR)
- {
- new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
- cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
- build_int_cst (TREE_TYPE (name2), 1));
- }
- add_assert_info (asserts, name2, tmp, new_comp_code, cst);
+ *idx = i;
+ return true;
}
}
+ }
- /* Add asserts for NAME cmp CST and NAME being defined as
- NAME = NAME2 >> CST2.
+ *idx = high;
+ return false;
+}
- Extract CST2 from the right shift. */
- if (rhs_code == RSHIFT_EXPR)
- {
- name2 = gimple_assign_rhs1 (def_stmt);
- cst2 = gimple_assign_rhs2 (def_stmt);
- if (TREE_CODE (name2) == SSA_NAME
- && tree_fits_uhwi_p (cst2)
- && INTEGRAL_TYPE_P (TREE_TYPE (name2))
- && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
- && type_has_mode_precision_p (TREE_TYPE (val)))
- {
- mask = wi::mask (tree_to_uhwi (cst2), false, prec);
- val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
- }
- }
- if (val2 != NULL_TREE
- && TREE_CODE (val2) == INTEGER_CST
- && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
- TREE_TYPE (val),
- val2, cst2), val))
- {
- enum tree_code new_comp_code = comp_code;
- tree tmp, new_val;
+/* Searches the case label vector VEC for the range of CASE_LABELs that is used
+ for values between MIN and MAX. The first index is placed in MIN_IDX. The
+ last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
+ then MAX_IDX < MIN_IDX.
+ Returns true if the default label is not needed. */
- tmp = name2;
- if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
- {
- if (!TYPE_UNSIGNED (TREE_TYPE (val)))
- {
- tree type = build_nonstandard_integer_type (prec, 1);
- tmp = build1 (NOP_EXPR, type, name2);
- val2 = fold_convert (type, val2);
- }
- tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
- new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
- new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
- }
- else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
- {
- wide_int minval
- = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
- new_val = val2;
- if (minval == wi::to_wide (new_val))
- new_val = NULL_TREE;
- }
- else
- {
- wide_int maxval
- = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
- mask |= wi::to_wide (val2);
- if (wi::eq_p (mask, maxval))
- new_val = NULL_TREE;
- else
- new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
- }
+bool
+find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
+ size_t *max_idx)
+{
+ size_t i, j;
+ bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
+ bool max_take_default = !find_case_label_index (stmt, i, max, &j);
- if (new_val)
- add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
- }
+ if (i == j
+ && min_take_default
+ && max_take_default)
+ {
+ /* Only the default case label reached.
+ Return an empty range. */
+ *min_idx = 1;
+ *max_idx = 0;
+ return false;
+ }
+ else
+ {
+ bool take_default = min_take_default || max_take_default;
+ tree low, high;
+ size_t k;
- /* If we have a conversion that doesn't change the value of the source
- simply register the same assert for it. */
- if (CONVERT_EXPR_CODE_P (rhs_code))
- {
- value_range vr;
- tree rhs1 = gimple_assign_rhs1 (def_stmt);
- if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
- && TREE_CODE (rhs1) == SSA_NAME
- /* Make sure the relation preserves the upper/lower boundary of
- the range conservatively. */
- && (comp_code == NE_EXPR
- || comp_code == EQ_EXPR
- || (TYPE_SIGN (TREE_TYPE (name))
- == TYPE_SIGN (TREE_TYPE (rhs1)))
- || ((comp_code == LE_EXPR
- || comp_code == LT_EXPR)
- && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
- || ((comp_code == GE_EXPR
- || comp_code == GT_EXPR)
- && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
- /* And the conversion does not alter the value we compare
- against and all values in rhs1 can be represented in
- the converted to type. */
- && int_fits_type_p (val, TREE_TYPE (rhs1))
- && ((TYPE_PRECISION (TREE_TYPE (name))
- > TYPE_PRECISION (TREE_TYPE (rhs1)))
- || ((get_range_query (cfun)->range_of_expr (vr, rhs1)
- && vr.kind () == VR_RANGE)
- && wi::fits_to_tree_p
- (widest_int::from (vr.lower_bound (),
- TYPE_SIGN (TREE_TYPE (rhs1))),
- TREE_TYPE (name))
- && wi::fits_to_tree_p
- (widest_int::from (vr.upper_bound (),
- TYPE_SIGN (TREE_TYPE (rhs1))),
- TREE_TYPE (name)))))
- add_assert_info (asserts, rhs1, rhs1,
- comp_code, fold_convert (TREE_TYPE (rhs1), val));
- }
+ if (max_take_default)
+ j--;
- /* Add asserts for NAME cmp CST and NAME being defined as
- NAME = NAME2 & CST2.
-
- Extract CST2 from the and.
-
- Also handle
- NAME = (unsigned) NAME2;
- casts where NAME's type is unsigned and has smaller precision
- than NAME2's type as if it was NAME = NAME2 & MASK. */
- names[0] = NULL_TREE;
- names[1] = NULL_TREE;
- cst2 = NULL_TREE;
- if (rhs_code == BIT_AND_EXPR
- || (CONVERT_EXPR_CODE_P (rhs_code)
- && INTEGRAL_TYPE_P (TREE_TYPE (val))
- && TYPE_UNSIGNED (TREE_TYPE (val))
- && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
- > prec))
+ /* If the case label range is continuous, we do not need
+ the default case label. Verify that. */
+ high = CASE_LOW (gimple_switch_label (stmt, i));
+ if (CASE_HIGH (gimple_switch_label (stmt, i)))
+ high = CASE_HIGH (gimple_switch_label (stmt, i));
+ for (k = i + 1; k <= j; ++k)
{
- name2 = gimple_assign_rhs1 (def_stmt);
- if (rhs_code == BIT_AND_EXPR)
- cst2 = gimple_assign_rhs2 (def_stmt);
- else
- {
- cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
- nprec = TYPE_PRECISION (TREE_TYPE (name2));
- }
- if (TREE_CODE (name2) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (name2))
- && TREE_CODE (cst2) == INTEGER_CST
- && !integer_zerop (cst2)
- && (nprec > 1
- || TYPE_UNSIGNED (TREE_TYPE (val))))
+ low = CASE_LOW (gimple_switch_label (stmt, k));
+ if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
{
- gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
- if (gimple_assign_cast_p (def_stmt2))
- {
- names[1] = gimple_assign_rhs1 (def_stmt2);
- if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
- || TREE_CODE (names[1]) != SSA_NAME
- || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
- || (TYPE_PRECISION (TREE_TYPE (name2))
- != TYPE_PRECISION (TREE_TYPE (names[1]))))
- names[1] = NULL_TREE;
- }
- names[0] = name2;
+ take_default = true;
+ break;
}
+ high = low;
+ if (CASE_HIGH (gimple_switch_label (stmt, k)))
+ high = CASE_HIGH (gimple_switch_label (stmt, k));
}
- if (names[0] || names[1])
- {
- wide_int minv, maxv, valv, cst2v;
- wide_int tem, sgnbit;
- bool valid_p = false, valn, cst2n;
- enum tree_code ccode = comp_code;
-
- valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
- cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
- valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
- cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
- /* If CST2 doesn't have most significant bit set,
- but VAL is negative, we have comparison like
- if ((x & 0x123) > -4) (always true). Just give up. */
- if (!cst2n && valn)
- ccode = ERROR_MARK;
- if (cst2n)
- sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
- else
- sgnbit = wi::zero (nprec);
- minv = valv & cst2v;
- switch (ccode)
- {
- case EQ_EXPR:
- /* Minimum unsigned value for equality is VAL & CST2
- (should be equal to VAL, otherwise we probably should
- have folded the comparison into false) and
- maximum unsigned value is VAL | ~CST2. */
- maxv = valv | ~cst2v;
- valid_p = true;
- break;
- case NE_EXPR:
- tem = valv | ~cst2v;
- /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
- if (valv == 0)
- {
- cst2n = false;
- sgnbit = wi::zero (nprec);
- goto gt_expr;
- }
- /* If (VAL | ~CST2) is all ones, handle it as
- (X & CST2) < VAL. */
- if (tem == -1)
- {
- cst2n = false;
- valn = false;
- sgnbit = wi::zero (nprec);
- goto lt_expr;
- }
- if (!cst2n && wi::neg_p (cst2v))
- sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
- if (sgnbit != 0)
- {
- if (valv == sgnbit)
- {
- cst2n = true;
- valn = true;
- goto gt_expr;
- }
- if (tem == wi::mask (nprec - 1, false, nprec))
- {
- cst2n = true;
- goto lt_expr;
- }
- if (!cst2n)
- sgnbit = wi::zero (nprec);
- }
- break;
+ *min_idx = i;
+ *max_idx = j;
+ return !take_default;
+ }
+}
- case GE_EXPR:
- /* Minimum unsigned value for >= if (VAL & CST2) == VAL
- is VAL and maximum unsigned value is ~0. For signed
- comparison, if CST2 doesn't have most significant bit
- set, handle it similarly. If CST2 has MSB set,
- the minimum is the same, and maximum is ~0U/2. */
- if (minv != valv)
- {
- /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
- VAL. */
- minv = masked_increment (valv, cst2v, sgnbit, nprec);
- if (minv == valv)
- break;
- }
- maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
- valid_p = true;
- break;
-
- case GT_EXPR:
- gt_expr:
- /* Find out smallest MINV where MINV > VAL
- && (MINV & CST2) == MINV, if any. If VAL is signed and
- CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
- minv = masked_increment (valv, cst2v, sgnbit, nprec);
- if (minv == valv)
- break;
- maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
- valid_p = true;
- break;
-
- case LE_EXPR:
- /* Minimum unsigned value for <= is 0 and maximum
- unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
- Otherwise, find smallest VAL2 where VAL2 > VAL
- && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
- as maximum.
- For signed comparison, if CST2 doesn't have most
- significant bit set, handle it similarly. If CST2 has
- MSB set, the maximum is the same and minimum is INT_MIN. */
- if (minv == valv)
- maxv = valv;
- else
- {
- maxv = masked_increment (valv, cst2v, sgnbit, nprec);
- if (maxv == valv)
- break;
- maxv -= 1;
- }
- maxv |= ~cst2v;
- minv = sgnbit;
- valid_p = true;
- break;
-
- case LT_EXPR:
- lt_expr:
- /* Minimum unsigned value for < is 0 and maximum
- unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
- Otherwise, find smallest VAL2 where VAL2 > VAL
- && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
- as maximum.
- For signed comparison, if CST2 doesn't have most
- significant bit set, handle it similarly. If CST2 has
- MSB set, the maximum is the same and minimum is INT_MIN. */
- if (minv == valv)
- {
- if (valv == sgnbit)
- break;
- maxv = valv;
- }
- else
- {
- maxv = masked_increment (valv, cst2v, sgnbit, nprec);
- if (maxv == valv)
- break;
- }
- maxv -= 1;
- maxv |= ~cst2v;
- minv = sgnbit;
- valid_p = true;
- break;
-
- default:
- break;
- }
- if (valid_p
- && (maxv - minv) != -1)
- {
- tree tmp, new_val, type;
- int i;
-
- for (i = 0; i < 2; i++)
- if (names[i])
- {
- wide_int maxv2 = maxv;
- tmp = names[i];
- type = TREE_TYPE (names[i]);
- if (!TYPE_UNSIGNED (type))
- {
- type = build_nonstandard_integer_type (nprec, 1);
- tmp = build1 (NOP_EXPR, type, names[i]);
- }
- if (minv != 0)
- {
- tmp = build2 (PLUS_EXPR, type, tmp,
- wide_int_to_tree (type, -minv));
- maxv2 = maxv - minv;
- }
- new_val = wide_int_to_tree (type, maxv2);
- add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
- }
- }
- }
- }
-}
-
-/* OP is an operand of a truth value expression which is known to have
- a particular value. Register any asserts for OP and for any
- operands in OP's defining statement.
-
- If CODE is EQ_EXPR, then we want to register OP is zero (false),
- if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
-
-static void
-register_edge_assert_for_1 (tree op, enum tree_code code,
- edge e, vec<assert_info> &asserts)
-{
- gimple *op_def;
- tree val;
- enum tree_code rhs_code;
-
- /* We only care about SSA_NAMEs. */
- if (TREE_CODE (op) != SSA_NAME)
- return;
-
- /* We know that OP will have a zero or nonzero value. */
- val = build_int_cst (TREE_TYPE (op), 0);
- add_assert_info (asserts, op, op, code, val);
-
- /* Now look at how OP is set. If it's set from a comparison,
- a truth operation or some bit operations, then we may be able
- to register information about the operands of that assignment. */
- op_def = SSA_NAME_DEF_STMT (op);
- if (gimple_code (op_def) != GIMPLE_ASSIGN)
- return;
-
- rhs_code = gimple_assign_rhs_code (op_def);
-
- if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
- {
- bool invert = (code == EQ_EXPR ? true : false);
- tree op0 = gimple_assign_rhs1 (op_def);
- tree op1 = gimple_assign_rhs2 (op_def);
-
- if (TREE_CODE (op0) == SSA_NAME)
- register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
- if (TREE_CODE (op1) == SSA_NAME)
- register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
- }
- else if ((code == NE_EXPR
- && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
- || (code == EQ_EXPR
- && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
- {
- /* Recurse on each operand. */
- tree op0 = gimple_assign_rhs1 (op_def);
- tree op1 = gimple_assign_rhs2 (op_def);
- if (TREE_CODE (op0) == SSA_NAME
- && has_single_use (op0))
- register_edge_assert_for_1 (op0, code, e, asserts);
- if (TREE_CODE (op1) == SSA_NAME
- && has_single_use (op1))
- register_edge_assert_for_1 (op1, code, e, asserts);
- }
- else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
- && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
- {
- /* Recurse, flipping CODE. */
- code = invert_tree_comparison (code, false);
- register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
- }
- else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
- {
- /* Recurse through the copy. */
- register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
- }
- else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
- {
- /* Recurse through the type conversion, unless it is a narrowing
- conversion or conversion from non-integral type. */
- tree rhs = gimple_assign_rhs1 (op_def);
- if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
- && (TYPE_PRECISION (TREE_TYPE (rhs))
- <= TYPE_PRECISION (TREE_TYPE (op))))
- register_edge_assert_for_1 (rhs, code, e, asserts);
- }
-}
-
-/* Check if comparison
- NAME COND_OP INTEGER_CST
- has a form of
- (X & 11...100..0) COND_OP XX...X00...0
- Such comparison can yield assertions like
- X >= XX...X00...0
- X <= XX...X11...1
- in case of COND_OP being EQ_EXPR or
- X < XX...X00...0
- X > XX...X11...1
- in case of NE_EXPR. */
-
-static bool
-is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
- tree *new_name, tree *low, enum tree_code *low_code,
- tree *high, enum tree_code *high_code)
-{
- gimple *def_stmt = SSA_NAME_DEF_STMT (name);
-
- if (!is_gimple_assign (def_stmt)
- || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
- return false;
-
- tree t = gimple_assign_rhs1 (def_stmt);
- tree maskt = gimple_assign_rhs2 (def_stmt);
- if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
- return false;
-
- wi::tree_to_wide_ref mask = wi::to_wide (maskt);
- wide_int inv_mask = ~mask;
- /* Must have been removed by now so don't bother optimizing. */
- if (mask == 0 || inv_mask == 0)
- return false;
-
- /* Assume VALT is INTEGER_CST. */
- wi::tree_to_wide_ref val = wi::to_wide (valt);
-
- if ((inv_mask & (inv_mask + 1)) != 0
- || (val & mask) != val)
- return false;
-
- bool is_range = cond_code == EQ_EXPR;
-
- tree type = TREE_TYPE (t);
- wide_int min = wi::min_value (type),
- max = wi::max_value (type);
-
- if (is_range)
- {
- *low_code = val == min ? ERROR_MARK : GE_EXPR;
- *high_code = val == max ? ERROR_MARK : LE_EXPR;
- }
- else
- {
- /* We can still generate assertion if one of alternatives
- is known to always be false. */
- if (val == min)
- {
- *low_code = (enum tree_code) 0;
- *high_code = GT_EXPR;
- }
- else if ((val | inv_mask) == max)
- {
- *low_code = LT_EXPR;
- *high_code = (enum tree_code) 0;
- }
- else
- return false;
- }
-
- *new_name = t;
- *low = wide_int_to_tree (type, val);
- *high = wide_int_to_tree (type, val | inv_mask);
-
- return true;
-}
-
-/* Try to register an edge assertion for SSA name NAME on edge E for
- the condition COND contributing to the conditional jump pointed to by
- SI. */
-
-void
-register_edge_assert_for (tree name, edge e,
- enum tree_code cond_code, tree cond_op0,
- tree cond_op1, vec<assert_info> &asserts)
-{
- tree val;
- enum tree_code comp_code;
- bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
-
- /* Do not attempt to infer anything in names that flow through
- abnormal edges. */
- if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
- return;
-
- if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
- cond_op0, cond_op1,
- is_else_edge,
- &comp_code, &val))
- return;
-
- /* Register ASSERT_EXPRs for name. */
- register_edge_assert_for_2 (name, e, cond_code, cond_op0,
- cond_op1, is_else_edge, asserts);
-
-
- /* If COND is effectively an equality test of an SSA_NAME against
- the value zero or one, then we may be able to assert values
- for SSA_NAMEs which flow into COND. */
-
- /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
- statement of NAME we can assert both operands of the BIT_AND_EXPR
- have nonzero value. */
- if ((comp_code == EQ_EXPR && integer_onep (val))
- || (comp_code == NE_EXPR && integer_zerop (val)))
- {
- gimple *def_stmt = SSA_NAME_DEF_STMT (name);
-
- if (is_gimple_assign (def_stmt)
- && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
- {
- tree op0 = gimple_assign_rhs1 (def_stmt);
- tree op1 = gimple_assign_rhs2 (def_stmt);
- register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
- register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
- }
- else if (is_gimple_assign (def_stmt)
- && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
- == tcc_comparison))
- register_edge_assert_for_1 (name, NE_EXPR, e, asserts);
- }
-
- /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
- statement of NAME we can assert both operands of the BIT_IOR_EXPR
- have zero value. */
- if ((comp_code == EQ_EXPR && integer_zerop (val))
- || (comp_code == NE_EXPR
- && integer_onep (val)
- && TYPE_PRECISION (TREE_TYPE (name)) == 1))
- {
- gimple *def_stmt = SSA_NAME_DEF_STMT (name);
-
- /* For BIT_IOR_EXPR, both operands necessarily have zero value
- only if NAME == 0, or if the type precision is one (in which
- case NAME != 1 implies NAME == 0). */
- if (is_gimple_assign (def_stmt)
- && gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
- {
- tree op0 = gimple_assign_rhs1 (def_stmt);
- tree op1 = gimple_assign_rhs2 (def_stmt);
- register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
- register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
- }
- else if (is_gimple_assign (def_stmt)
- && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
- == tcc_comparison))
- register_edge_assert_for_1 (name, EQ_EXPR, e, asserts);
- }
-
- /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
- if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
- && TREE_CODE (val) == INTEGER_CST)
- {
- enum tree_code low_code, high_code;
- tree low, high;
- if (is_masked_range_test (name, val, comp_code, &name, &low,
- &low_code, &high, &high_code))
- {
- if (low_code != ERROR_MARK)
- register_edge_assert_for_2 (name, e, low_code, name,
- low, /*invert*/false, asserts);
- if (high_code != ERROR_MARK)
- register_edge_assert_for_2 (name, e, high_code, name,
- high, /*invert*/false, asserts);
- }
- }
-}
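
The BIT_AND_EXPR and BIT_IOR_EXPR cases above rest on two simple bitwise
facts: a nonzero AND forces both of its operands to be nonzero, and a zero
IOR forces both of its operands to be zero.  A tiny standalone brute-force
check of both implications in C++ (illustration only, not GCC code):

#include <cassert>
#include <cstdint>

int
main ()
{
  for (uint32_t a = 0; a < 256; ++a)
    for (uint32_t b = 0; b < 256; ++b)
      {
        /* (a & b) != 0 implies a != 0 and b != 0.  */
        if ((a & b) != 0)
          assert (a != 0 && b != 0);
        /* (a | b) == 0 implies a == 0 and b == 0.  */
        if ((a | b) == 0)
          assert (a == 0 && b == 0);
      }
}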
-
-/* Handle
- _4 = x_3 & 31;
- if (_4 != 0)
- goto <bb 6>;
- else
- goto <bb 7>;
- <bb 6>:
- __builtin_unreachable ();
- <bb 7>:
- x_5 = ASSERT_EXPR <x_3, ...>;
- If x_3 has no other immediate uses (checked by the caller) and
- VAR is the x_3 var from the ASSERT_EXPR, we can clear the low 5
- bits from the non-zero bitmask. */
-
-void
-maybe_set_nonzero_bits (edge e, tree var)
-{
- basic_block cond_bb = e->src;
- gimple *stmt = last_stmt (cond_bb);
- tree cst;
-
- if (stmt == NULL
- || gimple_code (stmt) != GIMPLE_COND
- || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
- ? EQ_EXPR : NE_EXPR)
- || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
- || !integer_zerop (gimple_cond_rhs (stmt)))
- return;
-
- stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
- if (!is_gimple_assign (stmt)
- || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
- || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
- return;
- if (gimple_assign_rhs1 (stmt) != var)
- {
- gimple *stmt2;
-
- if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
- return;
- stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
- if (!gimple_assign_cast_p (stmt2)
- || gimple_assign_rhs1 (stmt2) != var
- || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
- || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
- != TYPE_PRECISION (TREE_TYPE (var))))
- return;
- }
- cst = gimple_assign_rhs2 (stmt);
- set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
- wi::to_wide (cst)));
-}
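
On the fall-through edge of the example, (x_3 & 31) == 0 is known to hold,
so the bits of the constant can simply be cleared from x_3's may-be-nonzero
mask; that is what the wi::bit_and_not call computes.  The same refinement
on a plain 64-bit mask, as a standalone C++ sketch (illustration only;
refine_nonzero_bits is an invented name):

#include <cassert>
#include <cstdint>

/* Once (x & cst) == 0 is known to hold, clear the bits of cst from x's
   conservative "may be nonzero" mask.  */
static uint64_t
refine_nonzero_bits (uint64_t nonzero, uint64_t cst)
{
  return nonzero & ~cst;
}

int
main ()
{
  uint64_t nonzero = ~(uint64_t) 0;   /* Nothing known yet.  */
  /* After "if (x & 31) __builtin_unreachable ();" we know x & 31 == 0.  */
  nonzero = refine_nonzero_bits (nonzero, 31);
  assert ((nonzero & 31) == 0);       /* The low 5 bits are known zero.  */
  assert (nonzero == ~(uint64_t) 31);
}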
-
-/* Return true if STMT is interesting for VRP. */
-
-bool
-stmt_interesting_for_vrp (gimple *stmt)
-{
- if (gimple_code (stmt) == GIMPLE_PHI)
- {
- tree res = gimple_phi_result (stmt);
- return (!virtual_operand_p (res)
- && (INTEGRAL_TYPE_P (TREE_TYPE (res))
- || POINTER_TYPE_P (TREE_TYPE (res))));
- }
- else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
- {
- tree lhs = gimple_get_lhs (stmt);
-
- /* In general, assignments with virtual operands are not useful
- for deriving ranges, with the obvious exception of calls to
- builtin functions. */
- if (lhs && TREE_CODE (lhs) == SSA_NAME
- && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
- || POINTER_TYPE_P (TREE_TYPE (lhs)))
- && (is_gimple_call (stmt)
- || !gimple_vuse (stmt)))
- return true;
- else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
- switch (gimple_call_internal_fn (stmt))
- {
- case IFN_ADD_OVERFLOW:
- case IFN_SUB_OVERFLOW:
- case IFN_MUL_OVERFLOW:
- case IFN_ATOMIC_COMPARE_EXCHANGE:
- /* These internal calls return _Complex integer type,
- but are interesting to VRP nevertheless. */
- if (lhs && TREE_CODE (lhs) == SSA_NAME)
- return true;
- break;
- default:
- break;
- }
- }
- else if (gimple_code (stmt) == GIMPLE_COND
- || gimple_code (stmt) == GIMPLE_SWITCH)
- return true;
-
- return false;
-}
-
-/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
- that includes the value VAL. The search is restricted to the range
- [START_IDX, n - 1] where n is the size of VEC.
-
- If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
- returned.
-
- If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
- it is placed in IDX and false is returned.
-
- If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
- returned. */
-
-bool
-find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
-{
- size_t n = gimple_switch_num_labels (stmt);
- size_t low, high;
-
- /* Find case label for minimum of the value range or the next one.
- At each iteration we are searching in [low, high - 1]. */
-
- for (low = start_idx, high = n; high != low; )
- {
- tree t;
- int cmp;
- /* Note that i != high, so we never ask for n. */
- size_t i = (high + low) / 2;
- t = gimple_switch_label (stmt, i);
-
- /* Cache the result of comparing CASE_LOW and val. */
- cmp = tree_int_cst_compare (CASE_LOW (t), val);
-
- if (cmp == 0)
- {
- /* Ranges cannot be empty. */
- *idx = i;
- return true;
- }
- else if (cmp > 0)
- high = i;
- else
- {
- low = i + 1;
- if (CASE_HIGH (t) != NULL
- && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
- {
- *idx = i;
- return true;
- }
- }
- }
-
- *idx = high;
- return false;
-}
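
The loop above is a binary search over case labels sorted by CASE_LOW,
where each label covers either a single value or the range
[CASE_LOW, CASE_HIGH].  The same search over plain integer ranges, as a
standalone C++ sketch (illustration only; the type and function names are
invented):

#include <cassert>
#include <cstddef>
#include <vector>

struct case_label { long low, high; };  /* high == low for a single value.  */

/* LABELS are sorted by LOW and do not overlap.  Return true and set *IDX
   if some label contains VAL; otherwise set *IDX to the first label whose
   LOW is greater than VAL (or to labels.size ()) and return false.  */
static bool
find_label_index (const std::vector<case_label> &labels, size_t start,
                  long val, size_t *idx)
{
  size_t low = start, high = labels.size ();
  while (high != low)
    {
      size_t i = (high + low) / 2;      /* i != high, never past the end.  */
      if (labels[i].low == val)
        {
          *idx = i;
          return true;
        }
      else if (labels[i].low > val)
        high = i;
      else
        {
          low = i + 1;
          if (labels[i].high >= val)    /* VAL falls inside this range.  */
            {
              *idx = i;
              return true;
            }
        }
    }
  *idx = high;
  return false;
}

int
main ()
{
  std::vector<case_label> labels = { { 1, 1 }, { 5, 9 }, { 20, 20 } };
  size_t idx;
  assert (find_label_index (labels, 0, 7, &idx) && idx == 1);    /* In [5, 9].  */
  assert (!find_label_index (labels, 0, 10, &idx) && idx == 2);  /* Next label.  */
  assert (!find_label_index (labels, 0, 99, &idx) && idx == 3);  /* Past the end.  */
}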
-
-/* Searches the case label vector VEC for the range of CASE_LABELs that is used
- for values between MIN and MAX. The first index is placed in MIN_IDX. The
- last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
- then MAX_IDX < MIN_IDX.
- Returns true if the default label is not needed. */
-
-bool
-find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
- size_t *max_idx)
-{
- size_t i, j;
- bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
- bool max_take_default = !find_case_label_index (stmt, i, max, &j);
-
- if (i == j
- && min_take_default
- && max_take_default)
- {
- /* Only the default case label reached.
- Return an empty range. */
- *min_idx = 1;
- *max_idx = 0;
- return false;
- }
- else
- {
- bool take_default = min_take_default || max_take_default;
- tree low, high;
- size_t k;
-
- if (max_take_default)
- j--;
-
- /* If the case label range is continuous, we do not need
- the default case label. Verify that. */
- high = CASE_LOW (gimple_switch_label (stmt, i));
- if (CASE_HIGH (gimple_switch_label (stmt, i)))
- high = CASE_HIGH (gimple_switch_label (stmt, i));
- for (k = i + 1; k <= j; ++k)
- {
- low = CASE_LOW (gimple_switch_label (stmt, k));
- if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
- {
- take_default = true;
- break;
- }
- high = low;
- if (CASE_HIGH (gimple_switch_label (stmt, k)))
- high = CASE_HIGH (gimple_switch_label (stmt, k));
- }
-
- *min_idx = i;
- *max_idx = j;
- return !take_default;
- }
-}
-
-/* Given a SWITCH_STMT, return the case label that encompasses the
- known possible values for the switch operand. RANGE_OF_OP is a
- range for the known values of the switch operand. */
-
-tree
-find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
-{
- if (range_of_op->undefined_p ()
- || range_of_op->varying_p ()
- || range_of_op->symbolic_p ())
- return NULL_TREE;
-
- size_t i, j;
- tree op = gimple_switch_index (switch_stmt);
- tree type = TREE_TYPE (op);
- tree tmin = wide_int_to_tree (type, range_of_op->lower_bound ());
- tree tmax = wide_int_to_tree (type, range_of_op->upper_bound ());
- find_case_label_range (switch_stmt, tmin, tmax, &i, &j);
- if (i == j)
- {
- /* Look for exactly one label that encompasses the range of
- the operand. */
- tree label = gimple_switch_label (switch_stmt, i);
- tree case_high
- = CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
- int_range_max label_range (CASE_LOW (label), case_high);
- if (!types_compatible_p (label_range.type (), range_of_op->type ()))
- range_cast (label_range, range_of_op->type ());
- label_range.intersect (*range_of_op);
- if (label_range == *range_of_op)
- return label;
- }
- else if (i > j)
- {
- /* If there are no labels at all, take the default. */
- return gimple_switch_label (switch_stmt, 0);
- }
- else
- {
- /* Otherwise, there are various labels that can encompass
- the range of the operand. In that case, see if the range of
- the operand is entirely *outside* the bounds of all the
- (non-default) case labels. If so, take the default. */
- unsigned n = gimple_switch_num_labels (switch_stmt);
- tree min_label = gimple_switch_label (switch_stmt, 1);
- tree max_label = gimple_switch_label (switch_stmt, n - 1);
- tree case_high = CASE_HIGH (max_label);
- if (!case_high)
- case_high = CASE_LOW (max_label);
- int_range_max label_range (CASE_LOW (min_label), case_high);
- if (!types_compatible_p (label_range.type (), range_of_op->type ()))
- range_cast (label_range, range_of_op->type ());
- label_range.intersect (*range_of_op);
- if (label_range.undefined_p ())
- return gimple_switch_label (switch_stmt, 0);
- }
- return NULL_TREE;
-}
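
Both interesting cases above reduce to interval intersection: a single
label covers the operand exactly when intersecting the label's range with
the operand's range gives the operand's range back, and the default label
is the answer when the intersection with the span of all non-default
labels is empty.  A standalone sketch with plain closed intervals
(illustration only; the names are invented):

#include <cassert>
#include <optional>

struct interval { long lo, hi; };       /* Closed interval, lo <= hi.  */

static std::optional<interval>
intersect (interval a, interval b)
{
  long lo = a.lo > b.lo ? a.lo : b.lo;
  long hi = a.hi < b.hi ? a.hi : b.hi;
  if (lo > hi)
    return std::nullopt;                /* Empty intersection.  */
  return interval { lo, hi };
}

/* LABEL fully covers OP iff intersecting LABEL with OP gives OP back.  */
static bool
label_covers_operand (interval label, interval op)
{
  std::optional<interval> i = intersect (label, op);
  return i && i->lo == op.lo && i->hi == op.hi;
}

int
main ()
{
  interval op = { 5, 9 };
  assert (label_covers_operand ({ 0, 10 }, op));   /* Take this label.  */
  assert (!label_covers_operand ({ 6, 20 }, op));  /* Covers it only partly.  */
  /* Operand entirely outside all labels: the intersection is empty, so
     the default label is taken.  */
  assert (!intersect ({ 100, 200 }, op));
}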
-
-struct case_info
-{
- tree expr;
- basic_block bb;
-};
-
-/* Location information for ASSERT_EXPRs. Each instance of this
- structure describes an ASSERT_EXPR for an SSA name. Since a single
- SSA name may have more than one assertion associated with it, these
- locations are kept in a linked list attached to the corresponding
- SSA name. */
-struct assert_locus
-{
- /* Basic block where the assertion would be inserted. */
- basic_block bb;
-
- /* Some assertions need to be inserted on an edge (e.g., assertions
- generated by COND_EXPRs). In those cases, BB will be NULL. */
- edge e;
-
- /* Pointer to the statement that generated this assertion. */
- gimple_stmt_iterator si;
-
- /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
- enum tree_code comp_code;
-
- /* Value being compared against. */
- tree val;
-
- /* Expression to compare. */
- tree expr;
-
- /* Next node in the linked list. */
- assert_locus *next;
-};
-
-/* Class to traverse the flowgraph looking for conditional jumps to
- insert ASSERT_EXPR range expressions. These range expressions are
- meant to provide information to optimizations that need to reason
- in terms of value ranges. They will not be expanded into RTL. */
-
-class vrp_asserts
-{
-public:
- vrp_asserts (struct function *fn) : fun (fn) { }
-
- void insert_range_assertions ();
-
- /* Convert range assertion expressions into the implied copies and
- copy propagate away the copies. */
- void remove_range_assertions ();
-
- /* Dump all the registered assertions for all the names to FILE. */
- void dump (FILE *);
-
- /* Dump all the registered assertions for NAME to FILE. */
- void dump (FILE *file, tree name);
-
- /* Dump all the registered assertions for NAME to stderr. */
- void debug (tree name)
- {
- dump (stderr, name);
- }
-
- /* Dump all the registered assertions for all the names to stderr. */
- void debug ()
- {
- dump (stderr);
- }
-
-private:
- /* Set of SSA names found live during the RPO traversal of the function
- for still active basic-blocks. */
- live_names live;
-
- /* Function to work on. */
- struct function *fun;
-
- /* If bit I is present, it means that SSA name N_i has a list of
- assertions that should be inserted in the IL. */
- bitmap need_assert_for;
-
- /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
- holds a list of ASSERT_LOCUS_T nodes that describe where
- ASSERT_EXPRs for SSA name N_I should be inserted. */
- assert_locus **asserts_for;
-
- /* Finish found ASSERTS for E and register them at GSI. */
- void finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
- vec<assert_info> &asserts);
-
- /* Determine whether the outgoing edges of BB should receive an
- ASSERT_EXPR for each of the operands of BB's LAST statement. The
- last statement of BB must be a SWITCH_EXPR.
-
- If any of the sub-graphs rooted at BB have an interesting use of
- the predicate operands, an assert location node is added to the
- list of assertions for the corresponding operands. */
- void find_switch_asserts (basic_block bb, gswitch *last);
-
- /* Do an RPO walk over the function computing SSA name liveness
- on-the-fly and deciding on assert expressions to insert. */
- void find_assert_locations ();
-
- /* Traverse all the statements in block BB looking for statements that
- may generate useful assertions for the SSA names in their operand.
- See method implementation commentary for more information. */
- void find_assert_locations_in_bb (basic_block bb);
-
- /* Determine whether the outgoing edges of BB should receive an
- ASSERT_EXPR for each of the operands of BB's LAST statement.
- The last statement of BB must be a COND_EXPR.
-
- If any of the sub-graphs rooted at BB have an interesting use of
- the predicate operands, an assert location node is added to the
- list of assertions for the corresponding operands. */
- void find_conditional_asserts (basic_block bb, gcond *last);
-
- /* Process all the insertions registered for every name N_i registered
- in NEED_ASSERT_FOR. The list of assertions to be inserted are
- found in ASSERTS_FOR[i]. */
- void process_assert_insertions ();
-
- /* If NAME doesn't have an ASSERT_EXPR registered for asserting
- 'EXPR COMP_CODE VAL' at a location that dominates block BB or
- E->DEST, then register this location as a possible insertion point
- for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
-
- BB, E and SI provide the exact insertion point for the new
- ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
- on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
- BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
- must not be NULL. */
- void register_new_assert_for (tree name, tree expr,
- enum tree_code comp_code,
- tree val, basic_block bb,
- edge e, gimple_stmt_iterator si);
-
- /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
- create a new SSA name N and return the assertion assignment
- 'N = ASSERT_EXPR <V, V OP W>'. */
- gimple *build_assert_expr_for (tree cond, tree v);
-
- /* Create an ASSERT_EXPR for NAME and insert it in the location
- indicated by LOC. Return true if we made any edge insertions. */
- bool process_assert_insertions_for (tree name, assert_locus *loc);
-
- /* Qsort callback for sorting assert locations. */
- template <bool stable> static int compare_assert_loc (const void *,
- const void *);
-
- /* Return true if STMT is a predicate expression involving floating
- point values. */
- bool fp_predicate (gimple *stmt)
- {
- GIMPLE_CHECK (stmt, GIMPLE_COND);
- return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
- }
-
- bool all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt,
- basic_block cond_bb);
-
- static int compare_case_labels (const void *, const void *);
-};
-
-/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
- create a new SSA name N and return the assertion assignment
- 'N = ASSERT_EXPR <V, V OP W>'. */
-
-gimple *
-vrp_asserts::build_assert_expr_for (tree cond, tree v)
-{
- tree a;
- gassign *assertion;
-
- gcc_assert (TREE_CODE (v) == SSA_NAME
- && COMPARISON_CLASS_P (cond));
-
- a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
- assertion = gimple_build_assign (NULL_TREE, a);
-
- /* The new ASSERT_EXPR creates a new SSA name that replaces the
- operand of the ASSERT_EXPR. Create it so the new name and the old one
- are registered in the replacement table so that we can fix the SSA web
- after adding all the ASSERT_EXPRs. */
- tree new_def = create_new_def_for (v, assertion, NULL);
- /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
- given we have to be able to fully propagate those out to re-create
- valid SSA when removing the asserts. */
- if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
- SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
-
- return assertion;
-}
-
-/* Dump all the registered assertions for NAME to FILE. */
-
-void
-vrp_asserts::dump (FILE *file, tree name)
-{
- assert_locus *loc;
-
- fprintf (file, "Assertions to be inserted for ");
- print_generic_expr (file, name);
- fprintf (file, "\n");
-
- loc = asserts_for[SSA_NAME_VERSION (name)];
- while (loc)
- {
- fprintf (file, "\t");
- print_gimple_stmt (file, gsi_stmt (loc->si), 0);
- fprintf (file, "\n\tBB #%d", loc->bb->index);
- if (loc->e)
- {
- fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
- loc->e->dest->index);
- dump_edge_info (file, loc->e, dump_flags, 0);
- }
- fprintf (file, "\n\tPREDICATE: ");
- print_generic_expr (file, loc->expr);
- fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
- print_generic_expr (file, loc->val);
- fprintf (file, "\n\n");
- loc = loc->next;
- }
-
- fprintf (file, "\n");
-}
-
-/* Dump all the registered assertions for all the names to FILE. */
-
-void
-vrp_asserts::dump (FILE *file)
-{
- unsigned i;
- bitmap_iterator bi;
-
- fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
- EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
- dump (file, ssa_name (i));
- fprintf (file, "\n");
-}
-
-/* If NAME doesn't have an ASSERT_EXPR registered for asserting
- 'EXPR COMP_CODE VAL' at a location that dominates block BB or
- E->DEST, then register this location as a possible insertion point
- for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
-
- BB, E and SI provide the exact insertion point for the new
- ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
- on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
- BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
- must not be NULL. */
-
-void
-vrp_asserts::register_new_assert_for (tree name, tree expr,
- enum tree_code comp_code,
- tree val,
- basic_block bb,
- edge e,
- gimple_stmt_iterator si)
-{
- assert_locus *n, *loc, *last_loc;
- basic_block dest_bb;
-
- gcc_checking_assert (bb == NULL || e == NULL);
-
- if (e == NULL)
- gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
- && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
-
- /* Never build an assert comparing against an integer constant with
- TREE_OVERFLOW set. This confuses our undefined overflow warning
- machinery. */
- if (TREE_OVERFLOW_P (val))
- val = drop_tree_overflow (val);
-
- /* The new assertion A will be inserted at BB or E. We need to
- determine if the new location is dominated by a previously
- registered location for A. If we are doing an edge insertion,
- assume that A will be inserted at E->DEST. Note that this is not
- necessarily true.
-
- If E is a critical edge, it will be split. But even if E is
- split, the new block will dominate the same set of blocks that
- E->DEST dominates.
-
- The reverse, however, is not true: blocks dominated by E->DEST
- will not be dominated by the new block created to split E. So,
- if the insertion location is on a critical edge, we will not use
- the new location to move another assertion previously registered
- at a block dominated by E->DEST. */
- dest_bb = (bb) ? bb : e->dest;
-
- /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
- VAL at a block dominating DEST_BB, then we don't need to insert a new
- one. Similarly, if the same assertion already exists at a block
- dominated by DEST_BB and the new location is not on a critical
- edge, then update the existing location for the assertion (i.e.,
- move the assertion up in the dominance tree).
-
- Note, this is implemented as a simple linked list because there
- should not be more than a handful of assertions registered per
- name. If this becomes a performance problem, a table hashed by
- COMP_CODE and VAL could be implemented. */
- loc = asserts_for[SSA_NAME_VERSION (name)];
- last_loc = loc;
- while (loc)
- {
- if (loc->comp_code == comp_code
- && (loc->val == val
- || operand_equal_p (loc->val, val, 0))
- && (loc->expr == expr
- || operand_equal_p (loc->expr, expr, 0)))
- {
- /* If E is not a critical edge and DEST_BB
- dominates the existing location for the assertion, move
- the assertion up in the dominance tree by updating its
- location information. */
- if ((e == NULL || !EDGE_CRITICAL_P (e))
- && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
- {
- loc->bb = dest_bb;
- loc->e = e;
- loc->si = si;
- return;
- }
- }
-
- /* Update the last node of the list and move to the next one. */
- last_loc = loc;
- loc = loc->next;
- }
-
- /* If we didn't find an assertion already registered for
- NAME COMP_CODE VAL, add a new one at the end of the list of
- assertions associated with NAME. */
- n = XNEW (struct assert_locus);
- n->bb = dest_bb;
- n->e = e;
- n->si = si;
- n->comp_code = comp_code;
- n->val = val;
- n->expr = expr;
- n->next = NULL;
-
- if (last_loc)
- last_loc->next = n;
- else
- asserts_for[SSA_NAME_VERSION (name)] = n;
-
- bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
-}
-
-/* Finish found ASSERTS for E and register them at GSI. */
-
-void
-vrp_asserts::finish_register_edge_assert_for (edge e,
- gimple_stmt_iterator gsi,
- vec<assert_info> &asserts)
-{
- for (unsigned i = 0; i < asserts.length (); ++i)
- /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
- reachable from E. */
- if (live.live_on_edge_p (asserts[i].name, e))
- register_new_assert_for (asserts[i].name, asserts[i].expr,
- asserts[i].comp_code, asserts[i].val,
- NULL, e, gsi);
-}
-
-/* Determine whether the outgoing edges of BB should receive an
- ASSERT_EXPR for each of the operands of BB's LAST statement.
- The last statement of BB must be a COND_EXPR.
-
- If any of the sub-graphs rooted at BB have an interesting use of
- the predicate operands, an assert location node is added to the
- list of assertions for the corresponding operands. */
-
-void
-vrp_asserts::find_conditional_asserts (basic_block bb, gcond *last)
-{
- gimple_stmt_iterator bsi;
- tree op;
- edge_iterator ei;
- edge e;
- ssa_op_iter iter;
-
- bsi = gsi_for_stmt (last);
-
- /* Look for uses of the operands in each of the sub-graphs
- rooted at BB. We need to check each of the outgoing edges
- separately, so that we know what kind of ASSERT_EXPR to
- insert. */
- FOR_EACH_EDGE (e, ei, bb->succs)
- {
- if (e->dest == bb)
- continue;
-
- /* Register the necessary assertions for each operand in the
- conditional predicate. */
- auto_vec<assert_info, 8> asserts;
- FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
- register_edge_assert_for (op, e,
- gimple_cond_code (last),
- gimple_cond_lhs (last),
- gimple_cond_rhs (last), asserts);
- finish_register_edge_assert_for (e, bsi, asserts);
- }
-}
-
-/* Compare two case labels sorting first by the destination bb index
- and then by the case value. */
-
-int
-vrp_asserts::compare_case_labels (const void *p1, const void *p2)
-{
- const struct case_info *ci1 = (const struct case_info *) p1;
- const struct case_info *ci2 = (const struct case_info *) p2;
- int idx1 = ci1->bb->index;
- int idx2 = ci2->bb->index;
-
- if (idx1 < idx2)
- return -1;
- else if (idx1 == idx2)
- {
- /* Make sure the default label is first in a group. */
- if (!CASE_LOW (ci1->expr))
- return -1;
- else if (!CASE_LOW (ci2->expr))
- return 1;
- else
- return tree_int_cst_compare (CASE_LOW (ci1->expr),
- CASE_LOW (ci2->expr));
- }
- else
- return 1;
-}
-
-/* Determine whether the outgoing edges of BB should receive an
- ASSERT_EXPR for each of the operands of BB's LAST statement.
- The last statement of BB must be a SWITCH_EXPR.
-
- If any of the sub-graphs rooted at BB have an interesting use of
- the predicate operands, an assert location node is added to the
- list of assertions for the corresponding operands. */
-
-void
-vrp_asserts::find_switch_asserts (basic_block bb, gswitch *last)
-{
- gimple_stmt_iterator bsi;
- tree op;
- edge e;
- struct case_info *ci;
- size_t n = gimple_switch_num_labels (last);
-#if GCC_VERSION >= 4000
- unsigned int idx;
-#else
- /* Work around GCC 3.4 bug (PR 37086). */
- volatile unsigned int idx;
-#endif
-
- bsi = gsi_for_stmt (last);
- op = gimple_switch_index (last);
- if (TREE_CODE (op) != SSA_NAME)
- return;
-
- /* Build a vector of case labels sorted by destination label. */
- ci = XNEWVEC (struct case_info, n);
- for (idx = 0; idx < n; ++idx)
- {
- ci[idx].expr = gimple_switch_label (last, idx);
- ci[idx].bb = label_to_block (fun, CASE_LABEL (ci[idx].expr));
- }
- edge default_edge = find_edge (bb, ci[0].bb);
- qsort (ci, n, sizeof (struct case_info), compare_case_labels);
-
- for (idx = 0; idx < n; ++idx)
- {
- tree min, max;
- tree cl = ci[idx].expr;
- basic_block cbb = ci[idx].bb;
-
- min = CASE_LOW (cl);
- max = CASE_HIGH (cl);
-
- /* If there are multiple case labels with the same destination
- we need to combine them to a single value range for the edge. */
- if (idx + 1 < n && cbb == ci[idx + 1].bb)
- {
- /* Skip labels until the last of the group. */
- do {
- ++idx;
- } while (idx < n && cbb == ci[idx].bb);
- --idx;
-
- /* Pick up the maximum of the case label range. */
- if (CASE_HIGH (ci[idx].expr))
- max = CASE_HIGH (ci[idx].expr);
- else
- max = CASE_LOW (ci[idx].expr);
- }
-
- /* Can't extract a useful assertion out of a range that includes the
- default label. */
- if (min == NULL_TREE)
- continue;
-
- /* Find the edge to register the assert expr on. */
- e = find_edge (bb, cbb);
-
- /* Register the necessary assertions for the operand in the
- SWITCH_EXPR. */
- auto_vec<assert_info, 8> asserts;
- register_edge_assert_for (op, e,
- max ? GE_EXPR : EQ_EXPR,
- op, fold_convert (TREE_TYPE (op), min),
- asserts);
- if (max)
- register_edge_assert_for (op, e, LE_EXPR, op,
- fold_convert (TREE_TYPE (op), max),
- asserts);
- finish_register_edge_assert_for (e, bsi, asserts);
- }
-
- XDELETEVEC (ci);
-
- if (!live.live_on_edge_p (op, default_edge))
- return;
-
- /* Now register along the default label assertions that correspond to the
- anti-range of each label. */
- int insertion_limit = param_max_vrp_switch_assertions;
- if (insertion_limit == 0)
- return;
-
- /* We can't do this if the default case shares a label with another case. */
- tree default_cl = gimple_switch_default_label (last);
- for (idx = 1; idx < n; idx++)
- {
- tree min, max;
- tree cl = gimple_switch_label (last, idx);
- if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
- continue;
-
- min = CASE_LOW (cl);
- max = CASE_HIGH (cl);
-
- /* Combine contiguous case ranges to reduce the number of assertions
- to insert. */
- for (idx = idx + 1; idx < n; idx++)
- {
- tree next_min, next_max;
- tree next_cl = gimple_switch_label (last, idx);
- if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
- break;
-
- next_min = CASE_LOW (next_cl);
- next_max = CASE_HIGH (next_cl);
-
- wide_int difference = (wi::to_wide (next_min)
- - wi::to_wide (max ? max : min));
- if (wi::eq_p (difference, 1))
- max = next_max ? next_max : next_min;
- else
- break;
- }
- idx--;
-
- if (max == NULL_TREE)
- {
- /* Register the assertion OP != MIN. */
- auto_vec<assert_info, 8> asserts;
- min = fold_convert (TREE_TYPE (op), min);
- register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
- asserts);
- finish_register_edge_assert_for (default_edge, bsi, asserts);
- }
- else
- {
- /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
- which will give OP the anti-range ~[MIN,MAX]. */
- tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
- min = fold_convert (TREE_TYPE (uop), min);
- max = fold_convert (TREE_TYPE (uop), max);
-
- tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
- tree rhs = int_const_binop (MINUS_EXPR, max, min);
- register_new_assert_for (op, lhs, GT_EXPR, rhs,
- NULL, default_edge, bsi);
- }
-
- if (--insertion_limit == 0)
- break;
- }
-}
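
The default-edge assertion built at the end folds the two-sided test
OP < MIN || OP > MAX into a single unsigned comparison,
(unsigned) OP - MIN > MAX - MIN.  A standalone check of that equivalence
in plain C++ (illustration only; outside_range is an invented name):

#include <cassert>
#include <cstdint>

/* x lies outside [min, max] exactly when (unsigned) (x - min) is greater
   than (unsigned) (max - min); subtracting in unsigned arithmetic makes
   values below min wrap around to something large.  */
static bool
outside_range (int32_t x, int32_t min, int32_t max)
{
  return (uint32_t) x - (uint32_t) min > (uint32_t) max - (uint32_t) min;
}

int
main ()
{
  const int32_t min = -3, max = 10;
  for (int32_t x = -100; x <= 100; ++x)
    assert (outside_range (x, min, max) == (x < min || x > max));
}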
-
-/* Traverse all the statements in block BB looking for statements that
- may generate useful assertions for the SSA names in their operand.
- If a statement produces a useful assertion A for name N_i, then the
- list of assertions already generated for N_i is scanned to
- determine if A is actually needed.
-
- If N_i already had the assertion A at a location dominating the
- current location, then nothing needs to be done. Otherwise, the
- new location for A is recorded instead.
-
- 1- For every statement S in BB, all the variables used by S are
- added to bitmap FOUND_IN_SUBGRAPH.
-
- 2- If statement S uses an operand N in a way that exposes a known
- value range for N, then if N was not already generated by an
- ASSERT_EXPR, create a new assert location for N. For instance,
- if N is a pointer and the statement dereferences it, we can
- assume that N is not NULL.
-
- 3- COND_EXPRs are a special case of #2. We can derive range
- information from the predicate but need to insert different
- ASSERT_EXPRs for each of the sub-graphs rooted at the
- conditional block. If the last statement of BB is a conditional
- expression of the form 'X op Y', then
-
- a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
-
- b) If the conditional is the only entry point to the sub-graph
- corresponding to the THEN_CLAUSE, recurse into it. On
- return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
- an ASSERT_EXPR is added for the corresponding variable.
-
- c) Repeat step (b) on the ELSE_CLAUSE.
-
- d) Mark X and Y in FOUND_IN_SUBGRAPH.
-
- For instance,
-
- if (a == 9)
- b = a;
- else
- b = c + 1;
-
- In this case, an assertion on the THEN clause is useful to
- determine that 'a' is always 9 on that edge. However, an assertion
- on the ELSE clause would be unnecessary.
-
- 4- If BB does not end in a conditional expression, then we recurse
- into BB's dominator children.
-
- At the end of the recursive traversal, every SSA name will have a
- list of locations where ASSERT_EXPRs should be added. When a new
- location for name N is found, it is registered by calling
- register_new_assert_for. That function keeps track of all the
- registered assertions to prevent adding unnecessary assertions.
- For instance, if a pointer P_4 is dereferenced more than once in a
- dominator tree, only the location dominating all the dereference of
- P_4 will receive an ASSERT_EXPR. */
-
-void
-vrp_asserts::find_assert_locations_in_bb (basic_block bb)
-{
- gimple *last;
-
- last = last_stmt (bb);
-
- /* If BB's last statement is a conditional statement involving integer
- operands, determine if we need to add ASSERT_EXPRs. */
- if (last
- && gimple_code (last) == GIMPLE_COND
- && !fp_predicate (last)
- && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
- find_conditional_asserts (bb, as_a <gcond *> (last));
-
- /* If BB's last statement is a switch statement involving integer
- operands, determine if we need to add ASSERT_EXPRs. */
- if (last
- && gimple_code (last) == GIMPLE_SWITCH
- && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
- find_switch_asserts (bb, as_a <gswitch *> (last));
-
- /* Traverse all the statements in BB marking used names and looking
- for statements that may infer assertions for their used operands. */
- for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
- gsi_prev (&si))
- {
- gimple *stmt;
- tree op;
- ssa_op_iter i;
-
- stmt = gsi_stmt (si);
-
- if (is_gimple_debug (stmt))
- continue;
-
- /* See if we can derive an assertion for any of STMT's operands. */
- FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
- {
- tree value;
- enum tree_code comp_code;
-
- /* If op is not live beyond this stmt, do not bother to insert
- asserts for it. */
- if (!live.live_on_block_p (op, bb))
- continue;
-
- /* If OP is used in such a way that we can infer a value
- range for it, and we don't find a previous assertion for
- it, create a new assertion location node for OP. */
- if (infer_value_range (stmt, op, &comp_code, &value))
- {
- /* If we are able to infer a nonzero value range for OP,
- then walk backwards through the use-def chain to see if OP
- was set via a typecast.
-
- If so, then we can also infer a nonzero value range
- for the operand of the NOP_EXPR. */
- if (comp_code == NE_EXPR && integer_zerop (value))
- {
- tree t = op;
- gimple *def_stmt = SSA_NAME_DEF_STMT (t);
-
- while (is_gimple_assign (def_stmt)
- && CONVERT_EXPR_CODE_P
- (gimple_assign_rhs_code (def_stmt))
- && TREE_CODE
- (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
- && POINTER_TYPE_P
- (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
- {
- t = gimple_assign_rhs1 (def_stmt);
- def_stmt = SSA_NAME_DEF_STMT (t);
-
- /* Note we want to register the assert for the
- operand of the NOP_EXPR after SI, not after the
- conversion. */
- if (live.live_on_block_p (t, bb))
- register_new_assert_for (t, t, comp_code, value,
- bb, NULL, si);
- }
- }
-
- register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
- }
- }
-
- /* Update live. */
- FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
- live.set (op, bb);
- FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
- live.clear (op, bb);
- }
-
- /* Traverse all PHI nodes in BB, updating live. */
- for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
- gsi_next (&si))
- {
- use_operand_p arg_p;
- ssa_op_iter i;
- gphi *phi = si.phi ();
- tree res = gimple_phi_result (phi);
-
- if (virtual_operand_p (res))
- continue;
-
- FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
- {
- tree arg = USE_FROM_PTR (arg_p);
- if (TREE_CODE (arg) == SSA_NAME)
- live.set (arg, bb);
- }
-
- live.clear (res, bb);
- }
-}
-
-/* Do an RPO walk over the function computing SSA name liveness
- on-the-fly and deciding on assert expressions to insert. */
-
-void
-vrp_asserts::find_assert_locations (void)
-{
- int *rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
- int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
- int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (fun));
- int rpo_cnt, i;
-
- rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
- for (i = 0; i < rpo_cnt; ++i)
- bb_rpo[rpo[i]] = i;
-
- /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
- the order we compute liveness and insert asserts we otherwise
- fail to insert asserts into the loop latch. */
- for (auto loop : loops_list (cfun, 0))
- {
- i = loop->latch->index;
- unsigned int j = single_succ_edge (loop->latch)->dest_idx;
- for (gphi_iterator gsi = gsi_start_phis (loop->header);
- !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gphi *phi = gsi.phi ();
- if (virtual_operand_p (gimple_phi_result (phi)))
- continue;
- tree arg = gimple_phi_arg_def (phi, j);
- if (TREE_CODE (arg) == SSA_NAME)
- live.set (arg, loop->latch);
- }
- }
-
- for (i = rpo_cnt - 1; i >= 0; --i)
- {
- basic_block bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
- edge e;
- edge_iterator ei;
-
- /* Process BB and update the live information with uses in
- this block. */
- find_assert_locations_in_bb (bb);
-
- /* Merge liveness into the predecessor blocks and free it. */
- if (!live.block_has_live_names_p (bb))
- {
- int pred_rpo = i;
- FOR_EACH_EDGE (e, ei, bb->preds)
- {
- int pred = e->src->index;
- if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
- continue;
-
- live.merge (e->src, bb);
-
- if (bb_rpo[pred] < pred_rpo)
- pred_rpo = bb_rpo[pred];
- }
-
- /* Record the RPO number of the last visited block that needs
- live information from this block. */
- last_rpo[rpo[i]] = pred_rpo;
- }
- else
- live.clear_block (bb);
-
- /* We can free all successors live bitmaps if all their
- predecessors have been visited already. */
- FOR_EACH_EDGE (e, ei, bb->succs)
- if (last_rpo[e->dest->index] == i)
- live.clear_block (e->dest);
- }
-
- XDELETEVEC (rpo);
- XDELETEVEC (bb_rpo);
- XDELETEVEC (last_rpo);
-}
-
-/* Create an ASSERT_EXPR for NAME and insert it in the location
- indicated by LOC. Return true if we made any edge insertions. */
-
-bool
-vrp_asserts::process_assert_insertions_for (tree name, assert_locus *loc)
-{
- /* Build the comparison expression NAME_i COMP_CODE VAL. */
- gimple *stmt;
- tree cond;
- gimple *assert_stmt;
- edge_iterator ei;
- edge e;
-
- /* If we have X <=> X do not insert an assert expr for that. */
- if (loc->expr == loc->val)
- return false;
-
- cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
- assert_stmt = build_assert_expr_for (cond, name);
- if (loc->e)
- {
- /* We have been asked to insert the assertion on an edge. This
- is used only by COND_EXPR and SWITCH_EXPR assertions. */
- gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
- || (gimple_code (gsi_stmt (loc->si))
- == GIMPLE_SWITCH));
-
- gsi_insert_on_edge (loc->e, assert_stmt);
- return true;
- }
-
- /* If the stmt iterator points at the end then this is an insertion
- at the beginning of a block. */
- if (gsi_end_p (loc->si))
- {
- gimple_stmt_iterator si = gsi_after_labels (loc->bb);
- gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
- return false;
-
- }
- /* Otherwise, we can insert right after LOC->SI iff the
- statement must not be the last statement in the block. */
- stmt = gsi_stmt (loc->si);
- if (!stmt_ends_bb_p (stmt))
- {
- gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
- return false;
- }
-
- /* If STMT must be the last statement in BB, we can only insert new
- assertions on the non-abnormal edge out of BB. Note that since
- STMT is not control flow, there may only be one non-abnormal/eh edge
- out of BB. */
- FOR_EACH_EDGE (e, ei, loc->bb->succs)
- if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
- {
- gsi_insert_on_edge (e, assert_stmt);
- return true;
- }
-
- gcc_unreachable ();
-}
-
-/* Qsort helper for sorting assert locations. If stable is true, don't
- use iterative_hash_expr because it can be unstable for -fcompare-debug,
- on the other side some pointers might be NULL. */
-
-template <bool stable>
-int
-vrp_asserts::compare_assert_loc (const void *pa, const void *pb)
-{
- assert_locus * const a = *(assert_locus * const *)pa;
- assert_locus * const b = *(assert_locus * const *)pb;
-
- /* If stable, some asserts might be optimized away already; sort
- them last. */
- if (stable)
- {
- if (a == NULL)
- return b != NULL;
- else if (b == NULL)
- return -1;
- }
-
- if (a->e == NULL && b->e != NULL)
- return 1;
- else if (a->e != NULL && b->e == NULL)
- return -1;
-
- /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
- no need to test both a->e and b->e. */
-
- /* Sort after destination index. */
- if (a->e == NULL)
- ;
- else if (a->e->dest->index > b->e->dest->index)
- return 1;
- else if (a->e->dest->index < b->e->dest->index)
- return -1;
-
- /* Sort after comp_code. */
- if (a->comp_code > b->comp_code)
- return 1;
- else if (a->comp_code < b->comp_code)
- return -1;
-
- hashval_t ha, hb;
-
- /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
- uses DECL_UID of the VAR_DECL, so sorting might differ between
- -g and -g0. When doing the removal of redundant assert exprs
- and commonization to successors, this does not matter, but the
- final sort needs to be stable. */
- if (stable)
- {
- ha = 0;
- hb = 0;
- }
- else
- {
- ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
- hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
- }
-
- /* Break the tie using hashing and source/bb index. */
- if (ha == hb)
- return (a->e != NULL
- ? a->e->src->index - b->e->src->index
- : a->bb->index - b->bb->index);
- return ha > hb ? 1 : -1;
-}
-
-/* Process all the insertions registered for every name N_i registered
- in NEED_ASSERT_FOR. The list of assertions to be inserted are
- found in ASSERTS_FOR[i]. */
-
-void
-vrp_asserts::process_assert_insertions ()
-{
- unsigned i;
- bitmap_iterator bi;
- bool update_edges_p = false;
- int num_asserts = 0;
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- dump (dump_file);
-
- EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
- {
- assert_locus *loc = asserts_for[i];
- gcc_assert (loc);
-
- auto_vec<assert_locus *, 16> asserts;
- for (; loc; loc = loc->next)
- asserts.safe_push (loc);
- asserts.qsort (compare_assert_loc<false>);
-
- /* Push down common asserts to successors and remove redundant ones. */
- unsigned ecnt = 0;
- assert_locus *common = NULL;
- unsigned commonj = 0;
- for (unsigned j = 0; j < asserts.length (); ++j)
- {
- loc = asserts[j];
- if (! loc->e)
- common = NULL;
- else if (! common
- || loc->e->dest != common->e->dest
- || loc->comp_code != common->comp_code
- || ! operand_equal_p (loc->val, common->val, 0)
- || ! operand_equal_p (loc->expr, common->expr, 0))
- {
- commonj = j;
- common = loc;
- ecnt = 1;
- }
- else if (loc->e == asserts[j-1]->e)
- {
- /* Remove duplicate asserts. */
- if (commonj == j - 1)
- {
- commonj = j;
- common = loc;
- }
- free (asserts[j-1]);
- asserts[j-1] = NULL;
- }
- else
- {
- ecnt++;
- if (EDGE_COUNT (common->e->dest->preds) == ecnt)
- {
- /* We have the same assertion on all incoming edges of a BB.
- Insert it at the beginning of that block. */
- loc->bb = loc->e->dest;
- loc->e = NULL;
- loc->si = gsi_none ();
- common = NULL;
- /* Clear asserts commoned. */
- for (; commonj != j; ++commonj)
- if (asserts[commonj])
- {
- free (asserts[commonj]);
- asserts[commonj] = NULL;
- }
- }
- }
- }
-
- /* The asserts vector sorting above might be unstable for
- -fcompare-debug, sort again to ensure a stable sort. */
- asserts.qsort (compare_assert_loc<true>);
- for (unsigned j = 0; j < asserts.length (); ++j)
- {
- loc = asserts[j];
- if (! loc)
- break;
- update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
- num_asserts++;
- free (loc);
- }
- }
-
- if (update_edges_p)
- gsi_commit_edge_inserts ();
-
- statistics_counter_event (fun, "Number of ASSERT_EXPR expressions inserted",
- num_asserts);
-}
-
-/* Traverse the flowgraph looking for conditional jumps to insert range
- expressions. These range expressions are meant to provide information
- to optimizations that need to reason in terms of value ranges. They
- will not be expanded into RTL. For instance, given:
-
- x = ...
- y = ...
- if (x < y)
- y = x - 2;
- else
- x = y + 3;
-
- this pass will transform the code into:
-
- x = ...
- y = ...
- if (x < y)
- {
- x = ASSERT_EXPR <x, x < y>
- y = x - 2
- }
- else
- {
- y = ASSERT_EXPR <y, x >= y>
- x = y + 3
- }
-
- The idea is that once copy and constant propagation have run, other
- optimizations will be able to determine what ranges of values can 'x'
- take in different paths of the code, simply by checking the reaching
- definition of 'x'. */
-
-void
-vrp_asserts::insert_range_assertions (void)
-{
- need_assert_for = BITMAP_ALLOC (NULL);
- asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
-
- calculate_dominance_info (CDI_DOMINATORS);
-
- find_assert_locations ();
- if (!bitmap_empty_p (need_assert_for))
- {
- process_assert_insertions ();
- update_ssa (TODO_update_ssa_no_phi);
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
- dump_function_to_file (current_function_decl, dump_file, dump_flags);
- }
-
- free (asserts_for);
- BITMAP_FREE (need_assert_for);
-}
-
-/* Return true if all imm uses of VAR are either in STMT, or
- feed (optionally through a chain of single imm uses) GIMPLE_COND
- in basic block COND_BB. */
-
-bool
-vrp_asserts::all_imm_uses_in_stmt_or_feed_cond (tree var,
- gimple *stmt,
- basic_block cond_bb)
-{
- use_operand_p use_p, use2_p;
- imm_use_iterator iter;
-
- FOR_EACH_IMM_USE_FAST (use_p, iter, var)
- if (USE_STMT (use_p) != stmt)
- {
- gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
- if (is_gimple_debug (use_stmt))
- continue;
- while (is_gimple_assign (use_stmt)
- && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
- && single_imm_use (gimple_assign_lhs (use_stmt),
- &use2_p, &use_stmt2))
- use_stmt = use_stmt2;
- if (gimple_code (use_stmt) != GIMPLE_COND
- || gimple_bb (use_stmt) != cond_bb)
- return false;
- }
- return true;
-}
-
-/* Convert range assertion expressions into the implied copies and
- copy propagate away the copies. Doing the trivial copy propagation
- here avoids the need to run the full copy propagation pass after
- VRP.
-
- FIXME, this will eventually lead to copy propagation removing the
- names that had useful range information attached to them. For
- instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
- then N_i will have the range [3, +INF].
-
- However, by converting the assertion into the implied copy
- operation N_i = N_j, we will then copy-propagate N_j into the uses
- of N_i and lose the range information.
-
- The problem with keeping ASSERT_EXPRs around is that passes after
- VRP need to handle them appropriately.
-
- Another approach would be to make the range information a first
- class property of the SSA_NAME so that it can be queried from
- any pass. This is made somewhat more complex by the need for
- multiple ranges to be associated with one SSA_NAME. */
-
-void
-vrp_asserts::remove_range_assertions ()
-{
- basic_block bb;
- gimple_stmt_iterator si;
- /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
- a basic block preceded by GIMPLE_COND branching to it and
- __builtin_trap, -1 if not yet checked, 0 otherwise. */
- int is_unreachable;
-
- /* Note that the BSI iterator bump happens at the bottom of the
- loop and no bump is necessary if we're removing the statement
- referenced by the current BSI. */
- FOR_EACH_BB_FN (bb, fun)
- for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
- {
- gimple *stmt = gsi_stmt (si);
-
- if (is_gimple_assign (stmt)
- && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
- {
- tree lhs = gimple_assign_lhs (stmt);
- tree rhs = gimple_assign_rhs1 (stmt);
- tree var;
-
- var = ASSERT_EXPR_VAR (rhs);
-
- if (TREE_CODE (var) == SSA_NAME
- && !POINTER_TYPE_P (TREE_TYPE (lhs))
- && SSA_NAME_RANGE_INFO (lhs))
- {
- if (is_unreachable == -1)
- {
- is_unreachable = 0;
- if (single_pred_p (bb)
- && assert_unreachable_fallthru_edge_p
- (single_pred_edge (bb)))
- is_unreachable = 1;
- }
- /* Handle
- if (x_7 >= 10 && x_7 < 20)
- __builtin_unreachable ();
- x_8 = ASSERT_EXPR <x_7, ...>;
- if the only uses of x_7 are in the ASSERT_EXPR and
- in the condition. In that case, we can copy the
- range info from x_8 computed in this pass also
- for x_7. */
- if (is_unreachable
- && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
- single_pred (bb)))
- {
- if (SSA_NAME_RANGE_INFO (var))
- {
- /* ?? This is a minor wart exposing the
- internals of SSA_NAME_RANGE_INFO in order
- to maintain existing behavior. This is
- because duplicate_ssa_name_range_info below
- needs a NULL destination range. This is
- all slated for removal... */
- ggc_free (SSA_NAME_RANGE_INFO (var));
- SSA_NAME_RANGE_INFO (var) = NULL;
- }
- duplicate_ssa_name_range_info (var, lhs);
- maybe_set_nonzero_bits (single_pred_edge (bb), var);
- }
- }
-
- /* Propagate the RHS into every use of the LHS. For SSA names
- also propagate abnormals as it merely restores the original
- IL in this case (a replace_uses_by would assert). */
- if (TREE_CODE (var) == SSA_NAME)
- {
- imm_use_iterator iter;
- use_operand_p use_p;
- gimple *use_stmt;
- FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
- FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
- SET_USE (use_p, var);
- }
- else
- replace_uses_by (lhs, var);
-
- /* And finally, remove the copy, it is not needed. */
- gsi_remove (&si, true);
- release_defs (stmt);
- }
- else
- {
- if (!is_gimple_debug (gsi_stmt (si)))
- is_unreachable = 0;
- gsi_next (&si);
- }
- }
-}
-
-class vrp_prop : public ssa_propagation_engine
-{
-public:
- vrp_prop (vr_values *v)
- : ssa_propagation_engine (),
- m_vr_values (v) { }
-
- void initialize (struct function *);
- void finalize ();
-
-private:
- enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) final override;
- enum ssa_prop_result visit_phi (gphi *) final override;
-
- struct function *fun;
- vr_values *m_vr_values;
-};
-
-/* Initialization required by ssa_propagate engine. */
-
-void
-vrp_prop::initialize (struct function *fn)
-{
- basic_block bb;
- fun = fn;
-
- FOR_EACH_BB_FN (bb, fun)
- {
- for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
- gsi_next (&si))
- {
- gphi *phi = si.phi ();
- if (!stmt_interesting_for_vrp (phi))
- {
- tree lhs = PHI_RESULT (phi);
- m_vr_values->set_def_to_varying (lhs);
- prop_set_simulate_again (phi, false);
- }
- else
- prop_set_simulate_again (phi, true);
- }
-
- for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
- gsi_next (&si))
- {
- gimple *stmt = gsi_stmt (si);
-
- /* If the statement is a control insn, then make sure it is
- simulated at least once; otherwise its outgoing edges will
- never get added. */
- if (stmt_ends_bb_p (stmt))
- prop_set_simulate_again (stmt, true);
- else if (!stmt_interesting_for_vrp (stmt))
- {
- m_vr_values->set_defs_to_varying (stmt);
- prop_set_simulate_again (stmt, false);
- }
- else
- prop_set_simulate_again (stmt, true);
- }
- }
-}
-
-/* Evaluate statement STMT. If the statement produces a useful range,
- return SSA_PROP_INTERESTING and record the SSA name with the
- interesting range into *OUTPUT_P.
-
- If STMT is a conditional branch and we can determine its truth
- value, the taken edge is recorded in *TAKEN_EDGE_P.
-
- If STMT produces a varying value, return SSA_PROP_VARYING. */
-
-enum ssa_prop_result
-vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
-{
- tree lhs = gimple_get_lhs (stmt);
- value_range_equiv vr;
- m_vr_values->extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
-
- if (*output_p)
- {
- if (m_vr_values->update_value_range (*output_p, &vr))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Found new range for ");
- print_generic_expr (dump_file, *output_p);
- fprintf (dump_file, ": ");
- dump_value_range (dump_file, &vr);
- fprintf (dump_file, "\n");
- }
-
- if (vr.varying_p ())
- return SSA_PROP_VARYING;
-
- return SSA_PROP_INTERESTING;
- }
- return SSA_PROP_NOT_INTERESTING;
- }
-
- if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
- switch (gimple_call_internal_fn (stmt))
- {
- case IFN_ADD_OVERFLOW:
- case IFN_SUB_OVERFLOW:
- case IFN_MUL_OVERFLOW:
- case IFN_ATOMIC_COMPARE_EXCHANGE:
- /* These internal calls return _Complex integer type,
- which VRP does not track, but the immediate uses
- thereof might be interesting. */
- if (lhs && TREE_CODE (lhs) == SSA_NAME)
- {
- imm_use_iterator iter;
- use_operand_p use_p;
- enum ssa_prop_result res = SSA_PROP_VARYING;
-
- m_vr_values->set_def_to_varying (lhs);
-
- FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
- {
- gimple *use_stmt = USE_STMT (use_p);
- if (!is_gimple_assign (use_stmt))
- continue;
- enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
- if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
- continue;
- tree rhs1 = gimple_assign_rhs1 (use_stmt);
- tree use_lhs = gimple_assign_lhs (use_stmt);
- if (TREE_CODE (rhs1) != rhs_code
- || TREE_OPERAND (rhs1, 0) != lhs
- || TREE_CODE (use_lhs) != SSA_NAME
- || !stmt_interesting_for_vrp (use_stmt)
- || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
- || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
- || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
- continue;
-
- /* If there is a change in the value range for any of the
- REALPART_EXPR/IMAGPART_EXPR immediate uses, return
- SSA_PROP_INTERESTING. If there are any REALPART_EXPR
- or IMAGPART_EXPR immediate uses, but none of them have
- a change in their value ranges, return
- SSA_PROP_NOT_INTERESTING. If there are no
- {REAL,IMAG}PART_EXPR uses at all,
- return SSA_PROP_VARYING. */
- value_range_equiv new_vr;
- m_vr_values->extract_range_basic (&new_vr, use_stmt);
- const value_range_equiv *old_vr
- = m_vr_values->get_value_range (use_lhs);
- if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
- res = SSA_PROP_INTERESTING;
- else
- res = SSA_PROP_NOT_INTERESTING;
- new_vr.equiv_clear ();
- if (res == SSA_PROP_INTERESTING)
- {
- *output_p = lhs;
- return res;
- }
- }
-
- return res;
- }
- break;
- default:
- break;
- }
-
- /* All other statements produce nothing of interest for VRP, so mark
- their outputs varying and prevent further simulation. */
- m_vr_values->set_defs_to_varying (stmt);
-
- return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
-}
-
-/* Visit all arguments for PHI node PHI that flow through executable
- edges. If a valid value range can be derived from all the incoming
- value ranges, set a new range for the LHS of PHI. */
-
-enum ssa_prop_result
-vrp_prop::visit_phi (gphi *phi)
-{
- tree lhs = PHI_RESULT (phi);
- value_range_equiv vr_result;
- m_vr_values->extract_range_from_phi_node (phi, &vr_result);
- if (m_vr_values->update_value_range (lhs, &vr_result))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Found new range for ");
- print_generic_expr (dump_file, lhs);
- fprintf (dump_file, ": ");
- dump_value_range (dump_file, &vr_result);
- fprintf (dump_file, "\n");
- }
-
- if (vr_result.varying_p ())
- return SSA_PROP_VARYING;
-
- return SSA_PROP_INTERESTING;
- }
-
- /* Nothing changed, don't add outgoing edges. */
- return SSA_PROP_NOT_INTERESTING;
-}
-
-/* Traverse all the blocks folding conditionals with known ranges. */
-
-void
-vrp_prop::finalize ()
-{
- size_t i;
-
- /* We have completed propagating through the lattice. */
- m_vr_values->set_lattice_propagation_complete ();
-
- if (dump_file)
- {
- fprintf (dump_file, "\nValue ranges after VRP:\n\n");
- m_vr_values->dump (dump_file);
- fprintf (dump_file, "\n");
- }
-
- /* Record the computed ranges: mark pointers known to be non-null
- and attach range info to non-pointer SSA_NAMEs. */
- for (i = 0; i < num_ssa_names; i++)
- {
- tree name = ssa_name (i);
- if (!name)
- continue;
-
- const value_range_equiv *vr = m_vr_values->get_value_range (name);
- if (!name || vr->varying_p () || !vr->constant_p ())
- continue;
-
- if (POINTER_TYPE_P (TREE_TYPE (name))
- && range_includes_zero_p (vr) == 0)
- set_ptr_nonnull (name);
- else if (!POINTER_TYPE_P (TREE_TYPE (name)))
- set_range_info (name, *vr);
- }
-}
+/* Given a SWITCH_STMT, return the case label that encompasses the
+ known possible values for the switch operand. RANGE_OF_OP is a
+ range for the known values of the switch operand. */
-class vrp_folder : public substitute_and_fold_engine
+tree
+find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
{
- public:
- vrp_folder (vr_values *v)
- : substitute_and_fold_engine (/* Fold all stmts. */ true),
- m_vr_values (v), simplifier (v)
- { }
- void simplify_casted_conds (function *fun);
+ if (range_of_op->undefined_p ()
+ || range_of_op->varying_p ())
+ return NULL_TREE;
-private:
- tree value_of_expr (tree name, gimple *stmt) override
+ size_t i, j;
+ tree op = gimple_switch_index (switch_stmt);
+ tree type = TREE_TYPE (op);
+ tree tmin = wide_int_to_tree (type, range_of_op->lower_bound ());
+ tree tmax = wide_int_to_tree (type, range_of_op->upper_bound ());
+ find_case_label_range (switch_stmt, tmin, tmax, &i, &j);
+ if (i == j)
{
- return m_vr_values->value_of_expr (name, stmt);
+ /* Look for exactly one label that encompasses the range of
+ the operand. */
+ tree label = gimple_switch_label (switch_stmt, i);
+ tree case_high
+ = CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
+ int_range_max label_range (CASE_LOW (label), case_high);
+ if (!types_compatible_p (label_range.type (), range_of_op->type ()))
+ range_cast (label_range, range_of_op->type ());
+ label_range.intersect (*range_of_op);
+ if (label_range == *range_of_op)
+ return label;
}
- bool fold_stmt (gimple_stmt_iterator *) final override;
- bool fold_predicate_in (gimple_stmt_iterator *);
-
- vr_values *m_vr_values;
- simplify_using_ranges simplifier;
-};
-
-/* If the statement pointed by SI has a predicate whose value can be
- computed using the value range information computed by VRP, compute
- its value and return true. Otherwise, return false. */
-
-bool
-vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
-{
- bool assignment_p = false;
- tree val;
- gimple *stmt = gsi_stmt (*si);
-
- if (is_gimple_assign (stmt)
- && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
+ else if (i > j)
{
- assignment_p = true;
- val = simplifier.vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
- gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt),
- stmt);
+ /* If there are no labels at all, take the default. */
+ return gimple_switch_label (switch_stmt, 0);
}
- else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
- val = simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
- gimple_cond_lhs (cond_stmt),
- gimple_cond_rhs (cond_stmt),
- stmt);
else
- return false;
-
- if (val)
{
- if (assignment_p)
- val = fold_convert (TREE_TYPE (gimple_assign_lhs (stmt)), val);
-
- if (dump_file)
- {
- fprintf (dump_file, "Folding predicate ");
- print_gimple_expr (dump_file, stmt, 0);
- fprintf (dump_file, " to ");
- print_generic_expr (dump_file, val);
- fprintf (dump_file, "\n");
- }
-
- if (is_gimple_assign (stmt))
- gimple_assign_set_rhs_from_tree (si, val);
- else
- {
- gcc_assert (gimple_code (stmt) == GIMPLE_COND);
- gcond *cond_stmt = as_a <gcond *> (stmt);
- if (integer_zerop (val))
- gimple_cond_make_false (cond_stmt);
- else if (integer_onep (val))
- gimple_cond_make_true (cond_stmt);
- else
- gcc_unreachable ();
- }
-
- return true;
+ /* Otherwise, there are various labels that can encompass
+ the range of the operand. In that case, see if the range of
+ the operand is entirely *outside* the bounds of all the
+ (non-default) case labels. If so, take the default. */
+ unsigned n = gimple_switch_num_labels (switch_stmt);
+ tree min_label = gimple_switch_label (switch_stmt, 1);
+ tree max_label = gimple_switch_label (switch_stmt, n - 1);
+ tree case_high = CASE_HIGH (max_label);
+ if (!case_high)
+ case_high = CASE_LOW (max_label);
+ int_range_max label_range (CASE_LOW (min_label), case_high);
+ if (!types_compatible_p (label_range.type (), range_of_op->type ()))
+ range_cast (label_range, range_of_op->type ());
+ label_range.intersect (*range_of_op);
+ if (label_range.undefined_p ())
+ return gimple_switch_label (switch_stmt, 0);
}
-
- return false;
-}
-
-/* Callback for substitute_and_fold folding the stmt at *SI. */
-
-bool
-vrp_folder::fold_stmt (gimple_stmt_iterator *si)
-{
- if (fold_predicate_in (si))
- return true;
-
- return simplifier.simplify (si);
+ return NULL_TREE;
}
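The overload above answers one question for switch simplification: given the operand's known range, which case label (if any) is guaranteed to be taken? As a rough standalone illustration of that selection logic (this is not GCC code; interval and pick_case are made-up names and plain integer pairs stand in for irange), the same idea over sorted, disjoint case ranges looks like this:

// Standalone sketch of the label-selection idea above, assuming disjoint,
// inclusive case ranges.  Returns the index of the single case that covers
// the whole operand range, -1 for "take the default" when the operand range
// misses every case, or -2 when no single target can be determined.
#include <cstdint>
#include <vector>
#include <cstdio>

struct interval { int64_t lo, hi; };   // inclusive bounds

static int pick_case (interval op, const std::vector<interval> &cases)
{
  int covering = -1;
  bool overlaps_any = false;
  for (size_t i = 0; i < cases.size (); ++i)
    {
      bool overlap = !(op.hi < cases[i].lo || op.lo > cases[i].hi);
      overlaps_any |= overlap;
      if (op.lo >= cases[i].lo && op.hi <= cases[i].hi)
	covering = (int) i;		// case i encompasses the range
    }
  if (covering >= 0)
    return covering;			// exactly one label is taken
  if (!overlaps_any)
    return -1;				// range misses all cases: default
  return -2;				// several labels remain possible
}

int main ()
{
  std::vector<interval> cases = { {0, 3}, {10, 20} };
  printf ("%d\n", pick_case ({11, 15}, cases));	// 1: covered by [10,20]
  printf ("%d\n", pick_case ({4, 9}, cases));	// -1: default label
  printf ("%d\n", pick_case ({2, 12}, cases));	// -2: spans several labels
  return 0;
}

The three outcomes mirror the function above: a single covering label, the default label when the range is entirely outside all case labels, and NULL_TREE (here -2) when several labels are still reachable.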
-/* A comparison of an SSA_NAME against a constant where the SSA_NAME
- was set by a type conversion can often be rewritten to use the RHS
- of the type conversion. Do this optimization for all conditionals
- in FUN. */
-
-void
-vrp_folder::simplify_casted_conds (function *fun)
+struct case_info
{
+ tree expr;
basic_block bb;
- FOR_EACH_BB_FN (bb, fun)
- {
- gimple *last = last_stmt (bb);
- if (last && gimple_code (last) == GIMPLE_COND)
- {
- if (simplifier.simplify_casted_cond (as_a <gcond *> (last)))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Folded into: ");
- print_gimple_stmt (dump_file, last, 0, TDF_SLIM);
- fprintf (dump_file, "\n");
- }
- }
- }
- }
-}
-
-/* Main entry point to VRP (Value Range Propagation). This pass is
- loosely based on J. R. C. Patterson, ``Accurate Static Branch
- Prediction by Value Range Propagation,'' in SIGPLAN Conference on
- Programming Language Design and Implementation, pp. 67-78, 1995.
- Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
-
- This is essentially an SSA-CCP pass modified to deal with ranges
- instead of constants.
-
- While propagating ranges, we may find that two or more SSA names
- have equivalent, though distinct, ranges. For instance,
-
- 1 x_9 = p_3->a;
- 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
- 3 if (p_4 == q_2)
- 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
- 5 endif
- 6 if (q_2)
-
- In the code above, pointer p_5 has range [q_2, q_2], but from the
- code we can also determine that p_5 cannot be NULL and, if q_2 had
- a non-varying range, p_5's range should also be compatible with it.
-
- These equivalences are created by two expressions: ASSERT_EXPR and
- copy operations. Since p_5 is an assertion on p_4, and p_4 was the
- result of another assertion, then we can use the fact that p_5 and
- p_4 are equivalent when evaluating p_5's range.
-
- Together with value ranges, we also propagate these equivalences
- between names so that we can take advantage of information from
- multiple ranges when doing final replacement. Note that this
- equivalency relation is transitive but not symmetric.
-
- In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
- cannot assert that q_2 is equivalent to p_5 because q_2 may be used
- in contexts where that assertion does not hold (e.g., in line 6).
-
- TODO, the main difference between this pass and Patterson's is that
- we do not propagate edge probabilities. We only compute whether
- edges can be taken or not. That is, instead of having a spectrum
- of jump probabilities between 0 and 1, we only deal with 0, 1 and
- DON'T KNOW. In the future, it may be worthwhile to propagate
- probabilities to aid branch prediction. */
-
-static unsigned int
-execute_vrp (struct function *fun, bool warn_array_bounds_p)
-{
- loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
- rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
- scev_initialize ();
-
- /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
- Inserting assertions may split edges which will invalidate
- EDGE_DFS_BACK. */
- vrp_asserts assert_engine (fun);
- assert_engine.insert_range_assertions ();
-
- /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
- mark_dfs_back_edges ();
-
- vr_values vrp_vr_values;
-
- class vrp_prop vrp_prop (&vrp_vr_values);
- vrp_prop.initialize (fun);
- vrp_prop.ssa_propagate ();
-
- /* Instantiate the folder here, so that edge cleanups happen at the
- end of this function. */
- vrp_folder folder (&vrp_vr_values);
- vrp_prop.finalize ();
-
- /* If we're checking array refs, we want to merge information on
- the executability of each edge between vrp_folder and the
- check_array_bounds_dom_walker: each can clear the
- EDGE_EXECUTABLE flag on edges, in different ways.
-
- Hence, if we're going to call check_all_array_refs, set
- the flag on every edge now, rather than in
- check_array_bounds_dom_walker's ctor; vrp_folder may clear
- it from some edges. */
- if (warn_array_bounds && warn_array_bounds_p)
- set_all_edges_as_executable (fun);
-
- folder.substitute_and_fold ();
-
- if (warn_array_bounds && warn_array_bounds_p)
- {
- array_bounds_checker array_checker (fun, &vrp_vr_values);
- array_checker.check ();
- }
-
- folder.simplify_casted_conds (fun);
-
- free_numbers_of_iterations_estimates (fun);
-
- assert_engine.remove_range_assertions ();
-
- scev_finalize ();
- loop_optimizer_finalize ();
- return 0;
-}
+};
// This is a ranger based folder which continues to use the dominator
// walk to access the substitute and fold machinery. Ranges are calculated
@@ -4621,10 +1198,7 @@ public:
if (my_pass == 0)
return execute_ranger_vrp (fun, /*warn_array_bounds_p=*/false, false);
- if ((my_pass == 1 && param_vrp1_mode == VRP_MODE_RANGER)
- || (my_pass == 2 && param_vrp2_mode == VRP_MODE_RANGER))
- return execute_ranger_vrp (fun, warn_array_bounds_p, my_pass == 2);
- return execute_vrp (fun, warn_array_bounds_p);
+ return execute_ranger_vrp (fun, warn_array_bounds_p, my_pass == 2);
}
private:
@@ -22,26 +22,6 @@ along with GCC; see the file COPYING3. If not see
#include "value-range.h"
-struct assert_info
-{
- /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
- enum tree_code comp_code;
-
- /* Name to register the assert for. */
- tree name;
-
- /* Value being compared against. */
- tree val;
-
- /* Expression to compare. */
- tree expr;
-};
-
-extern void register_edge_assert_for (tree, edge, enum tree_code,
- tree, tree, vec<assert_info> &);
-extern bool stmt_interesting_for_vrp (gimple *);
-extern bool infer_value_range (gimple *, tree, tree_code *, tree *);
-
extern bool range_int_cst_p (const value_range *);
extern int compare_values (tree, tree);
@@ -60,11 +40,6 @@ extern bool find_case_label_range (gswitch *, tree, tree, size_t *, size_t *);
extern tree find_case_label_range (gswitch *, const irange *vr);
extern bool find_case_label_index (gswitch *, size_t, tree, size_t *);
extern bool overflow_comparison_p (tree_code, tree, tree, bool, tree *);
-extern tree get_single_symbol (tree, bool *, tree *);
extern void maybe_set_nonzero_bits (edge, tree);
-extern wide_int masked_increment (const wide_int &val_in, const wide_int &mask,
- const wide_int &sgnbit, unsigned int prec);
-extern unsigned int execute_ranger_vrp (struct function *fun,
- bool warn_array_bounds_p = false);
#endif /* GCC_TREE_VRP_H */
@@ -1197,12 +1197,6 @@ wide_int
irange::legacy_lower_bound (unsigned pair) const
{
gcc_checking_assert (legacy_mode_p ());
- if (symbolic_p ())
- {
- value_range numeric_range (*this);
- numeric_range.normalize_symbolics ();
- return numeric_range.legacy_lower_bound (pair);
- }
gcc_checking_assert (m_num_ranges > 0);
gcc_checking_assert (pair + 1 <= num_pairs ());
if (m_kind == VR_ANTI_RANGE)
@@ -1224,12 +1218,6 @@ wide_int
irange::legacy_upper_bound (unsigned pair) const
{
gcc_checking_assert (legacy_mode_p ());
- if (symbolic_p ())
- {
- value_range numeric_range (*this);
- numeric_range.normalize_symbolics ();
- return numeric_range.legacy_upper_bound (pair);
- }
gcc_checking_assert (m_num_ranges > 0);
gcc_checking_assert (pair + 1 <= num_pairs ());
if (m_kind == VR_ANTI_RANGE)
@@ -1297,16 +1285,6 @@ irange::operator== (const irange &other) const
return get_nonzero_bits () == other.get_nonzero_bits ();
}
-/* Return TRUE if this is a symbolic range. */
-
-bool
-irange::symbolic_p () const
-{
- return (m_num_ranges > 0
- && (!is_gimple_min_invariant (min ())
- || !is_gimple_min_invariant (max ())));
-}
-
/* Return TRUE if this is a constant range. */
bool
@@ -1426,12 +1404,6 @@ irange::contains_p (tree cst) const
if (legacy_mode_p ())
{
gcc_checking_assert (TREE_CODE (cst) == INTEGER_CST);
- if (symbolic_p ())
- {
- value_range numeric_range (*this);
- numeric_range.normalize_symbolics ();
- return numeric_range.contains_p (cst);
- }
return value_inside_range (cst) == 1;
}
@@ -163,7 +163,6 @@ public:
// Deprecated legacy public methods.
tree min () const; // DEPRECATED
tree max () const; // DEPRECATED
- bool symbolic_p () const; // DEPRECATED
bool constant_p () const; // DEPRECATED
void normalize_symbolics (); // DEPRECATED
void normalize_addresses (); // DEPRECATED
@@ -52,415 +52,6 @@ along with GCC; see the file COPYING3. If not see
#include "range-op.h"
#include "gimple-range.h"
-/* Set value range VR to a non-negative range of type TYPE. */
-
-static inline void
-set_value_range_to_nonnegative (value_range_equiv *vr, tree type)
-{
- tree zero = build_int_cst (type, 0);
- vr->update (zero, vrp_val_max (type));
-}
-
-/* Set value range VR to a range of a truthvalue of type TYPE. */
-
-static inline void
-set_value_range_to_truthvalue (value_range_equiv *vr, tree type)
-{
- if (TYPE_PRECISION (type) == 1)
- vr->set_varying (type);
- else
- vr->update (build_int_cst (type, 0), build_int_cst (type, 1));
-}
-
-/* Return the lattice entry for VAR or NULL if it doesn't exist or cannot
- be initialized. */
-
-value_range_equiv *
-vr_values::get_lattice_entry (const_tree var)
-{
- value_range_equiv *vr;
- tree sym;
- unsigned ver = SSA_NAME_VERSION (var);
-
- /* If we query the entry for a new SSA name avoid reallocating the lattice
- since we should get here at most from the substitute-and-fold stage which
- will never try to change values. */
- if (ver >= num_vr_values)
- return NULL;
-
- vr = vr_value[ver];
- if (vr)
- return vr;
-
- /* Create a default value range. */
- vr = allocate_value_range_equiv ();
- vr_value[ver] = vr;
-
- /* After propagation finished return varying. */
- if (values_propagated)
- {
- vr->set_varying (TREE_TYPE (var));
- return vr;
- }
-
- vr->set_undefined ();
-
- /* If VAR is a default definition of a parameter, the variable can
- take any value in VAR's type. */
- if (SSA_NAME_IS_DEFAULT_DEF (var))
- {
- sym = SSA_NAME_VAR (var);
- if (TREE_CODE (sym) == PARM_DECL)
- {
- /* Try to use the "nonnull" attribute to create ~[0, 0]
- anti-ranges for pointers. Note that this is only valid with
- default definitions of PARM_DECLs. */
- if (POINTER_TYPE_P (TREE_TYPE (sym))
- && (nonnull_arg_p (sym)
- || (get_global_range_query ()->range_of_expr (*vr,
- const_cast <tree> (var))
- && vr->nonzero_p ())))
- {
- vr->set_nonzero (TREE_TYPE (sym));
- vr->equiv_clear ();
- }
- else if (INTEGRAL_TYPE_P (TREE_TYPE (sym)))
- {
- get_global_range_query ()->range_of_expr (*vr, const_cast <tree> (var));
- if (vr->undefined_p ())
- vr->set_varying (TREE_TYPE (sym));
- }
- else
- vr->set_varying (TREE_TYPE (sym));
- }
- else if (TREE_CODE (sym) == RESULT_DECL
- && DECL_BY_REFERENCE (sym))
- {
- vr->set_nonzero (TREE_TYPE (sym));
- vr->equiv_clear ();
- }
- }
-
- return vr;
-}
-
-/* Return value range information for VAR.
-
- If we have no values ranges recorded (ie, VRP is not running), then
- return NULL. Otherwise create an empty range if none existed for VAR. */
-
-const value_range_equiv *
-vr_values::get_value_range (const_tree var,
- gimple *stmt ATTRIBUTE_UNUSED)
-{
- /* If we have no recorded ranges, then return NULL. */
- if (!vr_value)
- return NULL;
-
- value_range_equiv *vr = get_lattice_entry (var);
-
- /* Reallocate the lattice if needed. */
- if (!vr)
- {
- unsigned int old_sz = num_vr_values;
- num_vr_values = num_ssa_names + num_ssa_names / 10;
- vr_value = XRESIZEVEC (value_range_equiv *, vr_value, num_vr_values);
- for ( ; old_sz < num_vr_values; old_sz++)
- vr_value [old_sz] = NULL;
-
- /* Now that the lattice has been resized, we should never fail. */
- vr = get_lattice_entry (var);
- gcc_assert (vr);
- }
-
- return vr;
-}
-
-bool
-vr_values::range_of_expr (vrange &r, tree expr, gimple *stmt)
-{
- if (!gimple_range_ssa_p (expr))
- return get_tree_range (r, expr, stmt);
-
- if (const value_range *vr = get_value_range (expr, stmt))
- {
- if (!vr->supports_type_p (TREE_TYPE (expr)))
- {
- // vr_values::extract_range_basic() use of ranger's
- // fold_range() can create a situation where we are asked
- // for the range of an unsupported legacy type. Since
- // get_value_range() above will return varying or undefined
- // for such types, avoid copying incompatible range types.
- if (vr->undefined_p ())
- r.set_undefined ();
- else
- r.set_varying (TREE_TYPE (expr));
- return true;
- }
- if (vr->undefined_p () || vr->constant_p ())
- r = *vr;
- else
- {
- value_range tmp = *vr;
- tmp.normalize_symbolics ();
- r = tmp;
- }
- return true;
- }
- return false;
-}
-
-tree
-vr_values::value_of_expr (tree op, gimple *)
-{
- return op_with_constant_singleton_value_range (op);
-}
-
-tree
-vr_values::value_on_edge (edge, tree op)
-{
- return op_with_constant_singleton_value_range (op);
-}
-
-tree
-vr_values::value_of_stmt (gimple *stmt, tree op)
-{
- if (!op)
- op = gimple_get_lhs (stmt);
-
- gcc_checking_assert (!op|| op == gimple_get_lhs (stmt));
-
- if (op)
- return op_with_constant_singleton_value_range (op);
- return NULL_TREE;
-}
-
-/* Set the lattice entry for DEF to VARYING. */
-
-void
-vr_values::set_def_to_varying (const_tree def)
-{
- value_range_equiv *vr = get_lattice_entry (def);
- if (vr)
- vr->set_varying (TREE_TYPE (def));
-}
-
-/* Set value-ranges of all SSA names defined by STMT to varying. */
-
-void
-vr_values::set_defs_to_varying (gimple *stmt)
-{
- ssa_op_iter i;
- tree def;
- FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
- set_def_to_varying (def);
-}
-
-/* Update the value range and equivalence set for variable VAR to
- NEW_VR. Return true if NEW_VR is different from VAR's previous
- value.
-
- NOTE: This function assumes that NEW_VR is a temporary value range
- object created for the sole purpose of updating VAR's range. The
- storage used by the equivalence set from NEW_VR will be freed by
- this function. Do not call update_value_range when NEW_VR
- is the range object associated with another SSA name. */
-
-bool
-vr_values::update_value_range (const_tree var, value_range_equiv *new_vr)
-{
- value_range_equiv *old_vr;
- bool is_new;
-
- /* If there is a value-range on the SSA name from earlier analysis
- factor that in. */
- if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
- {
- value_range_equiv nr;
- get_global_range_query ()->range_of_expr (nr, const_cast <tree> (var));
- if (!nr.undefined_p ())
- new_vr->legacy_verbose_intersect (&nr);
- }
-
- /* Update the value range, if necessary. If we cannot allocate a lattice
- entry for VAR keep it at VARYING. This happens when DOM feeds us stmts
- with SSA names allocated after setting up the lattice. */
- old_vr = get_lattice_entry (var);
- if (!old_vr)
- return false;
- is_new = !old_vr->equal_p (*new_vr, /*ignore_equivs=*/false);
-
- if (is_new)
- {
- /* Do not allow transitions up the lattice. The following
- is slightly more awkward than just new_vr->type < old_vr->type
- because VR_RANGE and VR_ANTI_RANGE need to be considered
- the same. We may not have is_new when transitioning to
- UNDEFINED. If old_vr->type is VARYING, we shouldn't be
- called, if we are anyway, keep it VARYING. */
- if (old_vr->varying_p ())
- {
- new_vr->set_varying (TREE_TYPE (var));
- is_new = false;
- }
- else if (new_vr->undefined_p ())
- {
- old_vr->set_varying (TREE_TYPE (var));
- new_vr->set_varying (TREE_TYPE (var));
- return true;
- }
- else
- old_vr->set (new_vr->min (), new_vr->max (), new_vr->equiv (),
- new_vr->kind ());
- }
-
- new_vr->equiv_clear ();
-
- return is_new;
-}
-
-/* Return true if value range VR involves exactly one symbol SYM. */
-
-static bool
-symbolic_range_based_on_p (value_range *vr, const_tree sym)
-{
- bool neg, min_has_symbol, max_has_symbol;
- tree inv;
-
- if (is_gimple_min_invariant (vr->min ()))
- min_has_symbol = false;
- else if (get_single_symbol (vr->min (), &neg, &inv) == sym)
- min_has_symbol = true;
- else
- return false;
-
- if (is_gimple_min_invariant (vr->max ()))
- max_has_symbol = false;
- else if (get_single_symbol (vr->max (), &neg, &inv) == sym)
- max_has_symbol = true;
- else
- return false;
-
- return (min_has_symbol || max_has_symbol);
-}
-
-/* Return true if the result of assignment STMT is known to be non-zero. */
-
-static bool
-gimple_assign_nonzero_p (gimple *stmt)
-{
- enum tree_code code = gimple_assign_rhs_code (stmt);
- bool strict_overflow_p;
- tree type = TREE_TYPE (gimple_assign_lhs (stmt));
- switch (get_gimple_rhs_class (code))
- {
- case GIMPLE_UNARY_RHS:
- return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
- type,
- gimple_assign_rhs1 (stmt),
- &strict_overflow_p);
- case GIMPLE_BINARY_RHS:
- return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
- type,
- gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt),
- &strict_overflow_p);
- case GIMPLE_TERNARY_RHS:
- return false;
- case GIMPLE_SINGLE_RHS:
- return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
- &strict_overflow_p);
- case GIMPLE_INVALID_RHS:
- gcc_unreachable ();
- default:
- gcc_unreachable ();
- }
-}
-
-/* Return true if STMT is known to compute a non-zero value. */
-
-static bool
-gimple_stmt_nonzero_p (gimple *stmt)
-{
- switch (gimple_code (stmt))
- {
- case GIMPLE_ASSIGN:
- return gimple_assign_nonzero_p (stmt);
- case GIMPLE_CALL:
- {
- gcall *call_stmt = as_a<gcall *> (stmt);
- return (gimple_call_nonnull_result_p (call_stmt)
- || gimple_call_nonnull_arg (call_stmt));
- }
- default:
- gcc_unreachable ();
- }
-}
-/* Like tree_expr_nonzero_p, but this function uses value ranges
- obtained so far. */
-
-bool
-vr_values::vrp_stmt_computes_nonzero (gimple *stmt)
-{
- if (gimple_stmt_nonzero_p (stmt))
- return true;
-
- /* If we have an expression of the form &X->a, then the expression
- is nonnull if X is nonnull. */
- if (is_gimple_assign (stmt)
- && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
- {
- tree expr = gimple_assign_rhs1 (stmt);
- poly_int64 bitsize, bitpos;
- tree offset;
- machine_mode mode;
- int unsignedp, reversep, volatilep;
- tree base = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize,
- &bitpos, &offset, &mode, &unsignedp,
- &reversep, &volatilep);
-
- if (base != NULL_TREE
- && TREE_CODE (base) == MEM_REF
- && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
- {
- poly_offset_int off = 0;
- bool off_cst = false;
- if (offset == NULL_TREE || TREE_CODE (offset) == INTEGER_CST)
- {
- off = mem_ref_offset (base);
- if (offset)
- off += poly_offset_int::from (wi::to_poly_wide (offset),
- SIGNED);
- off <<= LOG2_BITS_PER_UNIT;
- off += bitpos;
- off_cst = true;
- }
- /* If &X->a is equal to X and X is ~[0, 0], the result is too.
- For -fdelete-null-pointer-checks -fno-wrapv-pointer we don't
- allow going from non-NULL pointer to NULL. */
- if ((off_cst && known_eq (off, 0))
- || (flag_delete_null_pointer_checks
- && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (expr))))
- {
- const value_range_equiv *vr
- = get_value_range (TREE_OPERAND (base, 0), stmt);
- if (!range_includes_zero_p (vr))
- return true;
- }
- /* If MEM_REF has a "positive" offset, consider it non-NULL
- always, for -fdelete-null-pointer-checks also "negative"
- ones. Punt for unknown offsets (e.g. variable ones). */
- if (!TYPE_OVERFLOW_WRAPS (TREE_TYPE (expr))
- && off_cst
- && known_ne (off, 0)
- && (flag_delete_null_pointer_checks || known_gt (off, 0)))
- return true;
- }
- }
-
- return false;
-}
-
/* Returns true if EXPR is a valid value (as expected by compare_values) --
a gimple invariant, or SSA_NAME +- CST. */
@@ -478,25 +69,6 @@ valid_value_p (tree expr)
return is_gimple_min_invariant (expr);
}
-/* If OP has a value range with a single constant value return that,
- otherwise return NULL_TREE. This returns OP itself if OP is a
- constant. */
-
-tree
-vr_values::op_with_constant_singleton_value_range (tree op)
-{
- if (is_gimple_min_invariant (op))
- return op;
-
- if (TREE_CODE (op) != SSA_NAME)
- return NULL_TREE;
-
- tree t;
- if (get_value_range (op)->singleton_p (&t))
- return t;
- return NULL;
-}
-
/* Return true if op is in a boolean [0, 1] value-range. */
bool
@@ -519,549 +91,6 @@ simplify_using_ranges::op_with_boolean_value_range_p (tree op, gimple *s)
build_one_cst (TREE_TYPE (op)));
}
-/* Extract value range information for VAR when (OP COND_CODE LIMIT) is
- true and store it in *VR_P. */
-
-void
-vr_values::extract_range_for_var_from_comparison_expr (tree var,
- enum tree_code cond_code,
- tree op, tree limit,
- value_range_equiv *vr_p)
-{
- tree min, max, type;
- const value_range_equiv *limit_vr;
- type = TREE_TYPE (var);
-
- /* For pointer arithmetic, we only keep track of pointer equality
- and inequality. If we arrive here with unfolded conditions like
- _1 > _1 do not derive anything. */
- if ((POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
- || limit == var)
- {
- vr_p->set_varying (type);
- return;
- }
-
- /* If LIMIT is another SSA name and LIMIT has a range of its own,
- try to use LIMIT's range to avoid creating symbolic ranges
- unnecessarily. */
- limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
-
- /* LIMIT's range is only interesting if it has any useful information. */
- if (! limit_vr
- || limit_vr->undefined_p ()
- || limit_vr->varying_p ()
- || (limit_vr->symbolic_p ()
- && ! (limit_vr->kind () == VR_RANGE
- && (limit_vr->min () == limit_vr->max ()
- || operand_equal_p (limit_vr->min (),
- limit_vr->max (), 0)))))
- limit_vr = NULL;
-
- /* Initially, the new range has the same set of equivalences of
- VAR's range. This will be revised before returning the final
- value. Since assertions may be chained via mutually exclusive
- predicates, we will need to trim the set of equivalences before
- we are done. */
- gcc_assert (vr_p->equiv () == NULL);
- vr_p->equiv_add (var, get_value_range (var), &vrp_equiv_obstack);
-
- /* Extract a new range based on the asserted comparison for VAR and
- LIMIT's value range. Notice that if LIMIT has an anti-range, we
- will only use it for equality comparisons (EQ_EXPR). For any
- other kind of assertion, we cannot derive a range from LIMIT's
- anti-range that can be used to describe the new range. For
- instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
- then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
- no single range for x_2 that could describe LE_EXPR, so we might
- as well build the range [b_4, +INF] for it.
- One special case we handle is extracting a range from a
- range test encoded as (unsigned)var + CST <= limit. */
- if (TREE_CODE (op) == NOP_EXPR
- || TREE_CODE (op) == PLUS_EXPR)
- {
- if (TREE_CODE (op) == PLUS_EXPR)
- {
- min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (op, 1)),
- TREE_OPERAND (op, 1));
- max = int_const_binop (PLUS_EXPR, limit, min);
- op = TREE_OPERAND (op, 0);
- }
- else
- {
- min = build_int_cst (TREE_TYPE (var), 0);
- max = limit;
- }
-
- /* Make sure to not set TREE_OVERFLOW on the final type
- conversion. We are willingly interpreting large positive
- unsigned values as negative signed values here. */
- min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
- max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
-
- /* We can transform a max, min range to an anti-range or
- vice-versa. Use set_and_canonicalize which does this for
- us. */
- if (cond_code == LE_EXPR)
- vr_p->set (min, max, vr_p->equiv ());
- else if (cond_code == GT_EXPR)
- vr_p->set (min, max, vr_p->equiv (), VR_ANTI_RANGE);
- else
- gcc_unreachable ();
- }
- else if (cond_code == EQ_EXPR)
- {
- enum value_range_kind range_kind;
-
- if (limit_vr)
- {
- range_kind = limit_vr->kind ();
- min = limit_vr->min ();
- max = limit_vr->max ();
- }
- else
- {
- range_kind = VR_RANGE;
- min = limit;
- max = limit;
- }
-
- vr_p->update (min, max, range_kind);
-
- /* When asserting the equality VAR == LIMIT and LIMIT is another
- SSA name, the new range will also inherit the equivalence set
- from LIMIT. */
- if (TREE_CODE (limit) == SSA_NAME)
- vr_p->equiv_add (limit, get_value_range (limit), &vrp_equiv_obstack);
- }
- else if (cond_code == NE_EXPR)
- {
- /* As described above, when LIMIT's range is an anti-range and
- this assertion is an inequality (NE_EXPR), then we cannot
- derive anything from the anti-range. For instance, if
- LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
- not imply that VAR's range is [0, 0]. So, in the case of
- anti-ranges, we just assert the inequality using LIMIT and
- not its anti-range.
-
- If LIMIT_VR is a range, we can only use it to build a new
- anti-range if LIMIT_VR is a single-valued range. For
- instance, if LIMIT_VR is [0, 1], the predicate
- VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
- Rather, it means that for value 0 VAR should be ~[0, 0]
- and for value 1, VAR should be ~[1, 1]. We cannot
- represent these ranges.
-
- The only situation in which we can build a valid
- anti-range is when LIMIT_VR is a single-valued range
- (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
- build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
- if (limit_vr
- && limit_vr->kind () == VR_RANGE
- && compare_values (limit_vr->min (), limit_vr->max ()) == 0)
- {
- min = limit_vr->min ();
- max = limit_vr->max ();
- }
- else
- {
- /* In any other case, we cannot use LIMIT's range to build a
- valid anti-range. */
- min = max = limit;
- }
-
- /* If MIN and MAX cover the whole range for their type, then
- just use the original LIMIT. */
- if (INTEGRAL_TYPE_P (type)
- && vrp_val_is_min (min)
- && vrp_val_is_max (max))
- min = max = limit;
-
- vr_p->set (min, max, vr_p->equiv (), VR_ANTI_RANGE);
- }
- else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
- {
- min = TYPE_MIN_VALUE (type);
-
- if (limit_vr == NULL || limit_vr->kind () == VR_ANTI_RANGE)
- max = limit;
- else
- {
- /* If LIMIT_VR is of the form [N1, N2], we need to build the
- range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
- LT_EXPR. */
- max = limit_vr->max ();
- }
-
- /* If the maximum value forces us to be out of bounds, simply punt.
- It would be pointless to try and do anything more since this
- all should be optimized away above us. */
- if (cond_code == LT_EXPR
- && compare_values (max, min) == 0)
- vr_p->set_varying (TREE_TYPE (min));
- else
- {
- /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
- if (cond_code == LT_EXPR)
- {
- if (TYPE_PRECISION (TREE_TYPE (max)) == 1
- && !TYPE_UNSIGNED (TREE_TYPE (max)))
- max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
- build_int_cst (TREE_TYPE (max), -1));
- else
- max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
- build_int_cst (TREE_TYPE (max), 1));
- /* Signal to compare_values_warnv this expr doesn't overflow. */
- if (EXPR_P (max))
- suppress_warning (max, OPT_Woverflow);
- }
-
- vr_p->update (min, max);
- }
- }
- else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
- {
- max = TYPE_MAX_VALUE (type);
-
- if (limit_vr == NULL || limit_vr->kind () == VR_ANTI_RANGE)
- min = limit;
- else
- {
- /* If LIMIT_VR is of the form [N1, N2], we need to build the
- range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
- GT_EXPR. */
- min = limit_vr->min ();
- }
-
- /* If the minimum value forces us to be out of bounds, simply punt.
- It would be pointless to try and do anything more since this
- all should be optimized away above us. */
- if (cond_code == GT_EXPR
- && compare_values (min, max) == 0)
- vr_p->set_varying (TREE_TYPE (min));
- else
- {
- /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
- if (cond_code == GT_EXPR)
- {
- if (TYPE_PRECISION (TREE_TYPE (min)) == 1
- && !TYPE_UNSIGNED (TREE_TYPE (min)))
- min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
- build_int_cst (TREE_TYPE (min), -1));
- else
- min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
- build_int_cst (TREE_TYPE (min), 1));
- /* Signal to compare_values_warnv this expr doesn't overflow. */
- if (EXPR_P (min))
- suppress_warning (min, OPT_Woverflow);
- }
-
- vr_p->update (min, max);
- }
- }
- else
- gcc_unreachable ();
-
- /* Finally intersect the new range with what we already know about var. */
- vr_p->legacy_verbose_intersect (get_value_range (var));
-}
-
-/* Extract value range information from an ASSERT_EXPR EXPR and store
- it in *VR_P. */
-
-void
-vr_values::extract_range_from_assert (value_range_equiv *vr_p, tree expr)
-{
- tree var = ASSERT_EXPR_VAR (expr);
- tree cond = ASSERT_EXPR_COND (expr);
- tree limit, op;
- enum tree_code cond_code;
- gcc_assert (COMPARISON_CLASS_P (cond));
-
- /* Find VAR in the ASSERT_EXPR conditional. */
- if (var == TREE_OPERAND (cond, 0)
- || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
- || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
- {
- /* If the predicate is of the form VAR COMP LIMIT, then we just
- take LIMIT from the RHS and use the same comparison code. */
- cond_code = TREE_CODE (cond);
- limit = TREE_OPERAND (cond, 1);
- op = TREE_OPERAND (cond, 0);
- }
- else
- {
- /* If the predicate is of the form LIMIT COMP VAR, then we need
- to flip around the comparison code to create the proper range
- for VAR. */
- cond_code = swap_tree_comparison (TREE_CODE (cond));
- limit = TREE_OPERAND (cond, 0);
- op = TREE_OPERAND (cond, 1);
- }
- extract_range_for_var_from_comparison_expr (var, cond_code, op,
- limit, vr_p);
-}
-
-/* Extract range information from SSA name VAR and store it in VR. If
- VAR has an interesting range, use it. Otherwise, create the
- range [VAR, VAR] and return it. This is useful in situations where
- we may have conditionals testing values of VARYING names. For
- instance,
-
- x_3 = y_5;
- if (x_3 > y_5)
- ...
-
- Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
- always false. */
-
-void
-vr_values::extract_range_from_ssa_name (value_range_equiv *vr, tree var)
-{
- const value_range_equiv *var_vr = get_value_range (var);
-
- if (!var_vr->varying_p ())
- vr->deep_copy (var_vr);
- else
- vr->set (var);
-
- if (!vr->undefined_p ())
- vr->equiv_add (var, get_value_range (var), &vrp_equiv_obstack);
-}
-
-/* Extract range information from a binary expression OP0 CODE OP1 based on
- the ranges of each of its operands with resulting type EXPR_TYPE.
- The resulting range is stored in *VR. */
-
-void
-vr_values::extract_range_from_binary_expr (value_range_equiv *vr,
- enum tree_code code,
- tree expr_type, tree op0, tree op1)
-{
- /* Get value ranges for each operand. For constant operands, create
- a new value range with the operand to simplify processing. */
- value_range vr0, vr1;
- if (TREE_CODE (op0) == SSA_NAME)
- vr0 = *(get_value_range (op0));
- else if (is_gimple_min_invariant (op0))
- vr0.set (op0, op0);
- else
- vr0.set_varying (TREE_TYPE (op0));
-
- if (TREE_CODE (op1) == SSA_NAME)
- vr1 = *(get_value_range (op1));
- else if (is_gimple_min_invariant (op1))
- vr1.set (op1, op1);
- else
- vr1.set_varying (TREE_TYPE (op1));
-
- /* If one argument is varying, we can sometimes still deduce a
- range for the output: any + [3, +INF] is in [MIN+3, +INF]. */
- if (INTEGRAL_TYPE_P (TREE_TYPE (op0))
- && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))
- {
- if (vr0.varying_p () && !vr1.varying_p ())
- vr0 = value_range (vrp_val_min (expr_type), vrp_val_max (expr_type));
- else if (vr1.varying_p () && !vr0.varying_p ())
- vr1 = value_range (vrp_val_min (expr_type), vrp_val_max (expr_type));
- }
-
- range_fold_binary_expr (vr, code, expr_type, &vr0, &vr1);
-
- /* Set value_range for n in following sequence:
- def = __builtin_memchr (arg, 0, sz)
- n = def - arg
- Here the range for n can be set to [0, PTRDIFF_MAX - 1]. */
-
- if (vr->varying_p ()
- && code == POINTER_DIFF_EXPR
- && TREE_CODE (op0) == SSA_NAME
- && TREE_CODE (op1) == SSA_NAME)
- {
- tree op0_ptype = TREE_TYPE (TREE_TYPE (op0));
- tree op1_ptype = TREE_TYPE (TREE_TYPE (op1));
- gcall *call_stmt = NULL;
-
- if (TYPE_MODE (op0_ptype) == TYPE_MODE (char_type_node)
- && TYPE_PRECISION (op0_ptype) == TYPE_PRECISION (char_type_node)
- && TYPE_MODE (op1_ptype) == TYPE_MODE (char_type_node)
- && TYPE_PRECISION (op1_ptype) == TYPE_PRECISION (char_type_node)
- && (call_stmt = dyn_cast<gcall *>(SSA_NAME_DEF_STMT (op0)))
- && gimple_call_builtin_p (call_stmt, BUILT_IN_MEMCHR)
- && operand_equal_p (op0, gimple_call_lhs (call_stmt), 0)
- && operand_equal_p (op1, gimple_call_arg (call_stmt, 0), 0)
- && integer_zerop (gimple_call_arg (call_stmt, 1)))
- {
- tree max = vrp_val_max (ptrdiff_type_node);
- wide_int wmax = wi::to_wide (max, TYPE_PRECISION (TREE_TYPE (max)));
- tree range_min = build_zero_cst (expr_type);
- tree range_max = wide_int_to_tree (expr_type, wmax - 1);
- vr->set (range_min, range_max, NULL);
- return;
- }
- }
-
- /* Try harder for PLUS and MINUS if the range of one operand is symbolic
- and based on the other operand, for example if it was deduced from a
- symbolic comparison. When a bound of the range of the first operand
- is invariant, we set the corresponding bound of the new range to INF
- in order to avoid recursing on the range of the second operand. */
- if (vr->varying_p ()
- && (code == PLUS_EXPR || code == MINUS_EXPR)
- && TREE_CODE (op1) == SSA_NAME
- && vr0.kind () == VR_RANGE
- && symbolic_range_based_on_p (&vr0, op1))
- {
- const bool minus_p = (code == MINUS_EXPR);
- value_range n_vr1;
-
- /* Try with VR0 and [-INF, OP1]. */
- if (is_gimple_min_invariant (minus_p ? vr0.max () : vr0.min ()))
- n_vr1.set (vrp_val_min (expr_type), op1);
-
- /* Try with VR0 and [OP1, +INF]. */
- else if (is_gimple_min_invariant (minus_p ? vr0.min () : vr0.max ()))
- n_vr1.set (op1, vrp_val_max (expr_type));
-
- /* Try with VR0 and [OP1, OP1]. */
- else
- n_vr1.set (op1, op1);
-
- range_fold_binary_expr (vr, code, expr_type, &vr0, &n_vr1);
- }
-
- if (vr->varying_p ()
- && (code == PLUS_EXPR || code == MINUS_EXPR)
- && TREE_CODE (op0) == SSA_NAME
- && vr1.kind () == VR_RANGE
- && symbolic_range_based_on_p (&vr1, op0))
- {
- const bool minus_p = (code == MINUS_EXPR);
- value_range n_vr0;
-
- /* Try with [-INF, OP0] and VR1. */
- if (is_gimple_min_invariant (minus_p ? vr1.max () : vr1.min ()))
- n_vr0.set (vrp_val_min (expr_type), op0);
-
- /* Try with [OP0, +INF] and VR1. */
- else if (is_gimple_min_invariant (minus_p ? vr1.min (): vr1.max ()))
- n_vr0.set (op0, vrp_val_max (expr_type));
-
- /* Try with [OP0, OP0] and VR1. */
- else
- n_vr0.set (op0, op0);
-
- range_fold_binary_expr (vr, code, expr_type, &n_vr0, &vr1);
- }
-
- /* If we didn't derive a range for MINUS_EXPR, and
- op1's range is ~[op0,op0] or vice-versa, then we
- can derive a non-null range. This happens often for
- pointer subtraction. */
- if (vr->varying_p ()
- && (code == MINUS_EXPR || code == POINTER_DIFF_EXPR)
- && TREE_CODE (op0) == SSA_NAME
- && ((vr0.kind () == VR_ANTI_RANGE
- && vr0.min () == op1
- && vr0.min () == vr0.max ())
- || (vr1.kind () == VR_ANTI_RANGE
- && vr1.min () == op0
- && vr1.min () == vr1.max ())))
- {
- vr->set_nonzero (expr_type);
- vr->equiv_clear ();
- }
-}
-
-/* Extract range information from a unary expression CODE OP0 based on
- the range of its operand with resulting type TYPE.
- The resulting range is stored in *VR. */
-
-void
-vr_values::extract_range_from_unary_expr (value_range_equiv *vr,
- enum tree_code code,
- tree type, tree op0)
-{
- value_range vr0;
-
- /* Get value ranges for the operand. For constant operands, create
- a new value range with the operand to simplify processing. */
- if (TREE_CODE (op0) == SSA_NAME)
- vr0 = *(get_value_range (op0));
- else if (is_gimple_min_invariant (op0))
- vr0.set (op0, op0);
- else
- vr0.set_varying (type);
-
- range_fold_unary_expr (vr, code, type, &vr0, TREE_TYPE (op0));
-}
-
-
-/* Extract range information from a conditional expression STMT based on
- the ranges of each of its operands and the expression code. */
-
-void
-vr_values::extract_range_from_cond_expr (value_range_equiv *vr, gassign *stmt)
-{
- /* Get value ranges for each operand. For constant operands, create
- a new value range with the operand to simplify processing. */
- tree op0 = gimple_assign_rhs2 (stmt);
- value_range_equiv tem0;
- const value_range_equiv *vr0 = &tem0;
- if (TREE_CODE (op0) == SSA_NAME)
- vr0 = get_value_range (op0);
- else if (is_gimple_min_invariant (op0))
- tem0.set (op0);
- else
- tem0.set_varying (TREE_TYPE (op0));
-
- tree op1 = gimple_assign_rhs3 (stmt);
- value_range_equiv tem1;
- const value_range_equiv *vr1 = &tem1;
- if (TREE_CODE (op1) == SSA_NAME)
- vr1 = get_value_range (op1);
- else if (is_gimple_min_invariant (op1))
- tem1.set (op1);
- else
- tem1.set_varying (TREE_TYPE (op1));
-
- /* The resulting value range is the union of the operand ranges */
- vr->deep_copy (vr0);
- vr->legacy_verbose_union_ (vr1);
-}
-
-
-/* Extract range information from a comparison expression EXPR based
- on the range of its operand and the expression code. */
-
-void
-vr_values::extract_range_from_comparison (value_range_equiv *vr,
- gimple *stmt)
-{
- enum tree_code code = gimple_assign_rhs_code (stmt);
- tree type = TREE_TYPE (gimple_assign_lhs (stmt));
- tree op0 = gimple_assign_rhs1 (stmt);
- tree op1 = gimple_assign_rhs2 (stmt);
- bool sop;
- tree val
- = simplifier.vrp_evaluate_conditional_warnv_with_ops (stmt, code, op0, op1,
- false, &sop, NULL);
- if (val)
- {
- /* Since this expression was found on the RHS of an assignment,
- its type may be different from _Bool. Convert VAL to EXPR's
- type. */
- val = fold_convert (type, val);
- if (is_gimple_min_invariant (val))
- vr->set (val);
- else
- vr->update (val, val);
- }
- else
- /* The result of a comparison is always true or false. */
- set_value_range_to_truthvalue (vr, type);
-}
-
/* Helper function for simplify_internal_call_using_ranges and
extract_range_basic. Return true if OP0 SUBCODE OP1 for
SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
@@ -1150,247 +179,25 @@ check_for_binary_op_overflow (range_query *query,
{
wmin = wt;
wmax = wt;
- }
- else
- {
- wmin = wi::smin (wmin, wt);
- wmax = wi::smax (wmax, wt);
- }
- }
- /* The result of op0 CODE op1 is known to be in range
- [wmin, wmax]. */
- widest_int wtmin = wi::to_widest (vrp_val_min (type));
- widest_int wtmax = wi::to_widest (vrp_val_max (type));
- /* If all values in [wmin, wmax] are smaller than
- [wtmin, wtmax] or all are larger than [wtmin, wtmax],
- the arithmetic operation will always overflow. */
- if (wmax < wtmin || wmin > wtmax)
- return true;
- return false;
- }
- return true;
-}
-
-/* Derive a range from a builtin. Set range in VR and return TRUE if
- successful. */
-
-bool
-vr_values::extract_range_from_ubsan_builtin (value_range_equiv *vr, gimple *stmt)
-{
- gcc_assert (is_gimple_call (stmt));
- enum tree_code subcode = ERROR_MARK;
- combined_fn cfn = gimple_call_combined_fn (stmt);
- scalar_int_mode mode;
-
- switch (cfn)
- {
- case CFN_UBSAN_CHECK_ADD:
- subcode = PLUS_EXPR;
- break;
- case CFN_UBSAN_CHECK_SUB:
- subcode = MINUS_EXPR;
- break;
- case CFN_UBSAN_CHECK_MUL:
- subcode = MULT_EXPR;
- break;
- default:
- break;
- }
- if (subcode != ERROR_MARK)
- {
- bool saved_flag_wrapv = flag_wrapv;
- /* Pretend the arithmetics is wrapping. If there is
- any overflow, we'll complain, but will actually do
- wrapping operation. */
- flag_wrapv = 1;
- extract_range_from_binary_expr (vr, subcode,
- TREE_TYPE (gimple_call_arg (stmt, 0)),
- gimple_call_arg (stmt, 0),
- gimple_call_arg (stmt, 1));
- flag_wrapv = saved_flag_wrapv;
-
- /* If for both arguments vrp_valueize returned non-NULL,
- this should have been already folded and if not, it
- wasn't folded because of overflow. Avoid removing the
- UBSAN_CHECK_* calls in that case. */
- if (vr->kind () == VR_RANGE
- && (vr->min () == vr->max ()
- || operand_equal_p (vr->min (), vr->max (), 0)))
- vr->set_varying (vr->type ());
-
- return !vr->varying_p ();
- }
- return false;
-}
-
-/* Try to derive a nonnegative or nonzero range out of STMT relying
- primarily on generic routines in fold in conjunction with range data.
- Store the result in *VR */
-
-void
-vr_values::extract_range_basic (value_range_equiv *vr, gimple *stmt)
-{
- bool sop;
-
- if (is_gimple_call (stmt))
- {
- combined_fn cfn = gimple_call_combined_fn (stmt);
- switch (cfn)
- {
- case CFN_UBSAN_CHECK_ADD:
- case CFN_UBSAN_CHECK_SUB:
- case CFN_UBSAN_CHECK_MUL:
- if (extract_range_from_ubsan_builtin (vr, stmt))
- return;
- break;
- default:
- if (fold_range (*vr, stmt, this))
- {
- /* The original code nuked equivalences every time a
- range was found, so do the same here. */
- vr->equiv_clear ();
- return;
- }
- break;
- }
- }
- /* Handle extraction of the two results (result of arithmetics and
- a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW
- internal function. Similarly from ATOMIC_COMPARE_EXCHANGE. */
- if (is_gimple_assign (stmt)
- && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
- || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
- && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt))))
- {
- enum tree_code code = gimple_assign_rhs_code (stmt);
- tree op = gimple_assign_rhs1 (stmt);
- tree type = TREE_TYPE (gimple_assign_lhs (stmt));
- if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
- {
- gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
- if (is_gimple_call (g) && gimple_call_internal_p (g))
+ }
+ else
{
- enum tree_code subcode = ERROR_MARK;
- switch (gimple_call_internal_fn (g))
- {
- case IFN_ADD_OVERFLOW:
- subcode = PLUS_EXPR;
- break;
- case IFN_SUB_OVERFLOW:
- subcode = MINUS_EXPR;
- break;
- case IFN_MUL_OVERFLOW:
- subcode = MULT_EXPR;
- break;
- case IFN_ATOMIC_COMPARE_EXCHANGE:
- if (code == IMAGPART_EXPR)
- {
- /* This is the boolean return value whether compare and
- exchange changed anything or not. */
- vr->set (build_int_cst (type, 0),
- build_int_cst (type, 1), NULL);
- return;
- }
- break;
- default:
- break;
- }
- if (subcode != ERROR_MARK)
- {
- tree op0 = gimple_call_arg (g, 0);
- tree op1 = gimple_call_arg (g, 1);
- if (code == IMAGPART_EXPR)
- {
- bool ovf = false;
- if (check_for_binary_op_overflow (this, subcode, type,
- op0, op1, &ovf))
- vr->set (build_int_cst (type, ovf));
- else if (TYPE_PRECISION (type) == 1
- && !TYPE_UNSIGNED (type))
- vr->set_varying (type);
- else
- vr->set (build_int_cst (type, 0),
- build_int_cst (type, 1), NULL);
- }
- else if (types_compatible_p (type, TREE_TYPE (op0))
- && types_compatible_p (type, TREE_TYPE (op1)))
- {
- bool saved_flag_wrapv = flag_wrapv;
- /* Pretend the arithmetics is wrapping. If there is
- any overflow, IMAGPART_EXPR will be set. */
- flag_wrapv = 1;
- extract_range_from_binary_expr (vr, subcode, type,
- op0, op1);
- flag_wrapv = saved_flag_wrapv;
- }
- else
- {
- value_range_equiv vr0, vr1;
- bool saved_flag_wrapv = flag_wrapv;
- /* Pretend the arithmetics is wrapping. If there is
- any overflow, IMAGPART_EXPR will be set. */
- flag_wrapv = 1;
- extract_range_from_unary_expr (&vr0, NOP_EXPR,
- type, op0);
- extract_range_from_unary_expr (&vr1, NOP_EXPR,
- type, op1);
- range_fold_binary_expr (vr, subcode, type, &vr0, &vr1);
- flag_wrapv = saved_flag_wrapv;
- }
- return;
- }
+ wmin = wi::smin (wmin, wt);
+ wmax = wi::smax (wmax, wt);
}
}
+ /* The result of op0 CODE op1 is known to be in range
+ [wmin, wmax]. */
+ widest_int wtmin = wi::to_widest (vrp_val_min (type));
+ widest_int wtmax = wi::to_widest (vrp_val_max (type));
+ /* If all values in [wmin, wmax] are smaller than
+ [wtmin, wtmax] or all are larger than [wtmin, wtmax],
+ the arithmetic operation will always overflow. */
+ if (wmax < wtmin || wmin > wtmax)
+ return true;
+ return false;
}
- /* None of the below should need a 'type', but we are only called
- for assignments and calls with a LHS. */
- tree type = TREE_TYPE (gimple_get_lhs (stmt));
- if (INTEGRAL_TYPE_P (type)
- && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
- set_value_range_to_nonnegative (vr, type);
- else if (vrp_stmt_computes_nonzero (stmt))
- {
- vr->set_nonzero (type);
- vr->equiv_clear ();
- }
- else
- vr->set_varying (type);
-}
-
-
-/* Try to compute a useful range out of assignment STMT and store it
- in *VR. */
-
-void
-vr_values::extract_range_from_assignment (value_range_equiv *vr, gassign *stmt)
-{
- enum tree_code code = gimple_assign_rhs_code (stmt);
-
- if (code == ASSERT_EXPR)
- extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
- else if (code == SSA_NAME)
- extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
- else if (TREE_CODE_CLASS (code) == tcc_binary)
- extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
- TREE_TYPE (gimple_assign_lhs (stmt)),
- gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt));
- else if (TREE_CODE_CLASS (code) == tcc_unary)
- extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
- TREE_TYPE (gimple_assign_lhs (stmt)),
- gimple_assign_rhs1 (stmt));
- else if (code == COND_EXPR)
- extract_range_from_cond_expr (vr, stmt);
- else if (TREE_CODE_CLASS (code) == tcc_comparison)
- extract_range_from_comparison (vr, stmt);
- else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
- && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
- vr->set (gimple_assign_rhs1 (stmt));
- else
- vr->set_varying (TREE_TYPE (gimple_assign_lhs (stmt)));
-
- if (vr->varying_p ())
- extract_range_basic (vr, stmt);
+ return true;
}
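The retained tail of check_for_binary_op_overflow compares the result interval [wmin, wmax], computed in the arbitrary-precision widest_int type, against the representable range of the result type: if the whole interval lies outside those bounds the operation always overflows, and if it lies inside it never does. A minimal standalone sketch of that classification (not the GCC implementation; classify_add_overflow and ovf_kind are illustrative names, and int64_t stands in for widest_int) could look like:

// Evaluate the operation at the corners of the operand ranges in a wider
// type, then compare the resulting [wmin, wmax] against the narrow type's
// bounds to classify the operation as never / always / possibly overflowing.
#include <cstdint>
#include <algorithm>
#include <cstdio>

enum ovf_kind { NEVER_OVF, ALWAYS_OVF, MAYBE_OVF };

static ovf_kind classify_add_overflow (int32_t min0, int32_t max0,
				       int32_t min1, int32_t max1)
{
  // int64_t is wide enough that the addition itself cannot wrap.
  int64_t corners[4] = { (int64_t) min0 + min1, (int64_t) min0 + max1,
			 (int64_t) max0 + min1, (int64_t) max0 + max1 };
  int64_t wmin = *std::min_element (corners, corners + 4);
  int64_t wmax = *std::max_element (corners, corners + 4);
  int64_t tmin = INT32_MIN, tmax = INT32_MAX;
  if (wmin >= tmin && wmax <= tmax)
    return NEVER_OVF;		// whole result range is representable
  if (wmax < tmin || wmin > tmax)
    return ALWAYS_OVF;		// whole result range is out of bounds
  return MAYBE_OVF;
}

int main ()
{
  printf ("%d\n", classify_add_overflow (0, 10, 0, 10));		// NEVER_OVF
  printf ("%d\n", classify_add_overflow (INT32_MAX, INT32_MAX, 1, 5));	// ALWAYS_OVF
  printf ("%d\n", classify_add_overflow (0, INT32_MAX, 0, 1));		// MAYBE_OVF
  return 0;
}

For addition the two extreme corners would suffice; the sketch evaluates all four for uniformity with operations such as multiplication, where any corner can produce the extreme result.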
/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
@@ -1808,202 +615,6 @@ bounds_of_var_in_loop (tree *min, tree *max, range_query *query,
return true;
}
-/* Given a range VR, a LOOP and a variable VAR, determine whether it
- would be profitable to adjust VR using scalar evolution information
- for VAR. If so, update VR with the new limits. */
-
-void
-vr_values::adjust_range_with_scev (value_range_equiv *vr, class loop *loop,
- gimple *stmt, tree var)
-{
- tree min, max;
- if (bounds_of_var_in_loop (&min, &max, this, loop, stmt, var))
- {
- if (vr->undefined_p () || vr->varying_p ())
- {
- /* For VARYING or UNDEFINED ranges, just about anything we get
- from scalar evolutions should be better. */
- vr->update (min, max);
- }
- else if (vr->kind () == VR_RANGE)
- {
- /* Start with the input range... */
- tree vrmin = vr->min ();
- tree vrmax = vr->max ();
-
- /* ...and narrow it down with what we got from SCEV. */
- if (compare_values (min, vrmin) == 1)
- vrmin = min;
- if (compare_values (max, vrmax) == -1)
- vrmax = max;
-
- vr->update (vrmin, vrmax);
- }
- else if (vr->kind () == VR_ANTI_RANGE)
- {
- /* ?? As an enhancement, if VR, MIN, and MAX are constants, one
- could just intersect VR with a range of [MIN,MAX]. */
- }
- }
-}
-
-/* Dump value ranges of all SSA_NAMEs to FILE. */
-
-void
-vr_values::dump (FILE *file)
-{
- size_t i;
-
- for (i = 0; i < num_vr_values; i++)
- {
- if (vr_value[i] && ssa_name (i))
- {
- print_generic_expr (file, ssa_name (i));
- fprintf (file, ": ");
- dump_value_range (file, vr_value[i]);
- fprintf (file, "\n");
- }
- }
-
- fprintf (file, "\n");
-}
-
-/* Initialize VRP lattice. */
-
-vr_values::vr_values () : simplifier (this)
-{
- values_propagated = false;
- num_vr_values = num_ssa_names * 2;
- vr_value = XCNEWVEC (value_range_equiv *, num_vr_values);
- vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
- bitmap_obstack_initialize (&vrp_equiv_obstack);
-}
-
-/* Free VRP lattice. */
-
-vr_values::~vr_values ()
-{
- /* Free allocated memory. */
- free (vr_value);
- free (vr_phi_edge_counts);
- bitmap_obstack_release (&vrp_equiv_obstack);
-
- /* So that we can distinguish between VRP data being available
- and not available. */
- vr_value = NULL;
- vr_phi_edge_counts = NULL;
-}
-
-
-/* A hack. */
-static class vr_values *x_vr_values;
-
-/* Return the singleton value-range for NAME, or NAME itself if there is none. */
-
-static inline tree
-vrp_valueize (tree name)
-{
- if (TREE_CODE (name) == SSA_NAME)
- {
- const value_range_equiv *vr = x_vr_values->get_value_range (name);
- if (vr->kind () == VR_RANGE
- && (TREE_CODE (vr->min ()) == SSA_NAME
- || is_gimple_min_invariant (vr->min ()))
- && vrp_operand_equal_p (vr->min (), vr->max ()))
- return vr->min ();
- }
- return name;
-}
-
-/* Return the singleton value-range for NAME if that is a constant
- but signal to not follow SSA edges. */
-
-static inline tree
-vrp_valueize_1 (tree name)
-{
- if (TREE_CODE (name) == SSA_NAME)
- {
- /* If the definition may be simulated again we cannot follow
- this SSA edge as the SSA propagator does not necessarily
- re-visit the use. */
- gimple *def_stmt = SSA_NAME_DEF_STMT (name);
- if (!gimple_nop_p (def_stmt)
- && prop_simulate_again_p (def_stmt))
- return NULL_TREE;
- const value_range_equiv *vr = x_vr_values->get_value_range (name);
- tree singleton;
- if (vr->singleton_p (&singleton))
- return singleton;
- }
- return name;
-}
-
-/* Given STMT, an assignment or call, return its LHS if the type
- of the LHS is suitable for VRP analysis, else return NULL_TREE. */
-
-tree
-get_output_for_vrp (gimple *stmt)
-{
- if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
- return NULL_TREE;
-
- /* We only keep track of ranges in integral and pointer types. */
- tree lhs = gimple_get_lhs (stmt);
- if (TREE_CODE (lhs) == SSA_NAME
- && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
- /* It is valid to have NULL MIN/MAX values on a type. See
- build_range_type. */
- && TYPE_MIN_VALUE (TREE_TYPE (lhs))
- && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
- || POINTER_TYPE_P (TREE_TYPE (lhs))))
- return lhs;
-
- return NULL_TREE;
-}
-
-/* Visit assignment STMT. If it produces an interesting range, record
- the range in VR and set LHS to OUTPUT_P. */
-
-void
-vr_values::vrp_visit_assignment_or_call (gimple *stmt, tree *output_p,
- value_range_equiv *vr)
-{
- tree lhs = get_output_for_vrp (stmt);
- *output_p = lhs;
-
- /* We only keep track of ranges in integral and pointer types. */
- if (lhs)
- {
- enum gimple_code code = gimple_code (stmt);
-
- /* Try folding the statement to a constant first. */
- x_vr_values = this;
- tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
- vrp_valueize_1);
- x_vr_values = NULL;
- if (tem)
- {
- if (TREE_CODE (tem) == SSA_NAME
- && (SSA_NAME_IS_DEFAULT_DEF (tem)
- || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (tem))))
- {
- extract_range_from_ssa_name (vr, tem);
- return;
- }
- else if (is_gimple_min_invariant (tem))
- {
- vr->set (tem);
- return;
- }
- }
- /* Then dispatch to value-range extracting functions. */
- if (code == GIMPLE_CALL)
- extract_range_basic (vr, stmt);
- else
- extract_range_from_assignment (vr, as_a <gassign *> (stmt));
- }
-}
-
/* Helper that gets the value range of the SSA_NAME with version I
or a symbolic range containing the SSA_NAME only if the value range
is varying or undefined. Uses TEM as storage for the alternate range. */
@@ -2352,100 +963,6 @@ simplify_using_ranges::vrp_evaluate_conditional_warnv_with_ops
return NULL_TREE;
}
-/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
- information. Return NULL if the conditional cannot be evaluated.
- The ranges of all the names equivalent with the operands in COND
- will be used when trying to compute the value. If the result is
- based on undefined signed overflow, issue a warning if
- appropriate. */
-
-tree
-simplify_using_ranges::vrp_evaluate_conditional (tree_code code, tree op0,
- tree op1, gimple *stmt)
-{
- bool sop;
- tree ret;
- bool only_ranges;
-
- /* Some passes and foldings leak constants with overflow flag set
- into the IL. Avoid doing wrong things with these and bail out. */
- if ((TREE_CODE (op0) == INTEGER_CST
- && TREE_OVERFLOW (op0))
- || (TREE_CODE (op1) == INTEGER_CST
- && TREE_OVERFLOW (op1)))
- return NULL_TREE;
-
- sop = false;
- ret = vrp_evaluate_conditional_warnv_with_ops (stmt, code, op0, op1, true,
- &sop, &only_ranges);
-
- if (ret && sop)
- {
- enum warn_strict_overflow_code wc;
- const char* warnmsg;
-
- if (is_gimple_min_invariant (ret))
- {
- wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
- warnmsg = G_("assuming signed overflow does not occur when "
- "simplifying conditional to constant");
- }
- else
- {
- wc = WARN_STRICT_OVERFLOW_COMPARISON;
- warnmsg = G_("assuming signed overflow does not occur when "
- "simplifying conditional");
- }
-
- if (issue_strict_overflow_warning (wc))
- {
- location_t location;
-
- if (!gimple_has_location (stmt))
- location = input_location;
- else
- location = gimple_location (stmt);
- warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
- }
- }
-
- if (warn_type_limits
- && ret && only_ranges
- && TREE_CODE_CLASS (code) == tcc_comparison
- && TREE_CODE (op0) == SSA_NAME)
- {
- /* If the comparison is being folded and the operand on the LHS
- is being compared against a constant value that is outside of
- the natural range of OP0's type, then the predicate will
- always fold regardless of the value of OP0. If -Wtype-limits
- was specified, emit a warning. */
- tree type = TREE_TYPE (op0);
- const value_range_equiv *vr0 = query->get_value_range (op0, stmt);
-
- if (vr0->varying_p ()
- && INTEGRAL_TYPE_P (type)
- && is_gimple_min_invariant (op1))
- {
- location_t location;
-
- if (!gimple_has_location (stmt))
- location = input_location;
- else
- location = gimple_location (stmt);
-
- warning_at (location, OPT_Wtype_limits,
- integer_zerop (ret)
- ? G_("comparison always false "
- "due to limited range of data type")
- : G_("comparison always true "
- "due to limited range of data type"));
- }
- }
-
- return ret;
-}
-
-
/* Visit conditional statement STMT. If we can determine which edge
will be taken out of STMT's basic block, record it in
*TAKEN_EDGE_P. Otherwise, set *TAKEN_EDGE_P to NULL. */
@@ -2619,336 +1136,6 @@ find_case_label_ranges (gswitch *stmt, const value_range *vr,
return false;
}
-/* Visit switch statement STMT. If we can determine which edge
- will be taken out of STMT's basic block, record it in
- *TAKEN_EDGE_P. Otherwise, set *TAKEN_EDGE_P to NULL. */
-
-void
-vr_values::vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
-{
- tree op, val;
- const value_range_equiv *vr;
- size_t i = 0, j = 0, k, l;
- bool take_default;
-
- *taken_edge_p = NULL;
- op = gimple_switch_index (stmt);
- if (TREE_CODE (op) != SSA_NAME)
- return;
-
- vr = get_value_range (op);
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\nVisiting switch expression with operand ");
- print_generic_expr (dump_file, op);
- fprintf (dump_file, " with known range ");
- dump_value_range (dump_file, vr);
- fprintf (dump_file, "\n");
- }
-
- if (vr->undefined_p ()
- || vr->varying_p ()
- || vr->symbolic_p ())
- return;
-
- /* Find the single edge that is taken from the switch expression. */
- take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
-
- /* Check if the range spans no CASE_LABEL. If so, we only reach the default
- label */
- if (j < i)
- {
- gcc_assert (take_default);
- val = gimple_switch_default_label (stmt);
- }
- else
- {
- /* Check if labels with index i to j and maybe the default label
- are all reaching the same label. */
-
- val = gimple_switch_label (stmt, i);
- if (take_default
- && CASE_LABEL (gimple_switch_default_label (stmt))
- != CASE_LABEL (val))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " not a single destination for this "
- "range\n");
- return;
- }
- for (++i; i <= j; ++i)
- {
- if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " not a single destination for this "
- "range\n");
- return;
- }
- }
- for (; k <= l; ++k)
- {
- if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " not a single destination for this "
- "range\n");
- return;
- }
- }
- }
-
- *taken_edge_p = find_edge (gimple_bb (stmt),
- label_to_block (cfun, CASE_LABEL (val)));
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, " will take edge to ");
- print_generic_stmt (dump_file, CASE_LABEL (val));
- }
-}
-
-
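
The removed vrp_visit_switch_stmt boils down to: if every case label covered
by the index's known range (plus the default label, when the range is not
fully covered) jumps to the same destination, the switch has a single taken
edge. A rough standalone sketch of that idea, assuming single-value case
labels and hypothetical names (this is not GCC code):

    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>

    struct int_range { long lo, hi; };  // inclusive range of the switch index

    /* CASES maps case values to destination labels, DEFAULT_DEST is the
       default label.  Return the unique destination, if any.  */
    static std::optional<std::string>
    single_destination (const std::map<long, std::string> &cases,
                        const std::string &default_dest, int_range r)
    {
      std::optional<std::string> dest;
      long covered = 0;
      for (const auto &[val, label] : cases)
        if (val >= r.lo && val <= r.hi)
          {
            if (dest && *dest != label)
              return std::nullopt;        // more than one destination
            dest = label;
            ++covered;
          }
      /* If some values in the range hit no case label, the default edge is
         also reachable and must agree with the case destinations.  */
      if (covered < r.hi - r.lo + 1)
        {
          if (dest && *dest != default_dest)
            return std::nullopt;
          dest = default_dest;
        }
      return dest;
    }

    int main ()
    {
      std::map<long, std::string> cases = { {1, "L1"}, {2, "L1"}, {3, "L2"} };
      if (auto d = single_destination (cases, "Ldef", {1, 2}))
        std::cout << "single destination: " << *d << "\n";   // prints L1
    }
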
-/* Evaluate statement STMT. If the statement produces a useful range,
- set VR and corresponding OUTPUT_P.
-
- If STMT is a conditional branch and we can determine its truth
- value, the taken edge is recorded in *TAKEN_EDGE_P. */
-
-void
-vr_values::extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
- tree *output_p, value_range_equiv *vr)
-{
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\nextract_range_from_stmt visiting:\n");
- print_gimple_stmt (dump_file, stmt, 0, dump_flags);
- }
-
- if (!stmt_interesting_for_vrp (stmt))
- gcc_assert (stmt_ends_bb_p (stmt));
- else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
- vrp_visit_assignment_or_call (stmt, output_p, vr);
- else if (gimple_code (stmt) == GIMPLE_COND)
- simplifier.vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
- else if (gimple_code (stmt) == GIMPLE_SWITCH)
- vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
-}
-
-/* Visit all arguments for PHI node PHI that flow through executable
- edges. If a valid value range can be derived from all the incoming
- value ranges, set a new range in VR_RESULT. */
-
-void
-vr_values::extract_range_from_phi_node (gphi *phi,
- value_range_equiv *vr_result)
-{
- tree lhs = PHI_RESULT (phi);
- const value_range_equiv *lhs_vr = get_value_range (lhs);
- bool first = true;
- int old_edges;
- class loop *l;
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\nVisiting PHI node: ");
- print_gimple_stmt (dump_file, phi, 0, dump_flags);
- }
-
- bool may_simulate_backedge_again = false;
- int edges = 0;
- for (size_t i = 0; i < gimple_phi_num_args (phi); i++)
- {
- edge e = gimple_phi_arg_edge (phi, i);
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file,
- " Argument #%d (%d -> %d %sexecutable)\n",
- (int) i, e->src->index, e->dest->index,
- (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
- }
-
- if (e->flags & EDGE_EXECUTABLE)
- {
- value_range_equiv vr_arg_tem;
- const value_range_equiv *vr_arg = &vr_arg_tem;
-
- ++edges;
-
- tree arg = PHI_ARG_DEF (phi, i);
- if (TREE_CODE (arg) == SSA_NAME)
- {
- /* See if we are eventually going to change one of the args. */
- gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
- if (! gimple_nop_p (def_stmt)
- && prop_simulate_again_p (def_stmt)
- && e->flags & EDGE_DFS_BACK)
- may_simulate_backedge_again = true;
-
- const value_range_equiv *vr_arg_ = get_value_range (arg);
- /* Do not allow equivalences or symbolic ranges to leak in from
- backedges. That creates invalid equivalencies.
- See PR53465 and PR54767. */
- if (e->flags & EDGE_DFS_BACK)
- {
- if (!vr_arg_->varying_p () && !vr_arg_->undefined_p ())
- {
- vr_arg_tem.set (vr_arg_->min (), vr_arg_->max (), NULL,
- vr_arg_->kind ());
- if (vr_arg_tem.symbolic_p ())
- vr_arg_tem.set_varying (TREE_TYPE (arg));
- }
- else
- vr_arg = vr_arg_;
- }
- /* If the non-backedge argument's range is VR_VARYING then
- we can still try recording a simple equivalence. */
- else if (vr_arg_->varying_p ())
- vr_arg_tem.set (arg);
- else
- vr_arg = vr_arg_;
- }
- else
- {
- if (TREE_OVERFLOW_P (arg))
- arg = drop_tree_overflow (arg);
-
- vr_arg_tem.set (arg);
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\t");
- print_generic_expr (dump_file, arg, dump_flags);
- fprintf (dump_file, ": ");
- dump_value_range (dump_file, vr_arg);
- fprintf (dump_file, "\n");
- }
-
- if (first)
- vr_result->deep_copy (vr_arg);
- else
- vr_result->legacy_verbose_union_ (vr_arg);
- first = false;
-
- if (vr_result->varying_p ())
- break;
- }
- }
-
- if (vr_result->varying_p ())
- goto varying;
- else if (vr_result->undefined_p ())
- goto update_range;
-
- old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
- vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
-
- /* To prevent infinite iterations in the algorithm, derive ranges
- when the new value is slightly bigger or smaller than the
- previous one. We don't do this if we have seen a new executable
- edge; this helps us avoid infinite iteration for conditionals
- which are not in a loop. If the old value-range was VR_UNDEFINED,
- use the updated range and iterate one more time. If we will not
- simulate this PHI again via the backedge, allow another iteration. */
- if (edges > 0
- && gimple_phi_num_args (phi) > 1
- && edges == old_edges
- && !lhs_vr->undefined_p ()
- && may_simulate_backedge_again)
- {
- /* Compare old and new ranges, fall back to varying if the
- values are not comparable. */
- int cmp_min = compare_values (lhs_vr->min (), vr_result->min ());
- if (cmp_min == -2)
- goto varying;
- int cmp_max = compare_values (lhs_vr->max (), vr_result->max ());
- if (cmp_max == -2)
- goto varying;
-
- /* For non VR_RANGE or for pointers fall back to varying if
- the range changed. */
- if ((lhs_vr->kind () != VR_RANGE || vr_result->kind () != VR_RANGE
- || POINTER_TYPE_P (TREE_TYPE (lhs)))
- && (cmp_min != 0 || cmp_max != 0))
- goto varying;
-
- /* If the new minimum is larger than the previous one
- retain the old value. If the new minimum value is smaller
- than the previous one and not -INF go all the way to -INF + 1.
- In the first case, to avoid infinite bouncing between different
- minimums, and in the other case to avoid iterating millions of
- times to reach -INF. Going to -INF + 1 also lets the following
- iteration compute whether there will be any overflow, at the
- expense of one additional iteration. */
- tree new_min = vr_result->min ();
- tree new_max = vr_result->max ();
- if (cmp_min < 0)
- new_min = lhs_vr->min ();
- else if (cmp_min > 0
- && (TREE_CODE (vr_result->min ()) != INTEGER_CST
- || tree_int_cst_lt (vrp_val_min (vr_result->type ()),
- vr_result->min ())))
- new_min = int_const_binop (PLUS_EXPR,
- vrp_val_min (vr_result->type ()),
- build_int_cst (vr_result->type (), 1));
-
- /* Similarly for the maximum value. */
- if (cmp_max > 0)
- new_max = lhs_vr->max ();
- else if (cmp_max < 0
- && (TREE_CODE (vr_result->max ()) != INTEGER_CST
- || tree_int_cst_lt (vr_result->max (),
- vrp_val_max (vr_result->type ()))))
- new_max = int_const_binop (MINUS_EXPR,
- vrp_val_max (vr_result->type ()),
- build_int_cst (vr_result->type (), 1));
-
- vr_result->update (new_min, new_max, vr_result->kind ());
-
- /* If we dropped either bound to +-INF then if this is a loop
- PHI node SCEV may know more about its value-range. */
- if (cmp_min > 0 || cmp_min < 0
- || cmp_max < 0 || cmp_max > 0)
- goto scev_check;
-
- goto infinite_check;
- }
-
- goto update_range;
-
-varying:
- vr_result->set_varying (TREE_TYPE (lhs));
-
-scev_check:
- /* If this is a loop PHI node, SCEV may know more about its value-range.
- scev_check can be reached from two paths: one is a fall-through from the
- "varying" label above, the other is a direct goto from the code block which
- tries to avoid infinite simulation. */
- if (scev_initialized_p ()
- && (l = loop_containing_stmt (phi))
- && l->header == gimple_bb (phi))
- adjust_range_with_scev (vr_result, l, phi, lhs);
-
-infinite_check:
- /* If we will end up with a (-INF, +INF) range, set it to
- VARYING. Same if the previous max value was invalid for
- the type and we end up with vr_result.min > vr_result.max. */
- if ((!vr_result->varying_p () && !vr_result->undefined_p ())
- && !((vrp_val_is_max (vr_result->max ()) && vrp_val_is_min (vr_result->min ()))
- || compare_values (vr_result->min (), vr_result->max ()) > 0))
- ;
- else
- vr_result->set_varying (TREE_TYPE (lhs));
-
- /* If the new range is different than the previous value, keep
- iterating. */
-update_range:
- return;
-}
-
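
The termination trick described in the comments of the function removed above
can be summarized in a small sketch (hypothetical types and names, not the
actual implementation): once the same set of executable edges has been seen
twice, a bound that keeps moving is widened almost to the type limit in one
step, so simulation cannot crawl toward -INF/+INF one value at a time.

    #include <cstdint>
    #include <iostream>

    struct range { int64_t min, max; };

    static range
    widen (range old_r, range new_r, int64_t type_min, int64_t type_max)
    {
      range r = new_r;
      /* A shrinking minimum jumps almost all the way to the type minimum;
         a growing minimum keeps the old value to avoid bouncing.  */
      if (new_r.min < old_r.min)
        r.min = type_min + 1;
      else if (new_r.min > old_r.min)
        r.min = old_r.min;
      /* Symmetrically, a growing maximum jumps almost to the type maximum.  */
      if (new_r.max > old_r.max)
        r.max = type_max - 1;
      else if (new_r.max < old_r.max)
        r.max = old_r.max;
      return r;
    }

    int main ()
    {
      range old_r = { 0, 10 }, new_r = { 0, 11 };  // e.g. i = i + 1 in a loop
      range r = widen (old_r, new_r, INT64_MIN, INT64_MAX);
      std::cout << "[" << r.min << ", " << r.max << "]\n";  // [0, INT64_MAX - 1]
    }
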
/* Simplify boolean operations if the source is known
to be already a boolean. */
bool
@@ -3557,8 +1744,7 @@ simplify_using_ranges::fold_cond (gcond *cond)
return true;
}
- /* ?? vrp_folder::fold_predicate_in() is a superset of this. At
- some point we should merge all variants of this code. */
+ // FIXME: Audit the code below and make sure it never finds anything.
edge taken_edge;
vrp_visit_cond_stmt (cond, &taken_edge);
@@ -3759,9 +1945,7 @@ simplify_using_ranges::simplify_switch_using_ranges (gswitch *stmt)
vr = query->get_value_range (op, stmt);
/* We can only handle integer ranges. */
- if (vr->varying_p ()
- || vr->undefined_p ()
- || vr->symbolic_p ())
+ if (vr->varying_p () || vr->undefined_p ())
return false;
/* Find case label for min/max of the value range. */
@@ -4428,24 +2612,3 @@ simplify_using_ranges::simplify (gimple_stmt_iterator *gsi)
return false;
}
-
-/* Set the lattice entry for VAR to VR. */
-
-void
-vr_values::set_vr_value (tree var, value_range_equiv *vr)
-{
- if (SSA_NAME_VERSION (var) >= num_vr_values)
- return;
- vr_value[SSA_NAME_VERSION (var)] = vr;
-}
-
-/* Swap the lattice entry for VAR with VR and return the old entry. */
-
-value_range_equiv *
-vr_values::swap_vr_value (tree var, value_range_equiv *vr)
-{
- if (SSA_NAME_VERSION (var) >= num_vr_values)
- return NULL;
- std::swap (vr_value[SSA_NAME_VERSION (var)], vr);
- return vr;
-}
@@ -33,21 +33,14 @@ public:
simplify_using_ranges (range_query *query = NULL,
int not_executable_flag = 0);
~simplify_using_ranges ();
- void set_range_query (class range_query *q, int not_executable_flag = 0)
- { query = q; m_not_executable_flag = not_executable_flag; }
-
bool simplify (gimple_stmt_iterator *);
-
- // ?? These should be cleaned, merged, and made private.
- tree vrp_evaluate_conditional (tree_code, tree, tree, gimple *);
- void vrp_visit_cond_stmt (gcond *, edge *);
bool fold_cond (gcond *);
+private:
+ void vrp_visit_cond_stmt (gcond *, edge *);
tree vrp_evaluate_conditional_warnv_with_ops (gimple *stmt, enum tree_code,
tree, tree, bool,
bool *, bool *);
bool simplify_casted_cond (gcond *);
-
-private:
bool simplify_truth_ops_using_ranges (gimple_stmt_iterator *, gimple *);
bool simplify_div_or_mod_using_ranges (gimple_stmt_iterator *, gimple *);
bool simplify_abs_using_ranges (gimple_stmt_iterator *, gimple *);
@@ -89,95 +82,6 @@ private:
vec<edge> m_flag_set_edges; // List of edges with flag to be cleared.
};
-/* The VR_VALUES class holds the current view of range information
- for all the SSA_NAMEs in the IL.
-
- It can be used to hold context sensitive range information during
- a dominator walk or it may be used to hold range information in the
- standard VRP pass as ranges are propagated through the lattice to a
- steady state.
-
- This information is independent of the range information that gets
- attached to SSA_NAMEs. A pass such as VRP may choose to transfer
- the global information it produces into global range information that
- gets attached to an SSA_NAME. It's unclear how useful that global
- information will be in a world where we can compute context sensitive
- range information fast or perform on-demand queries. */
-class vr_values : public range_query
-{
- public:
- vr_values (void);
- ~vr_values (void);
-
- virtual bool range_of_expr (vrange &r, tree expr, gimple *stmt) override;
- virtual tree value_of_expr (tree, gimple * = NULL) override;
- virtual tree value_on_edge (edge, tree) override;
- virtual tree value_of_stmt (gimple *, tree = NULL_TREE) override;
- virtual const value_range_equiv *get_value_range (const_tree,
- gimple * = NULL) override;
- void set_vr_value (tree, value_range_equiv *);
- value_range_equiv *swap_vr_value (tree, value_range_equiv *);
-
- void set_def_to_varying (const_tree);
- void set_defs_to_varying (gimple *);
- bool update_value_range (const_tree, value_range_equiv *);
- tree op_with_constant_singleton_value_range (tree);
- void adjust_range_with_scev (value_range_equiv *, class loop *,
- gimple *, tree);
- virtual void dump (FILE *) override;
-
- void extract_range_for_var_from_comparison_expr (tree, enum tree_code,
- tree, tree,
- value_range_equiv *);
- void extract_range_from_phi_node (gphi *, value_range_equiv *);
- void extract_range_basic (value_range_equiv *, gimple *);
- void extract_range_from_stmt (gimple *, edge *, tree *, value_range_equiv *);
-
- /* Indicate that propagation through the lattice is complete. */
- void set_lattice_propagation_complete (void) { values_propagated = true; }
-
- /* Allocate a new value_range object. */
- value_range_equiv *allocate_value_range_equiv (void)
- { return range_query::allocate_value_range_equiv (); }
- void free_value_range (value_range_equiv *vr)
- { free_value_range_equiv (vr); }
-
- private:
- value_range_equiv *get_lattice_entry (const_tree);
- bool vrp_stmt_computes_nonzero (gimple *);
- void extract_range_from_assignment (value_range_equiv *, gassign *);
- void extract_range_from_assert (value_range_equiv *, tree);
- void extract_range_from_ssa_name (value_range_equiv *, tree);
- void extract_range_from_binary_expr (value_range_equiv *, enum tree_code,
- tree, tree, tree);
- void extract_range_from_unary_expr (value_range_equiv *, enum tree_code,
- tree, tree);
- void extract_range_from_cond_expr (value_range_equiv *, gassign *);
- void extract_range_from_comparison (value_range_equiv *, gimple *);
- void vrp_visit_assignment_or_call (gimple*, tree *, value_range_equiv *);
- void vrp_visit_switch_stmt (gswitch *, edge *);
- bool extract_range_from_ubsan_builtin (value_range_equiv *, gimple *);
-
- /* This probably belongs in the lattice rather than in here. */
- bool values_propagated;
-
- /* Allocations for equivalences all come from this obstack. */
- bitmap_obstack vrp_equiv_obstack;
-
- /* Value range array. After propagation, VR_VALUE[I] holds the range
- of values that SSA name N_I may take. */
- unsigned int num_vr_values;
- value_range_equiv **vr_value;
-
- /* For a PHI node which sets SSA name N_I, VR_PHI_EDGE_COUNTS[I] holds
- the number of executable edges we saw the last time we visited the
- node. */
- int *vr_phi_edge_counts;
- simplify_using_ranges simplifier;
-};
-
-extern tree get_output_for_vrp (gimple *);
-
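
In essence the removed class kept one lattice entry per SSA name version plus
a per-PHI count of previously executable edges. A toy sketch of that shape
(hypothetical, and far simpler than the real value_range_equiv machinery):

    #include <cstdint>
    #include <vector>

    struct irange_sketch { int64_t min, max; };

    class range_lattice
    {
    public:
      explicit range_lattice (unsigned num_ssa_names)
        : m_ranges (num_ssa_names, irange_sketch { INT64_MIN, INT64_MAX }),
          m_phi_edge_counts (num_ssa_names, 0) {}

      const irange_sketch &get (unsigned version) const
      { return m_ranges[version]; }

      /* Record a new range; return true if the entry changed, which is the
         signal to keep propagating.  */
      bool update (unsigned version, irange_sketch r)
      {
        if (m_ranges[version].min == r.min && m_ranges[version].max == r.max)
          return false;
        m_ranges[version] = r;
        return true;
      }

      int &phi_edge_count (unsigned version)
      { return m_phi_edge_counts[version]; }

    private:
      std::vector<irange_sketch> m_ranges;
      std::vector<int> m_phi_edge_counts;
    };

    int main ()
    {
      range_lattice lattice (4);
      bool changed = lattice.update (2, { 0, 10 });   // SSA_NAME version 2
      return changed ? 0 : 1;
    }
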
extern bool range_fits_type_p (const value_range *vr,
unsigned dest_precision, signop dest_sgn);
extern bool bounds_of_var_in_loop (tree *min, tree *max, range_query *,
--
2.38.1