| /* Support routines for Value Range Propagation (VRP). |
| Copyright (C) 2005-2019 Free Software Foundation, Inc. |
| Contributed by Diego Novillo <dnovillo@redhat.com>. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "insn-codes.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "cfghooks.h" |
| #include "tree-pass.h" |
| #include "ssa.h" |
| #include "optabs-tree.h" |
| #include "gimple-pretty-print.h" |
| #include "diagnostic-core.h" |
| #include "flags.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "calls.h" |
| #include "cfganal.h" |
| #include "gimple-fold.h" |
| #include "tree-eh.h" |
| #include "gimple-iterator.h" |
| #include "gimple-walk.h" |
| #include "tree-cfg.h" |
| #include "tree-dfa.h" |
| #include "tree-ssa-loop-manip.h" |
| #include "tree-ssa-loop-niter.h" |
| #include "tree-ssa-loop.h" |
| #include "tree-into-ssa.h" |
| #include "tree-ssa.h" |
| #include "intl.h" |
| #include "cfgloop.h" |
| #include "tree-scalar-evolution.h" |
| #include "tree-ssa-propagate.h" |
| #include "tree-chrec.h" |
| #include "tree-ssa-threadupdate.h" |
| #include "tree-ssa-scopedtables.h" |
| #include "tree-ssa-threadedge.h" |
| #include "omp-general.h" |
| #include "target.h" |
| #include "case-cfn-macros.h" |
| #include "params.h" |
| #include "alloc-pool.h" |
| #include "domwalk.h" |
| #include "tree-cfgcleanup.h" |
| #include "stringpool.h" |
| #include "attribs.h" |
| #include "vr-values.h" |
| #include "builtins.h" |
| #include "wide-int-range.h" |
| |
| /* Set of SSA names found live during the RPO traversal of the function |
| for still active basic-blocks. */ |
| static sbitmap *live; |
| |
| void |
| value_range_base::set (enum value_range_kind kind, tree min, tree max) |
| { |
| m_kind = kind; |
| m_min = min; |
| m_max = max; |
| if (flag_checking) |
| check (); |
| } |
| |
| void |
| value_range::set_equiv (bitmap equiv) |
| { |
| /* Since updating the equivalence set involves deep copying the |
| bitmaps, only do it if absolutely necessary. |
| |
| All equivalence bitmaps are allocated from the same obstack. So |
| we can use the obstack associated with EQUIV to allocate vr->equiv. */ |
| if (m_equiv == NULL |
| && equiv != NULL) |
| m_equiv = BITMAP_ALLOC (equiv->obstack); |
| |
| if (equiv != m_equiv) |
| { |
| if (equiv && !bitmap_empty_p (equiv)) |
| bitmap_copy (m_equiv, equiv); |
| else |
| bitmap_clear (m_equiv); |
| } |
| } |
| |
| /* Initialize value_range. */ |
| |
| void |
| value_range::set (enum value_range_kind kind, tree min, tree max, |
| bitmap equiv) |
| { |
| value_range_base::set (kind, min, max); |
| set_equiv (equiv); |
| if (flag_checking) |
| check (); |
| } |
| |
| value_range_base::value_range_base (value_range_kind kind, tree min, tree max) |
| { |
| set (kind, min, max); |
| } |
| |
| value_range::value_range (value_range_kind kind, tree min, tree max, |
| bitmap equiv) |
| { |
| m_equiv = NULL; |
| set (kind, min, max, equiv); |
| } |
| |
| value_range::value_range (const value_range_base &other) |
| { |
| m_equiv = NULL; |
| set (other.kind (), other.min (), other.max (), NULL); |
| } |
| |
| /* Like set, but keep the equivalences in place. */ |
| |
| void |
| value_range::update (value_range_kind kind, tree min, tree max) |
| { |
| set (kind, min, max, |
| (kind != VR_UNDEFINED && kind != VR_VARYING) ? m_equiv : NULL); |
| } |
| |
| /* Copy value_range in FROM into THIS while avoiding bitmap sharing. |
| |
| Note: The code that avoids the bitmap sharing looks at the existing |
| this->m_equiv, so this function cannot be used to initialize an |
| object. Use the constructors for initialization. */ |
| |
| void |
| value_range::deep_copy (const value_range *from) |
| { |
| set (from->m_kind, from->min (), from->max (), from->m_equiv); |
| } |
| |
| void |
| value_range::move (value_range *from) |
| { |
| set (from->m_kind, from->min (), from->max ()); |
| m_equiv = from->m_equiv; |
| from->m_equiv = NULL; |
| } |
| |
| /* Check the validity of the range. */ |
| |
| void |
| value_range_base::check () |
| { |
| switch (m_kind) |
| { |
| case VR_RANGE: |
| case VR_ANTI_RANGE: |
| { |
| int cmp; |
| |
| gcc_assert (m_min && m_max); |
| |
| gcc_assert (!TREE_OVERFLOW_P (m_min) && !TREE_OVERFLOW_P (m_max)); |
| |
| /* Creating ~[-MIN, +MAX] is stupid because that would be |
| the empty set. */ |
| if (INTEGRAL_TYPE_P (TREE_TYPE (m_min)) && m_kind == VR_ANTI_RANGE) |
| gcc_assert (!vrp_val_is_min (m_min) || !vrp_val_is_max (m_max)); |
| |
| cmp = compare_values (m_min, m_max); |
| gcc_assert (cmp == 0 || cmp == -1 || cmp == -2); |
| break; |
| } |
| case VR_UNDEFINED: |
| case VR_VARYING: |
| gcc_assert (!min () && !max ()); |
| break; |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| void |
| value_range::check () |
| { |
| value_range_base::check (); |
| switch (m_kind) |
| { |
| case VR_UNDEFINED: |
| case VR_VARYING: |
| gcc_assert (!m_equiv || bitmap_empty_p (m_equiv)); |
| default:; |
| } |
| } |
| |
| /* Equality operator. We purposely do not overload ==, to avoid |
| confusion with the equivalence bitmap in the derived value_range |
| class. */ |
| |
| bool |
| value_range_base::equal_p (const value_range_base &other) const |
| { |
| return (m_kind == other.m_kind |
| && vrp_operand_equal_p (m_min, other.m_min) |
| && vrp_operand_equal_p (m_max, other.m_max)); |
| } |
| |
| /* Returns TRUE if THIS == OTHER. Ignores the equivalence bitmap if |
| IGNORE_EQUIVS is TRUE. */ |
| |
| bool |
| value_range::equal_p (const value_range &other, bool ignore_equivs) const |
| { |
| return (value_range_base::equal_p (other) |
| && (ignore_equivs |
| || vrp_bitmap_equal_p (m_equiv, other.m_equiv))); |
| } |
| |
| /* Return TRUE if this is a symbolic range. */ |
| |
| bool |
| value_range_base::symbolic_p () const |
| { |
| return (!varying_p () |
| && !undefined_p () |
| && (!is_gimple_min_invariant (m_min) |
| || !is_gimple_min_invariant (m_max))); |
| } |
| |
| /* NOTE: This is not the inverse of symbolic_p because the range |
| could also be varying or undefined. Ideally they should be inverse |
| of each other, with varying only applying to symbolics. Varying of |
| constants would be represented as [-MIN, +MAX]. */ |
| |
| bool |
| value_range_base::constant_p () const |
| { |
| return (!varying_p () |
| && !undefined_p () |
| && TREE_CODE (m_min) == INTEGER_CST |
| && TREE_CODE (m_max) == INTEGER_CST); |
| } |
| |
| void |
| value_range_base::set_undefined () |
| { |
| set (VR_UNDEFINED, NULL, NULL); |
| } |
| |
| void |
| value_range::set_undefined () |
| { |
| set (VR_UNDEFINED, NULL, NULL, NULL); |
| } |
| |
| void |
| value_range_base::set_varying () |
| { |
| set (VR_VARYING, NULL, NULL); |
| } |
| |
| void |
| value_range::set_varying () |
| { |
| set (VR_VARYING, NULL, NULL, NULL); |
| } |
| |
| /* Return TRUE if it is possible that range contains VAL. */ |
| |
| bool |
| value_range_base::may_contain_p (tree val) const |
| { |
| if (varying_p ()) |
| return true; |
| |
| if (undefined_p ()) |
| return true; |
| |
| if (m_kind == VR_ANTI_RANGE) |
| { |
| int res = value_inside_range (val, min (), max ()); |
| return res == 0 || res == -2; |
| } |
| return value_inside_range (val, min (), max ()) != 0; |
| } |
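| |
| /* For example (an illustrative sketch; THREE, FOUR, FIVE and SEVEN |
| stand for hypothetical INTEGER_CST trees of the same integral type): |
| |
| value_range_base vr (VR_ANTI_RANGE, three, five); // ~[3, 5] |
| vr.may_contain_p (four); // false: 4 is inside the excluded range |
| vr.may_contain_p (seven); // true: 7 lies outside ~[3, 5] |
| |
| If a symbolic bound cannot be compared at compile time, |
| value_inside_range returns -2 and we conservatively answer true. */ |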
| |
| void |
| value_range::equiv_clear () |
| { |
| if (m_equiv) |
| bitmap_clear (m_equiv); |
| } |
| |
| /* Add VAR and VAR's equivalence set (VAR_VR) to the equivalence |
| bitmap. If no equivalence table has been created, OBSTACK is the |
| obstack to use (NULL for the default obstack). |
| |
| This is the central point where equivalence processing can be |
| turned on/off. */ |
| |
| void |
| value_range::equiv_add (const_tree var, |
| const value_range *var_vr, |
| bitmap_obstack *obstack) |
| { |
| if (!m_equiv) |
| m_equiv = BITMAP_ALLOC (obstack); |
| unsigned ver = SSA_NAME_VERSION (var); |
| bitmap_set_bit (m_equiv, ver); |
| if (var_vr && var_vr->m_equiv) |
| bitmap_ior_into (m_equiv, var_vr->m_equiv); |
| } |
| |
| /* If range is a singleton, place it in RESULT and return TRUE. |
| Note: A singleton can be any gimple invariant, not just constants. |
| So, [&x, &x] counts as a singleton. */ |
| |
| bool |
| value_range_base::singleton_p (tree *result) const |
| { |
| if (m_kind == VR_RANGE |
| && vrp_operand_equal_p (min (), max ()) |
| && is_gimple_min_invariant (min ())) |
| { |
| if (result) |
| *result = min (); |
| return true; |
| } |
| return false; |
| } |
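| |
| /* A short sketch of the distinction (hypothetical trees): |
| |
| [4, 4] singleton_p -> true, *RESULT = 4 |
| [&x, &x] singleton_p -> true, &x is a gimple invariant |
| [4, 5] singleton_p -> false |
| ~[4, 4] singleton_p -> false, the kind is not VR_RANGE */ |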
| |
| tree |
| value_range_base::type () const |
| { |
| /* Types are only valid for VR_RANGE and VR_ANTI_RANGE, which are |
| known to have non-null MIN and MAX. */ |
| gcc_assert (min ()); |
| return TREE_TYPE (min ()); |
| } |
| |
| void |
| value_range_base::dump (FILE *file) const |
| { |
| if (undefined_p ()) |
| fprintf (file, "UNDEFINED"); |
| else if (m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE) |
| { |
| tree ttype = type (); |
| |
| print_generic_expr (file, ttype); |
| fprintf (file, " "); |
| |
| fprintf (file, "%s[", (m_kind == VR_ANTI_RANGE) ? "~" : ""); |
| |
| if (INTEGRAL_TYPE_P (ttype) |
| && !TYPE_UNSIGNED (ttype) |
| && vrp_val_is_min (min ()) |
| && TYPE_PRECISION (ttype) != 1) |
| fprintf (file, "-INF"); |
| else |
| print_generic_expr (file, min ()); |
| |
| fprintf (file, ", "); |
| |
| if (INTEGRAL_TYPE_P (ttype) |
| && vrp_val_is_max (max ()) |
| && TYPE_PRECISION (ttype) != 1) |
| fprintf (file, "+INF"); |
| else |
| print_generic_expr (file, max ()); |
| |
| fprintf (file, "]"); |
| } |
| else if (varying_p ()) |
| fprintf (file, "VARYING"); |
| else |
| gcc_unreachable (); |
| } |
| |
| void |
| value_range::dump (FILE *file) const |
| { |
| value_range_base::dump (file); |
| if ((m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE) |
| && m_equiv) |
| { |
| bitmap_iterator bi; |
| unsigned i, c = 0; |
| |
| fprintf (file, " EQUIVALENCES: { "); |
| |
| EXECUTE_IF_SET_IN_BITMAP (m_equiv, 0, i, bi) |
| { |
| print_generic_expr (file, ssa_name (i)); |
| fprintf (file, " "); |
| c++; |
| } |
| |
| fprintf (file, "} (%u elements)", c); |
| } |
| } |
| |
| void |
| dump_value_range (FILE *file, const value_range *vr) |
| { |
| if (!vr) |
| fprintf (file, "[]"); |
| else |
| vr->dump (file); |
| } |
| |
| void |
| dump_value_range (FILE *file, const value_range_base *vr) |
| { |
| if (!vr) |
| fprintf (file, "[]"); |
| else |
| vr->dump (file); |
| } |
| |
| DEBUG_FUNCTION void |
| debug (const value_range_base *vr) |
| { |
| dump_value_range (stderr, vr); |
| } |
| |
| DEBUG_FUNCTION void |
| debug (const value_range_base &vr) |
| { |
| dump_value_range (stderr, &vr); |
| } |
| |
| DEBUG_FUNCTION void |
| debug (const value_range *vr) |
| { |
| dump_value_range (stderr, vr); |
| } |
| |
| DEBUG_FUNCTION void |
| debug (const value_range &vr) |
| { |
| dump_value_range (stderr, &vr); |
| } |
| |
| /* Return true if the SSA name NAME is live on the edge E. */ |
| |
| static bool |
| live_on_edge (edge e, tree name) |
| { |
| return (live[e->dest->index] |
| && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name))); |
| } |
| |
| /* Location information for ASSERT_EXPRs. Each instance of this |
| structure describes an ASSERT_EXPR for an SSA name. Since a single |
| SSA name may have more than one assertion associated with it, these |
| locations are kept in a linked list attached to the corresponding |
| SSA name. */ |
| struct assert_locus |
| { |
| /* Basic block where the assertion would be inserted. */ |
| basic_block bb; |
| |
| /* Some assertions need to be inserted on an edge (e.g., assertions |
| generated by COND_EXPRs). In those cases, BB will be NULL. */ |
| edge e; |
| |
| /* Pointer to the statement that generated this assertion. */ |
| gimple_stmt_iterator si; |
| |
| /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */ |
| enum tree_code comp_code; |
| |
| /* Value being compared against. */ |
| tree val; |
| |
| /* Expression to compare. */ |
| tree expr; |
| |
| /* Next node in the linked list. */ |
| assert_locus *next; |
| }; |
| |
| /* If bit I is present, it means that SSA name N_i has a list of |
| assertions that should be inserted in the IL. */ |
| static bitmap need_assert_for; |
| |
| /* Array of lists of assertion locations. ASSERTS_FOR[I] holds a |
| list of assert_locus nodes that describe where ASSERT_EXPRs for |
| SSA name N_I should be inserted. */ |
| static assert_locus **asserts_for; |
| |
| /* Return the maximum value for TYPE. */ |
| |
| tree |
| vrp_val_max (const_tree type) |
| { |
| if (!INTEGRAL_TYPE_P (type)) |
| return NULL_TREE; |
| |
| return TYPE_MAX_VALUE (type); |
| } |
| |
| /* Return the minimum value for TYPE. */ |
| |
| tree |
| vrp_val_min (const_tree type) |
| { |
| if (!INTEGRAL_TYPE_P (type)) |
| return NULL_TREE; |
| |
| return TYPE_MIN_VALUE (type); |
| } |
| |
| /* Return whether VAL is equal to the maximum value of its type. |
| We can't do a simple equality comparison with TYPE_MAX_VALUE because |
| C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE |
| is not == to the integer constant with the same value in the type. */ |
| |
| bool |
| vrp_val_is_max (const_tree val) |
| { |
| tree type_max = vrp_val_max (TREE_TYPE (val)); |
| return (val == type_max |
| || (type_max != NULL_TREE |
| && operand_equal_p (val, type_max, 0))); |
| } |
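| |
| /* An illustrative sketch of the pitfall described above, where |
| SUBTYPE is a hypothetical integral type whose TYPE_MAX_VALUE is a |
| distinct tree with numeric value 127: |
| |
| tree t1 = TYPE_MAX_VALUE (subtype); // INTEGER_CST node A |
| tree t2 = build_int_cst (subtype, 127); // INTEGER_CST node B |
| |
| Here t1 != t2 as pointers, yet operand_equal_p (t1, t2, 0) holds, |
| so the pointer comparison alone would miss this case. */ |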
| |
| /* Return whether VAL is equal to the minimum value of its type. */ |
| |
| bool |
| vrp_val_is_min (const_tree val) |
| { |
| tree type_min = vrp_val_min (TREE_TYPE (val)); |
| return (val == type_min |
| || (type_min != NULL_TREE |
| && operand_equal_p (val, type_min, 0))); |
| } |
| |
| /* VR_TYPE describes a range with minimum value *MIN and maximum |
| value *MAX. Restrict the range to the set of values that have |
| no bits set outside NONZERO_BITS. Update *MIN and *MAX and |
| return the new range type. |
| |
| SGN gives the sign of the values described by the range. */ |
| |
| enum value_range_kind |
| intersect_range_with_nonzero_bits (enum value_range_kind vr_type, |
| wide_int *min, wide_int *max, |
| const wide_int &nonzero_bits, |
| signop sgn) |
| { |
| if (vr_type == VR_ANTI_RANGE) |
| { |
| /* The VR_ANTI_RANGE is equivalent to the union of the ranges |
| A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS |
| to create an inclusive upper bound for A and an inclusive lower |
| bound for B. */ |
| wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits); |
| wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits); |
| |
| /* If the calculation of A_MAX wrapped, A is effectively empty |
| and A_MAX is the highest value that satisfies NONZERO_BITS. |
| Likewise if the calculation of B_MIN wrapped, B is effectively |
| empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */ |
| bool a_empty = wi::ge_p (a_max, *min, sgn); |
| bool b_empty = wi::le_p (b_min, *max, sgn); |
| |
| /* If both A and B are empty, there are no valid values. */ |
| if (a_empty && b_empty) |
| return VR_UNDEFINED; |
| |
| /* If exactly one of A or B is empty, return a VR_RANGE for the |
| other one. */ |
| if (a_empty || b_empty) |
| { |
| *min = b_min; |
| *max = a_max; |
| gcc_checking_assert (wi::le_p (*min, *max, sgn)); |
| return VR_RANGE; |
| } |
| |
| /* Update the VR_ANTI_RANGE bounds. */ |
| *min = a_max + 1; |
| *max = b_min - 1; |
| gcc_checking_assert (wi::le_p (*min, *max, sgn)); |
| |
| /* Now check whether the excluded range includes any values that |
| satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */ |
| if (wi::round_up_for_mask (*min, nonzero_bits) == b_min) |
| { |
| unsigned int precision = min->get_precision (); |
| *min = wi::min_value (precision, sgn); |
| *max = wi::max_value (precision, sgn); |
| vr_type = VR_RANGE; |
| } |
| } |
| if (vr_type == VR_RANGE) |
| { |
| *max = wi::round_down_for_mask (*max, nonzero_bits); |
| |
| /* Check that the range contains at least one valid value. */ |
| if (wi::gt_p (*min, *max, sgn)) |
| return VR_UNDEFINED; |
| |
| *min = wi::round_up_for_mask (*min, nonzero_bits); |
| gcc_checking_assert (wi::le_p (*min, *max, sgn)); |
| } |
| return vr_type; |
| } |
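| |
| /* A worked example of the VR_RANGE case above: for *MIN = 1, *MAX = 10 |
| and NONZERO_BITS = 0b1100, the only representable values are 0, 4, 8 |
| and 12. round_up_for_mask raises *MIN to 4 and round_down_for_mask |
| lowers *MAX to 8, giving [4, 8]. Had the rounded bounds crossed, |
| e.g. for [9, 10] with the same mask, the result would be |
| VR_UNDEFINED. */ |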
| |
| |
| /* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}. |
| This means adjusting VRTYPE, MIN and MAX so that a wrapping range |
| with MAX < MIN, covering [MIN, type_max] U [type_min, MAX], is |
| represented as the anti-range ~[MAX+1, MIN-1]. Likewise for |
| wrapping anti-ranges. |
| In corner cases where MAX+1 or MIN-1 wraps this will fall back |
| to varying. |
| This routine exists to ease canonicalization in the case where we |
| extract ranges from var + CST op limit. */ |
| |
| void |
| value_range_base::set_and_canonicalize (enum value_range_kind kind, |
| tree min, tree max) |
| { |
| /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */ |
| if (kind == VR_UNDEFINED) |
| { |
| set_undefined (); |
| return; |
| } |
| else if (kind == VR_VARYING) |
| { |
| set_varying (); |
| return; |
| } |
| |
| /* Nothing to canonicalize for symbolic ranges. */ |
| if (TREE_CODE (min) != INTEGER_CST |
| || TREE_CODE (max) != INTEGER_CST) |
| { |
| set (kind, min, max); |
| return; |
| } |
| |
| /* MIN and MAX are in the wrong order; to swap them we also need to |
| adjust the VR kind. */ |
| if (tree_int_cst_lt (max, min)) |
| { |
| tree one, tmp; |
| |
| /* For one-bit precision, if max < min the swapped range covers |
| all values, so a VR_RANGE is varying and a VR_ANTI_RANGE is the |
| empty set; drop to varying in both cases. */ |
| if (TYPE_PRECISION (TREE_TYPE (min)) == 1) |
| { |
| set_varying (); |
| return; |
| } |
| |
| one = build_int_cst (TREE_TYPE (min), 1); |
| tmp = int_const_binop (PLUS_EXPR, max, one); |
| max = int_const_binop (MINUS_EXPR, min, one); |
| min = tmp; |
| |
| /* There's one corner case: if we had [C+1, C] before, we now have |
| that again. But this represents an empty value range, so drop |
| to varying in this case. */ |
| if (tree_int_cst_lt (max, min)) |
| { |
| set_varying (); |
| return; |
| } |
| |
| kind = kind == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE; |
| } |
| |
| /* Anti-ranges that can be represented as ranges should be so. */ |
| if (kind == VR_ANTI_RANGE) |
| { |
| /* With -fstrict-enums we may receive out-of-range ranges, so treat |
| values < -INF and values > +INF as -INF/+INF as well. */ |
| tree type = TREE_TYPE (min); |
| bool is_min = (INTEGRAL_TYPE_P (type) |
| && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0); |
| bool is_max = (INTEGRAL_TYPE_P (type) |
| && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0); |
| |
| if (is_min && is_max) |
| { |
| /* We cannot deal with empty ranges, drop to varying. |
| ??? This could be VR_UNDEFINED instead. */ |
| set_varying (); |
| return; |
| } |
| else if (TYPE_PRECISION (TREE_TYPE (min)) == 1 |
| && (is_min || is_max)) |
| { |
| /* Non-empty boolean ranges can always be represented |
| as a singleton range. */ |
| if (is_min) |
| min = max = vrp_val_max (TREE_TYPE (min)); |
| else |
| min = max = vrp_val_min (TREE_TYPE (min)); |
| kind = VR_RANGE; |
| } |
| else if (is_min |
| /* As a special exception preserve non-null ranges. */ |
| && !(TYPE_UNSIGNED (TREE_TYPE (min)) |
| && integer_zerop (max))) |
| { |
| tree one = build_int_cst (TREE_TYPE (max), 1); |
| min = int_const_binop (PLUS_EXPR, max, one); |
| max = vrp_val_max (TREE_TYPE (max)); |
| kind = VR_RANGE; |
| } |
| else if (is_max) |
| { |
| tree one = build_int_cst (TREE_TYPE (min), 1); |
| max = int_const_binop (MINUS_EXPR, min, one); |
| min = vrp_val_min (TREE_TYPE (min)); |
| kind = VR_RANGE; |
| } |
| } |
| |
| /* Do not drop [-INF(OVF), +INF(OVF)] to varying. (OVF) has to be sticky |
| to make sure VRP iteration terminates, otherwise we can get into |
| oscillations. */ |
| |
| set (kind, min, max); |
| } |
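| |
| /* A worked example of the wrapping case above, assuming an unsigned |
| 8-bit type: the pair MIN = 10, MAX = 5 denotes the wrapping range |
| [10, 255] U [0, 5]. Since MAX < MIN, the bounds are swapped to |
| [MAX+1, MIN-1] = [6, 9] and the kind is flipped, yielding the |
| canonical anti-range ~[6, 9]. */ |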
| |
| void |
| value_range::set_and_canonicalize (enum value_range_kind kind, |
| tree min, tree max, bitmap equiv) |
| { |
| value_range_base::set_and_canonicalize (kind, min, max); |
| if (this->kind () == VR_RANGE || this->kind () == VR_ANTI_RANGE) |
| set_equiv (equiv); |
| else |
| equiv_clear (); |
| } |
| |
| void |
| value_range_base::set (tree val) |
| { |
| gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val)); |
| if (TREE_OVERFLOW_P (val)) |
| val = drop_tree_overflow (val); |
| set (VR_RANGE, val, val); |
| } |
| |
| void |
| value_range::set (tree val) |
| { |
| gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val)); |
| if (TREE_OVERFLOW_P (val)) |
| val = drop_tree_overflow (val); |
| set (VR_RANGE, val, val, NULL); |
| } |
| |
| /* Set value range VR to a non-NULL range of type TYPE. */ |
| |
| void |
| value_range_base::set_nonnull (tree type) |
| { |
| tree zero = build_int_cst (type, 0); |
| set (VR_ANTI_RANGE, zero, zero); |
| } |
| |
| void |
| value_range::set_nonnull (tree type) |
| { |
| tree zero = build_int_cst (type, 0); |
| set (VR_ANTI_RANGE, zero, zero, NULL); |
| } |
| |
| /* Set value range VR to a NULL range of type TYPE. */ |
| |
| void |
| value_range_base::set_null (tree type) |
| { |
| set (build_int_cst (type, 0)); |
| } |
| |
| void |
| value_range::set_null (tree type) |
| { |
| set (build_int_cst (type, 0)); |
| } |
| |
| /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */ |
| |
| bool |
| vrp_operand_equal_p (const_tree val1, const_tree val2) |
| { |
| if (val1 == val2) |
| return true; |
| if (!val1 || !val2 || !operand_equal_p (val1, val2, 0)) |
| return false; |
| return true; |
| } |
| |
| /* Return true, if the bitmaps B1 and B2 are equal. */ |
| |
| bool |
| vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2) |
| { |
| return (b1 == b2 |
| || ((!b1 || bitmap_empty_p (b1)) |
| && (!b2 || bitmap_empty_p (b2))) |
| || (b1 && b2 |
| && bitmap_equal_p (b1, b2))); |
| } |
| |
| /* Return true if VR is [0, 0]. */ |
| |
| static inline bool |
| range_is_null (const value_range_base *vr) |
| { |
| return vr->zero_p (); |
| } |
| |
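| /* Return true if VR is ~[0, 0]. */ |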
| static inline bool |
| range_is_nonnull (const value_range_base *vr) |
| { |
| return (vr->kind () == VR_ANTI_RANGE |
| && vr->min () == vr->max () |
| && integer_zerop (vr->min ())); |
| } |
| |
| /* Return true if the min and max of VR are INTEGER_CST. It is not |
| necessarily a singleton. */ |
| |
| bool |
| range_int_cst_p (const value_range_base *vr) |
| { |
| return (vr->kind () == VR_RANGE |
| && TREE_CODE (vr->min ()) == INTEGER_CST |
| && TREE_CODE (vr->max ()) == INTEGER_CST); |
| } |
| |
| /* Return true if VR is an INTEGER_CST singleton. */ |
| |
| bool |
| range_int_cst_singleton_p (const value_range_base *vr) |
| { |
| return (range_int_cst_p (vr) |
| && tree_int_cst_equal (vr->min (), vr->max ())); |
| } |
| |
| /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE |
| otherwise. We only handle additive operations and set NEG to true if the |
| symbol is negated and INV to the invariant part, if any. */ |
| |
| tree |
| get_single_symbol (tree t, bool *neg, tree *inv) |
| { |
| bool neg_; |
| tree inv_; |
| |
| *inv = NULL_TREE; |
| *neg = false; |
| |
| if (TREE_CODE (t) == PLUS_EXPR |
| || TREE_CODE (t) == POINTER_PLUS_EXPR |
| || TREE_CODE (t) == MINUS_EXPR) |
| { |
| if (is_gimple_min_invariant (TREE_OPERAND (t, 0))) |
| { |
| neg_ = (TREE_CODE (t) == MINUS_EXPR); |
| inv_ = TREE_OPERAND (t, 0); |
| t = TREE_OPERAND (t, 1); |
| } |
| else if (is_gimple_min_invariant (TREE_OPERAND (t, 1))) |
| { |
| neg_ = false; |
| inv_ = TREE_OPERAND (t, 1); |
| t = TREE_OPERAND (t, 0); |
| } |
| else |
| return NULL_TREE; |
| } |
| else |
| { |
| neg_ = false; |
| inv_ = NULL_TREE; |
| } |
| |
| if (TREE_CODE (t) == NEGATE_EXPR) |
| { |
| t = TREE_OPERAND (t, 0); |
| neg_ = !neg_; |
| } |
| |
| if (TREE_CODE (t) != SSA_NAME) |
| return NULL_TREE; |
| |
| if (inv_ && TREE_OVERFLOW_P (inv_)) |
| inv_ = drop_tree_overflow (inv_); |
| |
| *neg = neg_; |
| *inv = inv_; |
| return t; |
| } |
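| |
| /* Illustrative sketches of the decomposition, where x_1 and y_2 are |
| hypothetical SSA names: |
| |
| x_1 -> x_1, *NEG = false, *INV = NULL_TREE |
| x_1 + 3 -> x_1, *NEG = false, *INV = 3 |
| 5 - x_1 -> x_1, *NEG = true, *INV = 5 |
| -x_1 + 7 -> x_1, *NEG = true, *INV = 7 |
| x_1 + y_2 -> NULL_TREE, neither operand is invariant */ |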
| |
| /* The reverse operation: build a symbolic expression with TYPE |
| from symbol SYM, negated according to NEG, and invariant INV. */ |
| |
| static tree |
| build_symbolic_expr (tree type, tree sym, bool neg, tree inv) |
| { |
| const bool pointer_p = POINTER_TYPE_P (type); |
| tree t = sym; |
| |
| if (neg) |
| t = build1 (NEGATE_EXPR, type, t); |
| |
| if (integer_zerop (inv)) |
| return t; |
| |
| return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv); |
| } |
| |
| /* Return |
| 1 if VAL < VAL2 |
| 0 if !(VAL < VAL2) |
| -2 if VAL and VAL2 cannot be compared. */ |
| int |
| operand_less_p (tree val, tree val2) |
| { |
| /* LT is folded faster than GE and others. Inline the common case. */ |
| if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST) |
| return tree_int_cst_lt (val, val2); |
| else |
| { |
| tree tcmp; |
| |
| fold_defer_overflow_warnings (); |
| |
| tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2); |
| |
| fold_undefer_and_ignore_overflow_warnings (); |
| |
| if (!tcmp |
| || TREE_CODE (tcmp) != INTEGER_CST) |
| return -2; |
| |
| if (!integer_zerop (tcmp)) |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
| /* Compare two values VAL1 and VAL2. Return |
| |
| -2 if VAL1 and VAL2 cannot be compared at compile-time, |
| -1 if VAL1 < VAL2, |
| 0 if VAL1 == VAL2, |
| +1 if VAL1 > VAL2, and |
| +2 if VAL1 != VAL2 |
| |
| This is similar to tree_int_cst_compare but supports pointer values |
| and values that cannot be compared at compile time. |
| |
| If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to |
| true if the return value is only valid if we assume that signed |
| overflow is undefined. */ |
| |
| int |
| compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p) |
| { |
| if (val1 == val2) |
| return 0; |
| |
| /* Below we rely on the fact that VAL1 and VAL2 are both pointers or |
| both integers. */ |
| gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1)) |
| == POINTER_TYPE_P (TREE_TYPE (val2))); |
| |
| /* Convert the two values into the same type. This is needed because |
| sizetype causes sign extension even for unsigned types. */ |
| val2 = fold_convert (TREE_TYPE (val1), val2); |
| STRIP_USELESS_TYPE_CONVERSION (val2); |
| |
| const bool overflow_undefined |
| = INTEGRAL_TYPE_P (TREE_TYPE (val1)) |
| && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)); |
| tree inv1, inv2; |
| bool neg1, neg2; |
| tree sym1 = get_single_symbol (val1, &neg1, &inv1); |
| tree sym2 = get_single_symbol (val2, &neg2, &inv2); |
| |
| /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1 |
| accordingly. If VAL1 and VAL2 don't use the same name, return -2. */ |
| if (sym1 && sym2) |
| { |
| /* Both values must use the same name with the same sign. */ |
| if (sym1 != sym2 || neg1 != neg2) |
| return -2; |
| |
| /* [-]NAME + CST == [-]NAME + CST. */ |
| if (inv1 == inv2) |
| return 0; |
| |
| /* If overflow is defined we cannot simplify more. */ |
| if (!overflow_undefined) |
| return -2; |
| |
| if (strict_overflow_p != NULL |
| /* Symbolic range building sets TREE_NO_WARNING to declare |
| that overflow doesn't happen. */ |
| && (!inv1 || !TREE_NO_WARNING (val1)) |
| && (!inv2 || !TREE_NO_WARNING (val2))) |
| *strict_overflow_p = true; |
| |
| if (!inv1) |
| inv1 = build_int_cst (TREE_TYPE (val1), 0); |
| if (!inv2) |
| inv2 = build_int_cst (TREE_TYPE (val2), 0); |
| |
| return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2), |
| TYPE_SIGN (TREE_TYPE (val1))); |
| } |
| |
| const bool cst1 = is_gimple_min_invariant (val1); |
| const bool cst2 = is_gimple_min_invariant (val2); |
| |
| /* If one is of the form '[-]NAME + CST' and the other is constant, then |
| it might be possible to say something depending on the constants. */ |
| if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1)) |
| { |
| if (!overflow_undefined) |
| return -2; |
| |
| if (strict_overflow_p != NULL |
| /* Symbolic range building sets TREE_NO_WARNING to declare |
| that overflow doesn't happen. */ |
| && (!sym1 || !TREE_NO_WARNING (val1)) |
| && (!sym2 || !TREE_NO_WARNING (val2))) |
| *strict_overflow_p = true; |
| |
| const signop sgn = TYPE_SIGN (TREE_TYPE (val1)); |
| tree cst = cst1 ? val1 : val2; |
| tree inv = cst1 ? inv2 : inv1; |
| |
| /* Compute the difference between the constants. If it overflows or |
| underflows, this means that we can trivially compare the NAME with |
| it and, consequently, the two values with each other. */ |
| wide_int diff = wi::to_wide (cst) - wi::to_wide (inv); |
| if (wi::cmp (0, wi::to_wide (inv), sgn) |
| != wi::cmp (diff, wi::to_wide (cst), sgn)) |
| { |
| const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn); |
| return cst1 ? res : -res; |
| } |
| |
| return -2; |
| } |
| |
| /* We cannot say anything more for non-constants. */ |
| if (!cst1 || !cst2) |
| return -2; |
| |
| if (!POINTER_TYPE_P (TREE_TYPE (val1))) |
| { |
| /* We cannot compare overflowed values. */ |
| if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2)) |
| return -2; |
| |
| if (TREE_CODE (val1) == INTEGER_CST |
| && TREE_CODE (val2) == INTEGER_CST) |
| return tree_int_cst_compare (val1, val2); |
| |
| if (poly_int_tree_p (val1) && poly_int_tree_p (val2)) |
| { |
| if (known_eq (wi::to_poly_widest (val1), |
| wi::to_poly_widest (val2))) |
| return 0; |
| if (known_lt (wi::to_poly_widest (val1), |
| wi::to_poly_widest (val2))) |
| return -1; |
| if (known_gt (wi::to_poly_widest (val1), |
| wi::to_poly_widest (val2))) |
| return 1; |
| } |
| |
| return -2; |
| } |
| else |
| { |
| tree t; |
| |
| /* First see if VAL1 and VAL2 are not the same. */ |
| if (val1 == val2 || operand_equal_p (val1, val2, 0)) |
| return 0; |
| |
| /* If VAL1 is a lower address than VAL2, return -1. */ |
| if (operand_less_p (val1, val2) == 1) |
| return -1; |
| |
| /* If VAL1 is a higher address than VAL2, return +1. */ |
| if (operand_less_p (val2, val1) == 1) |
| return 1; |
| |
| /* If VAL1 is different than VAL2, return +2. |
| For integer constants we either have already returned -1 or 1 |
| or they are equivalent. We still might succeed in proving |
| something about non-trivial operands. */ |
| if (TREE_CODE (val1) != INTEGER_CST |
| || TREE_CODE (val2) != INTEGER_CST) |
| { |
| t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2); |
| if (t && integer_onep (t)) |
| return 2; |
| } |
| |
| return -2; |
| } |
| } |
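| |
| /* For example (a sketch; x_1 and y_2 are hypothetical SSA names of a |
| signed type with undefined overflow): |
| |
| compare_values_warnv (x_1 + 1, x_1 + 2, &sop) -> -1, sop = true |
| compare_values_warnv (x_1, y_2, &sop) -> -2, different names |
| compare_values_warnv (3, 7, &sop) -> -1 |
| |
| The first result holds only if signed overflow is assumed to be |
| undefined, hence *STRICT_OVERFLOW_P is set. */ |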
| |
| /* Compare values like compare_values_warnv. */ |
| |
| int |
| compare_values (tree val1, tree val2) |
| { |
| bool sop; |
| return compare_values_warnv (val1, val2, &sop); |
| } |
| |
| |
| /* Return 1 if VAL is inside value range MIN <= VAL <= MAX, |
| 0 if VAL is not inside [MIN, MAX], |
| -2 if we cannot tell either way. |
| |
| Benchmark compile/20001226-1.c compilation time after changing this |
| function. */ |
| |
| int |
| value_inside_range (tree val, tree min, tree max) |
| { |
| int cmp1, cmp2; |
| |
| cmp1 = operand_less_p (val, min); |
| if (cmp1 == -2) |
| return -2; |
| if (cmp1 == 1) |
| return 0; |
| |
| cmp2 = operand_less_p (max, val); |
| if (cmp2 == -2) |
| return -2; |
| |
| return !cmp2; |
| } |
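| |
| /* For instance, value_inside_range (5, 1, 10) is 1 and |
| value_inside_range (0, 1, 10) is 0. With a symbolic bound such as |
| MAX = n_2 (a hypothetical SSA name), operand_less_p cannot fold the |
| comparison and the function returns -2. */ |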
| |
| |
| /* Return TRUE if *VR includes the value X. */ |
| |
| bool |
| range_includes_p (const value_range_base *vr, HOST_WIDE_INT x) |
| { |
| if (vr->varying_p () || vr->undefined_p ()) |
| return true; |
| return vr->may_contain_p (build_int_cst (vr->type (), x)); |
| } |
| |
| /* If *VR has a value range that is a single constant value return that, |
| otherwise return NULL_TREE. |
| |
| ??? This actually succeeds for [&x, &x] as well, so perhaps |
| "constant" is not the best name. */ |
| |
| tree |
| value_range_constant_singleton (const value_range_base *vr) |
| { |
| tree result = NULL; |
| if (vr->singleton_p (&result)) |
| return result; |
| return NULL; |
| } |
| |
| /* Value range wrapper for wide_int_range_set_zero_nonzero_bits. |
| |
| Compute MAY_BE_NONZERO and MUST_BE_NONZERO bit masks for range in VR. |
| |
| Return TRUE if VR was a constant range and we were able to compute |
| the bit masks. */ |
| |
| bool |
| vrp_set_zero_nonzero_bits (const tree expr_type, |
| const value_range_base *vr, |
| wide_int *may_be_nonzero, |
| wide_int *must_be_nonzero) |
| { |
| if (!range_int_cst_p (vr)) |
| { |
| *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type)); |
| *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type)); |
| return false; |
| } |
| wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type), |
| wi::to_wide (vr->min ()), |
| wi::to_wide (vr->max ()), |
| *may_be_nonzero, *must_be_nonzero); |
| return true; |
| } |
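| |
| /* A worked example: for VR = [4, 7] in an unsigned type, every value |
| in the range has bit 2 set and no higher bits, so *MUST_BE_NONZERO |
| is 0b100 and *MAY_BE_NONZERO is 0b111. For a non-constant range the |
| masks fall back to "nothing known": *MUST_BE_NONZERO is 0 and |
| *MAY_BE_NONZERO is all ones. */ |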
| |
| /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR |
| so that *VR0 U *VR1 == *AR. Returns true if that is possible, |
| false otherwise. If *AR can be represented with a single range |
| *VR1 will be VR_UNDEFINED. */ |
| |
| static bool |
| ranges_from_anti_range (const value_range_base *ar, |
| value_range_base *vr0, value_range_base *vr1) |
| { |
| tree type = ar->type (); |
| |
| vr0->set_undefined (); |
| vr1->set_undefined (); |
| |
| /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U |
| [A+1, +INF]. Not sure if this helps in practice, though. */ |
| |
| if (ar->kind () != VR_ANTI_RANGE |
| || TREE_CODE (ar->min ()) != INTEGER_CST |
| || TREE_CODE (ar->max ()) != INTEGER_CST |
| || !vrp_val_min (type) |
| || !vrp_val_max (type)) |
| return false; |
| |
| if (tree_int_cst_lt (vrp_val_min (type), ar->min ())) |
| vr0->set (VR_RANGE, |
| vrp_val_min (type), |
| wide_int_to_tree (type, wi::to_wide (ar->min ()) - 1)); |
| if (tree_int_cst_lt (ar->max (), vrp_val_max (type))) |
| vr1->set (VR_RANGE, |
| wide_int_to_tree (type, wi::to_wide (ar->max ()) + 1), |
| vrp_val_max (type)); |
| if (vr0->undefined_p ()) |
| { |
| *vr0 = *vr1; |
| vr1->set_undefined (); |
| } |
| |
| return !vr0->undefined_p (); |
| } |
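| |
| /* For example, for a 32-bit int, ~[3, 5] decomposes into |
| *VR0 = [INT_MIN, 2] and *VR1 = [6, INT_MAX], while ~[INT_MIN, 5] is |
| representable as the single range *VR0 = [6, INT_MAX], with *VR1 |
| left VR_UNDEFINED. */ |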
| |
| /* Extract the components of a value range into a pair of wide ints in |
| [WMIN, WMAX]. |
| |
| If the value range is anything but a VR_*RANGE of constants, the |
| resulting wide ints are set to [-MIN, +MAX] for the type. */ |
| |
| static inline void |
| extract_range_into_wide_ints (const value_range_base *vr, |
| signop sign, unsigned prec, |
| wide_int &wmin, wide_int &wmax) |
| { |
| gcc_assert (vr->kind () != VR_ANTI_RANGE || vr->symbolic_p ()); |
| if (range_int_cst_p (vr)) |
| { |
| wmin = wi::to_wide (vr->min ()); |
| wmax = wi::to_wide (vr->max ()); |
| } |
| else |
| { |
| wmin = wi::min_value (prec, sign); |
| wmax = wi::max_value (prec, sign); |
| } |
| } |
| |
| /* Value range wrapper for wide_int_range_multiplicative_op: |
| |
| *VR = *VR0 .CODE. *VR1. */ |
| |
| static void |
| extract_range_from_multiplicative_op (value_range_base *vr, |
| enum tree_code code, |
| const value_range_base *vr0, |
| const value_range_base *vr1) |
| { |
| gcc_assert (code == MULT_EXPR |
| || code == TRUNC_DIV_EXPR |
| || code == FLOOR_DIV_EXPR |
| || code == CEIL_DIV_EXPR |
| || code == EXACT_DIV_EXPR |
| || code == ROUND_DIV_EXPR |
| || code == RSHIFT_EXPR |
| || code == LSHIFT_EXPR); |
| gcc_assert (vr0->kind () == VR_RANGE |
| && vr0->kind () == vr1->kind ()); |
| |
| tree type = vr0->type (); |
| wide_int res_lb, res_ub; |
| wide_int vr0_lb = wi::to_wide (vr0->min ()); |
| wide_int vr0_ub = wi::to_wide (vr0->max ()); |
| wide_int vr1_lb = wi::to_wide (vr1->min ()); |
| wide_int vr1_ub = wi::to_wide (vr1->max ()); |
| bool overflow_undefined = TYPE_OVERFLOW_UNDEFINED (type); |
| unsigned prec = TYPE_PRECISION (type); |
| |
| if (wide_int_range_multiplicative_op (res_lb, res_ub, |
| code, TYPE_SIGN (type), prec, |
| vr0_lb, vr0_ub, vr1_lb, vr1_ub, |
| overflow_undefined)) |
| vr->set_and_canonicalize (VR_RANGE, |
| wide_int_to_tree (type, res_lb), |
| wide_int_to_tree (type, res_ub)); |
| else |
| vr->set_varying (); |
| } |
| |
| /* If BOUND will include a symbolic bound, adjust it accordingly, |
| otherwise leave it as is. |
| |
| CODE is the original operation that combined the bounds (PLUS_EXPR |
| or MINUS_EXPR). |
| |
| TYPE is the type of the original operation. |
| |
| SYM_OPn is the symbolic for OPn if it has a symbolic. |
| |
| NEG_OPn is TRUE if the OPn was negated. */ |
| |
| static void |
| adjust_symbolic_bound (tree &bound, enum tree_code code, tree type, |
| tree sym_op0, tree sym_op1, |
| bool neg_op0, bool neg_op1) |
| { |
| bool minus_p = (code == MINUS_EXPR); |
| /* If the result bound is constant, we're done; otherwise, build the |
| symbolic lower bound. */ |
| if (sym_op0 == sym_op1) |
| ; |
| else if (sym_op0) |
| bound = build_symbolic_expr (type, sym_op0, |
| neg_op0, bound); |
| else if (sym_op1) |
| { |
| /* We may not negate if that might introduce |
| undefined overflow. */ |
| if (!minus_p |
| || neg_op1 |
| || TYPE_OVERFLOW_WRAPS (type)) |
| bound = build_symbolic_expr (type, sym_op1, |
| neg_op1 ^ minus_p, bound); |
| else |
| bound = NULL_TREE; |
| } |
| } |
| |
| /* Combine OP0 and OP1, which are two parts of a bound, into one wide |
| int bound according to CODE. CODE is the operation combining the |
| bound (either a PLUS_EXPR or a MINUS_EXPR). |
| |
| TYPE is the type of the combine operation. |
| |
| WI is the wide int to store the result. |
| |
| OVF is set to wi::OVF_UNDERFLOW if an underflow occurred, |
| wi::OVF_OVERFLOW if an overflow occurred, and wi::OVF_NONE if |
| neither occurred. */ |
| |
| static void |
| combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf, |
| tree type, tree op0, tree op1) |
| { |
| bool minus_p = (code == MINUS_EXPR); |
| const signop sgn = TYPE_SIGN (type); |
| const unsigned int prec = TYPE_PRECISION (type); |
| |
| /* Combine the bounds, if any. */ |
| if (op0 && op1) |
| { |
| if (minus_p) |
| wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf); |
| else |
| wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf); |
| } |
| else if (op0) |
| wi = wi::to_wide (op0); |
| else if (op1) |
| { |
| if (minus_p) |
| wi = wi::neg (wi::to_wide (op1), &ovf); |
| else |
| wi = wi::to_wide (op1); |
| } |
| else |
| wi = wi::shwi (0, prec); |
| } |
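| |
| /* Sketches of the combinations above, assuming an 8-bit signed type: |
| |
| PLUS_EXPR, op0 = 100, op1 = 50 -> ovf = wi::OVF_OVERFLOW, since |
| 150 does not fit in the type |
| MINUS_EXPR, op0 = NULL, op1 = 5 -> wi = -5, ovf from wi::neg |
| MINUS_EXPR, op0 = NULL, op1 = NULL -> wi = 0, the neutral bound */ |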
| |
| /* Given a range in [WMIN, WMAX], adjust it for possible overflow and |
| store the resulting range kind and bounds in KIND, MIN and MAX. |
| |
| TYPE is the type of the range. |
| |
| MIN_OVF and MAX_OVF indicate what type of overflow, if any, |
| occurred while originally calculating WMIN or WMAX: |
| wi::OVF_UNDERFLOW, wi::OVF_OVERFLOW, or wi::OVF_NONE for neither. */ |
| |
| static void |
| set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max, |
| tree type, |
| const wide_int &wmin, const wide_int &wmax, |
| wi::overflow_type min_ovf, |
| wi::overflow_type max_ovf) |
| { |
| const signop sgn = TYPE_SIGN (type); |
| const unsigned int prec = TYPE_PRECISION (type); |
| |
| /* For one bit precision if max < min, then the swapped |
| range covers all values. */ |
| if (prec == 1 && wi::lt_p (wmax, wmin, sgn)) |
| { |
| kind = VR_VARYING; |
| return; |
| } |
| |
| if (TYPE_OVERFLOW_WRAPS (type)) |
| { |
| /* If overflow wraps, truncate the values and adjust the |
| range kind and bounds appropriately. */ |
| wide_int tmin = wide_int::from (wmin, prec, sgn); |
| wide_int tmax = wide_int::from (wmax, prec, sgn); |
| if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE)) |
| { |
| /* If the limits are swapped, we wrapped around and cover |
| the entire range. We have a similar check at the end of |
| extract_range_from_binary_expr. */ |
| if (wi::gt_p (tmin, tmax, sgn)) |
| kind = VR_VARYING; |
| else |
| { |
| kind = VR_RANGE; |
| /* Neither bound overflowed, or both did. The |
| range kind stays VR_RANGE. */ |
| min = wide_int_to_tree (type, tmin); |
| max = wide_int_to_tree (type, tmax); |
| } |
| return; |
| } |
| else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE) |
| || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE)) |
| { |
| /* Min underflow or max overflow. The range kind |
| changes to VR_ANTI_RANGE. */ |
| bool covers = false; |
| wide_int tem = tmin; |
| tmin = tmax + 1; |
| if (wi::cmp (tmin, tmax, sgn) < 0) |
| covers = true; |
| tmax = tem - 1; |
| if (wi::cmp (tmax, tem, sgn) > 0) |
| covers = true; |
| /* If the anti-range would cover nothing, drop to varying. |
| Likewise if the anti-range bounds are outside of the |
| type's values. */ |
| if (covers || wi::cmp (tmin, tmax, sgn) > 0) |
| { |
| kind = VR_VARYING; |
| return; |
| } |
| kind = VR_ANTI_RANGE; |
| min = wide_int_to_tree (type, tmin); |
| max = wide_int_to_tree (type, tmax); |
| return; |
| } |
| else |
| { |
| /* Other underflow and/or overflow, drop to VR_VARYING. */ |
| kind = VR_VARYING; |
| return; |
| } |
| } |
| else |
| { |
| /* If overflow does not wrap, saturate to the type's min/max |
| value. */ |
| wide_int type_min = wi::min_value (prec, sgn); |
| wide_int type_max = wi::max_value (prec, sgn); |
| kind = VR_RANGE; |
| if (min_ovf == wi::OVF_UNDERFLOW) |
| min = wide_int_to_tree (type, type_min); |
| else if (min_ovf == wi::OVF_OVERFLOW) |
| min = wide_int_to_tree (type, type_max); |
| else |
| min = wide_int_to_tree (type, wmin); |
| |
| if (max_ovf == wi::OVF_UNDERFLOW) |
| max = wide_int_to_tree (type, type_min); |
| else if (max_ovf == wi::OVF_OVERFLOW) |
| max = wide_int_to_tree (type, type_max); |
| else |
| max = wide_int_to_tree (type, wmax); |
| } |
| } |
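| |
| /* A worked example of the wrapping case: assume an unsigned 8-bit |
| type where the upper-bound computation 250 + 10 wrapped, so |
| WMAX = 4 with MAX_OVF = wi::OVF_OVERFLOW while WMIN = 250 did not |
| overflow. Only the upper bound overflowed, so the result becomes |
| the anti-range ~[5, 249], i.e. [250, 255] U [0, 4]. If overflow |
| does not wrap, the bounds saturate to the type's extremes instead. */ |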
| |
| /* Extract range information from a binary operation CODE based on |
| the ranges of each of its operands *VR0 and *VR1 with resulting |
| type EXPR_TYPE. The resulting range is stored in *VR. */ |
| |
| void |
| extract_range_from_binary_expr (value_range_base *vr, |
| enum tree_code code, tree expr_type, |
| const value_range_base *vr0_, |
| const value_range_base *vr1_) |
| { |
| signop sign = TYPE_SIGN (expr_type); |
| unsigned int prec = TYPE_PRECISION (expr_type); |
| value_range_base vr0 = *vr0_, vr1 = *vr1_; |
| value_range_base vrtem0, vrtem1; |
| enum value_range_kind type; |
| tree min = NULL_TREE, max = NULL_TREE; |
| int cmp; |
| |
| if (!INTEGRAL_TYPE_P (expr_type) |
| && !POINTER_TYPE_P (expr_type)) |
| { |
| vr->set_varying (); |
| return; |
| } |
| |
| /* Not all binary expressions can be applied to ranges in a |
| meaningful way. Handle only arithmetic operations. */ |
| if (code != PLUS_EXPR |
| && code != MINUS_EXPR |
| && code != POINTER_PLUS_EXPR |
| && code != MULT_EXPR |
| && code != TRUNC_DIV_EXPR |
| && code != FLOOR_DIV_EXPR |
| && code != CEIL_DIV_EXPR |
| && code != EXACT_DIV_EXPR |
| && code != ROUND_DIV_EXPR |
| && code != TRUNC_MOD_EXPR |
| && code != RSHIFT_EXPR |
| && code != LSHIFT_EXPR |
| && code != MIN_EXPR |
| && code != MAX_EXPR |
| && code != BIT_AND_EXPR |
| && code != BIT_IOR_EXPR |
| && code != BIT_XOR_EXPR) |
| { |
| vr->set_varying (); |
| return; |
| } |
| |
| /* If both ranges are UNDEFINED, so is the result. */ |
| if (vr0.undefined_p () && vr1.undefined_p ()) |
| { |
| vr->set_undefined (); |
| return; |
| } |
| /* If one of the ranges is UNDEFINED drop it to VARYING for the following |
| code. At some point we may want to special-case operations that |
| have an UNDEFINED result for all or some value-ranges of the other, |
| defined operand. */ |
| else if (vr0.undefined_p ()) |
| vr0.set_varying (); |
| else if (vr1.undefined_p ()) |
| vr1.set_varying (); |
| |
| /* We get imprecise results from ranges_from_anti_range when |
| code is EXACT_DIV_EXPR. We could mask out bits in the resulting |
| range, but then we also need to hack up vrp_union. It's just |
| easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR. */ |
| if (code == EXACT_DIV_EXPR && range_is_nonnull (&vr0)) |
| { |
| vr->set_nonnull (expr_type); |
| return; |
| } |
| |
| /* Now canonicalize anti-ranges to ranges when they are not symbolic |
| and express ~[] op X as ([]' op X) U ([]'' op X). */ |
| if (vr0.kind () == VR_ANTI_RANGE |
| && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) |
| { |
| extract_range_from_binary_expr (vr, code, expr_type, &vrtem0, vr1_); |
| if (!vrtem1.undefined_p ()) |
| { |
| value_range_base vrres; |
| extract_range_from_binary_expr (&vrres, code, expr_type, |
| &vrtem1, vr1_); |
| vr->union_ (&vrres); |
| } |
| return; |
| } |
| /* Likewise for X op ~[]. */ |
| if (vr1.kind () == VR_ANTI_RANGE |
| && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1)) |
| { |
| extract_range_from_binary_expr (vr, code, expr_type, vr0_, &vrtem0); |
| if (!vrtem1.undefined_p ()) |
| { |
| value_range_base vrres; |
| extract_range_from_binary_expr (&vrres, code, expr_type, |
| vr0_, &vrtem1); |
| vr->union_ (&vrres); |
| } |
| return; |
| } |
| |
| /* The type of the resulting value range defaults to VR0.TYPE. */ |
| type = vr0.kind (); |
| |
| /* Refuse to operate on VARYING ranges, ranges of different kinds |
| and symbolic ranges. As an exception, we allow BIT_{AND,IOR} |
| because we may be able to derive a useful range even if one of |
| the operands is VR_VARYING or symbolic range. Similarly for |
| divisions, MIN/MAX and PLUS/MINUS. |
| |
| TODO, we may be able to derive anti-ranges in some cases. */ |
| if (code != BIT_AND_EXPR |
| && code != BIT_IOR_EXPR |
| && code != TRUNC_DIV_EXPR |
| && code != FLOOR_DIV_EXPR |
| && code != CEIL_DIV_EXPR |
| && code != EXACT_DIV_EXPR |
| && code != ROUND_DIV_EXPR |
| && code != TRUNC_MOD_EXPR |
| && code != MIN_EXPR |
| && code != MAX_EXPR |
| && code != PLUS_EXPR |
| && code != MINUS_EXPR |
| && code != RSHIFT_EXPR |
| && code != POINTER_PLUS_EXPR |
| && (vr0.varying_p () |
| || vr1.varying_p () |
| || vr0.kind () != vr1.kind () |
| || vr0.symbolic_p () |
| || vr1.symbolic_p ())) |
| { |
| vr->set_varying (); |
| return; |
| } |
| |
| /* Now evaluate the expression to determine the new range. */ |
| if (POINTER_TYPE_P (expr_type)) |
| { |
| if (code == MIN_EXPR || code == MAX_EXPR) |
| { |
| /* For MIN/MAX expressions with pointers, we only care about |
| nullness: if both are non-null, the result is non-null; if |
| both are null, the result is null; otherwise it is varying. */ |
| if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1)) |
| vr->set_nonnull (expr_type); |
| else if (range_is_null (&vr0) && range_is_null (&vr1)) |
| vr->set_null (expr_type); |
| else |
| vr->set_varying (); |
| } |
| else if (code == POINTER_PLUS_EXPR) |
| { |
| /* For pointer types, we are really only interested in asserting |
| whether the expression evaluates to non-NULL. |
| With -fno-delete-null-pointer-checks we need to be more |
| conservative. As some object might reside at address 0, |
| then some offset could be added to it and the same offset |
| subtracted again and the result would be NULL. |
| E.g. |
| static int a[12]; where &a[0] is NULL and |
| ptr = &a[6]; |
| ptr -= 6; |
| ptr will be NULL here, even when there is POINTER_PLUS_EXPR |
| where the first range doesn't include zero and the second one |
| doesn't either. As the second operand is sizetype (unsigned), |
| consider all ranges where the MSB could be set as possible |
| subtractions where the result might be NULL. */ |
| if ((!range_includes_zero_p (&vr0) |
| || !range_includes_zero_p (&vr1)) |
| && !TYPE_OVERFLOW_WRAPS (expr_type) |
| && (flag_delete_null_pointer_checks |
| || (range_int_cst_p (&vr1) |
| && !tree_int_cst_sign_bit (vr1.max ())))) |
| vr->set_nonnull (expr_type); |
| else if (range_is_null (&vr0) && range_is_null (&vr1)) |
| vr->set_null (expr_type); |
| else |
| vr->set_varying (); |
| } |
| else if (code == BIT_AND_EXPR) |
| { |
| /* For pointer types, we are really only interested in asserting |
| whether the expression evaluates to non-NULL. */ |
| if (range_is_null (&vr0) || range_is_null (&vr1)) |
| vr->set_null (expr_type); |
| else |
| vr->set_varying (); |
| } |
| else |
| vr->set_varying (); |
| |
| return; |
| } |
| |
| /* For integer ranges, apply the operation to each end of the |
| range and see what we end up with. */ |
| if (code == PLUS_EXPR || code == MINUS_EXPR) |
| { |
| /* This will normalize things such that calculating |
| [0,0] - VR_VARYING is not dropped to varying, but is |
| calculated as [MIN+1, MAX]. */ |
| if (vr0.varying_p ()) |
| vr0.set (VR_RANGE, vrp_val_min (expr_type), vrp_val_max (expr_type)); |
| if (vr1.varying_p ()) |
| vr1.set (VR_RANGE, vrp_val_min (expr_type), vrp_val_max (expr_type)); |
| |
| const bool minus_p = (code == MINUS_EXPR); |
| tree min_op0 = vr0.min (); |
| tree min_op1 = minus_p ? vr1.max () : vr1.min (); |
| tree max_op0 = vr0.max (); |
| tree max_op1 = minus_p ? vr1.min () : vr1.max (); |
| tree sym_min_op0 = NULL_TREE; |
| tree sym_min_op1 = NULL_TREE; |
| tree sym_max_op0 = NULL_TREE; |
| tree sym_max_op1 = NULL_TREE; |
| bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1; |
| |
| neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false; |
| |
| /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or |
| single-symbolic ranges, try to compute the precise resulting range, |
| but only if we know that this resulting range will also be constant |
| or single-symbolic. */ |
| if (vr0.kind () == VR_RANGE && vr1.kind () == VR_RANGE |
| && (TREE_CODE (min_op0) == INTEGER_CST |
| || (sym_min_op0 |
| = get_single_symbol (min_op0, &neg_min_op0, &min_op0))) |
| && (TREE_CODE (min_op1) == INTEGER_CST |
| || (sym_min_op1 |
| = get_single_symbol (min_op1, &neg_min_op1, &min_op1))) |
| && (!(sym_min_op0 && sym_min_op1) |
| || (sym_min_op0 == sym_min_op1 |
| && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1))) |
| && (TREE_CODE (max_op0) == INTEGER_CST |
| || (sym_max_op0 |
| = get_single_symbol (max_op0, &neg_max_op0, &max_op0))) |
| && (TREE_CODE (max_op1) == INTEGER_CST |
| || (sym_max_op1 |
| = get_single_symbol (max_op1, &neg_max_op1, &max_op1))) |
| && (!(sym_max_op0 && sym_max_op1) |
| || (sym_max_op0 == sym_max_op1 |
| && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1)))) |
| { |
| wide_int wmin, wmax; |
| wi::overflow_type min_ovf = wi::OVF_NONE; |
| wi::overflow_type max_ovf = wi::OVF_NONE; |
| |
| /* Build the bounds. */ |
| combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1); |
| combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1); |
| |
| /* If the resulting range will be symbolic, we need to eliminate any |
| explicit or implicit overflow introduced in the above computation |
| because compare_values could make an incorrect use of it. That's |
| why we require one of the ranges to be a singleton. */ |
| if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1) |
| && ((bool)min_ovf || (bool)max_ovf |
| || (min_op0 != max_op0 && min_op1 != max_op1))) |
| { |
| vr->set_varying (); |
| return; |
| } |
| |
| /* Adjust the range for possible overflow. */ |
| set_value_range_with_overflow (type, min, max, expr_type, |
| wmin, wmax, min_ovf, max_ovf); |
| if (type == VR_VARYING) |
| { |
| vr->set_varying (); |
| return; |
| } |
| |
| /* Build the symbolic bounds if needed. */ |
| adjust_symbolic_bound (min, code, expr_type, |
| sym_min_op0, sym_min_op1, |
| neg_min_op0, neg_min_op1); |
| adjust_symbolic_bound (max, code, expr_type, |
| sym_max_op0, sym_max_op1, |
| neg_max_op0, neg_max_op1); |
| } |
| else |
| { |
| /* For other cases, for example if we have a PLUS_EXPR with two |
| VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort |
| to compute a precise range for such a case. |
| ??? In general even mixed range kind operations can be expressed |
| by, for example, transforming ~[3, 5] + [1, 2] into range-only |
| operations and a union primitive: |
| [-INF, 2] + [1, 2] U [6, +INF] + [1, 2] |
| [-INF+1, 4] U [7, +INF(OVF)] |
| though usually the union is not exactly representable with |
| a single range or anti-range as the above is |
| [-INF+1, +INF(OVF)] intersected with ~[5, 6] |
| but one could use a scheme similar to equivalences for this. */ |
| vr->set_varying (); |
| return; |
| } |
| } |
| else if (code == MIN_EXPR |
| || code == MAX_EXPR) |
| { |
| wide_int wmin, wmax; |
| wide_int vr0_min, vr0_max; |
| wide_int vr1_min, vr1_max; |
| extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max); |
| extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max); |
| if (wide_int_range_min_max (wmin, wmax, code, sign, prec, |
| vr0_min, vr0_max, vr1_min, vr1_max)) |
| vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin), |
| wide_int_to_tree (expr_type, wmax)); |
| else |
| vr->set_varying (); |
| return; |
| } |
| else if (code == MULT_EXPR) |
| { |
| if (!range_int_cst_p (&vr0) |
| || !range_int_cst_p (&vr1)) |
| { |
| vr->set_varying (); |
| return; |
| } |
| extract_range_from_multiplicative_op (vr, code, &vr0, &vr1); |
| return; |
| } |
| else if (code == RSHIFT_EXPR |
| || code == LSHIFT_EXPR) |
| { |
| if (range_int_cst_p (&vr1) |
| && !wide_int_range_shift_undefined_p |
| (TYPE_SIGN (TREE_TYPE (vr1.min ())), |
| prec, |
| wi::to_wide (vr1.min ()), |
| wi::to_wide (vr1.max ()))) |
| { |
| if (code == RSHIFT_EXPR) |
| { |
| /* Even if vr0 is VARYING or otherwise not usable, we can derive |
| useful ranges just from the shift count. E.g. |
| x >> 63 for signed 64-bit x is always [-1, 0]. */ |
| if (vr0.kind () != VR_RANGE || vr0.symbolic_p ()) |
| vr0.set (VR_RANGE, vrp_val_min (expr_type), |
| vrp_val_max (expr_type)); |
| extract_range_from_multiplicative_op (vr, code, &vr0, &vr1); |
| return; |
| } |
| else if (code == LSHIFT_EXPR |
| && range_int_cst_p (&vr0)) |
| { |
| wide_int res_lb, res_ub; |
| if (wide_int_range_lshift (res_lb, res_ub, sign, prec, |
| wi::to_wide (vr0.min ()), |
| wi::to_wide (vr0.max ()), |
| wi::to_wide (vr1.min ()), |
| wi::to_wide (vr1.max ()), |
| TYPE_OVERFLOW_UNDEFINED (expr_type))) |
| { |
| min = wide_int_to_tree (expr_type, res_lb); |
| max = wide_int_to_tree (expr_type, res_ub); |
| vr->set_and_canonicalize (VR_RANGE, min, max); |
| return; |
| } |
| } |
| } |
| vr->set_varying (); |
| return; |
| } |
| else if (code == TRUNC_DIV_EXPR |
| || code == FLOOR_DIV_EXPR |
| || code == CEIL_DIV_EXPR |
| || code == EXACT_DIV_EXPR |
| || code == ROUND_DIV_EXPR) |
| { |
| wide_int dividend_min, dividend_max, divisor_min, divisor_max; |
| wide_int wmin, wmax, extra_min, extra_max; |
| bool extra_range_p; |
| |
| /* Special case explicit division by zero as undefined. */ |
| if (range_is_null (&vr1)) |
| { |
| vr->set_undefined (); |
| return; |
| } |
| |
| /* First, normalize ranges into constants we can handle. Note |
| that VR_ANTI_RANGE's of constants were already normalized |
| before arriving here. |
| |
| NOTE: As a future improvement, we may be able to do better |
| with mixed symbolic (anti-)ranges like [0, A]. See note in |
| ranges_from_anti_range. */ |
| extract_range_into_wide_ints (&vr0, sign, prec, |
| dividend_min, dividend_max); |
| extract_range_into_wide_ints (&vr1, sign, prec, |
| divisor_min, divisor_max); |
| if (!wide_int_range_div (wmin, wmax, code, sign, prec, |
| dividend_min, dividend_max, |
| divisor_min, divisor_max, |
| TYPE_OVERFLOW_UNDEFINED (expr_type), |
| extra_range_p, extra_min, extra_max)) |
| { |
| vr->set_varying (); |
| return; |
| } |
| vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin), |
| wide_int_to_tree (expr_type, wmax)); |
| if (extra_range_p) |
| { |
| value_range_base |
| extra_range (VR_RANGE, wide_int_to_tree (expr_type, extra_min), |
| wide_int_to_tree (expr_type, extra_max)); |
| vr->union_ (&extra_range); |
| } |
| return; |
| } |
| else if (code == TRUNC_MOD_EXPR) |
| { |
| if (range_is_null (&vr1)) |
| { |
| vr->set_undefined (); |
| return; |
| } |
| wide_int wmin, wmax, tmp; |
| wide_int vr0_min, vr0_max, vr1_min, vr1_max; |
| extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max); |
| extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max); |
| wide_int_range_trunc_mod (wmin, wmax, sign, prec, |
| vr0_min, vr0_max, vr1_min, vr1_max); |
| min = wide_int_to_tree (expr_type, wmin); |
| max = wide_int_to_tree (expr_type, wmax); |
| vr->set (VR_RANGE, min, max); |
| return; |
| } |
| else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR) |
| { |
| wide_int may_be_nonzero0, may_be_nonzero1; |
| wide_int must_be_nonzero0, must_be_nonzero1; |
| wide_int wmin, wmax; |
| wide_int vr0_min, vr0_max, vr1_min, vr1_max; |
| vrp_set_zero_nonzero_bits (expr_type, &vr0, |
| &may_be_nonzero0, &must_be_nonzero0); |
| vrp_set_zero_nonzero_bits (expr_type, &vr1, |
| &may_be_nonzero1, &must_be_nonzero1); |
| extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max); |
| extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max); |
| if (code == BIT_AND_EXPR) |
| { |
| if (wide_int_range_bit_and (wmin, wmax, sign, prec, |
| vr0_min, vr0_max, |
| vr1_min, vr1_max, |
| must_be_nonzero0, |
| may_be_nonzero0, |
| must_be_nonzero1, |
| may_be_nonzero1)) |
| { |
| min = wide_int_to_tree (expr_type, wmin); |
| max = wide_int_to_tree (expr_type, wmax); |
| vr->set (VR_RANGE, min, max); |
| } |
| else |
| vr->set_varying (); |
| return; |
| } |
| else if (code == BIT_IOR_EXPR) |
| { |
| if (wide_int_range_bit_ior (wmin, wmax, sign, |
| vr0_min, vr0_max, |
| vr1_min, vr1_max, |
| must_be_nonzero0, |
| may_be_nonzero0, |
| must_be_nonzero1, |
| may_be_nonzero1)) |
| { |
| min = wide_int_to_tree (expr_type, wmin); |
| max = wide_int_to_tree (expr_type, wmax); |
| vr->set (VR_RANGE, min, max); |
| } |
| else |
| vr->set_varying (); |
| return; |
| } |
| else if (code == BIT_XOR_EXPR) |
| { |
| if (wide_int_range_bit_xor (wmin, wmax, sign, prec, |
| must_be_nonzero0, |
| may_be_nonzero0, |
| must_be_nonzero1, |
| may_be_nonzero1)) |
| { |
| min = wide_int_to_tree (expr_type, wmin); |
| max = wide_int_to_tree (expr_type, wmax); |
| vr->set (VR_RANGE, min, max); |
| } |
| else |
| vr->set_varying (); |
| return; |
| } |
| } |
| else |
| gcc_unreachable (); |
| |
| /* If either MIN or MAX overflowed, then set the resulting range to |
| VARYING. */ |
| if (min == NULL_TREE |
| || TREE_OVERFLOW_P (min) |
| || max == NULL_TREE |
| || TREE_OVERFLOW_P (max)) |
| { |
| vr->set_varying (); |
| return; |
| } |
| |
| /* We punt for [-INF, +INF]. |
| We learn nothing when we have INF on both sides. |
| Note that we do accept [-INF, -INF] and [+INF, +INF]. */ |
| if (vrp_val_is_min (min) && vrp_val_is_max (max)) |
| { |
| vr->set_varying (); |
| return; |
| } |
| |
| cmp = compare_values (min, max); |
| if (cmp == -2 || cmp == 1) |
| { |
| /* If the new range has its limits swapped around (MIN > MAX), |
| then the operation caused one of them to wrap around, mark |
| the new range VARYING. */ |
| vr->set_varying (); |
| } |
| else |
| vr->set (type, min, max); |
| } |
| |
/* Extract range information from a unary operation CODE based on
   the range of its operand *VR0, whose type is OP0_TYPE, and store
   the resulting range of type TYPE in *VR.  */
| |
| void |
| extract_range_from_unary_expr (value_range_base *vr, |
| enum tree_code code, tree type, |
| const value_range_base *vr0_, tree op0_type) |
| { |
| signop sign = TYPE_SIGN (type); |
| unsigned int prec = TYPE_PRECISION (type); |
| value_range_base vr0 = *vr0_; |
| value_range_base vrtem0, vrtem1; |
| |
| /* VRP only operates on integral and pointer types. */ |
| if (!(INTEGRAL_TYPE_P (op0_type) |
| || POINTER_TYPE_P (op0_type)) |
| || !(INTEGRAL_TYPE_P (type) |
| || POINTER_TYPE_P (type))) |
| { |
| vr->set_varying (); |
| return; |
| } |
| |
| /* If VR0 is UNDEFINED, so is the result. */ |
| if (vr0.undefined_p ()) |
| { |
| vr->set_undefined (); |
| return; |
| } |
| |
| /* Handle operations that we express in terms of others. */ |
  if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
| { |
| /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */ |
| *vr = vr0; |
| return; |
| } |
| else if (code == NEGATE_EXPR) |
| { |
| /* -X is simply 0 - X, so re-use existing code that also handles |
| anti-ranges fine. */ |
| value_range_base zero; |
| zero.set (build_int_cst (type, 0)); |
| extract_range_from_binary_expr (vr, MINUS_EXPR, type, &zero, &vr0); |
| return; |
| } |
| else if (code == BIT_NOT_EXPR) |
| { |
| /* ~X is simply -1 - X, so re-use existing code that also handles |
| anti-ranges fine. */ |
| value_range_base minusone; |
| minusone.set (build_int_cst (type, -1)); |
| extract_range_from_binary_expr (vr, MINUS_EXPR, type, &minusone, &vr0); |
| return; |
| } |
| |
| /* Now canonicalize anti-ranges to ranges when they are not symbolic |
| and express op ~[] as (op []') U (op []''). */ |
| if (vr0.kind () == VR_ANTI_RANGE |
| && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) |
| { |
| extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type); |
| if (!vrtem1.undefined_p ()) |
| { |
| value_range_base vrres; |
| extract_range_from_unary_expr (&vrres, code, type, |
| &vrtem1, op0_type); |
| vr->union_ (&vrres); |
| } |
| return; |
| } |
| |
| if (CONVERT_EXPR_CODE_P (code)) |
| { |
| tree inner_type = op0_type; |
| tree outer_type = type; |
| |
| /* If the expression involves a pointer, we are only interested in |
| determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). |
| |
| This may lose precision when converting (char *)~[0,2] to |
| int, because we'll forget that the pointer can also not be 1 |
| or 2. In practice we don't care, as this is some idiot |
| storing a magic constant to a pointer. */ |
| if (POINTER_TYPE_P (type) || POINTER_TYPE_P (op0_type)) |
| { |
| if (!range_includes_zero_p (&vr0)) |
| vr->set_nonnull (type); |
| else if (range_is_null (&vr0)) |
| vr->set_null (type); |
| else |
| vr->set_varying (); |
| return; |
| } |
| |
| /* The POINTER_TYPE_P code above will have dealt with all |
| pointer anti-ranges. Any remaining anti-ranges at this point |
| will be integer conversions from SSA names that will be |
| normalized into VARYING. For instance: ~[x_55, x_55]. */ |
| gcc_assert (vr0.kind () != VR_ANTI_RANGE |
| || TREE_CODE (vr0.min ()) != INTEGER_CST); |
| |
      /* NOTE: Previously we returned VARYING for all symbolic ranges,
	 but we can do better by treating them as [TYPE_MIN, TYPE_MAX].
	 For example, converting [SYM, SYM] from INT to LONG UNSIGNED,
	 we can return: ~[0x80000000, 0xffffffff7fffffff].

	 We were also failing to convert ~[0,0] from char* to unsigned,
	 instead choosing to return VR_VARYING.  Now we return ~[0,0].  */
| wide_int vr0_min, vr0_max, wmin, wmax; |
| signop inner_sign = TYPE_SIGN (inner_type); |
| signop outer_sign = TYPE_SIGN (outer_type); |
| unsigned inner_prec = TYPE_PRECISION (inner_type); |
| unsigned outer_prec = TYPE_PRECISION (outer_type); |
| extract_range_into_wide_ints (&vr0, inner_sign, inner_prec, |
| vr0_min, vr0_max); |
| if (wide_int_range_convert (wmin, wmax, |
| inner_sign, inner_prec, |
| outer_sign, outer_prec, |
| vr0_min, vr0_max)) |
| { |
| tree min = wide_int_to_tree (outer_type, wmin); |
| tree max = wide_int_to_tree (outer_type, wmax); |
| vr->set_and_canonicalize (VR_RANGE, min, max); |
| } |
| else |
| vr->set_varying (); |
| return; |
| } |
| else if (code == ABS_EXPR) |
| { |
| wide_int wmin, wmax; |
| wide_int vr0_min, vr0_max; |
| extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max); |
| if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max, |
| TYPE_OVERFLOW_UNDEFINED (type))) |
| vr->set (VR_RANGE, wide_int_to_tree (type, wmin), |
| wide_int_to_tree (type, wmax)); |
| else |
| vr->set_varying (); |
| return; |
| } |
| else if (code == ABSU_EXPR) |
| { |
| wide_int wmin, wmax; |
| wide_int vr0_min, vr0_max; |
| extract_range_into_wide_ints (&vr0, SIGNED, prec, vr0_min, vr0_max); |
| wide_int_range_absu (wmin, wmax, prec, vr0_min, vr0_max); |
| vr->set (VR_RANGE, wide_int_to_tree (type, wmin), |
| wide_int_to_tree (type, wmax)); |
| return; |
| } |
| |
| /* For unhandled operations fall back to varying. */ |
| vr->set_varying (); |
| return; |
| } |
| |
| /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V, |
| create a new SSA name N and return the assertion assignment |
| 'N = ASSERT_EXPR <V, V OP W>'. */ |
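
/* For example, given the predicate 'v_1 > 0' this builds the
   (hypothetical) assignment 'v_2 = ASSERT_EXPR <v_1, v_1 > 0>';
   v_2 becomes a new definition for v_1 so that dominated uses can be
   rewritten to it.  */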
| |
| static gimple * |
| build_assert_expr_for (tree cond, tree v) |
| { |
| tree a; |
| gassign *assertion; |
| |
| gcc_assert (TREE_CODE (v) == SSA_NAME |
| && COMPARISON_CLASS_P (cond)); |
| |
| a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond); |
| assertion = gimple_build_assign (NULL_TREE, a); |
| |
  /* The new ASSERT_EXPR creates a new SSA name that replaces the
     operand of the ASSERT_EXPR.  Create it so the new name and the old one
     are registered in the replacement table so that we can fix the SSA web
     after adding all the ASSERT_EXPRs.  */
| tree new_def = create_new_def_for (v, assertion, NULL); |
| /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain |
| given we have to be able to fully propagate those out to re-create |
| valid SSA when removing the asserts. */ |
| if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v)) |
| SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1; |
| |
| return assertion; |
| } |
| |
| |
/* Return true if STMT is a GIMPLE_COND whose comparison involves
   floating point values.  */
| |
| static inline bool |
| fp_predicate (gimple *stmt) |
| { |
| GIMPLE_CHECK (stmt, GIMPLE_COND); |
| |
| return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt))); |
| } |
| |
| /* If the range of values taken by OP can be inferred after STMT executes, |
| return the comparison code (COMP_CODE_P) and value (VAL_P) that |
| describes the inferred range. Return true if a range could be |
| inferred. */ |
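
/* For example, after a (hypothetical) statement '*p_4 = 1;' executes,
   p_4 != 0B can be inferred, since dereferencing a null pointer would
   have invoked undefined behavior.  */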
| |
| bool |
| infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p) |
| { |
| *val_p = NULL_TREE; |
| *comp_code_p = ERROR_MARK; |
| |
| /* Do not attempt to infer anything in names that flow through |
| abnormal edges. */ |
| if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op)) |
| return false; |
| |
| /* If STMT is the last statement of a basic block with no normal |
| successors, there is no point inferring anything about any of its |
| operands. We would not be able to find a proper insertion point |
| for the assertion, anyway. */ |
| if (stmt_ends_bb_p (stmt)) |
| { |
| edge_iterator ei; |
| edge e; |
| |
| FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs) |
| if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH))) |
| break; |
| if (e == NULL) |
| return false; |
| } |
| |
| if (infer_nonnull_range (stmt, op)) |
| { |
| *val_p = build_int_cst (TREE_TYPE (op), 0); |
| *comp_code_p = NE_EXPR; |
| return true; |
| } |
| |
| return false; |
| } |
| |
| |
| void dump_asserts_for (FILE *, tree); |
| void debug_asserts_for (tree); |
| void dump_all_asserts (FILE *); |
| void debug_all_asserts (void); |
| |
| /* Dump all the registered assertions for NAME to FILE. */ |
| |
| void |
| dump_asserts_for (FILE *file, tree name) |
| { |
| assert_locus *loc; |
| |
| fprintf (file, "Assertions to be inserted for "); |
| print_generic_expr (file, name); |
| fprintf (file, "\n"); |
| |
| loc = asserts_for[SSA_NAME_VERSION (name)]; |
| while (loc) |
| { |
| fprintf (file, "\t"); |
| print_gimple_stmt (file, gsi_stmt (loc->si), 0); |
| fprintf (file, "\n\tBB #%d", loc->bb->index); |
| if (loc->e) |
| { |
| fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index, |
| loc->e->dest->index); |
| dump_edge_info (file, loc->e, dump_flags, 0); |
| } |
| fprintf (file, "\n\tPREDICATE: "); |
| print_generic_expr (file, loc->expr); |
| fprintf (file, " %s ", get_tree_code_name (loc->comp_code)); |
| print_generic_expr (file, loc->val); |
| fprintf (file, "\n\n"); |
| loc = loc->next; |
| } |
| |
| fprintf (file, "\n"); |
| } |
| |
| |
| /* Dump all the registered assertions for NAME to stderr. */ |
| |
| DEBUG_FUNCTION void |
| debug_asserts_for (tree name) |
| { |
| dump_asserts_for (stderr, name); |
| } |
| |
| |
| /* Dump all the registered assertions for all the names to FILE. */ |
| |
| void |
| dump_all_asserts (FILE *file) |
| { |
| unsigned i; |
| bitmap_iterator bi; |
| |
| fprintf (file, "\nASSERT_EXPRs to be inserted\n\n"); |
| EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) |
| dump_asserts_for (file, ssa_name (i)); |
| fprintf (file, "\n"); |
| } |
| |
| |
| /* Dump all the registered assertions for all the names to stderr. */ |
| |
| DEBUG_FUNCTION void |
| debug_all_asserts (void) |
| { |
| dump_all_asserts (stderr); |
| } |
| |
| /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */ |
| |
| static void |
| add_assert_info (vec<assert_info> &asserts, |
| tree name, tree expr, enum tree_code comp_code, tree val) |
| { |
| assert_info info; |
| info.comp_code = comp_code; |
| info.name = name; |
| if (TREE_OVERFLOW_P (val)) |
| val = drop_tree_overflow (val); |
| info.val = val; |
| info.expr = expr; |
| asserts.safe_push (info); |
| if (dump_enabled_p ()) |
| dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS, |
| "Adding assert for %T from %T %s %T\n", |
| name, expr, op_symbol_code (comp_code), val); |
| } |
| |
| /* If NAME doesn't have an ASSERT_EXPR registered for asserting |
| 'EXPR COMP_CODE VAL' at a location that dominates block BB or |
| E->DEST, then register this location as a possible insertion point |
| for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>. |
| |
| BB, E and SI provide the exact insertion point for the new |
| ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted |
| on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on |
| BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E |
| must not be NULL. */ |
| |
| static void |
| register_new_assert_for (tree name, tree expr, |
| enum tree_code comp_code, |
| tree val, |
| basic_block bb, |
| edge e, |
| gimple_stmt_iterator si) |
| { |
| assert_locus *n, *loc, *last_loc; |
| basic_block dest_bb; |
| |
| gcc_checking_assert (bb == NULL || e == NULL); |
| |
| if (e == NULL) |
| gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND |
| && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH); |
| |
| /* Never build an assert comparing against an integer constant with |
| TREE_OVERFLOW set. This confuses our undefined overflow warning |
| machinery. */ |
| if (TREE_OVERFLOW_P (val)) |
| val = drop_tree_overflow (val); |
| |
| /* The new assertion A will be inserted at BB or E. We need to |
| determine if the new location is dominated by a previously |
| registered location for A. If we are doing an edge insertion, |
| assume that A will be inserted at E->DEST. Note that this is not |
| necessarily true. |
| |
| If E is a critical edge, it will be split. But even if E is |
| split, the new block will dominate the same set of blocks that |
| E->DEST dominates. |
| |
     The reverse, however, is not true: blocks dominated by E->DEST
     will not be dominated by the new block created to split E.  So,
     if the insertion location is on a critical edge, we will not use
     the new location to move another assertion previously registered
     at a block dominated by E->DEST.  */
| dest_bb = (bb) ? bb : e->dest; |
| |
| /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and |
| VAL at a block dominating DEST_BB, then we don't need to insert a new |
| one. Similarly, if the same assertion already exists at a block |
| dominated by DEST_BB and the new location is not on a critical |
| edge, then update the existing location for the assertion (i.e., |
| move the assertion up in the dominance tree). |
| |
| Note, this is implemented as a simple linked list because there |
| should not be more than a handful of assertions registered per |
| name. If this becomes a performance problem, a table hashed by |
| COMP_CODE and VAL could be implemented. */ |
| loc = asserts_for[SSA_NAME_VERSION (name)]; |
| last_loc = loc; |
| while (loc) |
| { |
| if (loc->comp_code == comp_code |
| && (loc->val == val |
| || operand_equal_p (loc->val, val, 0)) |
| && (loc->expr == expr |
| || operand_equal_p (loc->expr, expr, 0))) |
| { |
| /* If E is not a critical edge and DEST_BB |
| dominates the existing location for the assertion, move |
| the assertion up in the dominance tree by updating its |
| location information. */ |
| if ((e == NULL || !EDGE_CRITICAL_P (e)) |
| && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb)) |
| { |
| loc->bb = dest_bb; |
| loc->e = e; |
| loc->si = si; |
| return; |
| } |
| } |
| |
| /* Update the last node of the list and move to the next one. */ |
| last_loc = loc; |
| loc = loc->next; |
| } |
| |
| /* If we didn't find an assertion already registered for |
| NAME COMP_CODE VAL, add a new one at the end of the list of |
| assertions associated with NAME. */ |
| n = XNEW (struct assert_locus); |
| n->bb = dest_bb; |
| n->e = e; |
| n->si = si; |
| n->comp_code = comp_code; |
| n->val = val; |
| n->expr = expr; |
| n->next = NULL; |
| |
| if (last_loc) |
| last_loc->next = n; |
| else |
| asserts_for[SSA_NAME_VERSION (name)] = n; |
| |
| bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name)); |
| } |
| |
| /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME. |
| Extract a suitable test code and value and store them into *CODE_P and |
| *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P. |
| |
| If no extraction was possible, return FALSE, otherwise return TRUE. |
| |
| If INVERT is true, then we invert the result stored into *CODE_P. */ |
| |
| static bool |
| extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code, |
| tree cond_op0, tree cond_op1, |
| bool invert, enum tree_code *code_p, |
| tree *val_p) |
| { |
| enum tree_code comp_code; |
| tree val; |
| |
  /* We have a comparison of the form NAME COMP VAL or
     VAL COMP NAME.  */
| if (name == cond_op1) |
| { |
| /* If the predicate is of the form VAL COMP NAME, flip |
| COMP around because we need to register NAME as the |
| first operand in the predicate. */ |
| comp_code = swap_tree_comparison (cond_code); |
| val = cond_op0; |
| } |
| else if (name == cond_op0) |
| { |
| /* The comparison is of the form NAME COMP VAL, so the |
| comparison code remains unchanged. */ |
| comp_code = cond_code; |
| val = cond_op1; |
| } |
| else |
| gcc_unreachable (); |
| |
| /* Invert the comparison code as necessary. */ |
| if (invert) |
| comp_code = invert_tree_comparison (comp_code, 0); |
| |
| /* VRP only handles integral and pointer types. */ |
| if (! INTEGRAL_TYPE_P (TREE_TYPE (val)) |
| && ! POINTER_TYPE_P (TREE_TYPE (val))) |
| return false; |
| |
| /* Do not register always-false predicates. |
| FIXME: this works around a limitation in fold() when dealing with |
| enumerations. Given 'enum { N1, N2 } x;', fold will not |
| fold 'if (x > N2)' to 'if (0)'. */ |
| if ((comp_code == GT_EXPR || comp_code == LT_EXPR) |
| && INTEGRAL_TYPE_P (TREE_TYPE (val))) |
| { |
| tree min = TYPE_MIN_VALUE (TREE_TYPE (val)); |
| tree max = TYPE_MAX_VALUE (TREE_TYPE (val)); |
| |
| if (comp_code == GT_EXPR |
| && (!max |
| || compare_values (val, max) == 0)) |
| return false; |
| |
| if (comp_code == LT_EXPR |
| && (!min |
| || compare_values (val, min) == 0)) |
| return false; |
| } |
| *code_p = comp_code; |
| *val_p = val; |
| return true; |
| } |
| |
/* Find the smallest RES such that RES > VAL && (RES & MASK) == RES,
   if any (otherwise return VAL).  VAL and MASK must be zero-extended
   for precision PREC.  If SGNBIT is non-zero, first xor VAL with
   SGNBIT (to transform signed values into unsigned) and at the end
   xor SGNBIT back.  */
| |
| static wide_int |
| masked_increment (const wide_int &val_in, const wide_int &mask, |
| const wide_int &sgnbit, unsigned int prec) |
| { |
| wide_int bit = wi::one (prec), res; |
| unsigned int i; |
| |
| wide_int val = val_in ^ sgnbit; |
| for (i = 0; i < prec; i++, bit += bit) |
| { |
| res = mask; |
| if ((res & bit) == 0) |
| continue; |
| res = bit - 1; |
| res = wi::bit_and_not (val + bit, res); |
| res &= mask; |
| if (wi::gtu_p (res, val)) |
| return res ^ sgnbit; |
| } |
| return val ^ sgnbit; |
| } |
| |
| /* Helper for overflow_comparison_p |
| |
| OP0 CODE OP1 is a comparison. Examine the comparison and potentially |
| OP1's defining statement to see if it ultimately has the form |
| OP0 CODE (OP0 PLUS INTEGER_CST) |
| |
| If so, return TRUE indicating this is an overflow test and store into |
| *NEW_CST an updated constant that can be used in a narrowed range test. |
| |
| REVERSED indicates if the comparison was originally: |
| |
| OP1 CODE' OP0. |
| |
| This affects how we build the updated constant. */ |
| |
| static bool |
| overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1, |
| bool follow_assert_exprs, bool reversed, tree *new_cst) |
| { |
| /* See if this is a relational operation between two SSA_NAMES with |
| unsigned, overflow wrapping values. If so, check it more deeply. */ |
| if ((code == LT_EXPR || code == LE_EXPR |
| || code == GE_EXPR || code == GT_EXPR) |
| && TREE_CODE (op0) == SSA_NAME |
| && TREE_CODE (op1) == SSA_NAME |
| && INTEGRAL_TYPE_P (TREE_TYPE (op0)) |
| && TYPE_UNSIGNED (TREE_TYPE (op0)) |
| && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0))) |
| { |
| gimple *op1_def = SSA_NAME_DEF_STMT (op1); |
| |
| /* If requested, follow any ASSERT_EXPRs backwards for OP1. */ |
| if (follow_assert_exprs) |
| { |
| while (gimple_assign_single_p (op1_def) |
| && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR) |
| { |
| op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0); |
| if (TREE_CODE (op1) != SSA_NAME) |
| break; |
| op1_def = SSA_NAME_DEF_STMT (op1); |
| } |
| } |
| |
| /* Now look at the defining statement of OP1 to see if it adds |
| or subtracts a nonzero constant from another operand. */ |
| if (op1_def |
| && is_gimple_assign (op1_def) |
| && gimple_assign_rhs_code (op1_def) == PLUS_EXPR |
| && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST |
| && !integer_zerop (gimple_assign_rhs2 (op1_def))) |
| { |
| tree target = gimple_assign_rhs1 (op1_def); |
| |
| /* If requested, follow ASSERT_EXPRs backwards for op0 looking |
| for one where TARGET appears on the RHS. */ |
| if (follow_assert_exprs) |
| { |
| /* Now see if that "other operand" is op0, following the chain |
| of ASSERT_EXPRs if necessary. */ |
| gimple *op0_def = SSA_NAME_DEF_STMT (op0); |
| while (op0 != target |
| && gimple_assign_single_p (op0_def) |
| && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR) |
| { |
| op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0); |
| if (TREE_CODE (op0) != SSA_NAME) |
| break; |
| op0_def = SSA_NAME_DEF_STMT (op0); |
| } |
| } |
| |
| /* If we did not find our target SSA_NAME, then this is not |
| an overflow test. */ |
| if (op0 != target) |
| return false; |
| |
| tree type = TREE_TYPE (op0); |
| wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED); |
| tree inc = gimple_assign_rhs2 (op1_def); |
| if (reversed) |
| *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc)); |
| else |
| *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc)); |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially |
| OP1's defining statement to see if it ultimately has the form |
| OP0 CODE (OP0 PLUS INTEGER_CST) |
| |
| If so, return TRUE indicating this is an overflow test and store into |
| *NEW_CST an updated constant that can be used in a narrowed range test. |
| |
| These statements are left as-is in the IL to facilitate discovery of |
| {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But |
| the alternate range representation is often useful within VRP. */ |
| |
| bool |
| overflow_comparison_p (tree_code code, tree name, tree val, |
| bool use_equiv_p, tree *new_cst) |
| { |
| if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst)) |
| return true; |
| return overflow_comparison_p_1 (swap_tree_comparison (code), val, name, |
| use_equiv_p, true, new_cst); |
| } |
| |
| |
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND_OP0 COND_CODE COND_OP1 contributing to the
   conditional jump.  Invert the condition if INVERT is true.
   Push the created assertions to ASSERTS.  */
| |
| static void |
| register_edge_assert_for_2 (tree name, edge e, |
| enum tree_code cond_code, |
| tree cond_op0, tree cond_op1, bool invert, |
| vec<assert_info> &asserts) |
| { |
| tree val; |
| enum tree_code comp_code; |
| |
| if (!extract_code_and_val_from_cond_with_ops (name, cond_code, |
| cond_op0, |
| cond_op1, |
| invert, &comp_code, &val)) |
| return; |
| |
| /* Queue the assert. */ |
| tree x; |
| if (overflow_comparison_p (comp_code, name, val, false, &x)) |
| { |
| enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR) |
| ? GT_EXPR : LE_EXPR); |
| add_assert_info (asserts, name, name, new_code, x); |
| } |
| add_assert_info (asserts, name, name, comp_code, val); |
| |
| /* In the case of NAME <= CST and NAME being defined as |
| NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2 |
| and NAME2 <= CST - CST2. We can do the same for NAME > CST. |
| This catches range and anti-range tests. */ |
| if ((comp_code == LE_EXPR |
| || comp_code == GT_EXPR) |
| && TREE_CODE (val) == INTEGER_CST |
| && TYPE_UNSIGNED (TREE_TYPE (val))) |
| { |
| gimple *def_stmt = SSA_NAME_DEF_STMT (name); |
| tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE; |
| |
| /* Extract CST2 from the (optional) addition. */ |
| if (is_gimple_assign (def_stmt) |
| && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR) |
| { |
| name2 = gimple_assign_rhs1 (def_stmt); |
| cst2 = gimple_assign_rhs2 (def_stmt); |
| if (TREE_CODE (name2) == SSA_NAME |
| && TREE_CODE (cst2) == INTEGER_CST) |
| def_stmt = SSA_NAME_DEF_STMT (name2); |
| } |
| |
| /* Extract NAME2 from the (optional) sign-changing cast. */ |
| if (gimple_assign_cast_p (def_stmt)) |
| { |
| if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)) |
| && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt))) |
| && (TYPE_PRECISION (gimple_expr_type (def_stmt)) |
| == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))) |
| name3 = gimple_assign_rhs1 (def_stmt); |
| } |
| |
| /* If name3 is used later, create an ASSERT_EXPR for it. */ |
| if (name3 != NULL_TREE |
| && TREE_CODE (name3) == SSA_NAME |
| && (cst2 == NULL_TREE |
| || TREE_CODE (cst2) == INTEGER_CST) |
| && INTEGRAL_TYPE_P (TREE_TYPE (name3))) |
| { |
| tree tmp; |
| |
| /* Build an expression for the range test. */ |
| tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3); |
| if (cst2 != NULL_TREE) |
| tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); |
| add_assert_info (asserts, name3, tmp, comp_code, val); |
| } |
| |
| /* If name2 is used later, create an ASSERT_EXPR for it. */ |
| if (name2 != NULL_TREE |
| && TREE_CODE (name2) == SSA_NAME |
| && TREE_CODE (cst2) == INTEGER_CST |
| && INTEGRAL_TYPE_P (TREE_TYPE (name2))) |
| { |
| tree tmp; |
| |
| /* Build an expression for the range test. */ |
| tmp = name2; |
| if (TREE_TYPE (name) != TREE_TYPE (name2)) |
| tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp); |
| if (cst2 != NULL_TREE) |
| tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); |
| add_assert_info (asserts, name2, tmp, comp_code, val); |
| } |
| } |
| |
  /* In the case of post-increment and post-decrement tests like
     if (i++) ... where the incremented or decremented value is used
     on the edge, the extra name we want to assert for is not on the
     def chain of the name compared.  Instead it is in the set of
     use stmts.
     Similar cases happen for conversions that were simplified through
     fold_{sign_changed,widened}_comparison.  */
| if ((comp_code == NE_EXPR |
| || comp_code == EQ_EXPR) |
| && TREE_CODE (val) == INTEGER_CST) |
| { |
| imm_use_iterator ui; |
| gimple *use_stmt; |
| FOR_EACH_IMM_USE_STMT (use_stmt, ui, name) |
| { |
| if (!is_gimple_assign (use_stmt)) |
| continue; |
| |
	  /* Only consider use statements whose block dominates the
	     predecessor E->SRC.  */
| if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt))) |
| continue; |
| |
| tree name2 = gimple_assign_lhs (use_stmt); |
| if (TREE_CODE (name2) != SSA_NAME) |
| continue; |
| |
| enum tree_code code = gimple_assign_rhs_code (use_stmt); |
| tree cst; |
| if (code == PLUS_EXPR |
| || code == MINUS_EXPR) |
| { |
| cst = gimple_assign_rhs2 (use_stmt); |
| if (TREE_CODE (cst) != INTEGER_CST) |
| continue; |
| cst = int_const_binop (code, val, cst); |
| } |
| else if (CONVERT_EXPR_CODE_P (code)) |
| { |
| /* For truncating conversions we cannot record |
| an inequality. */ |
| if (comp_code == NE_EXPR |
| && (TYPE_PRECISION (TREE_TYPE (name2)) |
| < TYPE_PRECISION (TREE_TYPE (name)))) |
| continue; |
| cst = fold_convert (TREE_TYPE (name2), val); |
| } |
| else |
| continue; |
| |
| if (TREE_OVERFLOW_P (cst)) |
| cst = drop_tree_overflow (cst); |
| add_assert_info (asserts, name2, name2, comp_code, cst); |
| } |
| } |
| |
| if (TREE_CODE_CLASS (comp_code) == tcc_comparison |
| && TREE_CODE (val) == INTEGER_CST) |
| { |
| gimple *def_stmt = SSA_NAME_DEF_STMT (name); |
| tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE; |
| tree val2 = NULL_TREE; |
| unsigned int prec = TYPE_PRECISION (TREE_TYPE (val)); |
| wide_int mask = wi::zero (prec); |
| unsigned int nprec = prec; |
| enum tree_code rhs_code = ERROR_MARK; |
| |
| if (is_gimple_assign (def_stmt)) |
| rhs_code = gimple_assign_rhs_code (def_stmt); |
| |
| /* In the case of NAME != CST1 where NAME = A +- CST2 we can |
| assert that A != CST1 -+ CST2. */ |
| if ((comp_code == EQ_EXPR || comp_code == NE_EXPR) |
| && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR)) |
| { |
| tree op0 = gimple_assign_rhs1 (def_stmt); |
| tree op1 = gimple_assign_rhs2 (def_stmt); |
| if (TREE_CODE (op0) == SSA_NAME |
| && TREE_CODE (op1) == INTEGER_CST) |
| { |
| enum tree_code reverse_op = (rhs_code == PLUS_EXPR |
| ? MINUS_EXPR : PLUS_EXPR); |
| op1 = int_const_binop (reverse_op, val, op1); |
| if (TREE_OVERFLOW (op1)) |
| op1 = drop_tree_overflow (op1); |
| add_assert_info (asserts, op0, op0, comp_code, op1); |
| } |
| } |
| |
| /* Add asserts for NAME cmp CST and NAME being defined |
| as NAME = (int) NAME2. */ |
| if (!TYPE_UNSIGNED (TREE_TYPE (val)) |
| && (comp_code == LE_EXPR || comp_code == LT_EXPR |
| || comp_code == GT_EXPR || comp_code == GE_EXPR) |
| && gimple_assign_cast_p (def_stmt)) |
| { |
| name2 = gimple_assign_rhs1 (def_stmt); |
| if (CONVERT_EXPR_CODE_P (rhs_code) |
| && TREE_CODE (name2) == SSA_NAME |
| && INTEGRAL_TYPE_P (TREE_TYPE (name2)) |
| && TYPE_UNSIGNED (TREE_TYPE (name2)) |
| && prec == TYPE_PRECISION (TREE_TYPE (name2)) |
| && (comp_code == LE_EXPR || comp_code == GT_EXPR |
| || !tree_int_cst_equal (val, |
| TYPE_MIN_VALUE (TREE_TYPE (val))))) |
| { |
| tree tmp, cst; |
| enum tree_code new_comp_code = comp_code; |
| |
| cst = fold_convert (TREE_TYPE (name2), |
| TYPE_MIN_VALUE (TREE_TYPE (val))); |
| /* Build an expression for the range test. */ |
| tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst); |
| cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst, |
| fold_convert (TREE_TYPE (name2), val)); |
| if (comp_code == LT_EXPR || comp_code == GE_EXPR) |
| { |
| new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR; |
| cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst, |
| build_int_cst (TREE_TYPE (name2), 1)); |
| } |
| add_assert_info (asserts, name2, tmp, new_comp_code, cst); |
| } |
| } |
| |
| /* Add asserts for NAME cmp CST and NAME being defined as |
| NAME = NAME2 >> CST2. |
| |
| Extract CST2 from the right shift. */ |
| if (rhs_code == RSHIFT_EXPR) |
| { |
| name2 = gimple_assign_rhs1 (def_stmt); |
| cst2 = gimple_assign_rhs2 (def_stmt); |
| if (TREE_CODE (name2) == SSA_NAME |
| && tree_fits_uhwi_p (cst2) |
| && INTEGRAL_TYPE_P (TREE_TYPE (name2)) |
| && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1) |
| && type_has_mode_precision_p (TREE_TYPE (val))) |
| { |
| mask = wi::mask (tree_to_uhwi (cst2), false, prec); |
| val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2); |
| } |
| } |
| if (val2 != NULL_TREE |
| && TREE_CODE (val2) == INTEGER_CST |
| && simple_cst_equal (fold_build2 (RSHIFT_EXPR, |
| TREE_TYPE (val), |
| val2, cst2), val)) |
| { |
| enum tree_code new_comp_code = comp_code; |
| tree tmp, new_val; |
| |
| tmp = name2; |
| if (comp_code == EQ_EXPR || comp_code == NE_EXPR) |
| { |
| if (!TYPE_UNSIGNED (TREE_TYPE (val))) |
| { |
| tree type = build_nonstandard_integer_type (prec, 1); |
| tmp = build1 (NOP_EXPR, type, name2); |
| val2 = fold_convert (type, val2); |
| } |
| tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2); |
| new_val = wide_int_to_tree (TREE_TYPE (tmp), mask); |
| new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR; |
| } |
| else if (comp_code == LT_EXPR || comp_code == GE_EXPR) |
| { |
| wide_int minval |
| = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val))); |
| new_val = val2; |
| if (minval == wi::to_wide (new_val)) |
| new_val = NULL_TREE; |
| } |
| else |
| { |
| wide_int maxval |
| = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val))); |
| mask |= wi::to_wide (val2); |
| if (wi::eq_p (mask, maxval)) |
| new_val = NULL_TREE; |
| else |
| new_val = wide_int_to_tree (TREE_TYPE (val2), mask); |
| } |
| |
| if (new_val) |
| add_assert_info (asserts, name2, tmp, new_comp_code, new_val); |
| } |
| |
  /* If we have a conversion that doesn't change the value of the
     source, simply register the same assert for it.  */
| if (CONVERT_EXPR_CODE_P (rhs_code)) |
| { |
| wide_int rmin, rmax; |
| tree rhs1 = gimple_assign_rhs1 (def_stmt); |
| if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)) |
| && TREE_CODE (rhs1) == SSA_NAME |
| /* Make sure the relation preserves the upper/lower boundary of |
| the range conservatively. */ |
| && (comp_code == NE_EXPR |
| || comp_code == EQ_EXPR |
| || (TYPE_SIGN (TREE_TYPE (name)) |
| == TYPE_SIGN (TREE_TYPE (rhs1))) |
| || ((comp_code == LE_EXPR |
| || comp_code == LT_EXPR) |
| && !TYPE_UNSIGNED (TREE_TYPE (rhs1))) |
| || ((comp_code == GE_EXPR |
| || comp_code == GT_EXPR) |
| && TYPE_UNSIGNED (TREE_TYPE (rhs1)))) |
| /* And the conversion does not alter the value we compare |
| against and all values in rhs1 can be represented in |
| the converted to type. */ |
| && int_fits_type_p (val, TREE_TYPE (rhs1)) |
| && ((TYPE_PRECISION (TREE_TYPE (name)) |
| > TYPE_PRECISION (TREE_TYPE (rhs1))) |
| || (get_range_info (rhs1, &rmin, &rmax) == VR_RANGE |
| && wi::fits_to_tree_p |
| (widest_int::from (rmin, |
| TYPE_SIGN (TREE_TYPE (rhs1))), |
| TREE_TYPE (name)) |
| && wi::fits_to_tree_p |
| (widest_int::from (rmax, |
| TYPE_SIGN (TREE_TYPE (rhs1))), |
| TREE_TYPE (name))))) |
| add_assert_info (asserts, rhs1, rhs1, |
| comp_code, fold_convert (TREE_TYPE (rhs1), val)); |
| } |
| |
      /* Add asserts for NAME cmp CST and NAME being defined as
	 NAME = NAME2 & CST2.

	 Extract CST2 from the and.

	 Also handle
	 NAME = (unsigned) NAME2;
	 casts where NAME's type is unsigned and has smaller precision
	 than NAME2's type, as if it were NAME = NAME2 & MASK.  */
| names[0] = NULL_TREE; |
| names[1] = NULL_TREE; |
| cst2 = NULL_TREE; |
| if (rhs_code == BIT_AND_EXPR |
| || (CONVERT_EXPR_CODE_P (rhs_code) |
| && INTEGRAL_TYPE_P (TREE_TYPE (val)) |
| && TYPE_UNSIGNED (TREE_TYPE (val)) |
| && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))) |
| > prec)) |
| { |
| name2 = gimple_assign_rhs1 (def_stmt); |
| if (rhs_code == BIT_AND_EXPR) |
| cst2 = gimple_assign_rhs2 (def_stmt); |
| else |
| { |
| cst2 = TYPE_MAX_VALUE (TREE_TYPE (val)); |
| nprec = TYPE_PRECISION (TREE_TYPE (name2)); |
| } |
| if (TREE_CODE (name2) == SSA_NAME |
| && INTEGRAL_TYPE_P (TREE_TYPE (name2)) |
| && TREE_CODE (cst2) == INTEGER_CST |
| && !integer_zerop (cst2) |
| && (nprec > 1 |
| || TYPE_UNSIGNED (TREE_TYPE (val)))) |
| { |
| gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2); |
| if (gimple_assign_cast_p (def_stmt2)) |
| { |
| names[1] = gimple_assign_rhs1 (def_stmt2); |
| if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2)) |
| || TREE_CODE (names[1]) != SSA_NAME |
| || !INTEGRAL_TYPE_P (TREE_TYPE (names[1])) |
| || (TYPE_PRECISION (TREE_TYPE (name2)) |
| != TYPE_PRECISION (TREE_TYPE (names[1])))) |
| names[1] = NULL_TREE; |
| } |
| names[0] = name2; |
| } |
| } |
| if (names[0] || names[1]) |
| { |
| wide_int minv, maxv, valv, cst2v; |
| wide_int tem, sgnbit; |
| bool valid_p = false, valn, cst2n; |
| enum tree_code ccode = comp_code; |
| |
| valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED); |
| cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED); |
| valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val))); |
| cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val))); |
      /* If CST2 doesn't have the most significant bit set, but VAL
	 is negative, we have a comparison like
	 if ((x & 0x123) > -4) (always true).  Just give up.  */
| if (!cst2n && valn) |
| ccode = ERROR_MARK; |
| if (cst2n) |
| sgnbit = wi::set_bit_in_zero (nprec - 1, nprec); |
| else |
| sgnbit = wi::zero (nprec); |
| minv = valv & cst2v; |
| switch (ccode) |
| { |
| case EQ_EXPR: |
| /* Minimum unsigned value for equality is VAL & CST2 |
| (should be equal to VAL, otherwise we probably should |
| have folded the comparison into false) and |
| maximum unsigned value is VAL | ~CST2. */ |
| maxv = valv | ~cst2v; |
| valid_p = true; |
| break; |
| |
| case NE_EXPR: |
| tem = valv | ~cst2v; |
| /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */ |
| if (valv == 0) |
| { |
| cst2n = false; |
| sgnbit = wi::zero (nprec); |
| goto gt_expr; |
| } |
| /* If (VAL | ~CST2) is all ones, handle it as |
| (X & CST2) < VAL. */ |
| if (tem == -1) |
| { |
| cst2n = false; |
| valn = false; |
| sgnbit = wi::zero (nprec); |
| goto lt_expr; |
| } |
| if (!cst2n && wi::neg_p (cst2v)) |
| sgnbit = wi::set_bit_in_zero (nprec - 1, nprec); |
| if (sgnbit != 0) |
| { |
| if (valv == sgnbit) |
| { |
| cst2n = true; |
| valn = true; |
| goto gt_expr; |
| } |
| if (tem == wi::mask (nprec - 1, false, nprec)) |
| { |
| cst2n = true; |
| goto lt_expr; |
| } |
| if (!cst2n) |
| sgnbit = wi::zero (nprec); |
| } |
| break; |
| |
| case GE_EXPR: |
| /* Minimum unsigned value for >= if (VAL & CST2) == VAL |
| is VAL and maximum unsigned value is ~0. For signed |
| comparison, if CST2 doesn't have most significant bit |
| set, handle it similarly. If CST2 has MSB set, |
| the minimum is the same, and maximum is ~0U/2. */ |
| if (minv != valv) |
| { |
| /* If (VAL & CST2) != VAL, X & CST2 can't be equal to |
| VAL. */ |
| minv = masked_increment (valv, cst2v, sgnbit, nprec); |
| if (minv == valv) |
| break; |
| } |
| maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec); |
| valid_p = true; |
| break; |
| |
| case GT_EXPR: |
| gt_expr: |
| /* Find out smallest MINV where MINV > VAL |
| && (MINV & CST2) == MINV, if any. If VAL is signed and |
| CST2 has MSB set, compute it biased by 1 << (nprec - 1). */ |
| minv = masked_increment (valv, cst2v, sgnbit, nprec); |
| if (minv == valv) |
| break; |
| maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec); |
| valid_p = true; |
| break; |
| |
| case LE_EXPR: |
| /* Minimum unsigned value for <= is 0 and maximum |
| unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL. |
| Otherwise, find smallest VAL2 where VAL2 > VAL |
| && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2 |
| as maximum. |
| |