| /* Support routines for Value Range Propagation (VRP). |
| Copyright (C) 2005-2017 Free Software Foundation, Inc. |
| Contributed by Diego Novillo <dnovillo@redhat.com>. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "insn-codes.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "cfghooks.h" |
| #include "tree-pass.h" |
| #include "ssa.h" |
| #include "optabs-tree.h" |
| #include "gimple-pretty-print.h" |
| #include "diagnostic-core.h" |
| #include "flags.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "calls.h" |
| #include "cfganal.h" |
| #include "gimple-fold.h" |
| #include "tree-eh.h" |
| #include "gimple-iterator.h" |
| #include "gimple-walk.h" |
| #include "tree-cfg.h" |
| #include "tree-dfa.h" |
| #include "tree-ssa-loop-manip.h" |
| #include "tree-ssa-loop-niter.h" |
| #include "tree-ssa-loop.h" |
| #include "tree-into-ssa.h" |
| #include "tree-ssa.h" |
| #include "intl.h" |
| #include "cfgloop.h" |
| #include "tree-scalar-evolution.h" |
| #include "tree-ssa-propagate.h" |
| #include "tree-chrec.h" |
| #include "tree-ssa-threadupdate.h" |
| #include "tree-ssa-scopedtables.h" |
| #include "tree-ssa-threadedge.h" |
| #include "omp-general.h" |
| #include "target.h" |
| #include "case-cfn-macros.h" |
| #include "params.h" |
| #include "alloc-pool.h" |
| #include "domwalk.h" |
| #include "tree-cfgcleanup.h" |
| #include "stringpool.h" |
| #include "attribs.h" |
| #include "vr-values.h" |
| #include "builtins.h" |
| |
/* Set of SSA names found live during the RPO traversal of the function:
   one sbitmap of SSA name versions per still-active basic block.  */
| static sbitmap *live; |
| |
| /* Return true if the SSA name NAME is live on the edge E. */ |
| |
| static bool |
| live_on_edge (edge e, tree name) |
| { |
| return (live[e->dest->index] |
| && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name))); |
| } |
| |
| /* Location information for ASSERT_EXPRs. Each instance of this |
| structure describes an ASSERT_EXPR for an SSA name. Since a single |
| SSA name may have more than one assertion associated with it, these |
| locations are kept in a linked list attached to the corresponding |
| SSA name. */ |
| struct assert_locus |
| { |
| /* Basic block where the assertion would be inserted. */ |
| basic_block bb; |
| |
| /* Some assertions need to be inserted on an edge (e.g., assertions |
| generated by COND_EXPRs). In those cases, BB will be NULL. */ |
| edge e; |
| |
| /* Pointer to the statement that generated this assertion. */ |
| gimple_stmt_iterator si; |
| |
| /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */ |
| enum tree_code comp_code; |
| |
| /* Value being compared against. */ |
| tree val; |
| |
| /* Expression to compare. */ |
| tree expr; |
| |
| /* Next node in the linked list. */ |
| assert_locus *next; |
| }; |
| |
| /* If bit I is present, it means that SSA name N_i has a list of |
| assertions that should be inserted in the IL. */ |
| static bitmap need_assert_for; |
| |
/* Array of location lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
| static assert_locus **asserts_for; |
| |
/* Dead edges scheduled for removal and switch statements scheduled
   for case label updates once VRP has run.  */
vec<edge> to_remove_edges;
vec<switch_update> to_update_switch_stmts;
| |
| |
| /* Return the maximum value for TYPE. */ |
| |
| tree |
| vrp_val_max (const_tree type) |
| { |
| if (!INTEGRAL_TYPE_P (type)) |
| return NULL_TREE; |
| |
| return TYPE_MAX_VALUE (type); |
| } |
| |
| /* Return the minimum value for TYPE. */ |
| |
| tree |
| vrp_val_min (const_tree type) |
| { |
| if (!INTEGRAL_TYPE_P (type)) |
| return NULL_TREE; |
| |
| return TYPE_MIN_VALUE (type); |
| } |
| |
| /* Return whether VAL is equal to the maximum value of its type. |
| We can't do a simple equality comparison with TYPE_MAX_VALUE because |
| C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE |
| is not == to the integer constant with the same value in the type. */ |
| |
| bool |
| vrp_val_is_max (const_tree val) |
| { |
| tree type_max = vrp_val_max (TREE_TYPE (val)); |
| return (val == type_max |
| || (type_max != NULL_TREE |
| && operand_equal_p (val, type_max, 0))); |
| } |
| |
| /* Return whether VAL is equal to the minimum value of its type. */ |
| |
| bool |
| vrp_val_is_min (const_tree val) |
| { |
| tree type_min = vrp_val_min (TREE_TYPE (val)); |
| return (val == type_min |
| || (type_min != NULL_TREE |
| && operand_equal_p (val, type_min, 0))); |
| } |
| |
| |
| /* Set value range VR to VR_UNDEFINED. */ |
| |
| static inline void |
| set_value_range_to_undefined (value_range *vr) |
| { |
| vr->type = VR_UNDEFINED; |
| vr->min = vr->max = NULL_TREE; |
| if (vr->equiv) |
| bitmap_clear (vr->equiv); |
| } |
| |
| /* Set value range VR to VR_VARYING. */ |
| |
| void |
| set_value_range_to_varying (value_range *vr) |
| { |
| vr->type = VR_VARYING; |
| vr->min = vr->max = NULL_TREE; |
| if (vr->equiv) |
| bitmap_clear (vr->equiv); |
| } |
| |
| /* Set value range VR to {T, MIN, MAX, EQUIV}. */ |
| |
| void |
| set_value_range (value_range *vr, enum value_range_type t, tree min, |
| tree max, bitmap equiv) |
| { |
| /* Check the validity of the range. */ |
| if (flag_checking |
| && (t == VR_RANGE || t == VR_ANTI_RANGE)) |
| { |
| int cmp; |
| |
| gcc_assert (min && max); |
| |
| gcc_assert (!TREE_OVERFLOW_P (min) && !TREE_OVERFLOW_P (max)); |
| |
| if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE) |
| gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max)); |
| |
| cmp = compare_values (min, max); |
| gcc_assert (cmp == 0 || cmp == -1 || cmp == -2); |
| } |
| |
| if (flag_checking |
| && (t == VR_UNDEFINED || t == VR_VARYING)) |
| { |
| gcc_assert (min == NULL_TREE && max == NULL_TREE); |
| gcc_assert (equiv == NULL || bitmap_empty_p (equiv)); |
| } |
| |
| vr->type = t; |
| vr->min = min; |
| vr->max = max; |
| |
| /* Since updating the equivalence set involves deep copying the |
| bitmaps, only do it if absolutely necessary. |
| |
| All equivalence bitmaps are allocated from the same obstack. So |
| we can use the obstack associated with EQUIV to allocate vr->equiv. */ |
| if (vr->equiv == NULL |
| && equiv != NULL) |
| vr->equiv = BITMAP_ALLOC (equiv->obstack); |
| |
| if (equiv != vr->equiv) |
| { |
| if (equiv && !bitmap_empty_p (equiv)) |
| bitmap_copy (vr->equiv, equiv); |
| else |
| bitmap_clear (vr->equiv); |
| } |
| } |
| |
| |
/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX so that a wrapping range with
   MAX < MIN covering [MIN, type_max] U [type_min, MAX] is represented
   as the anti-range ~[MAX+1, MIN-1].  Likewise for wrapping
   anti-ranges.  In corner cases where MAX+1 or MIN-1 wraps this will
   fall back to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */
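/* For example, for unsigned char the wrapping range {VR_RANGE, 250, 5}
   is canonicalized to {VR_ANTI_RANGE, 6, 249}, while the anti-range
   ~[0, 254] becomes the singleton range [255, 255].  */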
| |
| void |
| set_and_canonicalize_value_range (value_range *vr, enum value_range_type t, |
| tree min, tree max, bitmap equiv) |
| { |
| /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */ |
| if (t == VR_UNDEFINED) |
| { |
| set_value_range_to_undefined (vr); |
| return; |
| } |
| else if (t == VR_VARYING) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| /* Nothing to canonicalize for symbolic ranges. */ |
| if (TREE_CODE (min) != INTEGER_CST |
| || TREE_CODE (max) != INTEGER_CST) |
| { |
| set_value_range (vr, t, min, max, equiv); |
| return; |
| } |
| |
  /* MIN and MAX in the wrong order denote a wrapping range; to swap
     them we also need to adjust the bounds and flip the range type.  */
| if (tree_int_cst_lt (max, min)) |
| { |
| tree one, tmp; |
| |
| /* For one bit precision if max < min, then the swapped |
| range covers all values, so for VR_RANGE it is varying and |
| for VR_ANTI_RANGE empty range, so drop to varying as well. */ |
| if (TYPE_PRECISION (TREE_TYPE (min)) == 1) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| one = build_int_cst (TREE_TYPE (min), 1); |
| tmp = int_const_binop (PLUS_EXPR, max, one); |
| max = int_const_binop (MINUS_EXPR, min, one); |
| min = tmp; |
| |
      /* There's one corner case: if we had [C+1, C] before, we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
| if (tree_int_cst_lt (max, min)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE; |
| } |
| |
| /* Anti-ranges that can be represented as ranges should be so. */ |
| if (t == VR_ANTI_RANGE) |
| { |
| bool is_min = vrp_val_is_min (min); |
| bool is_max = vrp_val_is_max (max); |
| |
| if (is_min && is_max) |
| { |
| /* We cannot deal with empty ranges, drop to varying. |
| ??? This could be VR_UNDEFINED instead. */ |
| set_value_range_to_varying (vr); |
| return; |
| } |
| else if (TYPE_PRECISION (TREE_TYPE (min)) == 1 |
| && (is_min || is_max)) |
| { |
| /* Non-empty boolean ranges can always be represented |
| as a singleton range. */ |
| if (is_min) |
| min = max = vrp_val_max (TREE_TYPE (min)); |
| else |
| min = max = vrp_val_min (TREE_TYPE (min)); |
| t = VR_RANGE; |
| } |
| else if (is_min |
| /* As a special exception preserve non-null ranges. */ |
| && !(TYPE_UNSIGNED (TREE_TYPE (min)) |
| && integer_zerop (max))) |
| { |
| tree one = build_int_cst (TREE_TYPE (max), 1); |
| min = int_const_binop (PLUS_EXPR, max, one); |
| max = vrp_val_max (TREE_TYPE (max)); |
| t = VR_RANGE; |
| } |
| else if (is_max) |
| { |
| tree one = build_int_cst (TREE_TYPE (min), 1); |
| max = int_const_binop (MINUS_EXPR, min, one); |
| min = vrp_val_min (TREE_TYPE (min)); |
| t = VR_RANGE; |
| } |
| } |
| |
| /* Do not drop [-INF(OVF), +INF(OVF)] to varying. (OVF) has to be sticky |
| to make sure VRP iteration terminates, otherwise we can get into |
| oscillations. */ |
| |
| set_value_range (vr, t, min, max, equiv); |
| } |
| |
| /* Copy value range FROM into value range TO. */ |
| |
| void |
| copy_value_range (value_range *to, value_range *from) |
| { |
| set_value_range (to, from->type, from->min, from->max, from->equiv); |
| } |
| |
| /* Set value range VR to a single value. This function is only called |
| with values we get from statements, and exists to clear the |
| TREE_OVERFLOW flag. */ |
| |
| void |
| set_value_range_to_value (value_range *vr, tree val, bitmap equiv) |
| { |
| gcc_assert (is_gimple_min_invariant (val)); |
| if (TREE_OVERFLOW_P (val)) |
| val = drop_tree_overflow (val); |
| set_value_range (vr, VR_RANGE, val, val, equiv); |
| } |
| |
| /* Set value range VR to a non-NULL range of type TYPE. */ |
| |
| void |
| set_value_range_to_nonnull (value_range *vr, tree type) |
| { |
| tree zero = build_int_cst (type, 0); |
| set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv); |
| } |
| |
| |
| /* Set value range VR to a NULL range of type TYPE. */ |
| |
| void |
| set_value_range_to_null (value_range *vr, tree type) |
| { |
| set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv); |
| } |
| |
| |
/* If abs (min) < abs (max), set VR to [-max, max]; if
   abs (min) >= abs (max), set VR to [-min, min].  */
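/* For example, [-3, 5] becomes [-5, 5] and [-7, 2] becomes [-7, 7].  */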
| |
| static void |
| abs_extent_range (value_range *vr, tree min, tree max) |
| { |
| int cmp; |
| |
| gcc_assert (TREE_CODE (min) == INTEGER_CST); |
| gcc_assert (TREE_CODE (max) == INTEGER_CST); |
| gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min))); |
| gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min))); |
| min = fold_unary (ABS_EXPR, TREE_TYPE (min), min); |
| max = fold_unary (ABS_EXPR, TREE_TYPE (max), max); |
| if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| cmp = compare_values (min, max); |
| if (cmp == -1) |
| min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max); |
| else if (cmp == 0 || cmp == 1) |
| { |
| max = min; |
| min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min); |
| } |
| else |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); |
| } |
| |
/* Return true if VAL1 and VAL2 are equal values for VRP purposes.  */
| |
| bool |
| vrp_operand_equal_p (const_tree val1, const_tree val2) |
| { |
| if (val1 == val2) |
| return true; |
| if (!val1 || !val2 || !operand_equal_p (val1, val2, 0)) |
| return false; |
| return true; |
| } |
| |
/* Return true if the bitmaps B1 and B2 are equal.  */
| |
| bool |
| vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2) |
| { |
| return (b1 == b2 |
| || ((!b1 || bitmap_empty_p (b1)) |
| && (!b2 || bitmap_empty_p (b2))) |
| || (b1 && b2 |
| && bitmap_equal_p (b1, b2))); |
| } |
| |
| /* Return true if VR is ~[0, 0]. */ |
| |
| bool |
| range_is_nonnull (value_range *vr) |
| { |
| return vr->type == VR_ANTI_RANGE |
| && integer_zerop (vr->min) |
| && integer_zerop (vr->max); |
| } |
| |
| |
| /* Return true if VR is [0, 0]. */ |
| |
| static inline bool |
| range_is_null (value_range *vr) |
| { |
| return vr->type == VR_RANGE |
| && integer_zerop (vr->min) |
| && integer_zerop (vr->max); |
| } |
| |
/* Return true if both max and min of VR are INTEGER_CST.  It's not
   necessarily a singleton.  */
| |
| bool |
| range_int_cst_p (value_range *vr) |
| { |
| return (vr->type == VR_RANGE |
| && TREE_CODE (vr->max) == INTEGER_CST |
| && TREE_CODE (vr->min) == INTEGER_CST); |
| } |
| |
/* Return true if VR is an INTEGER_CST singleton.  */
| |
| bool |
| range_int_cst_singleton_p (value_range *vr) |
| { |
| return (range_int_cst_p (vr) |
| && tree_int_cst_equal (vr->min, vr->max)); |
| } |
| |
| /* Return true if value range VR involves at least one symbol. */ |
| |
| bool |
| symbolic_range_p (value_range *vr) |
| { |
| return (!is_gimple_min_invariant (vr->min) |
| || !is_gimple_min_invariant (vr->max)); |
| } |
| |
| /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE |
| otherwise. We only handle additive operations and set NEG to true if the |
| symbol is negated and INV to the invariant part, if any. */ |
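/* For example, for T = x_1 + 16 this returns x_1 with *NEG set to false
   and *INV set to 16, and for T = 5 - y_2 it returns y_2 with *NEG set
   to true and *INV set to 5.  */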
| |
| tree |
| get_single_symbol (tree t, bool *neg, tree *inv) |
| { |
| bool neg_; |
| tree inv_; |
| |
| *inv = NULL_TREE; |
| *neg = false; |
| |
| if (TREE_CODE (t) == PLUS_EXPR |
| || TREE_CODE (t) == POINTER_PLUS_EXPR |
| || TREE_CODE (t) == MINUS_EXPR) |
| { |
| if (is_gimple_min_invariant (TREE_OPERAND (t, 0))) |
| { |
| neg_ = (TREE_CODE (t) == MINUS_EXPR); |
| inv_ = TREE_OPERAND (t, 0); |
| t = TREE_OPERAND (t, 1); |
| } |
| else if (is_gimple_min_invariant (TREE_OPERAND (t, 1))) |
| { |
| neg_ = false; |
| inv_ = TREE_OPERAND (t, 1); |
| t = TREE_OPERAND (t, 0); |
| } |
| else |
| return NULL_TREE; |
| } |
| else |
| { |
| neg_ = false; |
| inv_ = NULL_TREE; |
| } |
| |
| if (TREE_CODE (t) == NEGATE_EXPR) |
| { |
| t = TREE_OPERAND (t, 0); |
| neg_ = !neg_; |
| } |
| |
| if (TREE_CODE (t) != SSA_NAME) |
| return NULL_TREE; |
| |
| if (inv_ && TREE_OVERFLOW_P (inv_)) |
| inv_ = drop_tree_overflow (inv_); |
| |
| *neg = neg_; |
| *inv = inv_; |
| return t; |
| } |
| |
| /* The reverse operation: build a symbolic expression with TYPE |
| from symbol SYM, negated according to NEG, and invariant INV. */ |
| |
| static tree |
| build_symbolic_expr (tree type, tree sym, bool neg, tree inv) |
| { |
| const bool pointer_p = POINTER_TYPE_P (type); |
| tree t = sym; |
| |
| if (neg) |
| t = build1 (NEGATE_EXPR, type, t); |
| |
| if (integer_zerop (inv)) |
| return t; |
| |
| return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv); |
| } |
| |
| /* Return |
| 1 if VAL < VAL2 |
| 0 if !(VAL < VAL2) |
| -2 if those are incomparable. */ |
| int |
| operand_less_p (tree val, tree val2) |
| { |
| /* LT is folded faster than GE and others. Inline the common case. */ |
| if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST) |
| return tree_int_cst_lt (val, val2); |
| else |
| { |
| tree tcmp; |
| |
| fold_defer_overflow_warnings (); |
| |
| tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2); |
| |
| fold_undefer_and_ignore_overflow_warnings (); |
| |
| if (!tcmp |
| || TREE_CODE (tcmp) != INTEGER_CST) |
| return -2; |
| |
| if (!integer_zerop (tcmp)) |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
| /* Compare two values VAL1 and VAL2. Return |
| |
| -2 if VAL1 and VAL2 cannot be compared at compile-time, |
| -1 if VAL1 < VAL2, |
| 0 if VAL1 == VAL2, |
| +1 if VAL1 > VAL2, and |
| +2 if VAL1 != VAL2 |
| |
| This is similar to tree_int_cst_compare but supports pointer values |
| and values that cannot be compared at compile time. |
| |
| If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to |
| true if the return value is only valid if we assume that signed |
| overflow is undefined. */ |
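/* For example, for a signed type whose overflow is undefined,
   N_1 + 1 compares less than N_1 + 3, so this returns -1 for them and
   records the dependence on undefined overflow in *STRICT_OVERFLOW_P.  */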
| |
| int |
| compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p) |
| { |
| if (val1 == val2) |
| return 0; |
| |
| /* Below we rely on the fact that VAL1 and VAL2 are both pointers or |
| both integers. */ |
| gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1)) |
| == POINTER_TYPE_P (TREE_TYPE (val2))); |
| |
| /* Convert the two values into the same type. This is needed because |
| sizetype causes sign extension even for unsigned types. */ |
| val2 = fold_convert (TREE_TYPE (val1), val2); |
| STRIP_USELESS_TYPE_CONVERSION (val2); |
| |
| const bool overflow_undefined |
| = INTEGRAL_TYPE_P (TREE_TYPE (val1)) |
| && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)); |
| tree inv1, inv2; |
| bool neg1, neg2; |
| tree sym1 = get_single_symbol (val1, &neg1, &inv1); |
| tree sym2 = get_single_symbol (val2, &neg2, &inv2); |
| |
| /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1 |
| accordingly. If VAL1 and VAL2 don't use the same name, return -2. */ |
| if (sym1 && sym2) |
| { |
| /* Both values must use the same name with the same sign. */ |
| if (sym1 != sym2 || neg1 != neg2) |
| return -2; |
| |
| /* [-]NAME + CST == [-]NAME + CST. */ |
| if (inv1 == inv2) |
| return 0; |
| |
| /* If overflow is defined we cannot simplify more. */ |
| if (!overflow_undefined) |
| return -2; |
| |
| if (strict_overflow_p != NULL |
| /* Symbolic range building sets TREE_NO_WARNING to declare |
| that overflow doesn't happen. */ |
| && (!inv1 || !TREE_NO_WARNING (val1)) |
| && (!inv2 || !TREE_NO_WARNING (val2))) |
| *strict_overflow_p = true; |
| |
| if (!inv1) |
| inv1 = build_int_cst (TREE_TYPE (val1), 0); |
| if (!inv2) |
| inv2 = build_int_cst (TREE_TYPE (val2), 0); |
| |
| return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2), |
| TYPE_SIGN (TREE_TYPE (val1))); |
| } |
| |
| const bool cst1 = is_gimple_min_invariant (val1); |
| const bool cst2 = is_gimple_min_invariant (val2); |
| |
| /* If one is of the form '[-]NAME + CST' and the other is constant, then |
| it might be possible to say something depending on the constants. */ |
| if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1)) |
| { |
| if (!overflow_undefined) |
| return -2; |
| |
| if (strict_overflow_p != NULL |
| /* Symbolic range building sets TREE_NO_WARNING to declare |
| that overflow doesn't happen. */ |
| && (!sym1 || !TREE_NO_WARNING (val1)) |
| && (!sym2 || !TREE_NO_WARNING (val2))) |
| *strict_overflow_p = true; |
| |
| const signop sgn = TYPE_SIGN (TREE_TYPE (val1)); |
| tree cst = cst1 ? val1 : val2; |
| tree inv = cst1 ? inv2 : inv1; |
| |
| /* Compute the difference between the constants. If it overflows or |
| underflows, this means that we can trivially compare the NAME with |
| it and, consequently, the two values with each other. */ |
| wide_int diff = wi::to_wide (cst) - wi::to_wide (inv); |
| if (wi::cmp (0, wi::to_wide (inv), sgn) |
| != wi::cmp (diff, wi::to_wide (cst), sgn)) |
| { |
| const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn); |
| return cst1 ? res : -res; |
| } |
| |
| return -2; |
| } |
| |
| /* We cannot say anything more for non-constants. */ |
| if (!cst1 || !cst2) |
| return -2; |
| |
| if (!POINTER_TYPE_P (TREE_TYPE (val1))) |
| { |
| /* We cannot compare overflowed values. */ |
| if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2)) |
| return -2; |
| |
| return tree_int_cst_compare (val1, val2); |
| } |
| else |
| { |
| tree t; |
| |
      /* First see if VAL1 and VAL2 are the same.  */
| if (val1 == val2 || operand_equal_p (val1, val2, 0)) |
| return 0; |
| |
| /* If VAL1 is a lower address than VAL2, return -1. */ |
| if (operand_less_p (val1, val2) == 1) |
| return -1; |
| |
| /* If VAL1 is a higher address than VAL2, return +1. */ |
| if (operand_less_p (val2, val1) == 1) |
| return 1; |
| |
| /* If VAL1 is different than VAL2, return +2. |
| For integer constants we either have already returned -1 or 1 |
| or they are equivalent. We still might succeed in proving |
| something about non-trivial operands. */ |
| if (TREE_CODE (val1) != INTEGER_CST |
| || TREE_CODE (val2) != INTEGER_CST) |
| { |
| t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2); |
| if (t && integer_onep (t)) |
| return 2; |
| } |
| |
| return -2; |
| } |
| } |
| |
/* Compare values like compare_values_warnv, but do not report whether
   the result relies on undefined signed overflow.  */
| |
| int |
| compare_values (tree val1, tree val2) |
| { |
| bool sop; |
| return compare_values_warnv (val1, val2, &sop); |
| } |
| |
| |
| /* Return 1 if VAL is inside value range MIN <= VAL <= MAX, |
| 0 if VAL is not inside [MIN, MAX], |
| -2 if we cannot tell either way. |
| |
| Benchmark compile/20001226-1.c compilation time after changing this |
| function. */ |
| |
| int |
| value_inside_range (tree val, tree min, tree max) |
| { |
| int cmp1, cmp2; |
| |
| cmp1 = operand_less_p (val, min); |
| if (cmp1 == -2) |
| return -2; |
| if (cmp1 == 1) |
| return 0; |
| |
| cmp2 = operand_less_p (max, val); |
| if (cmp2 == -2) |
| return -2; |
| |
| return !cmp2; |
| } |
| |
| |
/* Return true if value ranges VR0 and VR1 have a non-empty
   intersection.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */
| |
| static inline bool |
| value_ranges_intersect_p (value_range *vr0, value_range *vr1) |
| { |
| /* The value ranges do not intersect if the maximum of the first range is |
| less than the minimum of the second range or vice versa. |
| When those relations are unknown, we can't do any better. */ |
| if (operand_less_p (vr0->max, vr1->min) != 0) |
| return false; |
| if (operand_less_p (vr1->max, vr0->min) != 0) |
| return false; |
| return true; |
| } |
| |
| |
| /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not |
| include the value zero, -2 if we cannot tell. */ |
| |
| int |
| range_includes_zero_p (tree min, tree max) |
| { |
| tree zero = build_int_cst (TREE_TYPE (min), 0); |
| return value_inside_range (zero, min, max); |
| } |
| |
/* Return true if *VR is known to only contain nonnegative values.  */
| |
| static inline bool |
| value_range_nonnegative_p (value_range *vr) |
| { |
| /* Testing for VR_ANTI_RANGE is not useful here as any anti-range |
| which would return a useful value should be encoded as a |
| VR_RANGE. */ |
| if (vr->type == VR_RANGE) |
| { |
| int result = compare_values (vr->min, integer_zero_node); |
| return (result == 0 || result == 1); |
| } |
| |
| return false; |
| } |
| |
/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.  */
| |
| tree |
| value_range_constant_singleton (value_range *vr) |
| { |
| if (vr->type == VR_RANGE |
| && vrp_operand_equal_p (vr->min, vr->max) |
| && is_gimple_min_invariant (vr->min)) |
| return vr->min; |
| |
| return NULL_TREE; |
| } |
| |
| /* Wrapper around int_const_binop. Return true if we can compute the |
| result; i.e. if the operation doesn't overflow or if the overflow is |
| undefined. In the latter case (if the operation overflows and |
| overflow is undefined), then adjust the result to be -INF or +INF |
| depending on CODE, VAL1 and VAL2. Return the value in *RES. |
| |
| Return false for division by zero, for which the result is |
| indeterminate. */ |
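/* For example, for a signed char type whose overflow is undefined,
   multiplying 100 by 2 overflows; since both operands are positive the
   result is adjusted to +INF, i.e. *RES is set to 127.  */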
| |
| static bool |
| vrp_int_const_binop (enum tree_code code, tree val1, tree val2, wide_int *res) |
| { |
| bool overflow = false; |
| signop sign = TYPE_SIGN (TREE_TYPE (val1)); |
| |
| switch (code) |
| { |
| case RSHIFT_EXPR: |
| case LSHIFT_EXPR: |
| { |
| wide_int wval2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1))); |
| if (wi::neg_p (wval2)) |
| { |
| wval2 = -wval2; |
| if (code == RSHIFT_EXPR) |
| code = LSHIFT_EXPR; |
| else |
| code = RSHIFT_EXPR; |
| } |
| |
| if (code == RSHIFT_EXPR) |
| /* It's unclear from the C standard whether shifts can overflow. |
| The following code ignores overflow; perhaps a C standard |
| interpretation ruling is needed. */ |
| *res = wi::rshift (wi::to_wide (val1), wval2, sign); |
| else |
| *res = wi::lshift (wi::to_wide (val1), wval2); |
| break; |
| } |
| |
| case MULT_EXPR: |
| *res = wi::mul (wi::to_wide (val1), |
| wi::to_wide (val2), sign, &overflow); |
| break; |
| |
| case TRUNC_DIV_EXPR: |
| case EXACT_DIV_EXPR: |
      if (integer_zerop (val2))
| return false; |
| else |
| *res = wi::div_trunc (wi::to_wide (val1), |
| wi::to_wide (val2), sign, &overflow); |
| break; |
| |
| case FLOOR_DIV_EXPR: |
      if (integer_zerop (val2))
| return false; |
| *res = wi::div_floor (wi::to_wide (val1), |
| wi::to_wide (val2), sign, &overflow); |
| break; |
| |
| case CEIL_DIV_EXPR: |
      if (integer_zerop (val2))
| return false; |
| *res = wi::div_ceil (wi::to_wide (val1), |
| wi::to_wide (val2), sign, &overflow); |
| break; |
| |
| case ROUND_DIV_EXPR: |
      if (integer_zerop (val2))
| return false; |
| *res = wi::div_round (wi::to_wide (val1), |
| wi::to_wide (val2), sign, &overflow); |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| |
| if (overflow |
| && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1))) |
| { |
| /* If the operation overflowed return -INF or +INF depending |
| on the operation and the combination of signs of the operands. */ |
| int sgn1 = tree_int_cst_sgn (val1); |
| int sgn2 = tree_int_cst_sgn (val2); |
| |
| /* Notice that we only need to handle the restricted set of |
| operations handled by extract_range_from_binary_expr. |
| Among them, only multiplication, addition and subtraction |
| can yield overflow without overflown operands because we |
| are working with integral types only... except in the |
| case VAL1 = -INF and VAL2 = -1 which overflows to +INF |
| for division too. */ |
| |
| /* For multiplication, the sign of the overflow is given |
| by the comparison of the signs of the operands. */ |
| if ((code == MULT_EXPR && sgn1 == sgn2) |
| /* For addition, the operands must be of the same sign |
| to yield an overflow. Its sign is therefore that |
| of one of the operands, for example the first. */ |
| || (code == PLUS_EXPR && sgn1 >= 0) |
| /* For subtraction, operands must be of |
| different signs to yield an overflow. Its sign is |
| therefore that of the first operand or the opposite of |
| that of the second operand. A first operand of 0 counts |
| as positive here, for the corner case 0 - (-INF), which |
| overflows, but must yield +INF. */ |
| || (code == MINUS_EXPR && sgn1 >= 0) |
| /* For division, the only case is -INF / -1 = +INF. */ |
| || code == TRUNC_DIV_EXPR |
| || code == FLOOR_DIV_EXPR |
| || code == CEIL_DIV_EXPR |
| || code == EXACT_DIV_EXPR |
| || code == ROUND_DIV_EXPR) |
| *res = wi::max_value (TYPE_PRECISION (TREE_TYPE (val1)), |
| TYPE_SIGN (TREE_TYPE (val1))); |
| else |
| *res = wi::min_value (TYPE_PRECISION (TREE_TYPE (val1)), |
| TYPE_SIGN (TREE_TYPE (val1))); |
| return true; |
| } |
| |
| return !overflow; |
| } |
| |
| |
/* For range VR compute two wide_int bitmasks.  In *MAY_BE_NONZERO,
   if some bit is unset, it means that for all numbers in the range
   the bit is 0, otherwise it might be 0 or 1.  In *MUST_BE_NONZERO,
   if some bit is set, it means that for all numbers in the range
   the bit is 1, otherwise it might be 0 or 1.  */
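/* For example, for the unsigned range [4, 7] all values have bit 2 set
   and bits 0 and 1 varying, so *MUST_BE_NONZERO is 0b100 and
   *MAY_BE_NONZERO is 0b111.  */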
| |
| bool |
| zero_nonzero_bits_from_vr (const tree expr_type, |
| value_range *vr, |
| wide_int *may_be_nonzero, |
| wide_int *must_be_nonzero) |
| { |
| *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type)); |
| *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type)); |
| if (!range_int_cst_p (vr)) |
| return false; |
| |
| if (range_int_cst_singleton_p (vr)) |
| { |
| *may_be_nonzero = wi::to_wide (vr->min); |
| *must_be_nonzero = *may_be_nonzero; |
| } |
| else if (tree_int_cst_sgn (vr->min) >= 0 |
| || tree_int_cst_sgn (vr->max) < 0) |
| { |
| wide_int xor_mask = wi::to_wide (vr->min) ^ wi::to_wide (vr->max); |
| *may_be_nonzero = wi::to_wide (vr->min) | wi::to_wide (vr->max); |
| *must_be_nonzero = wi::to_wide (vr->min) & wi::to_wide (vr->max); |
| if (xor_mask != 0) |
| { |
| wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false, |
| may_be_nonzero->get_precision ()); |
| *may_be_nonzero = *may_be_nonzero | mask; |
| *must_be_nonzero = wi::bit_and_not (*must_be_nonzero, mask); |
| } |
| } |
| |
| return true; |
| } |
| |
/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range,
   *VR1 will be VR_UNDEFINED.  */
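/* For example, for unsigned char the anti-range ~[3, 5] splits into
   *VR0 = [0, 2] and *VR1 = [6, 255], while ~[0, 5] yields the single
   range *VR0 = [6, 255] with *VR1 left VR_UNDEFINED.  */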
| |
| static bool |
| ranges_from_anti_range (value_range *ar, |
| value_range *vr0, value_range *vr1) |
| { |
| tree type = TREE_TYPE (ar->min); |
| |
| vr0->type = VR_UNDEFINED; |
| vr1->type = VR_UNDEFINED; |
| |
| if (ar->type != VR_ANTI_RANGE |
| || TREE_CODE (ar->min) != INTEGER_CST |
| || TREE_CODE (ar->max) != INTEGER_CST |
| || !vrp_val_min (type) |
| || !vrp_val_max (type)) |
| return false; |
| |
| if (!vrp_val_is_min (ar->min)) |
| { |
| vr0->type = VR_RANGE; |
| vr0->min = vrp_val_min (type); |
| vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1); |
| } |
| if (!vrp_val_is_max (ar->max)) |
| { |
| vr1->type = VR_RANGE; |
| vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1); |
| vr1->max = vrp_val_max (type); |
| } |
| if (vr0->type == VR_UNDEFINED) |
| { |
| *vr0 = *vr1; |
| vr1->type = VR_UNDEFINED; |
| } |
| |
| return vr0->type != VR_UNDEFINED; |
| } |
| |
| /* Helper to extract a value-range *VR for a multiplicative operation |
| *VR0 CODE *VR1. */ |
| |
| static void |
| extract_range_from_multiplicative_op_1 (value_range *vr, |
| enum tree_code code, |
| value_range *vr0, value_range *vr1) |
| { |
| enum value_range_type rtype; |
| wide_int val, min, max; |
| tree type; |
| |
| /* Multiplications, divisions and shifts are a bit tricky to handle, |
| depending on the mix of signs we have in the two ranges, we |
| need to operate on different values to get the minimum and |
| maximum values for the new range. One approach is to figure |
| out all the variations of range combinations and do the |
| operations. |
| |
     However, this involves several calls to compare_values and it
     is pretty convoluted.  It's simpler to do the 4 operations
     (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
     and then figure the smallest and largest values to form the
     new range.  */
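  /* For example, [1, 2] * [3, 4] computes the cross products 3, 4, 6
     and 8 and yields the range [3, 8].  */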
| gcc_assert (code == MULT_EXPR |
| || code == TRUNC_DIV_EXPR |
| || code == FLOOR_DIV_EXPR |
| || code == CEIL_DIV_EXPR |
| || code == EXACT_DIV_EXPR |
| || code == ROUND_DIV_EXPR |
| || code == RSHIFT_EXPR |
| || code == LSHIFT_EXPR); |
| gcc_assert (vr0->type == VR_RANGE |
| && vr0->type == vr1->type); |
| |
| rtype = vr0->type; |
| type = TREE_TYPE (vr0->min); |
| signop sgn = TYPE_SIGN (type); |
| |
| /* Compute the 4 cross operations and their minimum and maximum value. */ |
| if (!vrp_int_const_binop (code, vr0->min, vr1->min, &val)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| min = max = val; |
| |
| if (vr1->max != vr1->min) |
| { |
| if (!vrp_int_const_binop (code, vr0->min, vr1->max, &val)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| if (wi::lt_p (val, min, sgn)) |
| min = val; |
| else if (wi::gt_p (val, max, sgn)) |
| max = val; |
| } |
| |
| if (vr0->max != vr0->min) |
| { |
| if (!vrp_int_const_binop (code, vr0->max, vr1->min, &val)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| if (wi::lt_p (val, min, sgn)) |
| min = val; |
| else if (wi::gt_p (val, max, sgn)) |
| max = val; |
| } |
| |
| if (vr0->min != vr0->max && vr1->min != vr1->max) |
| { |
| if (!vrp_int_const_binop (code, vr0->max, vr1->max, &val)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| if (wi::lt_p (val, min, sgn)) |
| min = val; |
| else if (wi::gt_p (val, max, sgn)) |
| max = val; |
| } |
| |
| /* If the new range has its limits swapped around (MIN > MAX), |
| then the operation caused one of them to wrap around, mark |
| the new range VARYING. */ |
| if (wi::gt_p (min, max, sgn)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| /* We punt for [-INF, +INF]. |
| We learn nothing when we have INF on both sides. |
| Note that we do accept [-INF, -INF] and [+INF, +INF]. */ |
| if (wi::eq_p (min, wi::min_value (TYPE_PRECISION (type), sgn)) |
| && wi::eq_p (max, wi::max_value (TYPE_PRECISION (type), sgn))) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| set_value_range (vr, rtype, |
| wide_int_to_tree (type, min), |
| wide_int_to_tree (type, max), NULL); |
| } |
| |
| /* Extract range information from a binary operation CODE based on |
| the ranges of each of its operands *VR0 and *VR1 with resulting |
| type EXPR_TYPE. The resulting range is stored in *VR. */ |
| |
| void |
| extract_range_from_binary_expr_1 (value_range *vr, |
| enum tree_code code, tree expr_type, |
| value_range *vr0_, value_range *vr1_) |
| { |
| value_range vr0 = *vr0_, vr1 = *vr1_; |
| value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER; |
| enum value_range_type type; |
| tree min = NULL_TREE, max = NULL_TREE; |
| int cmp; |
| |
| if (!INTEGRAL_TYPE_P (expr_type) |
| && !POINTER_TYPE_P (expr_type)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| /* Not all binary expressions can be applied to ranges in a |
| meaningful way. Handle only arithmetic operations. */ |
| if (code != PLUS_EXPR |
| && code != MINUS_EXPR |
| && code != POINTER_PLUS_EXPR |
| && code != MULT_EXPR |
| && code != TRUNC_DIV_EXPR |
| && code != FLOOR_DIV_EXPR |
| && code != CEIL_DIV_EXPR |
| && code != EXACT_DIV_EXPR |
| && code != ROUND_DIV_EXPR |
| && code != TRUNC_MOD_EXPR |
| && code != RSHIFT_EXPR |
| && code != LSHIFT_EXPR |
| && code != MIN_EXPR |
| && code != MAX_EXPR |
| && code != BIT_AND_EXPR |
| && code != BIT_IOR_EXPR |
| && code != BIT_XOR_EXPR) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| /* If both ranges are UNDEFINED, so is the result. */ |
| if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED) |
| { |
| set_value_range_to_undefined (vr); |
| return; |
| } |
| /* If one of the ranges is UNDEFINED drop it to VARYING for the following |
| code. At some point we may want to special-case operations that |
| have UNDEFINED result for all or some value-ranges of the not UNDEFINED |
| operand. */ |
| else if (vr0.type == VR_UNDEFINED) |
| set_value_range_to_varying (&vr0); |
| else if (vr1.type == VR_UNDEFINED) |
| set_value_range_to_varying (&vr1); |
| |
| /* We get imprecise results from ranges_from_anti_range when |
| code is EXACT_DIV_EXPR. We could mask out bits in the resulting |
| range, but then we also need to hack up vrp_meet. It's just |
| easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR. */ |
| if (code == EXACT_DIV_EXPR |
| && vr0.type == VR_ANTI_RANGE |
| && vr0.min == vr0.max |
| && integer_zerop (vr0.min)) |
| { |
| set_value_range_to_nonnull (vr, expr_type); |
| return; |
| } |
| |
| /* Now canonicalize anti-ranges to ranges when they are not symbolic |
| and express ~[] op X as ([]' op X) U ([]'' op X). */ |
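  /* For example, ~[3, 5] op X is computed as
     ([-INF, 2] op X) U ([6, +INF] op X).  */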
| if (vr0.type == VR_ANTI_RANGE |
| && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) |
| { |
| extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_); |
| if (vrtem1.type != VR_UNDEFINED) |
| { |
| value_range vrres = VR_INITIALIZER; |
| extract_range_from_binary_expr_1 (&vrres, code, expr_type, |
| &vrtem1, vr1_); |
| vrp_meet (vr, &vrres); |
| } |
| return; |
| } |
| /* Likewise for X op ~[]. */ |
| if (vr1.type == VR_ANTI_RANGE |
| && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1)) |
| { |
| extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0); |
| if (vrtem1.type != VR_UNDEFINED) |
| { |
| value_range vrres = VR_INITIALIZER; |
| extract_range_from_binary_expr_1 (&vrres, code, expr_type, |
| vr0_, &vrtem1); |
| vrp_meet (vr, &vrres); |
| } |
| return; |
| } |
| |
| /* The type of the resulting value range defaults to VR0.TYPE. */ |
| type = vr0.type; |
| |
| /* Refuse to operate on VARYING ranges, ranges of different kinds |
| and symbolic ranges. As an exception, we allow BIT_{AND,IOR} |
| because we may be able to derive a useful range even if one of |
| the operands is VR_VARYING or symbolic range. Similarly for |
| divisions, MIN/MAX and PLUS/MINUS. |
| |
| TODO, we may be able to derive anti-ranges in some cases. */ |
| if (code != BIT_AND_EXPR |
| && code != BIT_IOR_EXPR |
| && code != TRUNC_DIV_EXPR |
| && code != FLOOR_DIV_EXPR |
| && code != CEIL_DIV_EXPR |
| && code != EXACT_DIV_EXPR |
| && code != ROUND_DIV_EXPR |
| && code != TRUNC_MOD_EXPR |
| && code != MIN_EXPR |
| && code != MAX_EXPR |
| && code != PLUS_EXPR |
| && code != MINUS_EXPR |
| && code != RSHIFT_EXPR |
| && (vr0.type == VR_VARYING |
| || vr1.type == VR_VARYING |
| || vr0.type != vr1.type |
| || symbolic_range_p (&vr0) |
| || symbolic_range_p (&vr1))) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| /* Now evaluate the expression to determine the new range. */ |
| if (POINTER_TYPE_P (expr_type)) |
| { |
| if (code == MIN_EXPR || code == MAX_EXPR) |
| { |
	  /* For MIN/MAX expressions with pointers, we only care about
	     nullness.  If both are non-NULL, then the result is
	     non-NULL.  If both are NULL, then the result is NULL.
	     Otherwise the result is varying.  */
| if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) |
| set_value_range_to_nonnull (vr, expr_type); |
| else if (range_is_null (&vr0) && range_is_null (&vr1)) |
| set_value_range_to_null (vr, expr_type); |
| else |
| set_value_range_to_varying (vr); |
| } |
| else if (code == POINTER_PLUS_EXPR) |
| { |
| /* For pointer types, we are really only interested in asserting |
| whether the expression evaluates to non-NULL. */ |
| if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1)) |
| set_value_range_to_nonnull (vr, expr_type); |
| else if (range_is_null (&vr0) && range_is_null (&vr1)) |
| set_value_range_to_null (vr, expr_type); |
| else |
| set_value_range_to_varying (vr); |
| } |
| else if (code == BIT_AND_EXPR) |
| { |
| /* For pointer types, we are really only interested in asserting |
| whether the expression evaluates to non-NULL. */ |
| if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) |
| set_value_range_to_nonnull (vr, expr_type); |
| else if (range_is_null (&vr0) || range_is_null (&vr1)) |
| set_value_range_to_null (vr, expr_type); |
| else |
| set_value_range_to_varying (vr); |
| } |
| else |
| set_value_range_to_varying (vr); |
| |
| return; |
| } |
| |
| /* For integer ranges, apply the operation to each end of the |
| range and see what we end up with. */ |
| if (code == PLUS_EXPR || code == MINUS_EXPR) |
| { |
| const bool minus_p = (code == MINUS_EXPR); |
| tree min_op0 = vr0.min; |
| tree min_op1 = minus_p ? vr1.max : vr1.min; |
| tree max_op0 = vr0.max; |
| tree max_op1 = minus_p ? vr1.min : vr1.max; |
| tree sym_min_op0 = NULL_TREE; |
| tree sym_min_op1 = NULL_TREE; |
| tree sym_max_op0 = NULL_TREE; |
| tree sym_max_op1 = NULL_TREE; |
| bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1; |
| |
| /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or |
| single-symbolic ranges, try to compute the precise resulting range, |
| but only if we know that this resulting range will also be constant |
| or single-symbolic. */ |
| if (vr0.type == VR_RANGE && vr1.type == VR_RANGE |
| && (TREE_CODE (min_op0) == INTEGER_CST |
| || (sym_min_op0 |
| = get_single_symbol (min_op0, &neg_min_op0, &min_op0))) |
| && (TREE_CODE (min_op1) == INTEGER_CST |
| || (sym_min_op1 |
| = get_single_symbol (min_op1, &neg_min_op1, &min_op1))) |
| && (!(sym_min_op0 && sym_min_op1) |
| || (sym_min_op0 == sym_min_op1 |
| && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1))) |
| && (TREE_CODE (max_op0) == INTEGER_CST |
| || (sym_max_op0 |
| = get_single_symbol (max_op0, &neg_max_op0, &max_op0))) |
| && (TREE_CODE (max_op1) == INTEGER_CST |
| || (sym_max_op1 |
| = get_single_symbol (max_op1, &neg_max_op1, &max_op1))) |
| && (!(sym_max_op0 && sym_max_op1) |
| || (sym_max_op0 == sym_max_op1 |
| && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1)))) |
| { |
| const signop sgn = TYPE_SIGN (expr_type); |
| const unsigned int prec = TYPE_PRECISION (expr_type); |
| wide_int type_min, type_max, wmin, wmax; |
| int min_ovf = 0; |
| int max_ovf = 0; |
| |
| /* Get the lower and upper bounds of the type. */ |
| if (TYPE_OVERFLOW_WRAPS (expr_type)) |
| { |
| type_min = wi::min_value (prec, sgn); |
| type_max = wi::max_value (prec, sgn); |
| } |
| else |
| { |
| type_min = wi::to_wide (vrp_val_min (expr_type)); |
| type_max = wi::to_wide (vrp_val_max (expr_type)); |
| } |
| |
| /* Combine the lower bounds, if any. */ |
| if (min_op0 && min_op1) |
| { |
| if (minus_p) |
| { |
| wmin = wi::to_wide (min_op0) - wi::to_wide (min_op1); |
| |
| /* Check for overflow. */ |
| if (wi::cmp (0, wi::to_wide (min_op1), sgn) |
| != wi::cmp (wmin, wi::to_wide (min_op0), sgn)) |
| min_ovf = wi::cmp (wi::to_wide (min_op0), |
| wi::to_wide (min_op1), sgn); |
| } |
| else |
| { |
| wmin = wi::to_wide (min_op0) + wi::to_wide (min_op1); |
| |
| /* Check for overflow. */ |
| if (wi::cmp (wi::to_wide (min_op1), 0, sgn) |
| != wi::cmp (wmin, wi::to_wide (min_op0), sgn)) |
| min_ovf = wi::cmp (wi::to_wide (min_op0), wmin, sgn); |
| } |
| } |
| else if (min_op0) |
| wmin = wi::to_wide (min_op0); |
| else if (min_op1) |
| { |
| if (minus_p) |
| { |
| wmin = -wi::to_wide (min_op1); |
| |
| /* Check for overflow. */ |
| if (sgn == SIGNED |
| && wi::neg_p (wi::to_wide (min_op1)) |
| && wi::neg_p (wmin)) |
| min_ovf = 1; |
| else if (sgn == UNSIGNED && wi::to_wide (min_op1) != 0) |
| min_ovf = -1; |
| } |
| else |
| wmin = wi::to_wide (min_op1); |
| } |
| else |
| wmin = wi::shwi (0, prec); |
| |
| /* Combine the upper bounds, if any. */ |
| if (max_op0 && max_op1) |
| { |
| if (minus_p) |
| { |
| wmax = wi::to_wide (max_op0) - wi::to_wide (max_op1); |
| |
| /* Check for overflow. */ |
| if (wi::cmp (0, wi::to_wide (max_op1), sgn) |
| != wi::cmp (wmax, wi::to_wide (max_op0), sgn)) |
| max_ovf = wi::cmp (wi::to_wide (max_op0), |
| wi::to_wide (max_op1), sgn); |
| } |
| else |
| { |
| wmax = wi::to_wide (max_op0) + wi::to_wide (max_op1); |
| |
| if (wi::cmp (wi::to_wide (max_op1), 0, sgn) |
| != wi::cmp (wmax, wi::to_wide (max_op0), sgn)) |
| max_ovf = wi::cmp (wi::to_wide (max_op0), wmax, sgn); |
| } |
| } |
| else if (max_op0) |
| wmax = wi::to_wide (max_op0); |
| else if (max_op1) |
| { |
| if (minus_p) |
| { |
| wmax = -wi::to_wide (max_op1); |
| |
| /* Check for overflow. */ |
| if (sgn == SIGNED |
| && wi::neg_p (wi::to_wide (max_op1)) |
| && wi::neg_p (wmax)) |
| max_ovf = 1; |
| else if (sgn == UNSIGNED && wi::to_wide (max_op1) != 0) |
| max_ovf = -1; |
| } |
| else |
| wmax = wi::to_wide (max_op1); |
| } |
| else |
| wmax = wi::shwi (0, prec); |
| |
| /* Check for type overflow. */ |
| if (min_ovf == 0) |
| { |
| if (wi::cmp (wmin, type_min, sgn) == -1) |
| min_ovf = -1; |
| else if (wi::cmp (wmin, type_max, sgn) == 1) |
| min_ovf = 1; |
| } |
| if (max_ovf == 0) |
| { |
| if (wi::cmp (wmax, type_min, sgn) == -1) |
| max_ovf = -1; |
| else if (wi::cmp (wmax, type_max, sgn) == 1) |
| max_ovf = 1; |
| } |
| |
| /* If we have overflow for the constant part and the resulting |
| range will be symbolic, drop to VR_VARYING. */ |
| if ((min_ovf && sym_min_op0 != sym_min_op1) |
| || (max_ovf && sym_max_op0 != sym_max_op1)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| if (TYPE_OVERFLOW_WRAPS (expr_type)) |
| { |
| /* If overflow wraps, truncate the values and adjust the |
| range kind and bounds appropriately. */ |
| wide_int tmin = wide_int::from (wmin, prec, sgn); |
| wide_int tmax = wide_int::from (wmax, prec, sgn); |
| if (min_ovf == max_ovf) |
| { |
| /* No overflow or both overflow or underflow. The |
| range kind stays VR_RANGE. */ |
| min = wide_int_to_tree (expr_type, tmin); |
| max = wide_int_to_tree (expr_type, tmax); |
| } |
| else if ((min_ovf == -1 && max_ovf == 0) |
| || (max_ovf == 1 && min_ovf == 0)) |
| { |
| /* Min underflow or max overflow. The range kind |
| changes to VR_ANTI_RANGE. */ |
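		  /* For example, for unsigned char [200, 250] + [10, 60]
		     computes [210, 310], which wraps and is represented
		     as the anti-range ~[55, 209].  */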
| bool covers = false; |
| wide_int tem = tmin; |
| type = VR_ANTI_RANGE; |
| tmin = tmax + 1; |
| if (wi::cmp (tmin, tmax, sgn) < 0) |
| covers = true; |
| tmax = tem - 1; |
| if (wi::cmp (tmax, tem, sgn) > 0) |
| covers = true; |
		  /* If the anti-range would cover nothing, drop to varying.
		     Likewise if the anti-range bounds are outside of the
		     type's values.  */
| if (covers || wi::cmp (tmin, tmax, sgn) > 0) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| min = wide_int_to_tree (expr_type, tmin); |
| max = wide_int_to_tree (expr_type, tmax); |
| } |
| else |
| { |
| /* Other underflow and/or overflow, drop to VR_VARYING. */ |
| set_value_range_to_varying (vr); |
| return; |
| } |
| } |
| else |
| { |
| /* If overflow does not wrap, saturate to the types min/max |
| value. */ |
| if (min_ovf == -1) |
| min = wide_int_to_tree (expr_type, type_min); |
| else if (min_ovf == 1) |
| min = wide_int_to_tree (expr_type, type_max); |
| else |
| min = wide_int_to_tree (expr_type, wmin); |
| |
| if (max_ovf == -1) |
| max = wide_int_to_tree (expr_type, type_min); |
| else if (max_ovf == 1) |
| max = wide_int_to_tree (expr_type, type_max); |
| else |
| max = wide_int_to_tree (expr_type, wmax); |
| } |
| |
| /* If the result lower bound is constant, we're done; |
| otherwise, build the symbolic lower bound. */ |
| if (sym_min_op0 == sym_min_op1) |
| ; |
| else if (sym_min_op0) |
| min = build_symbolic_expr (expr_type, sym_min_op0, |
| neg_min_op0, min); |
| else if (sym_min_op1) |
| { |
| /* We may not negate if that might introduce |
| undefined overflow. */ |
| if (! minus_p |
| || neg_min_op1 |
| || TYPE_OVERFLOW_WRAPS (expr_type)) |
| min = build_symbolic_expr (expr_type, sym_min_op1, |
| neg_min_op1 ^ minus_p, min); |
| else |
| min = NULL_TREE; |
| } |
| |
| /* Likewise for the upper bound. */ |
| if (sym_max_op0 == sym_max_op1) |
| ; |
| else if (sym_max_op0) |
| max = build_symbolic_expr (expr_type, sym_max_op0, |
| neg_max_op0, max); |
| else if (sym_max_op1) |
| { |
| /* We may not negate if that might introduce |
| undefined overflow. */ |
| if (! minus_p |
| || neg_max_op1 |
| || TYPE_OVERFLOW_WRAPS (expr_type)) |
| max = build_symbolic_expr (expr_type, sym_max_op1, |
| neg_max_op1 ^ minus_p, max); |
| else |
| max = NULL_TREE; |
| } |
| } |
| else |
| { |
| /* For other cases, for example if we have a PLUS_EXPR with two |
| VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort |
| to compute a precise range for such a case. |
	 ??? General even mixed range kind operations can be expressed
	 by for example transforming ~[3, 5] + [1, 2] to range-only
	 operations and a union primitive:
	   [-INF, 2] + [1, 2]  U  [6, +INF] + [1, 2]
	 = [-INF+1, 4]  U  [7, +INF(OVF)]
	 though usually the union is not exactly representable with
	 a single range or anti-range as the above is
	 [-INF+1, +INF(OVF)] intersected with ~[5, 6]
	 but one could use a scheme similar to equivalences for this.  */
| set_value_range_to_varying (vr); |
| return; |
| } |
| } |
| else if (code == MIN_EXPR |
| || code == MAX_EXPR) |
| { |
| if (vr0.type == VR_RANGE |
| && !symbolic_range_p (&vr0)) |
| { |
| type = VR_RANGE; |
| if (vr1.type == VR_RANGE |
| && !symbolic_range_p (&vr1)) |
| { |
| /* For operations that make the resulting range directly |
| proportional to the original ranges, apply the operation to |
| the same end of each range. */ |
| min = int_const_binop (code, vr0.min, vr1.min); |
| max = int_const_binop (code, vr0.max, vr1.max); |
| } |
| else if (code == MIN_EXPR) |
| { |
| min = vrp_val_min (expr_type); |
| max = vr0.max; |
| } |
| else if (code == MAX_EXPR) |
| { |
| min = vr0.min; |
| max = vrp_val_max (expr_type); |
| } |
| } |
| else if (vr1.type == VR_RANGE |
| && !symbolic_range_p (&vr1)) |
| { |
| type = VR_RANGE; |
| if (code == MIN_EXPR) |
| { |
| min = vrp_val_min (expr_type); |
| max = vr1.max; |
| } |
| else if (code == MAX_EXPR) |
| { |
| min = vr1.min; |
| max = vrp_val_max (expr_type); |
| } |
| } |
| else |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| } |
| else if (code == MULT_EXPR) |
| { |
| /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not |
| drop to varying. This test requires 2*prec bits if both |
| operands are signed and 2*prec + 2 bits if either is not. */ |
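      /* E.g. for unsigned char, [0xfd, 0xff] * [0xfd, 0xff] is handled
	 as [-3, -1] * [-3, -1] and yields the wrapped range [1, 9].  */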
| |
| signop sign = TYPE_SIGN (expr_type); |
| unsigned int prec = TYPE_PRECISION (expr_type); |
| |
| if (!range_int_cst_p (&vr0) |
| || !range_int_cst_p (&vr1)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| if (TYPE_OVERFLOW_WRAPS (expr_type)) |
| { |
| typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int; |
| typedef generic_wide_int |
| <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst; |
| vrp_int sizem1 = wi::mask <vrp_int> (prec, false); |
| vrp_int size = sizem1 + 1; |
| |
	  /* Extend the values using the sign of the result to PREC2.
	     From here on out, everything is just signed math no matter
	     what the input types were.  */
| vrp_int min0 = vrp_int_cst (vr0.min); |
| vrp_int max0 = vrp_int_cst (vr0.max); |
| vrp_int min1 = vrp_int_cst (vr1.min); |
| vrp_int max1 = vrp_int_cst (vr1.max); |
| /* Canonicalize the intervals. */ |
| if (sign == UNSIGNED) |
| { |
| if (wi::ltu_p (size, min0 + max0)) |
| { |
| min0 -= size; |
| max0 -= size; |
| } |
| |
| if (wi::ltu_p (size, min1 + max1)) |
| { |
| min1 -= size; |
| max1 -= size; |
| } |
| } |
| |
| vrp_int prod0 = min0 * min1; |
| vrp_int prod1 = min0 * max1; |
| vrp_int prod2 = max0 * min1; |
| vrp_int prod3 = max0 * max1; |
| |
| /* Sort the 4 products so that min is in prod0 and max is in |
| prod3. */ |
| /* min0min1 > max0max1 */ |
| if (prod0 > prod3) |
| std::swap (prod0, prod3); |
| |
| /* min0max1 > max0min1 */ |
| if (prod1 > prod2) |
| std::swap (prod1, prod2); |
| |
| if (prod0 > prod1) |
| std::swap (prod0, prod1); |
| |
| if (prod2 > prod3) |
| std::swap (prod2, prod3); |
| |
| /* diff = max - min. */ |
| prod2 = prod3 - prod0; |
| if (wi::geu_p (prod2, sizem1)) |
| { |
	      /* The range covers all values.  */
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| /* The following should handle the wrapping and selecting |
| VR_ANTI_RANGE for us. */ |
| min = wide_int_to_tree (expr_type, prod0); |
| max = wide_int_to_tree (expr_type, prod3); |
| set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); |
| return; |
| } |
| |
| /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs, |
| drop to VR_VARYING. It would take more effort to compute a |
| precise range for such a case. For example, if we have |
| op0 == 65536 and op1 == 65536 with their ranges both being |
| ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so |
| we cannot claim that the product is in ~[0,0]. Note that we |
| are guaranteed to have vr0.type == vr1.type at this |
| point. */ |
| if (vr0.type == VR_ANTI_RANGE |
| && !TYPE_OVERFLOW_UNDEFINED (expr_type)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |
| return; |
| } |
| else if (code == RSHIFT_EXPR |
| || code == LSHIFT_EXPR) |
| { |
| /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1], |
| then drop to VR_VARYING. Outside of this range we get undefined |
| behavior from the shift operation. We cannot even trust |
| SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl |
| shifts, and the operation at the tree level may be widened. */ |
| if (range_int_cst_p (&vr1) |
| && compare_tree_int (vr1.min, 0) >= 0 |
| && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1) |
| { |
| if (code == RSHIFT_EXPR) |
| { |
| /* Even if vr0 is VARYING or otherwise not usable, we can derive |
| useful ranges just from the shift count. E.g. |
| x >> 63 for signed 64-bit x is always [-1, 0]. */ |
| if (vr0.type != VR_RANGE || symbolic_range_p (&vr0)) |
| { |
| vr0.type = type = VR_RANGE; |
| vr0.min = vrp_val_min (expr_type); |
| vr0.max = vrp_val_max (expr_type); |
| } |
| extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |
| return; |
| } |
| /* We can map lshifts by constants to MULT_EXPR handling. */ |
| else if (code == LSHIFT_EXPR |
| && range_int_cst_singleton_p (&vr1)) |
| { |
| bool saved_flag_wrapv; |
| value_range vr1p = VR_INITIALIZER; |
| vr1p.type = VR_RANGE; |
| vr1p.min = (wide_int_to_tree |
| (expr_type, |
| wi::set_bit_in_zero (tree_to_shwi (vr1.min), |
| TYPE_PRECISION (expr_type)))); |
| vr1p.max = vr1p.min; |
| /* We have to use a wrapping multiply though as signed overflow |
| on lshifts is implementation defined in C89. */ |
| saved_flag_wrapv = flag_wrapv; |
| flag_wrapv = 1; |
| extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type, |
| &vr0, &vr1p); |
| flag_wrapv = saved_flag_wrapv; |
| return; |
| } |
| else if (code == LSHIFT_EXPR |
| && range_int_cst_p (&vr0)) |
| { |
| int prec = TYPE_PRECISION (expr_type); |
| int overflow_pos = prec; |
| int bound_shift; |
| wide_int low_bound, high_bound; |
| bool uns = TYPE_UNSIGNED (expr_type); |
| bool in_bounds = false; |
| |
| if (!uns) |
| overflow_pos -= 1; |
| |
| bound_shift = overflow_pos - tree_to_shwi (vr1.max); |
	      /* If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
| overflow. However, for that to happen, vr1.max needs to be |
| zero, which means vr1 is a singleton range of zero, which |
| means it should be handled by the previous LSHIFT_EXPR |
| if-clause. */ |
| wide_int bound = wi::set_bit_in_zero (bound_shift, prec); |
| wide_int complement = ~(bound - 1); |
| |
| if (uns) |
| { |
| low_bound = bound; |
| high_bound = complement; |
| if (wi::ltu_p (wi::to_wide (vr0.max), low_bound)) |
| { |
| /* [5, 6] << [1, 2] == [10, 24]. */ |
| /* We're shifting out only zeroes, the value increases |
| monotonically. */ |
| in_bounds = true; |
| } |
| else if (wi::ltu_p (high_bound, wi::to_wide (vr0.min))) |
| { |
| /* [0xffffff00, 0xffffffff] << [1, 2] |
| == [0xfffffc00, 0xfffffffe]. */ |
| /* We're shifting out only ones, the value decreases |
| monotonically. */ |
| in_bounds = true; |
| } |
| } |
| else |
| { |
| /* [-1, 1] << [1, 2] == [-4, 4]. */ |
| low_bound = complement; |
| high_bound = bound; |
| if (wi::lts_p (wi::to_wide (vr0.max), high_bound) |
| && wi::lts_p (low_bound, wi::to_wide (vr0.min))) |
| { |
| /* For non-negative numbers, we're shifting out only |
| zeroes, the value increases monotonically. |
| For negative numbers, we're shifting out only ones, the |
		     value decreases monotonically.  */
| in_bounds = true; |
| } |
| } |
| |
| if (in_bounds) |
| { |
| extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |
| return; |
| } |
| } |
| } |
| set_value_range_to_varying (vr); |
| return; |
| } |
| else if (code == TRUNC_DIV_EXPR |
| || code == FLOOR_DIV_EXPR |
| || code == CEIL_DIV_EXPR |
| || code == EXACT_DIV_EXPR |
| || code == ROUND_DIV_EXPR) |
| { |
| if (vr0.type != VR_RANGE || symbolic_range_p (&vr0)) |
| { |
	  /* For division, if op1 has VR_RANGE but op0 does not, something
	     can be deduced just from that range.  Say [min, max] / [4, max]
	     gives the range [min / 4, max / 4].  */
| if (vr1.type == VR_RANGE |
| && !symbolic_range_p (&vr1) |
| && range_includes_zero_p (vr1.min, vr1.max) == 0) |
| { |
| vr0.type = type = VR_RANGE; |
| vr0.min = vrp_val_min (expr_type); |
| vr0.max = vrp_val_max (expr_type); |
| } |
| else |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| } |
| |
| /* For divisions, if the function can throw non-call exceptions |
| (flag_non_call_exceptions), we must not eliminate a division |
| by zero. */ |
| if (cfun->can_throw_non_call_exceptions |
| && (vr1.type != VR_RANGE |
| || range_includes_zero_p (vr1.min, vr1.max) != 0)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| /* For divisions, if op0 is VR_RANGE, we can deduce a range |
| even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can |
| include 0. */ |
| if (vr0.type == VR_RANGE |
| && (vr1.type != VR_RANGE |
| || range_includes_zero_p (vr1.min, vr1.max) != 0)) |
| { |
| tree zero = build_int_cst (TREE_TYPE (vr0.min), 0); |
| int cmp; |
| |
| min = NULL_TREE; |
| max = NULL_TREE; |
| if (TYPE_UNSIGNED (expr_type) |
| || value_range_nonnegative_p (&vr1)) |
| { |
| /* For unsigned division or when divisor is known |
| to be non-negative, the range has to cover |
| all numbers from 0 to max for positive max |
| and all numbers from min to 0 for negative min. */ |
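| /* E.g. for unsigned [8, 16] / y with y otherwise varying, the |
| result is [0, 16]: the quotient cannot exceed the dividend |
| and cannot be negative. */ |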
| cmp = compare_values (vr0.max, zero); |
| if (cmp == -1) |
| { |
| /* When vr0.max < 0, vr1.min != 0 and the value ranges for |
| dividend and divisor are available, compute vr0.max / vr1.min. */ |
| if (vr1.type == VR_RANGE |
| && !symbolic_range_p (&vr0) |
| && !symbolic_range_p (&vr1) |
| && compare_values (vr1.min, zero) != 0) |
| max = int_const_binop (code, vr0.max, vr1.min); |
| else |
| max = zero; |
| } |
| else if (cmp == 0 || cmp == 1) |
| max = vr0.max; |
| else |
| type = VR_VARYING; |
| cmp = compare_values (vr0.min, zero); |
| if (cmp == 1) |
| { |
| /* When the value ranges for dividend and divisor are available |
| and vr1.max != 0, compute vr0.min / vr1.max. */ |
| if (vr1.type == VR_RANGE |
| && !symbolic_range_p (&vr0) |
| && !symbolic_range_p (&vr1) |
| && compare_values (vr1.max, zero) != 0) |
| min = int_const_binop (code, vr0.min, vr1.max); |
| else |
| min = zero; |
| } |
| else if (cmp == 0 || cmp == -1) |
| min = vr0.min; |
| else |
| type = VR_VARYING; |
| } |
| else |
| { |
| /* Otherwise the range is -max .. max or min .. -min |
| depending on which bound is bigger in absolute value, |
| as the division can change the sign. */ |
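| /* E.g. signed [-4, 9] divided by a range that may contain 0 or |
| negative values can yield anything in [-9, 9]. */ |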
| abs_extent_range (vr, vr0.min, vr0.max); |
| return; |
| } |
| if (type == VR_VARYING) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| } |
| else if (!symbolic_range_p (&vr0) && !symbolic_range_p (&vr1)) |
| { |
| extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |
| return; |
| } |
| } |
| else if (code == TRUNC_MOD_EXPR) |
| { |
| if (range_is_null (&vr1)) |
| { |
| set_value_range_to_undefined (vr); |
| return; |
| } |
| /* ABS (A % B) < ABS (B) and either |
| 0 <= A % B <= A or A <= A % B <= 0. */ |
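| /* E.g. for signed A % [3, 10], wmax == 9 below, and before |
| refinement by vr0's bounds the result range is [-9, 9]. */ |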
| type = VR_RANGE; |
| signop sgn = TYPE_SIGN (expr_type); |
| unsigned int prec = TYPE_PRECISION (expr_type); |
| wide_int wmin, wmax, tmp; |
| if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1)) |
| { |
| wmax = wi::to_wide (vr1.max) - 1; |
| if (sgn == SIGNED) |
| { |
| tmp = -1 - wi::to_wide (vr1.min); |
| wmax = wi::smax (wmax, tmp); |
| } |
| } |
| else |
| { |
| wmax = wi::max_value (prec, sgn); |
| /* For signed X, X % INT_MIN may be INT_MAX (so the bound must |
| stay at the type maximum), while unsigned X % Y is always less |
| than Y <= UINT_MAX, so subtract one. */ |
| if (sgn == UNSIGNED) |
| wmax = wmax - 1; |
| } |
| |
| if (sgn == UNSIGNED) |
| wmin = wi::zero (prec); |
| else |
| { |
| wmin = -wmax; |
| if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST) |
| { |
| tmp = wi::to_wide (vr0.min); |
| if (wi::gts_p (tmp, 0)) |
| tmp = wi::zero (prec); |
| wmin = wi::smax (wmin, tmp); |
| } |
| } |
| |
| if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST) |
| { |
| tmp = wi::to_wide (vr0.max); |
| if (sgn == SIGNED && wi::neg_p (tmp)) |
| tmp = wi::zero (prec); |
| wmax = wi::min (wmax, tmp, sgn); |
| } |
| |
| min = wide_int_to_tree (expr_type, wmin); |
| max = wide_int_to_tree (expr_type, wmax); |
| } |
| else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR) |
| { |
| bool int_cst_range0, int_cst_range1; |
| wide_int may_be_nonzero0, may_be_nonzero1; |
| wide_int must_be_nonzero0, must_be_nonzero1; |
| |
| int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0, |
| &may_be_nonzero0, |
| &must_be_nonzero0); |
| int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1, |
| &may_be_nonzero1, |
| &must_be_nonzero1); |
| |
| if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR) |
| { |
| value_range *vr0p = NULL, *vr1p = NULL; |
| if (range_int_cst_singleton_p (&vr1)) |
| { |
| vr0p = &vr0; |
| vr1p = &vr1; |
| } |
| else if (range_int_cst_singleton_p (&vr0)) |
| { |
| vr0p = &vr1; |
| vr1p = &vr0; |
| } |
| /* For op & or | attempt to optimize: |
| [x, y] op z into [x op z, y op z] |
| if z is a constant which (for op | its bitwise not) has n |
| consecutive least significant bits cleared followed by m |
| consecutive 1 bits set immediately above it and either |
| m + n == precision, or (x >> (m + n)) == (y >> (m + n)). |
| The least significant n bits of all the values in the range are |
| cleared or set, the m bits above it are preserved and any bits |
| above these are required to be the same for all values in the |
| range. */ |
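| /* E.g. for [0x10, 0x1f] | 3 in a 32-bit type: n == 2 and |
| m == 30, so the mask is 0 and the result is [0x13, 0x1f]. */ |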
| if (vr0p && range_int_cst_p (vr0p)) |
| { |
| wide_int w = wi::to_wide (vr1p->min); |
| int m = 0, n = 0; |
| if (code == BIT_IOR_EXPR) |
| w = ~w; |
| if (wi::eq_p (w, 0)) |
| n = TYPE_PRECISION (expr_type); |
| else |
| { |
| n = wi::ctz (w); |
| w = ~(w | wi::mask (n, false, w.get_precision ())); |
| if (wi::eq_p (w, 0)) |
| m = TYPE_PRECISION (expr_type) - n; |
| else |
| m = wi::ctz (w) - n; |
| } |
| wide_int mask = wi::mask (m + n, true, w.get_precision ()); |
| if ((mask & wi::to_wide (vr0p->min)) |
| == (mask & wi::to_wide (vr0p->max))) |
| { |
| min = int_const_binop (code, vr0p->min, vr1p->min); |
| max = int_const_binop (code, vr0p->max, vr1p->min); |
| } |
| } |
| } |
| |
| type = VR_RANGE; |
| if (min && max) |
| /* Optimized above already. */; |
| else if (code == BIT_AND_EXPR) |
| { |
| min = wide_int_to_tree (expr_type, |
| must_be_nonzero0 & must_be_nonzero1); |
| wide_int wmax = may_be_nonzero0 & may_be_nonzero1; |
| /* If both input ranges contain only negative values we can |
| truncate the result range maximum to the minimum of the |
| input range maxima. */ |
| if (int_cst_range0 && int_cst_range1 |
| && tree_int_cst_sgn (vr0.max) < 0 |
| && tree_int_cst_sgn (vr1.max) < 0) |
| { |
| wmax = wi::min (wmax, wi::to_wide (vr0.max), |
| TYPE_SIGN (expr_type)); |
| wmax = wi::min (wmax, wi::to_wide (vr1.max), |
| TYPE_SIGN (expr_type)); |
| } |
| /* If either input range contains only non-negative values |
| we can truncate the result range maximum to the respective |
| maximum of the input range. */ |
| if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0) |
| wmax = wi::min (wmax, wi::to_wide (vr0.max), |
| TYPE_SIGN (expr_type)); |
| if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0) |
| wmax = wi::min (wmax, wi::to_wide (vr1.max), |
| TYPE_SIGN (expr_type)); |
| max = wide_int_to_tree (expr_type, wmax); |
| cmp = compare_values (min, max); |
| /* PR68217: A signed value ANDed with a constant equal to the |
| sign bit should result in [-INF, 0] instead of [-INF, +INF]. */ |
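| /* E.g. for signed char x, x & -128 is either -128 or 0; the bit |
| computation above yields min == 0 and max == -128, which this |
| clause fixes up to [-128, 0]. */ |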
| if (cmp == -2 || cmp == 1) |
| { |
| wide_int sign_bit |
| = wi::set_bit_in_zero (TYPE_PRECISION (expr_type) - 1, |
| TYPE_PRECISION (expr_type)); |
| if (!TYPE_UNSIGNED (expr_type) |
| && ((int_cst_range0 |
| && value_range_constant_singleton (&vr0) |
| && !wi::cmps (wi::to_wide (vr0.min), sign_bit)) |
| || (int_cst_range1 |
| && value_range_constant_singleton (&vr1) |
| && !wi::cmps (wi::to_wide (vr1.min), sign_bit)))) |
| { |
| min = TYPE_MIN_VALUE (expr_type); |
| max = build_int_cst (expr_type, 0); |
| } |
| } |
| } |
| else if (code == BIT_IOR_EXPR) |
| { |
| max = wide_int_to_tree (expr_type, |
| may_be_nonzero0 | may_be_nonzero1); |
| wide_int wmin = must_be_nonzero0 | must_be_nonzero1; |
| /* If the input ranges contain only non-negative values we can |
| truncate the minimum of the result range to the maximum |
| of the input range minima. */ |
| if (int_cst_range0 && int_cst_range1 |
| && tree_int_cst_sgn (vr0.min) >= 0 |
| && tree_int_cst_sgn (vr1.min) >= 0) |
| { |
| wmin = wi::max (wmin, wi::to_wide (vr0.min), |
| TYPE_SIGN (expr_type)); |
| wmin = wi::max (wmin, wi::to_wide (vr1.min), |
| TYPE_SIGN (expr_type)); |
| } |
| /* If either input range contains only negative values |
| we can truncate the minimum of the result range to the |
| respective minimum of the input range. */ |
| if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0) |
| wmin = wi::max (wmin, wi::to_wide (vr0.min), |
| TYPE_SIGN (expr_type)); |
| if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0) |
| wmin = wi::max (wmin, wi::to_wide (vr1.min), |
| TYPE_SIGN (expr_type)); |
| min = wide_int_to_tree (expr_type, wmin); |
| } |
| else if (code == BIT_XOR_EXPR) |
| { |
| wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1) |
| | ~(may_be_nonzero0 | may_be_nonzero1)); |
| wide_int result_one_bits |
| = (wi::bit_and_not (must_be_nonzero0, may_be_nonzero1) |
| | wi::bit_and_not (must_be_nonzero1, may_be_nonzero0)); |
| max = wide_int_to_tree (expr_type, ~result_zero_bits); |
| min = wide_int_to_tree (expr_type, result_one_bits); |
| /* If the result range has only negative or only non-negative |
| values, i.e. the sign bit is known, the result is better than |
| VARYING. */ |
| if (tree_int_cst_sgn (min) < 0 |
| || tree_int_cst_sgn (max) >= 0) |
| ; |
| else |
| max = min = NULL_TREE; |
| } |
| } |
| else |
| gcc_unreachable (); |
| |
| /* If either MIN or MAX overflowed, then set the resulting range to |
| VARYING. */ |
| if (min == NULL_TREE |
| || TREE_OVERFLOW_P (min) |
| || max == NULL_TREE |
| || TREE_OVERFLOW_P (max)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| /* We punt for [-INF, +INF]. |
| We learn nothing when we have INF on both sides. |
| Note that we do accept [-INF, -INF] and [+INF, +INF]. */ |
| if (vrp_val_is_min (min) && vrp_val_is_max (max)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| cmp = compare_values (min, max); |
| if (cmp == -2 || cmp == 1) |
| { |
| /* If the new range has its limits swapped around (MIN > MAX), |
| then the operation caused one of them to wrap around; mark |
| the new range VARYING. */ |
| set_value_range_to_varying (vr); |
| } |
| else |
| set_value_range (vr, type, min, max, NULL); |
| } |
| |
| /* Extract range information from a unary operation CODE based on |
| the range of its operand *VR0 of type OP0_TYPE, with resulting type TYPE. |
| The resulting range is stored in *VR. */ |
| |
| void |
| extract_range_from_unary_expr (value_range *vr, |
| enum tree_code code, tree type, |
| value_range *vr0_, tree op0_type) |
| { |
| value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER; |
| |
| /* VRP only operates on integral and pointer types. */ |
| if (!(INTEGRAL_TYPE_P (op0_type) |
| || POINTER_TYPE_P (op0_type)) |
| || !(INTEGRAL_TYPE_P (type) |
| || POINTER_TYPE_P (type))) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| /* If VR0 is UNDEFINED, so is the result. */ |
| if (vr0.type == VR_UNDEFINED) |
| { |
| set_value_range_to_undefined (vr); |
| return; |
| } |
| |
| /* Handle operations that we express in terms of others. */ |
| if (code == PAREN_EXPR || code == OBJ_TYPE_REF) |
| { |
| /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */ |
| copy_value_range (vr, &vr0); |
| return; |
| } |
| else if (code == NEGATE_EXPR) |
| { |
| /* -X is simply 0 - X, so re-use existing code that also handles |
| anti-ranges fine. */ |
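| /* E.g. -[3, 7] is computed as 0 - [3, 7] == [-7, -3]. */ |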
| value_range zero = VR_INITIALIZER; |
| set_value_range_to_value (&zero, build_int_cst (type, 0), NULL); |
| extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0); |
| return; |
| } |
| else if (code == BIT_NOT_EXPR) |
| { |
| /* ~X is simply -1 - X, so re-use existing code that also handles |
| anti-ranges fine. */ |
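| /* E.g. ~[0, 5] is computed as -1 - [0, 5] == [-6, -1]. */ |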
| value_range minusone = VR_INITIALIZER; |
| set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL); |
| extract_range_from_binary_expr_1 (vr, MINUS_EXPR, |
| type, &minusone, &vr0); |
| return; |
| } |
| |
| /* Now canonicalize anti-ranges to ranges when they are not symbolic |
| and express op ~[] as (op []') U (op []''). */ |
| if (vr0.type == VR_ANTI_RANGE |
| && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) |
| { |
| extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type); |
| if (vrtem1.type != VR_UNDEFINED) |
| { |
| value_range vrres = VR_INITIALIZER; |
| extract_range_from_unary_expr (&vrres, code, type, |
| &vrtem1, op0_type); |
| vrp_meet (vr, &vrres); |
| } |
| return; |
| } |
| |
| if (CONVERT_EXPR_CODE_P (code)) |
| { |
| tree inner_type = op0_type; |
| tree outer_type = type; |
| |
| /* If the expression evaluates to a pointer, we are only interested in |
| determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */ |
| if (POINTER_TYPE_P (type)) |
| { |
| if (range_is_nonnull (&vr0)) |
| set_value_range_to_nonnull (vr, type); |
| else if (range_is_null (&vr0)) |
| set_value_range_to_null (vr, type); |
| else |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| /* If VR0 is varying and we increase the type precision, assume |
| a full range for the following transformation. */ |
| if (vr0.type == VR_VARYING |
| && INTEGRAL_TYPE_P (inner_type) |
| && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type)) |
| { |
| vr0.type = VR_RANGE; |
| vr0.min = TYPE_MIN_VALUE (inner_type); |
| vr0.max = TYPE_MAX_VALUE (inner_type); |
| } |
| |
| /* If VR0 is a constant range or anti-range and the conversion is |
| not truncating we can convert the min and max values and |
| canonicalize the resulting range. Otherwise we can do the |
| conversion if the size of the range is less than what the |
| precision of the target type can represent and the range is |
| not an anti-range. */ |
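| /* E.g. truncating the int range [250, 260] to unsigned char has |
| a span (10) below 2^8, so min/max wrap to 250 and 4 and the |
| result canonicalizes to the anti-range ~[5, 249]. */ |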
| if ((vr0.type == VR_RANGE |
| || vr0.type == VR_ANTI_RANGE) |
| && TREE_CODE (vr0.min) == INTEGER_CST |
| && TREE_CODE (vr0.max) == INTEGER_CST |
| && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type) |
| || (vr0.type == VR_RANGE |
| && integer_zerop (int_const_binop (RSHIFT_EXPR, |
| int_const_binop (MINUS_EXPR, vr0.max, vr0.min), |
| size_int (TYPE_PRECISION (outer_type))))))) |
| { |
| tree new_min, new_max; |
| new_min = force_fit_type (outer_type, wi::to_widest (vr0.min), |
| 0, false); |
| new_max = force_fit_type (outer_type, wi::to_widest (vr0.max), |
| 0, false); |
| set_and_canonicalize_value_range (vr, vr0.type, |
| new_min, new_max, NULL); |
| return; |
| } |
| |
| set_value_range_to_varying (vr); |
| return; |
| } |
| else if (code == ABS_EXPR) |
| { |
| tree min, max; |
| int cmp; |
| |
| /* Pass through vr0 in the easy cases. */ |
| if (TYPE_UNSIGNED (type) |
| || value_range_nonnegative_p (&vr0)) |
| { |
| copy_value_range (vr, &vr0); |
| return; |
| } |
| |
| /* For the remaining varying or symbolic ranges we can't do anything |
| useful. */ |
| if (vr0.type == VR_VARYING |
| || symbolic_range_p (&vr0)) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| /* -TYPE_MIN_VALUE == TYPE_MIN_VALUE with flag_wrapv, so we can't |
| get a useful range. */ |
| if (!TYPE_OVERFLOW_UNDEFINED (type) |
| && ((vr0.type == VR_RANGE |
| && vrp_val_is_min (vr0.min)) |
| || (vr0.type == VR_ANTI_RANGE |
| && !vrp_val_is_min (vr0.min)))) |
| { |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| /* ABS_EXPR may flip the range around, if the original range |
| included negative values. */ |
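| /* E.g. for [-7, -3] this computes min == 7 and max == 3, which |
| is swapped back into [3, 7] further below. */ |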
| if (!vrp_val_is_min (vr0.min)) |
| min = fold_unary_to_constant (code, type, vr0.min); |
| else |
| min = TYPE_MAX_VALUE (type); |
| |
| if (!vrp_val_is_min (vr0.max)) |
| max = fold_unary_to_constant (code, type, vr0.max); |
| else |
| max = TYPE_MAX_VALUE (type); |
| |
| cmp = compare_values (min, max); |
| |
| /* If a VR_ANTI_RANGE contains zero, then we have |
| ~[-INF, min(MIN, MAX)]. */ |
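| /* E.g. with undefined signed overflow, ABS of ~[-2, 5] becomes |
| ~[-INF, 2]: every value outside [-2, 5] has an absolute value |
| of at least 3. */ |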
| if (vr0.type == VR_ANTI_RANGE) |
| { |
| if (range_includes_zero_p (vr0.min, vr0.max) == 1) |
| { |
| /* Take the lower of the two values. */ |
| if (cmp != 1) |
| max = min; |
| |
| /* Create ~[-INF, min (abs(MIN), abs(MAX))], or |
| ~[-INF + 1, min (abs(MIN), abs(MAX))] when flag_wrapv is set |
| and the original anti-range doesn't include TYPE_MIN_VALUE; |
| remember that -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */ |
| if (TYPE_OVERFLOW_WRAPS (type)) |
| { |
| tree type_min_value = TYPE_MIN_VALUE (type); |
| |
| min = (vr0.min != type_min_value |
| ? int_const_binop (PLUS_EXPR, type_min_value, |
| build_int_cst (TREE_TYPE (type_min_value), 1)) |
| : type_min_value); |
| } |
| else |
| min = TYPE_MIN_VALUE (type); |
| } |
| else |
| { |
| /* All else has failed, so create the range [0, INF], even for |
| flag_wrapv since TYPE_MIN_VALUE is in the original |
| anti-range. */ |
| vr0.type = VR_RANGE; |
| min = build_int_cst (type, 0); |
| max = TYPE_MAX_VALUE (type); |
| } |
| } |
| |
| /* If the range contains zero then we know that the minimum value in the |
| range will be zero. */ |
| else if (range_includes_zero_p (vr0.min, vr0.max) == 1) |
| { |
| if (cmp == 1) |
| max = min; |
| min = build_int_cst (type, 0); |
| } |
| else |
| { |
| /* If the range was reversed, swap MIN and MAX. */ |
| if (cmp == 1) |
| std::swap (min, max); |
| } |
| |
| cmp = compare_values (min, max); |
| if (cmp == -2 || cmp == 1) |
| { |
| /* If the new range has its limits swapped around (MIN > MAX), |
| then the operation caused one of them to wrap around; mark |
| the new range VARYING. */ |
| set_value_range_to_varying (vr); |
| } |
| else |
| set_value_range (vr, vr0.type, min, max, NULL); |
| return; |
| } |
| |
| /* For unhandled operations fall back to varying. */ |
| set_value_range_to_varying (vr); |
| return; |
| } |
| |
| /* Debugging dumps. */ |
| |
| void dump_value_range (FILE *, const value_range *); |
| void debug_value_range (value_range *); |
| void dump_all_value_ranges (FILE *); |
| void dump_vr_equiv (FILE *, bitmap); |
| void debug_vr_equiv (bitmap); |
| |
| |
| /* Dump value range VR to FILE. */ |
| |
| void |
| dump_value_range (FILE *file, const value_range *vr) |
| { |
| if (vr == NULL) |
| fprintf (file, "[]"); |
| else if (vr->type == VR_UNDEFINED) |
| fprintf (file, "UNDEFINED"); |
| else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) |
| { |
| tree type = TREE_TYPE (vr->min); |
| |
| fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : ""); |
| |
| if (INTEGRAL_TYPE_P (type) |
| && !TYPE_UNSIGNED (type) |
| && vrp_val_is_min (vr->min)) |
| fprintf (file, "-INF"); |
| else |
| print_generic_expr (file, vr->min); |
| |
| fprintf (file, ", "); |
| |
| if (INTEGRAL_TYPE_P (type) |
| && vrp_val_is_max (vr->max)) |
| fprintf (file, "+INF"); |
| else |
| print_generic_expr (file, vr->max); |
| |
| fprintf (file, "]"); |
| |
| if (vr->equiv) |
| { |
| bitmap_iterator bi; |
| unsigned i, c = 0; |
| |
| fprintf (file, " EQUIVALENCES: { "); |
| |
| EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi) |
| { |
| print_generic_expr (file, ssa_name (i)); |
| fprintf (file, " "); |
| c++; |
| } |
| |
| fprintf (file, "} (%u elements)", c); |
| } |
| } |
| else if (vr->type == VR_VARYING) |
| fprintf (file, "VARYING"); |
| else |
| fprintf (file, "INVALID RANGE"); |
| } |
| |
| |
| /* Dump value range VR to stderr. */ |
| |
| DEBUG_FUNCTION void |
| debug_value_range (value_range *vr) |
| { |
| dump_value_range (stderr, vr); |
| fprintf (stderr, "\n"); |
| } |
| |
| |
| /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V, |
| create a new SSA name N and return the assertion assignment |
| 'N = ASSERT_EXPR <V, V OP W>'. */ |
| |
| static gimple * |
| build_assert_expr_for (tree cond, tree v) |
| { |
| tree a; |
| gassign *assertion; |
| |
| gcc_assert (TREE_CODE (v) == SSA_NAME |
| && COMPARISON_CLASS_P (cond)); |
| |
| a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond); |
| assertion = gimple_build_assign (NULL_TREE, a); |
| |
| /* The new ASSERT_EXPR creates a new SSA name that replaces the |
| operand of the ASSERT_EXPR. Create it so the new name and the old one |
| are registered in the replacement table so that we can fix the SSA web |
| after adding all the ASSERT_EXPRs. */ |
| tree new_def = create_new_def_for (v, assertion, NULL); |
| /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain |
| given we have to be able to fully propagate those out to re-create |
| valid SSA when removing the asserts. */ |
| if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v)) |
| SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1; |
| |
| return assertion; |
| } |
| |
| |
| /* Return true if STMT is a conditional jump whose comparison |
| involves floating point values. */ |
| |
| static inline bool |
| fp_predicate (gimple *stmt) |
| { |
| GIMPLE_CHECK (stmt, GIMPLE_COND); |
| |
| return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt))); |
| } |
| |
| /* If the range of values taken by OP can be inferred after STMT executes, |
| return the comparison code (COMP_CODE_P) and value (VAL_P) that |
| describe the inferred range. Return true if a range could be |
| inferred. */ |
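| /* For instance, a statement that dereferences pointer OP lets us |
| usually infer OP != 0 afterwards (via infer_nonnull_range). */ |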
| |
| bool |
| infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p) |
| { |
| *val_p = NULL_TREE; |
| *comp_code_p = ERROR_MARK; |
| |
| /* Do not attempt to infer anything in names that flow through |
| abnormal edges. */ |
| if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op)) |
| return false; |
| |
| /* If STMT is the last statement of a basic block with no normal |
| successors, there is no point inferring anything about any of its |
| operands. We would not be able to find a proper insertion point |
| for the assertion, anyway. */ |
| if (stmt_ends_bb_p (stmt)) |
| { |
| edge_iterator ei; |
| edge e; |
| |
| FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs) |
| if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH))) |
| break; |
| if (e == NULL) |
| return false; |
| } |
| |
| if (infer_nonnull_range (stmt, op)) |
| { |
| *val_p = build_int_cst (TREE_TYPE (op), 0); |
| *comp_code_p = NE_EXPR; |
| return true; |
| } |
| |
| return false; |
| } |
| |
| |
| void dump_asserts_for (FILE *, tree); |
| void debug_asserts_for (tree); |
| void dump_all_asserts (FILE *); |
| void debug_all_asserts (void); |
| |
| /* Dump all the registered assertions for NAME to FILE. */ |
| |
| void |
| dump_asserts_for (FILE *file, tree name) |
| { |
| assert_locus *loc; |
| |
| fprintf (file, "Assertions to be inserted for "); |
| print_generic_expr (file, name); |
| fprintf (file, "\n"); |
| |
| loc = asserts_for[SSA_NAME_VERSION (name)]; |
| while (loc) |
| { |
| fprintf (file, "\t"); |
| print_gimple_stmt (file, gsi_stmt (loc->si), 0); |
| fprintf (file, "\n\tBB #%d", loc->bb->index); |
| if (loc->e) |
| { |
| fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index, |
| loc->e->dest->index); |
| dump_edge_info (file, loc->e, dump_flags, 0); |
| } |
| fprintf (file, "\n\tPREDICATE: "); |
| print_generic_expr (file, loc->expr); |
| fprintf (file, " %s ", get_tree_code_name (loc->comp_code)); |
| print_generic_expr (file, loc->val); |
| fprintf (file, "\n\n"); |
| loc = loc->next; |
| } |
| |
| fprintf (file, "\n"); |
| } |
| |
| |
| /* Dump all the registered assertions for NAME to stderr. */ |
| |
| DEBUG_FUNCTION void |
| debug_asserts_for (tree name) |
| { |
| dump_asserts_for (stderr, name); |
| } |
| |
| |
| /* Dump all the registered assertions for all the names to FILE. */ |
| |
| void |
| dump_all_asserts (FILE *file) |
| { |
| unsigned i; |
| bitmap_iterator bi; |
| |
| fprintf (file, "\nASSERT_EXPRs to be inserted\n\n"); |
| EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) |
| dump_asserts_for (file, ssa_name (i)); |
| fprintf (file, "\n"); |
| } |
| |
| |
| /* Dump all the registered assertions for all the names to stderr. */ |
| |
| DEBUG_FUNCTION void |
| debug_all_asserts (void) |
| { |
| dump_all_asserts (stderr); |
| } |
| |
| /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */ |
| |
| static void |
| add_assert_info (vec<assert_info> &asserts, |
| tree name, tree expr, enum tree_code comp_code, tree val) |
| { |
| assert_info info; |
| info.comp_code = comp_code; |
| info.name = name; |
| info.val = val; |
| info.expr = expr; |
| asserts.safe_push (info); |
| } |
| |
| /* If NAME doesn't have an ASSERT_EXPR registered for asserting |
| 'EXPR COMP_CODE VAL' at a location that dominates block BB or |
| E->DEST, then register this location as a possible insertion point |
| for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>. |
| |
| BB, E and SI provide the exact insertion point for the new |
| ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted |
| on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on |
| BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E |
| must not be NULL. */ |
| |
| static void |
| register_new_assert_for (tree name, tree expr, |
| enum tree_code comp_code, |
| tree val, |
| basic_block bb, |
| edge e, |
| gimple_stmt_iterator si) |
| { |
| assert_locus *n, *loc, *last_loc; |
| basic_block dest_bb; |
| |
| gcc_checking_assert (bb == NULL || e == NULL); |
| |
| if (e == NULL) |
| gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND |
| && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH); |
| |
| /* Never build an assert comparing against an integer constant with |
| TREE_OVERFLOW set. This confuses our undefined overflow warning |
| machinery. */ |
| if (TREE_OVERFLOW_P (val)) |
| val = drop_tree_overflow (val); |
| |
| /* The new assertion A will be inserted at BB or E. We need to |
| determine if the new location is dominated by a previously |
| registered location for A. If we are doing an edge insertion, |
| assume that A will be inserted at E->DEST. Note that this is not |
| necessarily true. |
| |
| If E is a critical edge, it will be split. But even if E is |
| split, the new block will dominate the same set of blocks that |
| E->DEST dominates. |
| |
| The reverse, however, is not true: blocks dominated by E->DEST |
| will not be dominated by the new block created to split E. So, |
| if the insertion location is on a critical edge, we will not use |
| the new location to move another assertion previously registered |
| at a block dominated by E->DEST. */ |
| dest_bb = (bb) ? bb : e->dest; |
| |
| /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and |
| VAL at a block dominating DEST_BB, then we don't need to insert a new |
| one. Similarly, if the same assertion already exists at a block |
| dominated by DEST_BB and the new location is not on a critical |
| edge, then update the existing location for the assertion (i.e., |
| move the assertion up in the dominance tree). |
| |
| Note, this is implemented as a simple linked list because there |
| should not be more than a handful of assertions registered per |
| name. If this becomes a performance problem, a table hashed by |
| COMP_CODE and VAL could be implemented. */ |
| loc = asserts_for[SSA_NAME_VERSION (name)]; |
| last_loc = loc; |
| while (loc) |
| { |
| if (loc->comp_code == comp_code |
| && (loc->val == val |
| || operand_equal_p (loc->val, val, 0)) |
| && (loc->expr == expr |
| || operand_equal_p (loc->expr, expr, 0))) |
| { |
| /* If E is not a critical edge and DEST_BB |
| dominates the existing location for the assertion, move |
| the assertion up in the dominance tree by updating its |
| location information. */ |
| if ((e == NULL || !EDGE_CRITICAL_P (e)) |
| && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb)) |
| { |
| loc->bb = dest_bb; |
| loc->e = e; |
| loc->si = si; |
| return; |
| } |
| } |
| |
| /* Update the last node of the list and move to the next one. */ |
| last_loc = loc; |
| loc = loc->next; |
| } |
| |
| /* If we didn't find an assertion already registered for |
| NAME COMP_CODE VAL, add a new one at the end of the list of |
| assertions associated with NAME. */ |
| n = XNEW (struct assert_locus); |
| n->bb = dest_bb; |
| n->e = e; |
| n->si = si; |
| n->comp_code = comp_code; |
| n->val = val; |
| n->expr = expr; |
| n->next = NULL; |
| |
| if (last_loc) |
| last_loc->next = n; |
| else |
| asserts_for[SSA_NAME_VERSION (name)] = n; |
| |
| bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name)); |
| } |
| |
| /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME. |
| Extract a suitable test code and value and store them into *CODE_P and |
| *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P. |
| |
| If no extraction was possible, return FALSE, otherwise return TRUE. |
| |
| If INVERT is true, then we invert the result stored into *CODE_P. */ |
| |
| static bool |
| extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code, |
| tree cond_op0, tree cond_op1, |
| bool invert, enum tree_code *code_p, |
| tree *val_p) |
| { |
| enum tree_code comp_code; |
| tree val; |
| |
| /* We have a comparison of the form NAME COMP VAL |
| or VAL COMP NAME. */ |
| if (name == cond_op1) |
| { |
| /* If the predicate is of the form VAL COMP NAME, flip |
| COMP around because we need to register NAME as the |
| first operand in the predicate. */ |
| comp_code = swap_tree_comparison (cond_code); |
| val = cond_op0; |
| } |
| else if (name == cond_op0) |
| { |
| /* The comparison is of the form NAME COMP VAL, so the |
| comparison code remains unchanged. */ |
| comp_code = cond_code; |
| val = cond_op1; |
| } |
| else |
| gcc_unreachable (); |
| |
| /* Invert the comparison code as necessary. */ |
| if (invert) |
| comp_code = invert_tree_comparison (comp_code, 0); |
| |
| /* VRP only handles integral and pointer types. */ |
| if (! INTEGRAL_TYPE_P (TREE_TYPE (val)) |
| && ! POINTER_TYPE_P (TREE_TYPE (val))) |
| return false; |
| |
| /* Do not register always-false predicates. |
| FIXME: this works around a limitation in fold() when dealing with |
| enumerations. Given 'enum { N1, N2 } x;', fold will not |
| fold 'if (x > N2)' to 'if (0)'. */ |
| if ((comp_code == GT_EXPR || comp_code == LT_EXPR) |
| && INTEGRAL_TYPE_P (TREE_TYPE (val))) |
| { |
| tree min = TYPE_MIN_VALUE (TREE_TYPE (val)); |
| tree max = TYPE_MAX_VALUE (TREE_TYPE (val)); |
| |
| if (comp_code == GT_EXPR |
| && (!max |
| || compare_values (val, max) == 0)) |
| return false; |
| |
| if (comp_code == LT_EXPR |
| && (!min |
| || compare_values (val, min) == 0)) |
| return false; |
| } |
| *code_p = comp_code; |
| *val_p = val; |
| return true; |
| } |
| |
| /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any |
| (otherwise return VAL). VAL and MASK must be zero-extended for |
| precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT |
| (to transform signed values into unsigned) and at the end xor |
| SGNBIT back. */ |
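| /* E.g. for VAL == 5 (0b0101) and MASK == 0b1010 the result is |
| 8 (0b1000), the smallest value above 5 that uses only MASK |
| bits. */ |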
| |
| static wide_int |
| masked_increment (const wide_int &val_in, const wide_int &mask, |
| const wide_int &sgnbit, unsigned int prec) |
| { |
| wide_int bit = wi::one (prec), res; |
| unsigned int i; |
| |
| wide_int val = val_in ^ sgnbit; |
| for (i = 0; i < prec; i++, bit += bit) |
| { |
| res = mask; |
| if ((res & bit) == 0) |
| continue; |
| res = bit - 1; |
| res = wi::bit_and_not (val + bit, res); |
| res &= mask; |
| if (wi::gtu_p (res, val)) |
| return res ^ sgnbit; |
| } |
| return val ^ sgnbit; |
| } |
| |
| /* Helper for overflow_comparison_p |
| |
| OP0 CODE OP1 is a comparison. Examine the comparison and potentially |
| OP1's defining statement to see if it ultimately has the form |
| OP0 CODE (OP0 PLUS INTEGER_CST) |
| |
| If so, return TRUE indicating this is an overflow test and store into |
| *NEW_CST an updated constant that can be used in a narrowed range test. |
| |
| REVERSED indicates if the comparison was originally: |
| |
| OP1 CODE' OP0. |
| |
| This affects how we build the updated constant. */ |
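| /* E.g. for unsigned char x, the test x < x + 250 holds exactly |
| when the addition does not wrap, i.e. when x <= 255 - 250 == 5, |
| so *NEW_CST becomes 5. */ |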
| |
| static bool |
| overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1, |
| bool follow_assert_exprs, bool reversed, tree *new_cst) |
| { |
| /* See if this is a relational operation between two SSA_NAMES with |
| unsigned, overflow wrapping values. If so, check it more deeply. */ |
| if ((code == LT_EXPR || code == LE_EXPR |
| || code == GE_EXPR || code == GT_EXPR) |
| && TREE_CODE (op0) == SSA_NAME |
| && TREE_CODE (op1) == SSA_NAME |
| && INTEGRAL_TYPE_P (TREE_TYPE (op0)) |
| && TYPE_UNSIGNED (TREE_TYPE (op0)) |
| && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0))) |
| { |
| gimple *op1_def = SSA_NAME_DEF_STMT (op1); |
| |
| /* If requested, follow any ASSERT_EXPRs backwards for OP1. */ |
| if (follow_assert_exprs) |
| { |
| while (gimple_assign_single_p (op1_def) |
| && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR) |
| { |
| op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0); |
| if (TREE_CODE (op1) != SSA_NAME) |
| break; |
| op1_def = SSA_NAME_DEF_STMT (op1); |
| } |
| } |
| |
| /* Now look at the defining statement of OP1 to see if it adds |
| or subtracts a nonzero constant from another operand. */ |
| if (op1_def |
| && is_gimple_assign (op1_def) |
| && gimple_assign_rhs_code (op1_def) == PLUS_EXPR |
| && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST |
| && !integer_zerop (gimple_assign_rhs2 (op1_def))) |
| { |
| tree target = gimple_assign_rhs1 (op1_def); |
| |
| /* If requested, follow ASSERT_EXPRs backwards for op0 looking |
| for one where TARGET appears on the RHS. */ |
| if (follow_assert_exprs) |
| { |
| /* Now see if that "other operand" is op0, following the chain |
| of ASSERT_EXPRs if necessary. */ |
| gimple *op0_def = SSA_NAME_DEF_STMT (op0); |
| while (op0 != target |
| && gimple_assign_single_p (op0_def) |
| && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR) |
| { |
| op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0); |
| if (TREE_CODE (op0) != SSA_NAME) |
| break; |
| op0_def = SSA_NAME_DEF_STMT (op0); |
| } |
| } |
| |
| /* If we did not find our target SSA_NAME, then this is not |
| an overflow test. */ |
| if (op0 != target) |
| return false; |
| |
| tree type = TREE_TYPE (op0); |
| wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED); |
| tree inc = gimple_assign_rhs2 (op1_def); |
| if (reversed) |
| *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc)); |
| else |
| *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc)); |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially |
| OP1's defining statement to see if it ultimately has the form |
| OP0 CODE (OP0 PLUS INTEGER_CST) |
| |
| If so, return TRUE indicating this is an overflow test and store into |
| *NEW_CST an updated constant that can be used in a narrowed range test. |
| |
| These statements are left as-is in the IL to facilitate discovery of |
| {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But |
| the alternate range representation is often useful within VRP. */ |
| |
| bool |
| overflow_comparison_p (tree_code code, tree name, tree val, |
| bool use_equiv_p, tree *new_cst) |
| { |
| if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst)) |
| return true; |
| return overflow_comparison_p_1 (swap_tree_comparison (code), val, name, |
| use_equiv_p, true, new_cst); |
| } |
| |
| |
| /* Try to register an edge assertion for SSA name NAME on edge E for |
| the condition COND_OP0 COND_CODE COND_OP1 contributing to a |
| conditional jump. Invert the condition if INVERT is true. |
| Queue the asserts found onto ASSERTS. */ |
| |
| static void |
| register_edge_assert_for_2 (tree name, edge e, |
| enum tree_code cond_code, |
| tree cond_op0, tree cond_op1, bool invert, |
| vec<assert_info> &asserts) |
| { |
| tree val; |
| enum tree_code comp_code; |
| |
| if (!extract_code_and_val_from_cond_with_ops (name, cond_code, |
| cond_op0, |
| cond_op1, |
| invert, &comp_code, &val)) |
| return; |
| |
| /* Queue the assert. */ |
| tree x; |
| if (overflow_comparison_p (comp_code, name, val, false, &x)) |
| { |
| enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR) |
| ? GT_EXPR : LE_EXPR); |
| add_assert_info (asserts, name, name, new_code, x); |
| } |
| add_assert_info (asserts, name, name, comp_code, val); |
| |
| /* In the case of NAME <= CST and NAME being defined as |
| NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2 |
| and NAME2 <= CST - CST2. We can do the same for NAME > CST. |
| This catches range and anti-range tests. */ |
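| /* E.g. for unsigned char, NAME = x + 200 and NAME <= 5 imply |
| x >= 56 (== -200 modulo 256) and x <= 61 (== 5 - 200), i.e. |
| the addition wrapped. */ |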
| if ((comp_code == LE_EXPR |
| || comp_code == GT_EXPR) |
| && TREE_CODE (val) == INTEGER_CST |
| && TYPE_UNSIGNED (TREE_TYPE (val))) |
| { |
| gimple *def_stmt = SSA_NAME_DEF_STMT (name); |
| tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE; |
| |
| /* Extract CST2 from the (optional) addition. */ |
| if (is_gimple_assign (def_stmt) |
| && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR) |
| { |
| name2 = gimple_assign_rhs1 (def_stmt); |
| cst2 = gimple_assign_rhs2 (def_stmt); |
| if (TREE_CODE (name2) == SSA_NAME |
| && TREE_CODE (cst2) == INTEGER_CST) |
| def_stmt = SSA_NAME_DEF_STMT (name2); |
| } |
| |
| /* Extract NAME2 from the (optional) sign-changing cast. */ |
| if (gimple_assign_cast_p (def_stmt)) |
| { |
| |