| /* Code for range operators. |
| Copyright (C) 2017-2023 Free Software Foundation, Inc. |
| Contributed by Andrew MacLeod <amacleod@redhat.com> |
| and Aldy Hernandez <aldyh@redhat.com>. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "insn-codes.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "cfghooks.h" |
| #include "tree-pass.h" |
| #include "ssa.h" |
| #include "optabs-tree.h" |
| #include "gimple-pretty-print.h" |
| #include "diagnostic-core.h" |
| #include "flags.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "calls.h" |
| #include "cfganal.h" |
| #include "gimple-iterator.h" |
| #include "gimple-fold.h" |
| #include "tree-eh.h" |
| #include "gimple-walk.h" |
| #include "tree-cfg.h" |
| #include "wide-int.h" |
| #include "value-relation.h" |
| #include "range-op.h" |
| #include "tree-ssa-ccp.h" |
| #include "range-op-mixed.h" |
| |
// Instantiate the operators which apply to multiple types here.
// Each is a stateless singleton which is indexed by tree code in the
// operator table below.

operator_equal op_equal;
operator_not_equal op_not_equal;
operator_lt op_lt;
operator_le op_le;
operator_gt op_gt;
operator_ge op_ge;
operator_identity op_ident;
operator_cst op_cst;
operator_cast op_cast;
operator_plus op_plus;
operator_abs op_abs;
operator_minus op_minus;
operator_negate op_negate;
operator_mult op_mult;
operator_addr_expr op_addr;
operator_bitwise_not op_bitwise_not;
operator_bitwise_xor op_bitwise_xor;
operator_bitwise_and op_bitwise_and;
operator_bitwise_or op_bitwise_or;
operator_min op_min;
operator_max op_max;

// Instantiate a range operator table.
range_op_table operator_table;
| |
// Invoke the initialization routines for each class of range.

range_op_table::range_op_table ()
{
  // Install the type-specific operator entries first; the shared
  // entries below are then registered on top of them.
  initialize_integral_ops ();
  initialize_pointer_ops ();
  initialize_float_ops ();

  // Relational operators.
  set (EQ_EXPR, op_equal);
  set (NE_EXPR, op_not_equal);
  set (LT_EXPR, op_lt);
  set (LE_EXPR, op_le);
  set (GT_EXPR, op_gt);
  set (GE_EXPR, op_ge);
  // Codes which simply pass their operand range through.
  set (SSA_NAME, op_ident);
  set (PAREN_EXPR, op_ident);
  set (OBJ_TYPE_REF, op_ident);
  // Constants, conversions and arithmetic.
  set (REAL_CST, op_cst);
  set (INTEGER_CST, op_cst);
  set (NOP_EXPR, op_cast);
  set (CONVERT_EXPR, op_cast);
  set (PLUS_EXPR, op_plus);
  set (ABS_EXPR, op_abs);
  set (MINUS_EXPR, op_minus);
  set (NEGATE_EXPR, op_negate);
  set (MULT_EXPR, op_mult);

  // Occur in both integer and pointer tables, but currently share
  // integral implementation.
  set (ADDR_EXPR, op_addr);
  set (BIT_NOT_EXPR, op_bitwise_not);
  set (BIT_XOR_EXPR, op_bitwise_xor);

  // These are in both integer and pointer tables, but pointer has a different
  // implementation.
  // If commented out, there is a hybrid version in range-op-ptr.cc which
  // is used until there is a pointer range class.  Then we can simply
  // uncomment the operator here and use the unified version.

  // set (BIT_AND_EXPR, op_bitwise_and);
  // set (BIT_IOR_EXPR, op_bitwise_or);
  // set (MIN_EXPR, op_min);
  // set (MAX_EXPR, op_max);
}
| |
// Instantiate a default range operator for opcodes with no entry.
// The base class virtuals all have conservative defaults (return false
// or VREL_VARYING), so this object is safe to invoke for any opcode.

range_operator default_operator;
| |
| // Create a default range_op_handler. |
| |
| range_op_handler::range_op_handler () |
| { |
| m_operator = &default_operator; |
| } |
| |
| // Create a range_op_handler for CODE. Use a default operatoer if CODE |
| // does not have an entry. |
| |
| range_op_handler::range_op_handler (unsigned code) |
| { |
| m_operator = operator_table[code]; |
| if (!m_operator) |
| m_operator = &default_operator; |
| } |
| |
| // Return TRUE if this handler has a non-default operator. |
| |
| range_op_handler::operator bool () const |
| { |
| return m_operator != &default_operator; |
| } |
| |
| // Return a pointer to the range operator assocaited with this handler. |
| // If it is a default operator, return NULL. |
| // This is the equivalent of indexing the range table. |
| |
| range_operator * |
| range_op_handler::range_op () const |
| { |
| if (m_operator != &default_operator) |
| return m_operator; |
| return NULL; |
| } |
| |
// Create a dispatch pattern for value range discriminators LHS, OP1, and OP2.
// This is used to produce a unique value for each dispatch pattern.  Shift
// values are based on the size of the m_discriminator field in value_range.h.

constexpr unsigned
dispatch_trio (unsigned lhs, unsigned op1, unsigned op2)
{
  return op2 + (op1 << 4) + (lhs << 8);
}
| |
// These are the supported dispatch patterns.  These map to the parameter list
// of the routines in range_operator.  Note the last 3 characters are
// shorthand for the LHS, OP1, and OP2 range discriminator class
// (I = irange, F = frange).

const unsigned RO_III = dispatch_trio (VR_IRANGE, VR_IRANGE, VR_IRANGE);
const unsigned RO_IFI = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_IRANGE);
const unsigned RO_IFF = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_FRANGE);
const unsigned RO_FFF = dispatch_trio (VR_FRANGE, VR_FRANGE, VR_FRANGE);
const unsigned RO_FIF = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_FRANGE);
const unsigned RO_FII = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_IRANGE);
| |
| // Return a dispatch value for parameter types LHS, OP1 and OP2. |
| |
| unsigned |
| range_op_handler::dispatch_kind (const vrange &lhs, const vrange &op1, |
| const vrange& op2) const |
| { |
| return dispatch_trio (lhs.m_discriminator, op1.m_discriminator, |
| op2.m_discriminator); |
| } |
| |
// Dispatch a call to fold_range based on the types of R, LH and RH.
// Return FALSE if the discriminator combination is not supported,
// leaving R untouched.

bool
range_op_handler::fold_range (vrange &r, tree type,
			      const vrange &lh,
			      const vrange &rh,
			      relation_trio rel) const
{
  gcc_checking_assert (m_operator);
#if CHECKING_P
  // Sanity check the operand types against the operator before folding.
  if (!lh.undefined_p () && !rh.undefined_p ())
    gcc_assert (m_operator->operand_check_p (type, lh.type (), rh.type ()));
#endif
  // Downcast each vrange to its concrete class and invoke the matching
  // fold_range overload.
  switch (dispatch_kind (r, lh, rh))
    {
      case RO_III:
	return m_operator->fold_range (as_a <irange> (r), type,
				       as_a <irange> (lh),
				       as_a <irange> (rh), rel);
      case RO_IFI:
	return m_operator->fold_range (as_a <irange> (r), type,
				       as_a <frange> (lh),
				       as_a <irange> (rh), rel);
      case RO_IFF:
	return m_operator->fold_range (as_a <irange> (r), type,
				       as_a <frange> (lh),
				       as_a <frange> (rh), rel);
      case RO_FFF:
	return m_operator->fold_range (as_a <frange> (r), type,
				       as_a <frange> (lh),
				       as_a <frange> (rh), rel);
      case RO_FII:
	return m_operator->fold_range (as_a <frange> (r), type,
				       as_a <irange> (lh),
				       as_a <irange> (rh), rel);
      default:
	return false;
    }
}
| |
// Dispatch a call to op1_range based on the types of R, LHS and OP2.
// Return FALSE if the LHS is undefined or the discriminator combination
// is not supported.

bool
range_op_handler::op1_range (vrange &r, tree type,
			     const vrange &lhs,
			     const vrange &op2,
			     relation_trio rel) const
{
  gcc_checking_assert (m_operator);
  if (lhs.undefined_p ())
    return false;
#if CHECKING_P
  // Sanity check the operand types against the operator.
  if (!op2.undefined_p ())
    gcc_assert (m_operator->operand_check_p (lhs.type (), type, op2.type ()));
#endif
  switch (dispatch_kind (r, lhs, op2))
    {
      case RO_III:
	return m_operator->op1_range (as_a <irange> (r), type,
				      as_a <irange> (lhs),
				      as_a <irange> (op2), rel);
      case RO_FIF:
	return m_operator->op1_range (as_a <frange> (r), type,
				      as_a <irange> (lhs),
				      as_a <frange> (op2), rel);
      case RO_FFF:
	return m_operator->op1_range (as_a <frange> (r), type,
				      as_a <frange> (lhs),
				      as_a <frange> (op2), rel);
      default:
	return false;
    }
}
| |
// Dispatch a call to op2_range based on the types of R, LHS and OP1.
// Return FALSE if the LHS is undefined or the discriminator combination
// is not supported.

bool
range_op_handler::op2_range (vrange &r, tree type,
			     const vrange &lhs,
			     const vrange &op1,
			     relation_trio rel) const
{
  gcc_checking_assert (m_operator);
  if (lhs.undefined_p ())
    return false;
#if CHECKING_P
  // Sanity check the operand types against the operator.
  if (!op1.undefined_p ())
    gcc_assert (m_operator->operand_check_p (lhs.type (), op1.type (), type));
#endif
  switch (dispatch_kind (r, lhs, op1))
    {
      case RO_III:
	return m_operator->op2_range (as_a <irange> (r), type,
				      as_a <irange> (lhs),
				      as_a <irange> (op1), rel);
      case RO_FIF:
	return m_operator->op2_range (as_a <frange> (r), type,
				      as_a <irange> (lhs),
				      as_a <frange> (op1), rel);
      case RO_FFF:
	return m_operator->op2_range (as_a <frange> (r), type,
				      as_a <frange> (lhs),
				      as_a <frange> (op1), rel);
      default:
	return false;
    }
}
| |
// Dispatch a call to lhs_op1_relation based on the types of LHS, OP1 and OP2.
// Unsupported discriminator combinations return VREL_VARYING (no known
// relation).

relation_kind
range_op_handler::lhs_op1_relation (const vrange &lhs,
				    const vrange &op1,
				    const vrange &op2,
				    relation_kind rel) const
{
  gcc_checking_assert (m_operator);

  switch (dispatch_kind (lhs, op1, op2))
    {
      case RO_III:
	return m_operator->lhs_op1_relation (as_a <irange> (lhs),
					     as_a <irange> (op1),
					     as_a <irange> (op2), rel);
      case RO_IFF:
	return m_operator->lhs_op1_relation (as_a <irange> (lhs),
					     as_a <frange> (op1),
					     as_a <frange> (op2), rel);
      case RO_FFF:
	return m_operator->lhs_op1_relation (as_a <frange> (lhs),
					     as_a <frange> (op1),
					     as_a <frange> (op2), rel);
      default:
	return VREL_VARYING;
    }
}
| |
// Dispatch a call to lhs_op2_relation based on the types of LHS, OP1 and OP2.
// Unsupported discriminator combinations return VREL_VARYING (no known
// relation).

relation_kind
range_op_handler::lhs_op2_relation (const vrange &lhs,
				    const vrange &op1,
				    const vrange &op2,
				    relation_kind rel) const
{
  gcc_checking_assert (m_operator);
  switch (dispatch_kind (lhs, op1, op2))
    {
      case RO_III:
	return m_operator->lhs_op2_relation (as_a <irange> (lhs),
					     as_a <irange> (op1),
					     as_a <irange> (op2), rel);
      case RO_IFF:
	return m_operator->lhs_op2_relation (as_a <irange> (lhs),
					     as_a <frange> (op1),
					     as_a <frange> (op2), rel);
      case RO_FFF:
	return m_operator->lhs_op2_relation (as_a <frange> (lhs),
					     as_a <frange> (op1),
					     as_a <frange> (op2), rel);
      default:
	return VREL_VARYING;
    }
}
| |
// Dispatch a call to op1_op2_relation based on the type of LHS.
// Unsupported discriminator combinations return VREL_VARYING (no known
// relation).

relation_kind
range_op_handler::op1_op2_relation (const vrange &lhs,
				    const vrange &op1,
				    const vrange &op2) const
{
  gcc_checking_assert (m_operator);
  switch (dispatch_kind (lhs, op1, op2))
    {
      case RO_III:
	return m_operator->op1_op2_relation (as_a <irange> (lhs),
					     as_a <irange> (op1),
					     as_a <irange> (op2));

      case RO_IFF:
	return m_operator->op1_op2_relation (as_a <irange> (lhs),
					     as_a <frange> (op1),
					     as_a <frange> (op2));

      case RO_FFF:
	return m_operator->op1_op2_relation (as_a <frange> (lhs),
					     as_a <frange> (op1),
					     as_a <frange> (op2));

      default:
	return VREL_VARYING;
    }
}
| |
// Dispatch a call to overflow_free_p based on the types of LH and RH.
// Only the integral (RO_III) combination is supported; everything else
// conservatively returns false.

bool
range_op_handler::overflow_free_p (const vrange &lh,
				   const vrange &rh,
				   relation_trio rel) const
{
  gcc_checking_assert (m_operator);
  // There is no LHS here, so LH stands in for it in the dispatch trio.
  switch (dispatch_kind (lh, lh, rh))
    {
      case RO_III:
	return m_operator->overflow_free_p(as_a <irange> (lh),
					   as_a <irange> (rh),
					   rel);
      default:
	return false;
    }
}
| |
// Forward an operand type-sanity check to the underlying operator.
// T1 is the LHS type, T2 and T3 the operand types.

bool
range_op_handler::operand_check_p (tree t1, tree t2, tree t3) const
{
  gcc_checking_assert (m_operator);
  return m_operator->operand_check_p (t1, t2, t3);
}
| |
// Update the known bitmasks in R when applying the operation CODE to
// LH and RH.  This uses the CCP bit-value machinery to track which bits
// are known-zero/known-one through the operation.

void
update_known_bitmask (irange &r, tree_code code,
		      const irange &lh, const irange &rh)
{
  // Nothing to do for undefined ranges, or a singleton (its bitmask is
  // already exact).
  if (r.undefined_p () || lh.undefined_p () || rh.undefined_p ()
      || r.singleton_p ())
    return;

  widest_int widest_value, widest_mask;
  tree type = r.type ();
  signop sign = TYPE_SIGN (type);
  int prec = TYPE_PRECISION (type);
  irange_bitmask lh_bits = lh.get_bitmask ();
  irange_bitmask rh_bits = rh.get_bitmask ();

  // Unary operations only consume LH; binary ones consume both.
  switch (get_gimple_rhs_class (code))
    {
      case GIMPLE_UNARY_RHS:
	bit_value_unop (code, sign, prec, &widest_value, &widest_mask,
			TYPE_SIGN (lh.type ()),
			TYPE_PRECISION (lh.type ()),
			widest_int::from (lh_bits.value (), sign),
			widest_int::from (lh_bits.mask (), sign));
	break;
      case GIMPLE_BINARY_RHS:
	bit_value_binop (code, sign, prec, &widest_value, &widest_mask,
			 TYPE_SIGN (lh.type ()),
			 TYPE_PRECISION (lh.type ()),
			 widest_int::from (lh_bits.value (), sign),
			 widest_int::from (lh_bits.mask (), sign),
			 TYPE_SIGN (rh.type ()),
			 TYPE_PRECISION (rh.type ()),
			 widest_int::from (rh_bits.value (), sign),
			 widest_int::from (rh_bits.mask (), sign));
	break;
      default:
	gcc_unreachable ();
    }

  // Truncate the widest_int results back to the precision of TYPE.
  wide_int mask = wide_int::from (widest_mask, prec, sign);
  wide_int value = wide_int::from (widest_value, prec, sign);
  // Bitmasks must have the unknown value bits cleared.
  value &= ~mask;
  irange_bitmask bm (value, mask);
  r.update_bitmask (bm);
}
| |
// Return the upper limit (maximum representable value) for a type.

static inline wide_int
max_limit (const_tree type)
{
  return irange_val_max (type);
}
| |
// Return the lower limit (minimum representable value) for a type.

static inline wide_int
min_limit (const_tree type)
{
  return irange_val_min (type);
}
| |
// Return false if shifting by OP is undefined behavior.  Otherwise, return
// true and the range it is to be shifted by.  This allows trimming out of
// undefined ranges, leaving only valid ranges if there are any.

static inline bool
get_shift_range (irange &r, tree type, const irange &op)
{
  if (op.undefined_p ())
    return false;

  // Build the valid shift-count range [0, precision - 1] and intersect
  // it with the shift range.
  r = value_range (op.type (),
		   wi::shwi (0, TYPE_PRECISION (op.type ())),
		   wi::shwi (TYPE_PRECISION (type) - 1,
			     TYPE_PRECISION (op.type ())));
  r.intersect (op);

  // If there are no valid ranges in the shift range, return false.
  if (r.undefined_p ())
    return false;
  return true;
}
| |
// Default wide_int fold operation returns [MIN, MAX] (varying) for the
// type.  Operators override this with a precise bound calculation.

void
range_operator::wi_fold (irange &r, tree type,
			 const wide_int &lh_lb ATTRIBUTE_UNUSED,
			 const wide_int &lh_ub ATTRIBUTE_UNUSED,
			 const wide_int &rh_lb ATTRIBUTE_UNUSED,
			 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
{
  gcc_checking_assert (r.supports_type_p (type));
  r.set_varying (type);
}
| |
// Call wi_fold when both op1 and op2 are equivalent.  Further split small
// subranges into constants.  This can provide better precision.
// For x + y, when x == y with a range of [0,4] instead of [0, 8] produce
// [0,0][2, 2][4,4][6, 6][8, 8]
// LIMIT is the maximum number of elements in range allowed before we
// do not process them individually.

void
range_operator::wi_fold_in_parts_equiv (irange &r, tree type,
					const wide_int &lh_lb,
					const wide_int &lh_ub,
					unsigned limit) const
{
  int_range_max tmp;
  // Width of the LH range, computed in widest_int to avoid overflow.
  widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
				 widest_int::from (lh_lb, TYPE_SIGN (type)));
  // If there are fewer than LIMIT values in the LH range, fold each
  // value individually (as VAL op VAL) and union the results.
  r.set_undefined ();
  if (lh_range >= 0 && lh_range < limit)
    {
      for (unsigned x = 0; x <= lh_range; x++)
	{
	  wide_int val = lh_lb + x;
	  wi_fold (tmp, type, val, val, val, val);
	  r.union_ (tmp);
	}
    }
  // Otherwise just call wi_fold.
  else
    wi_fold (r, type, lh_lb, lh_ub, lh_lb, lh_ub);
}
| |
// Call wi_fold, except further split small subranges into constants.
// This can provide better precision.  For something 8 >> [0,1]
// Instead of [8, 16], we will produce [8,8][16,16]

void
range_operator::wi_fold_in_parts (irange &r, tree type,
				  const wide_int &lh_lb,
				  const wide_int &lh_ub,
				  const wide_int &rh_lb,
				  const wide_int &rh_ub) const
{
  int_range_max tmp;
  // Widths of each range, computed in widest_int to avoid overflow.
  widest_int rh_range = wi::sub (widest_int::from (rh_ub, TYPE_SIGN (type)),
				 widest_int::from (rh_lb, TYPE_SIGN (type)));
  widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
				 widest_int::from (lh_lb, TYPE_SIGN (type)));
  // If there are 2, 3, or 4 values in the RH range, do them separately.
  // Call wi_fold_in_parts to check the RH side.
  if (rh_range > 0 && rh_range < 4)
    {
      wi_fold_in_parts (r, type, lh_lb, lh_ub, rh_lb, rh_lb);
      if (rh_range > 1)
	{
	  wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 1, rh_lb + 1);
	  r.union_ (tmp);
	  if (rh_range == 3)
	    {
	      wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 2, rh_lb + 2);
	      r.union_ (tmp);
	    }
	}
      wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_ub, rh_ub);
      r.union_ (tmp);
    }
  // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
  // The RH side has been checked, so no recursion needed.
  else if (lh_range > 0 && lh_range < 4)
    {
      wi_fold (r, type, lh_lb, lh_lb, rh_lb, rh_ub);
      if (lh_range > 1)
	{
	  wi_fold (tmp, type, lh_lb + 1, lh_lb + 1, rh_lb, rh_ub);
	  r.union_ (tmp);
	  if (lh_range == 3)
	    {
	      wi_fold (tmp, type, lh_lb + 2, lh_lb + 2, rh_lb, rh_ub);
	      r.union_ (tmp);
	    }
	}
      wi_fold (tmp, type, lh_ub, lh_ub, rh_lb, rh_ub);
      r.union_ (tmp);
    }
  // Otherwise just call wi_fold.
  else
    wi_fold (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
}
| |
// The default for fold is to break all ranges into sub-ranges and
// invoke the wi_fold method on each sub-range pair.  Always returns
// true; R holds the folded result.

bool
range_operator::fold_range (irange &r, tree type,
			    const irange &lh,
			    const irange &rh,
			    relation_trio trio) const
{
  gcc_checking_assert (r.supports_type_p (type));
  // Undefined operands resolve trivially (R set by the helper).
  if (empty_range_varying (r, type, lh, rh))
    return true;

  relation_kind rel = trio.op1_op2 ();
  unsigned num_lh = lh.num_pairs ();
  unsigned num_rh = rh.num_pairs ();

  // If op1 and op2 are equivalences, then we don't need a complete cross
  // product, just pairs of matching elements.
  if (relation_equiv_p (rel) && lh == rh)
    {
      int_range_max tmp;
      r.set_undefined ();
      for (unsigned x = 0; x < num_lh; ++x)
	{
	  // If the number of subranges is too high, limit subrange creation.
	  unsigned limit = (r.num_pairs () > 32) ? 0 : 8;
	  wide_int lh_lb = lh.lower_bound (x);
	  wide_int lh_ub = lh.upper_bound (x);
	  wi_fold_in_parts_equiv (tmp, type, lh_lb, lh_ub, limit);
	  r.union_ (tmp);
	  // Once varying, no further unions can refine the result.
	  if (r.varying_p ())
	    break;
	}
      op1_op2_relation_effect (r, type, lh, rh, rel);
      update_bitmask (r, lh, rh);
      return true;
    }

  // If both ranges are single pairs, fold directly into the result range.
  // If the number of subranges grows too high, produce a summary result as the
  // loop becomes exponential with little benefit.  See PR 103821.
  if ((num_lh == 1 && num_rh == 1) || num_lh * num_rh > 12)
    {
      wi_fold_in_parts (r, type, lh.lower_bound (), lh.upper_bound (),
			rh.lower_bound (), rh.upper_bound ());
      op1_op2_relation_effect (r, type, lh, rh, rel);
      update_bitmask (r, lh, rh);
      return true;
    }

  // Full cross product of the subrange pairs.
  int_range_max tmp;
  r.set_undefined ();
  for (unsigned x = 0; x < num_lh; ++x)
    for (unsigned y = 0; y < num_rh; ++y)
      {
	wide_int lh_lb = lh.lower_bound (x);
	wide_int lh_ub = lh.upper_bound (x);
	wide_int rh_lb = rh.lower_bound (y);
	wide_int rh_ub = rh.upper_bound (y);
	wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb, rh_ub);
	r.union_ (tmp);
	// Bail early once the result is varying; nothing can refine it.
	if (r.varying_p ())
	  {
	    op1_op2_relation_effect (r, type, lh, rh, rel);
	    update_bitmask (r, lh, rh);
	    return true;
	  }
      }
  op1_op2_relation_effect (r, type, lh, rh, rel);
  update_bitmask (r, lh, rh);
  return true;
}
| |
| // The default for op1_range is to return false. |
| |
| bool |
| range_operator::op1_range (irange &r ATTRIBUTE_UNUSED, |
| tree type ATTRIBUTE_UNUSED, |
| const irange &lhs ATTRIBUTE_UNUSED, |
| const irange &op2 ATTRIBUTE_UNUSED, |
| relation_trio) const |
| { |
| return false; |
| } |
| |
| // The default for op2_range is to return false. |
| |
| bool |
| range_operator::op2_range (irange &r ATTRIBUTE_UNUSED, |
| tree type ATTRIBUTE_UNUSED, |
| const irange &lhs ATTRIBUTE_UNUSED, |
| const irange &op1 ATTRIBUTE_UNUSED, |
| relation_trio) const |
| { |
| return false; |
| } |
| |
| // The default relation routines return VREL_VARYING. |
| |
| relation_kind |
| range_operator::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED, |
| const irange &op1 ATTRIBUTE_UNUSED, |
| const irange &op2 ATTRIBUTE_UNUSED, |
| relation_kind rel ATTRIBUTE_UNUSED) const |
| { |
| return VREL_VARYING; |
| } |
| |
| relation_kind |
| range_operator::lhs_op2_relation (const irange &lhs ATTRIBUTE_UNUSED, |
| const irange &op1 ATTRIBUTE_UNUSED, |
| const irange &op2 ATTRIBUTE_UNUSED, |
| relation_kind rel ATTRIBUTE_UNUSED) const |
| { |
| return VREL_VARYING; |
| } |
| |
| relation_kind |
| range_operator::op1_op2_relation (const irange &lhs ATTRIBUTE_UNUSED, |
| const irange &op1 ATTRIBUTE_UNUSED, |
| const irange &op2 ATTRIBUTE_UNUSED) const |
| { |
| return VREL_VARYING; |
| } |
| |
| // Default is no relation affects the LHS. |
| |
| bool |
| range_operator::op1_op2_relation_effect (irange &lhs_range ATTRIBUTE_UNUSED, |
| tree type ATTRIBUTE_UNUSED, |
| const irange &op1_range ATTRIBUTE_UNUSED, |
| const irange &op2_range ATTRIBUTE_UNUSED, |
| relation_kind rel ATTRIBUTE_UNUSED) const |
| { |
| return false; |
| } |
| |
// Default: an operation cannot be proven overflow free.

bool
range_operator::overflow_free_p (const irange &, const irange &,
				 relation_trio) const
{
  return false;
}
| |
// Apply any known bitmask updates based on this operator.
// The default does nothing; operators with bit-level semantics override.

void
range_operator::update_bitmask (irange &, const irange &,
				const irange &) const
{
}
| |
// Check that operand types are OK.  Default to always OK.

bool
range_operator::operand_check_p (tree, tree, tree) const
{
  return true;
}
| |
// Create and return a range from a pair of wide-ints that are known
// to have overflowed (or underflowed).  The result is expressed as an
// anti-range of the wrapped-around span, or varying if that is not
// representable.

static void
value_range_from_overflowed_bounds (irange &r, tree type,
				    const wide_int &wmin,
				    const wide_int &wmax)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  // Truncate the overflowed values to the precision of TYPE.
  wide_int tmin = wide_int::from (wmin, prec, sgn);
  wide_int tmax = wide_int::from (wmax, prec, sgn);

  // Swap the bounds (~[max+1, min-1]), tracking whether either
  // adjustment itself wraps, which would mean the anti-range covers
  // everything.
  bool covers = false;
  wide_int tem = tmin;
  tmin = tmax + 1;
  if (wi::cmp (tmin, tmax, sgn) < 0)
    covers = true;
  tmax = tem - 1;
  if (wi::cmp (tmax, tem, sgn) > 0)
    covers = true;

  // If the anti-range would cover nothing, drop to varying.
  // Likewise if the anti-range bounds are outside of the types
  // values.
  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
    r.set_varying (type);
  else
    r.set (type, tmin, tmax, VR_ANTI_RANGE);
}
| |
// Create and return a range from a pair of wide-ints.  MIN_OVF and
// MAX_OVF describe any overflow that might have occurred while
// calculating WMIN and WMAX respectively.

static void
value_range_with_overflow (irange &r, tree type,
			   const wide_int &wmin, const wide_int &wmax,
			   wi::overflow_type min_ovf = wi::OVF_NONE,
			   wi::overflow_type max_ovf = wi::OVF_NONE)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);
  const bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);

  // For one bit precision if max != min, then the range covers all
  // values.
  if (prec == 1 && wi::ne_p (wmax, wmin))
    {
      r.set_varying (type);
      return;
    }

  if (overflow_wraps)
    {
      // If overflow wraps, truncate the values and adjust the range,
      // kind, and bounds appropriately.
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
	{
	  // Both bounds overflowed, or neither did.
	  wide_int tmin = wide_int::from (wmin, prec, sgn);
	  wide_int tmax = wide_int::from (wmax, prec, sgn);
	  // If the limits are swapped, we wrapped around and cover
	  // the entire range.
	  if (wi::gt_p (tmin, tmax, sgn))
	    r.set_varying (type);
	  else
	    // No overflow or both overflow or underflow.  The range
	    // kind stays normal.
	    r.set (type, tmin, tmax);
	  return;
	}

      // Exactly one bound wrapped: represent as an anti-range if possible.
      if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
	  || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
	value_range_from_overflowed_bounds (r, type, wmin, wmax);
      else
	// Other underflow and/or overflow, drop to VR_VARYING.
	r.set_varying (type);
    }
  else
    {
      // If both bounds either underflowed or overflowed, then the result
      // is undefined.
      if ((min_ovf == wi::OVF_OVERFLOW && max_ovf == wi::OVF_OVERFLOW)
	  || (min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_UNDERFLOW))
	{
	  r.set_undefined ();
	  return;
	}

      // If overflow does not wrap, saturate to [MIN, MAX].
      wide_int new_lb, new_ub;
      if (min_ovf == wi::OVF_UNDERFLOW)
	new_lb = wi::min_value (prec, sgn);
      else if (min_ovf == wi::OVF_OVERFLOW)
	new_lb = wi::max_value (prec, sgn);
      else
	new_lb = wmin;

      if (max_ovf == wi::OVF_UNDERFLOW)
	new_ub = wi::min_value (prec, sgn);
      else if (max_ovf == wi::OVF_OVERFLOW)
	new_ub = wi::max_value (prec, sgn);
      else
	new_ub = wmax;

      r.set (type, new_lb, new_ub);
    }
}
| |
// Create and return a range from a pair of wide-ints.  Canonicalize
// the case where the bounds are swapped.  In which case, we transform
// [10,5] into [MIN,5][10,MAX].

static inline void
create_possibly_reversed_range (irange &r, tree type,
				const wide_int &new_lb, const wide_int &new_ub)
{
  signop s = TYPE_SIGN (type);
  // If the bounds are swapped, treat the result as if an overflow occurred.
  if (wi::gt_p (new_lb, new_ub, s))
    value_range_from_overflowed_bounds (r, type, new_lb, new_ub);
  else
    // Otherwise it's just a normal range.
    r.set (type, new_lb, new_ub);
}
| |
// Return the summary information about boolean range LHS.  If EMPTY/FULL,
// return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.

bool_range_state
get_bool_state (vrange &r, const vrange &lhs, tree val_type)
{
  // If there is no result, then this is unexecutable.
  if (lhs.undefined_p ())
    {
      r.set_undefined ();
      return BRS_EMPTY;
    }

  if (lhs.zero_p ())
    return BRS_FALSE;

  // For TRUE, we can't just test for [1,1] because Ada can have
  // multi-bit booleans, and TRUE values can be:  [1, MAX], ~[0], etc.
  // A range containing zero as well as nonzero values is "maybe".
  if (lhs.contains_p (build_zero_cst (lhs.type ())))
    {
      r.set_varying (val_type);
      return BRS_FULL;
    }

  return BRS_TRUE;
}
| |
// ------------------------------------------------------------------------

// Refine the bitmask of R using the CCP bit tracking for EQ_EXPR.

void
operator_equal::update_bitmask (irange &r, const irange &lh,
				const irange &rh) const
{
  update_known_bitmask (r, EQ_EXPR, lh, rh);
}
| |
// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
operator_equal::op1_op2_relation (const irange &lhs, const irange &,
				  const irange &) const
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 == op2 indicates NE_EXPR.
  if (lhs.zero_p ())
    return VREL_NE;

  // TRUE = op1 == op2 indicates EQ_EXPR.
  if (!contains_zero_p (lhs))
    return VREL_EQ;
  return VREL_VARYING;
}
| |
// Fold op1 == op2 into a boolean range R.

bool
operator_equal::fold_range (irange &r, tree type,
			    const irange &op1,
			    const irange &op2,
			    relation_trio rel) const
{
  // A known relation between the operands resolves the result directly.
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_EQ))
    return true;

  // We can be sure the values are always equal or not if both ranges
  // consist of a single value, and then compare them.
  bool op1_const = wi::eq_p (op1.lower_bound (), op1.upper_bound ());
  bool op2_const = wi::eq_p (op2.lower_bound (), op2.upper_bound ());
  if (op1_const && op2_const)
    {
      // Both singletons; upper/lower bounds are interchangeable here.
      if (wi::eq_p (op1.lower_bound (), op2.upper_bound()))
	r = range_true (type);
      else
	r = range_false (type);
    }
  else
    {
      // If ranges do not intersect, we know the range is not equal,
      // otherwise we don't know anything for sure.
      int_range_max tmp = op1;
      tmp.intersect (op2);
      if (tmp.undefined_p ())
	r = range_false (type);
      // Check if a constant cannot satisfy the bitmask requirements.
      else if (op2_const && !op1.get_bitmask ().member_p (op2.lower_bound ()))
	 r = range_false (type);
      else if (op1_const && !op2.get_bitmask ().member_p (op1.lower_bound ()))
	 r = range_false (type);
      else
	r = range_true_and_false (type);
    }
  return true;
}
| |
// Derive the range of OP1 from LHS = (OP1 == OP2).

bool
operator_equal::op1_range (irange &r, tree type,
			   const irange &lhs,
			   const irange &op2,
			   relation_trio) const
{
  switch (get_bool_state (r, lhs, type))
    {
      case BRS_TRUE:
	// If it's true, the result is the same as OP2.
	r = op2;
	break;

      case BRS_FALSE:
	// If the result is false, the only time we know anything is
	// if OP2 is a constant.
	if (!op2.undefined_p ()
	    && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
	  {
	    // OP1 is everything except that constant.
	    r = op2;
	    r.invert ();
	  }
	else
	  r.set_varying (type);
	break;

      default:
	// BRS_EMPTY/BRS_FULL: R was already set by get_bool_state.
	break;
    }
  return true;
}
| |
// Derive the range of OP2 from LHS = (OP1 == OP2).  Equality is
// symmetric, so defer to op1_range with the relation operands swapped.

bool
operator_equal::op2_range (irange &r, tree type,
			   const irange &lhs,
			   const irange &op1,
			   relation_trio rel) const
{
  return operator_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
}
| |
// -------------------------------------------------------------------------

// Refine the bitmask of R using the CCP bit tracking for NE_EXPR.

void
operator_not_equal::update_bitmask (irange &r, const irange &lh,
				    const irange &rh) const
{
  update_known_bitmask (r, NE_EXPR, lh, rh);
}
| |
// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
operator_not_equal::op1_op2_relation (const irange &lhs, const irange &,
				      const irange &) const
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 != op2  indicates EQ_EXPR.
  if (lhs.zero_p ())
    return VREL_EQ;

  // TRUE = op1 != op2  indicates NE_EXPR.
  if (!contains_zero_p (lhs))
    return VREL_NE;
  return VREL_VARYING;
}
| |
// Fold op1 != op2 into a boolean range R.

bool
operator_not_equal::fold_range (irange &r, tree type,
				const irange &op1,
				const irange &op2,
				relation_trio rel) const
{
  // A known relation between the operands resolves the result directly.
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_NE))
    return true;

  // We can be sure the values are always equal or not if both ranges
  // consist of a single value, and then compare them.
  bool op1_const = wi::eq_p (op1.lower_bound (), op1.upper_bound ());
  bool op2_const = wi::eq_p (op2.lower_bound (), op2.upper_bound ());
  if (op1_const && op2_const)
    {
      // Both singletons; upper/lower bounds are interchangeable here.
      if (wi::ne_p (op1.lower_bound (), op2.upper_bound()))
	r = range_true (type);
      else
	r = range_false (type);
    }
  else
    {
      // If ranges do not intersect, we know the range is not equal,
      // otherwise we don't know anything for sure.
      int_range_max tmp = op1;
      tmp.intersect (op2);
      if (tmp.undefined_p ())
	r = range_true (type);
      // Check if a constant cannot satisfy the bitmask requirements.
      else if (op2_const && !op1.get_bitmask ().member_p (op2.lower_bound ()))
	 r = range_true (type);
      else if (op1_const && !op2.get_bitmask ().member_p (op1.lower_bound ()))
	 r = range_true (type);
      else
	r = range_true_and_false (type);
    }
  return true;
}
| |
| bool |
| operator_not_equal::op1_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op2, |
| relation_trio) const |
| { |
| switch (get_bool_state (r, lhs, type)) |
| { |
| case BRS_TRUE: |
| // If the result is true, the only time we know anything is if |
| // OP2 is a constant. |
| if (!op2.undefined_p () |
| && wi::eq_p (op2.lower_bound(), op2.upper_bound())) |
| { |
| r = op2; |
| r.invert (); |
| } |
| else |
| r.set_varying (type); |
| break; |
| |
| case BRS_FALSE: |
| // If it's false, the result is the same as OP2. |
| r = op2; |
| break; |
| |
| default: |
| break; |
| } |
| return true; |
| } |
| |
| |
| bool |
| operator_not_equal::op2_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op1, |
| relation_trio rel) const |
| { |
| return operator_not_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ()); |
| } |
| |
| // (X < VAL) produces the range of [MIN, VAL - 1]. |
| |
| static void |
| build_lt (irange &r, tree type, const wide_int &val) |
| { |
| wi::overflow_type ov; |
| wide_int lim; |
| signop sgn = TYPE_SIGN (type); |
| |
| // Signed 1 bit cannot represent 1 for subtraction. |
| if (sgn == SIGNED) |
| lim = wi::add (val, -1, sgn, &ov); |
| else |
| lim = wi::sub (val, 1, sgn, &ov); |
| |
| // If val - 1 underflows, check if X < MIN, which is an empty range. |
| if (ov) |
| r.set_undefined (); |
| else |
| r = int_range<1> (type, min_limit (type), lim); |
| } |
| |
| // (X <= VAL) produces the range of [MIN, VAL]. |
| |
static void
build_le (irange &r, tree type, const wide_int &val)
{
  // No bound adjustment needed, so overflow is impossible here.
  r = int_range<1> (type, min_limit (type), val);
}
| |
| // (X > VAL) produces the range of [VAL + 1, MAX]. |
| |
| static void |
| build_gt (irange &r, tree type, const wide_int &val) |
| { |
| wi::overflow_type ov; |
| wide_int lim; |
| signop sgn = TYPE_SIGN (type); |
| |
| // Signed 1 bit cannot represent 1 for addition. |
| if (sgn == SIGNED) |
| lim = wi::sub (val, -1, sgn, &ov); |
| else |
| lim = wi::add (val, 1, sgn, &ov); |
| // If val + 1 overflows, check is for X > MAX, which is an empty range. |
| if (ov) |
| r.set_undefined (); |
| else |
| r = int_range<1> (type, lim, max_limit (type)); |
| } |
| |
| // (X >= val) produces the range of [VAL, MAX]. |
| |
static void
build_ge (irange &r, tree type, const wide_int &val)
{
  // No bound adjustment needed, so overflow is impossible here.
  r = int_range<1> (type, val, max_limit (type));
}
| |
| |
// Refine the known-bits (bitmask) information of R, the result range
// of LH < RH.

void
operator_lt::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, LT_EXPR, lh, rh);
}
| |
| // Check if the LHS range indicates a relation between OP1 and OP2. |
| |
| relation_kind |
| operator_lt::op1_op2_relation (const irange &lhs, const irange &, |
| const irange &) const |
| { |
| if (lhs.undefined_p ()) |
| return VREL_UNDEFINED; |
| |
| // FALSE = op1 < op2 indicates GE_EXPR. |
| if (lhs.zero_p ()) |
| return VREL_GE; |
| |
| // TRUE = op1 < op2 indicates LT_EXPR. |
| if (!contains_zero_p (lhs)) |
| return VREL_LT; |
| return VREL_VARYING; |
| } |
| |
| bool |
| operator_lt::fold_range (irange &r, tree type, |
| const irange &op1, |
| const irange &op2, |
| relation_trio rel) const |
| { |
| if (relop_early_resolve (r, type, op1, op2, rel, VREL_LT)) |
| return true; |
| |
| signop sign = TYPE_SIGN (op1.type ()); |
| gcc_checking_assert (sign == TYPE_SIGN (op2.type ())); |
| |
| if (wi::lt_p (op1.upper_bound (), op2.lower_bound (), sign)) |
| r = range_true (type); |
| else if (!wi::lt_p (op1.lower_bound (), op2.upper_bound (), sign)) |
| r = range_false (type); |
| // Use nonzero bits to determine if < 0 is false. |
| else if (op2.zero_p () && !wi::neg_p (op1.get_nonzero_bits (), sign)) |
| r = range_false (type); |
| else |
| r = range_true_and_false (type); |
| return true; |
| } |
| |
| bool |
| operator_lt::op1_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op2, |
| relation_trio) const |
| { |
| if (op2.undefined_p ()) |
| return false; |
| |
| switch (get_bool_state (r, lhs, type)) |
| { |
| case BRS_TRUE: |
| build_lt (r, type, op2.upper_bound ()); |
| break; |
| |
| case BRS_FALSE: |
| build_ge (r, type, op2.lower_bound ()); |
| break; |
| |
| default: |
| break; |
| } |
| return true; |
| } |
| |
| bool |
| operator_lt::op2_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op1, |
| relation_trio) const |
| { |
| if (op1.undefined_p ()) |
| return false; |
| |
| switch (get_bool_state (r, lhs, type)) |
| { |
| case BRS_TRUE: |
| build_gt (r, type, op1.lower_bound ()); |
| break; |
| |
| case BRS_FALSE: |
| build_le (r, type, op1.upper_bound ()); |
| break; |
| |
| default: |
| break; |
| } |
| return true; |
| } |
| |
| |
// Refine the known-bits (bitmask) information of R, the result range
// of LH <= RH.

void
operator_le::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, LE_EXPR, lh, rh);
}
| |
| // Check if the LHS range indicates a relation between OP1 and OP2. |
| |
| relation_kind |
| operator_le::op1_op2_relation (const irange &lhs, const irange &, |
| const irange &) const |
| { |
| if (lhs.undefined_p ()) |
| return VREL_UNDEFINED; |
| |
| // FALSE = op1 <= op2 indicates GT_EXPR. |
| if (lhs.zero_p ()) |
| return VREL_GT; |
| |
| // TRUE = op1 <= op2 indicates LE_EXPR. |
| if (!contains_zero_p (lhs)) |
| return VREL_LE; |
| return VREL_VARYING; |
| } |
| |
| bool |
| operator_le::fold_range (irange &r, tree type, |
| const irange &op1, |
| const irange &op2, |
| relation_trio rel) const |
| { |
| if (relop_early_resolve (r, type, op1, op2, rel, VREL_LE)) |
| return true; |
| |
| signop sign = TYPE_SIGN (op1.type ()); |
| gcc_checking_assert (sign == TYPE_SIGN (op2.type ())); |
| |
| if (wi::le_p (op1.upper_bound (), op2.lower_bound (), sign)) |
| r = range_true (type); |
| else if (!wi::le_p (op1.lower_bound (), op2.upper_bound (), sign)) |
| r = range_false (type); |
| else |
| r = range_true_and_false (type); |
| return true; |
| } |
| |
| bool |
| operator_le::op1_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op2, |
| relation_trio) const |
| { |
| if (op2.undefined_p ()) |
| return false; |
| |
| switch (get_bool_state (r, lhs, type)) |
| { |
| case BRS_TRUE: |
| build_le (r, type, op2.upper_bound ()); |
| break; |
| |
| case BRS_FALSE: |
| build_gt (r, type, op2.lower_bound ()); |
| break; |
| |
| default: |
| break; |
| } |
| return true; |
| } |
| |
| bool |
| operator_le::op2_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op1, |
| relation_trio) const |
| { |
| if (op1.undefined_p ()) |
| return false; |
| |
| switch (get_bool_state (r, lhs, type)) |
| { |
| case BRS_TRUE: |
| build_ge (r, type, op1.lower_bound ()); |
| break; |
| |
| case BRS_FALSE: |
| build_lt (r, type, op1.upper_bound ()); |
| break; |
| |
| default: |
| break; |
| } |
| return true; |
| } |
| |
| |
// Refine the known-bits (bitmask) information of R, the result range
// of LH > RH.

void
operator_gt::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, GT_EXPR, lh, rh);
}
| |
| // Check if the LHS range indicates a relation between OP1 and OP2. |
| |
| relation_kind |
| operator_gt::op1_op2_relation (const irange &lhs, const irange &, |
| const irange &) const |
| { |
| if (lhs.undefined_p ()) |
| return VREL_UNDEFINED; |
| |
| // FALSE = op1 > op2 indicates LE_EXPR. |
| if (lhs.zero_p ()) |
| return VREL_LE; |
| |
| // TRUE = op1 > op2 indicates GT_EXPR. |
| if (!contains_zero_p (lhs)) |
| return VREL_GT; |
| return VREL_VARYING; |
| } |
| |
| bool |
| operator_gt::fold_range (irange &r, tree type, |
| const irange &op1, const irange &op2, |
| relation_trio rel) const |
| { |
| if (relop_early_resolve (r, type, op1, op2, rel, VREL_GT)) |
| return true; |
| |
| signop sign = TYPE_SIGN (op1.type ()); |
| gcc_checking_assert (sign == TYPE_SIGN (op2.type ())); |
| |
| if (wi::gt_p (op1.lower_bound (), op2.upper_bound (), sign)) |
| r = range_true (type); |
| else if (!wi::gt_p (op1.upper_bound (), op2.lower_bound (), sign)) |
| r = range_false (type); |
| else |
| r = range_true_and_false (type); |
| return true; |
| } |
| |
| bool |
| operator_gt::op1_range (irange &r, tree type, |
| const irange &lhs, const irange &op2, |
| relation_trio) const |
| { |
| if (op2.undefined_p ()) |
| return false; |
| |
| switch (get_bool_state (r, lhs, type)) |
| { |
| case BRS_TRUE: |
| build_gt (r, type, op2.lower_bound ()); |
| break; |
| |
| case BRS_FALSE: |
| build_le (r, type, op2.upper_bound ()); |
| break; |
| |
| default: |
| break; |
| } |
| return true; |
| } |
| |
| bool |
| operator_gt::op2_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op1, |
| relation_trio) const |
| { |
| if (op1.undefined_p ()) |
| return false; |
| |
| switch (get_bool_state (r, lhs, type)) |
| { |
| case BRS_TRUE: |
| build_lt (r, type, op1.upper_bound ()); |
| break; |
| |
| case BRS_FALSE: |
| build_ge (r, type, op1.lower_bound ()); |
| break; |
| |
| default: |
| break; |
| } |
| return true; |
| } |
| |
| |
// Refine the known-bits (bitmask) information of R, the result range
// of LH >= RH.

void
operator_ge::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, GE_EXPR, lh, rh);
}
| |
| // Check if the LHS range indicates a relation between OP1 and OP2. |
| |
| relation_kind |
| operator_ge::op1_op2_relation (const irange &lhs, const irange &, |
| const irange &) const |
| { |
| if (lhs.undefined_p ()) |
| return VREL_UNDEFINED; |
| |
| // FALSE = op1 >= op2 indicates LT_EXPR. |
| if (lhs.zero_p ()) |
| return VREL_LT; |
| |
| // TRUE = op1 >= op2 indicates GE_EXPR. |
| if (!contains_zero_p (lhs)) |
| return VREL_GE; |
| return VREL_VARYING; |
| } |
| |
| bool |
| operator_ge::fold_range (irange &r, tree type, |
| const irange &op1, |
| const irange &op2, |
| relation_trio rel) const |
| { |
| if (relop_early_resolve (r, type, op1, op2, rel, VREL_GE)) |
| return true; |
| |
| signop sign = TYPE_SIGN (op1.type ()); |
| gcc_checking_assert (sign == TYPE_SIGN (op2.type ())); |
| |
| if (wi::ge_p (op1.lower_bound (), op2.upper_bound (), sign)) |
| r = range_true (type); |
| else if (!wi::ge_p (op1.upper_bound (), op2.lower_bound (), sign)) |
| r = range_false (type); |
| else |
| r = range_true_and_false (type); |
| return true; |
| } |
| |
| bool |
| operator_ge::op1_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op2, |
| relation_trio) const |
| { |
| if (op2.undefined_p ()) |
| return false; |
| |
| switch (get_bool_state (r, lhs, type)) |
| { |
| case BRS_TRUE: |
| build_ge (r, type, op2.lower_bound ()); |
| break; |
| |
| case BRS_FALSE: |
| build_lt (r, type, op2.upper_bound ()); |
| break; |
| |
| default: |
| break; |
| } |
| return true; |
| } |
| |
| bool |
| operator_ge::op2_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op1, |
| relation_trio) const |
| { |
| if (op1.undefined_p ()) |
| return false; |
| |
| switch (get_bool_state (r, lhs, type)) |
| { |
| case BRS_TRUE: |
| build_le (r, type, op1.upper_bound ()); |
| break; |
| |
| case BRS_FALSE: |
| build_gt (r, type, op1.lower_bound ()); |
| break; |
| |
| default: |
| break; |
| } |
| return true; |
| } |
| |
| |
// Refine the known-bits (bitmask) information of R, the result range
// of LH + RH.

void
operator_plus::update_bitmask (irange &r, const irange &lh,
			       const irange &rh) const
{
  update_known_bitmask (r, PLUS_EXPR, lh, rh);
}
| |
| // Check to see if the range of OP2 indicates anything about the relation |
| // between LHS and OP1. |
| |
relation_kind
operator_plus::lhs_op1_relation (const irange &lhs,
				 const irange &op1,
				 const irange &op2,
				 relation_kind) const
{
  // No relation can be deduced when any range is undefined.
  if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
    return VREL_VARYING;

  tree type = lhs.type ();
  unsigned prec = TYPE_PRECISION (type);
  wi::overflow_type ovf1, ovf2;
  signop sign = TYPE_SIGN (type);

  // LHS = OP1 + 0 indicates LHS == OP1.
  if (op2.zero_p ())
    return VREL_EQ;

  // For wrapping types, check whether the smallest and largest
  // possible sums overflow.  A relation is only known below when both
  // bounds behave the same way (neither overflows, or both do).
  if (TYPE_OVERFLOW_WRAPS (type))
    {
      wi::add (op1.lower_bound (), op2.lower_bound (), sign, &ovf1);
      wi::add (op1.upper_bound (), op2.upper_bound (), sign, &ovf2);
    }
  else
    ovf1 = ovf2 = wi::OVF_NONE;

  // Never wrapping additions.
  if (!ovf1 && !ovf2)
    {
      // Positive op2 means lhs > op1.
      if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_GT;
      if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_GE;

      // Negative op2 means lhs < op1.
      if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_LT;
      if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_LE;
    }
  // Always wrapping additions.
  else if (ovf1 && ovf1 == ovf2)
    {
      // Positive op2 means lhs < op1 (the sum wrapped around).
      if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_LT;
      if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_LE;

      // Negative op2 means lhs > op1 (the sum wrapped around).
      if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_GT;
      if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_GE;
    }

  // If op2 does not contain 0, then LHS and OP1 can never be equal.
  if (!range_includes_zero_p (&op2))
    return VREL_NE;

  return VREL_VARYING;
}
| |
| // PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed |
| // operands. |
| |
relation_kind
operator_plus::lhs_op2_relation (const irange &lhs, const irange &op1,
				 const irange &op2, relation_kind rel) const
{
  // Addition is commutative, so swap the operands and reuse op1's logic.
  return lhs_op1_relation (lhs, op2, op1, rel);
}
| |
| void |
| operator_plus::wi_fold (irange &r, tree type, |
| const wide_int &lh_lb, const wide_int &lh_ub, |
| const wide_int &rh_lb, const wide_int &rh_ub) const |
| { |
| wi::overflow_type ov_lb, ov_ub; |
| signop s = TYPE_SIGN (type); |
| wide_int new_lb = wi::add (lh_lb, rh_lb, s, &ov_lb); |
| wide_int new_ub = wi::add (lh_ub, rh_ub, s, &ov_ub); |
| value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub); |
| } |
| |
| // Given addition or subtraction, determine the possible NORMAL ranges and |
| // OVERFLOW ranges given an OFFSET range. ADD_P is true for addition. |
| // Return the relation that exists between the LHS and OP1 in order for the |
| // NORMAL range to apply. |
// A return value of VREL_VARYING means no ranges were applicable.
| |
static relation_kind
plus_minus_ranges (irange &r_ov, irange &r_normal, const irange &offset,
		   bool add_p)
{
  relation_kind kind = VREL_VARYING;
  // For now, only deal with constant adds.  This could be extended to ranges
  // when someone is so motivated.
  if (!offset.singleton_p () || offset.zero_p ())
    return kind;

  // Always work with a positive offset.  ie a+ -2 -> a-2  and  a- -2 -> a+2.
  wide_int off = offset.lower_bound ();
  if (wi::neg_p (off, SIGNED))
    {
      add_p = !add_p;
      off = wi::neg (off);
    }

  wi::overflow_type ov;
  tree type = offset.type ();
  unsigned prec = TYPE_PRECISION (type);
  wide_int ub;
  wide_int lb;
  // Calculate the normal range and relation for the operation.
  if (add_p)
    {
      // Values in [0, INF - OFF] do not wrap when OFF is added, and
      // for those the LHS is greater than OP1.
      lb = wi::zero (prec);
      ub = wi::sub (irange_val_max (type), off, UNSIGNED, &ov);
      kind = VREL_GT;
    }
  else
    {
      // Values in [OFF, INF] do not wrap when OFF is subtracted, and
      // for those the LHS is less than OP1.
      lb = off;
      ub = irange_val_max (type);
      kind = VREL_LT;
    }
  // The overflow range is everything outside the normal range.
  int_range<2> normal_range (type, lb, ub);
  int_range<2> ov_range (type, lb, ub, VR_ANTI_RANGE);

  r_ov = ov_range;
  r_normal = normal_range;
  return kind;
}
| |
| // Once op1 has been calculated by operator_plus or operator_minus, check |
| // to see if the relation passed causes any part of the calculation to |
| // be not possible. ie |
| // a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF] |
| // and that further refines a_2 to [0, 0]. |
| // R is the value of op1, OP2 is the offset being added/subtracted, REL is the |
| // relation between LHS relation OP1 and ADD_P is true for PLUS, false for |
// MINUS.  If any adjustment can be made, R will reflect it.
| |
static void
adjust_op1_for_overflow (irange &r, const irange &op2, relation_kind rel,
			 bool add_p)
{
  // Nothing to refine if the computed range is already empty.
  if (r.undefined_p ())
    return;
  tree type = r.type ();
  // Check for unsigned overflow and calculate the overflow part.
  // Signed or non-wrapping types cannot use this refinement.
  signop s = TYPE_SIGN (type);
  if (!TYPE_OVERFLOW_WRAPS (type) || s == SIGNED)
    return;

  // Only work with <, <=, >, >= relations.
  if (!relation_lt_le_gt_ge_p (rel))
    return;

  // Get the no-wrap (NORMAL) and wrapping (OVERFLOW) ranges for this
  // offset, plus the relation that holds on the no-wrap side.
  int_range_max normal, overflow;
  relation_kind k = plus_minus_ranges (overflow, normal, op2, add_p);

  // VREL_VARYING means there are no adjustments.
  if (k == VREL_VARYING)
    return;

  // If the relations match use the normal range, otherwise use overflow range.
  if (relation_intersect (k, rel) == k)
    r.intersect (normal);
  else
    r.intersect (overflow);
  return;
}
| |
| bool |
| operator_plus::op1_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op2, |
| relation_trio trio) const |
| { |
| if (lhs.undefined_p ()) |
| return false; |
| // Start with the default operation. |
| range_op_handler minus (MINUS_EXPR); |
| if (!minus) |
| return false; |
| bool res = minus.fold_range (r, type, lhs, op2); |
| relation_kind rel = trio.lhs_op1 (); |
| // Check for a relation refinement. |
| if (res) |
| adjust_op1_for_overflow (r, op2, rel, true /* PLUS_EXPR */); |
| return res; |
| } |
| |
| bool |
| operator_plus::op2_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op1, |
| relation_trio rel) const |
| { |
| return op1_range (r, type, lhs, op1, rel.swap_op1_op2 ()); |
| } |
| |
// Operator for a signed widening addition: the result type has twice
// the precision of the first operand, so the sum cannot wrap.
class operator_widen_plus_signed : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
} op_widen_plus_signed;
| |
void
operator_widen_plus_signed::wi_fold (irange &r, tree type,
				     const wide_int &lh_lb,
				     const wide_int &lh_ub,
				     const wide_int &rh_lb,
				     const wide_int &rh_ub) const
{
  wi::overflow_type ov_lb, ov_ub;
  signop s = TYPE_SIGN (type);

  // Widen the first operand as signed, and the second using the sign
  // of the result type, doubling the precision so the addition below
  // cannot wrap.
  wide_int lh_wlb
    = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
  wide_int lh_wub
    = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
  wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
  wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);

  wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
  wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);

  r = int_range<2> (type, new_lb, new_ub);
}
| |
// Operator for an unsigned widening addition: the result type has
// twice the precision of the first operand, so the sum cannot wrap.
class operator_widen_plus_unsigned : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
} op_widen_plus_unsigned;
| |
void
operator_widen_plus_unsigned::wi_fold (irange &r, tree type,
				       const wide_int &lh_lb,
				       const wide_int &lh_ub,
				       const wide_int &rh_lb,
				       const wide_int &rh_ub) const
{
  wi::overflow_type ov_lb, ov_ub;
  signop s = TYPE_SIGN (type);

  // Widen the first operand as unsigned, and the second using the sign
  // of the result type, doubling the precision so the addition below
  // cannot wrap.
  wide_int lh_wlb
    = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
  wide_int lh_wub
    = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
  wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
  wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);

  wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
  wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);

  r = int_range<2> (type, new_lb, new_ub);
}
| |
// Refine the known-bits (bitmask) information of R, the result range
// of LH - RH.

void
operator_minus::update_bitmask (irange &r, const irange &lh,
				const irange &rh) const
{
  update_known_bitmask (r, MINUS_EXPR, lh, rh);
}
| |
| void |
| operator_minus::wi_fold (irange &r, tree type, |
| const wide_int &lh_lb, const wide_int &lh_ub, |
| const wide_int &rh_lb, const wide_int &rh_ub) const |
| { |
| wi::overflow_type ov_lb, ov_ub; |
| signop s = TYPE_SIGN (type); |
| wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb); |
| wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub); |
| value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub); |
| } |
| |
| |
| // Return the relation between LHS and OP1 based on the relation between |
| // OP1 and OP2. |
| |
| relation_kind |
| operator_minus::lhs_op1_relation (const irange &, const irange &op1, |
| const irange &, relation_kind rel) const |
| { |
| if (!op1.undefined_p () && TYPE_SIGN (op1.type ()) == UNSIGNED) |
| switch (rel) |
| { |
| case VREL_GT: |
| case VREL_GE: |
| return VREL_LE; |
| default: |
| break; |
| } |
| return VREL_VARYING; |
| } |
| |
| // Check to see if the relation REL between OP1 and OP2 has any effect on the |
| // LHS of the expression. If so, apply it to LHS_RANGE. This is a helper |
| // function for both MINUS_EXPR and POINTER_DIFF_EXPR. |
| |
// Returns true and narrows LHS_RANGE when REL supplies information;
// returns false (LHS_RANGE untouched) otherwise.

bool
minus_op1_op2_relation_effect (irange &lhs_range, tree type,
			       const irange &op1_range ATTRIBUTE_UNUSED,
			       const irange &op2_range ATTRIBUTE_UNUSED,
			       relation_kind rel)
{
  if (rel == VREL_VARYING)
    return false;

  int_range<2> rel_range;
  unsigned prec = TYPE_PRECISION (type);
  signop sgn = TYPE_SIGN (type);

  // == and != produce [0,0] and ~[0,0] regardless of wrapping.
  if (rel == VREL_EQ)
    rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec));
  else if (rel == VREL_NE)
    rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
			      VR_ANTI_RANGE);
  else if (TYPE_OVERFLOW_WRAPS (type))
    {
      switch (rel)
	{
	  // For wrapping signed values and unsigned, if op1 > op2 or
	  // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
	  case VREL_GT:
	  case VREL_LT:
	    rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
				      VR_ANTI_RANGE);
	    break;
	  default:
	    return false;
	}
    }
  else
    {
      // With undefined overflow, the ordering relation bounds the
      // difference on one side of zero.
      switch (rel)
	{
	  // op1 > op2, op1 - op2 can be restricted to [1, +INF]
	  case VREL_GT:
	    rel_range = int_range<2> (type, wi::one (prec),
				      wi::max_value (prec, sgn));
	    break;
	  // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
	  case VREL_GE:
	    rel_range = int_range<2> (type, wi::zero (prec),
				      wi::max_value (prec, sgn));
	    break;
	  // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
	  case VREL_LT:
	    rel_range = int_range<2> (type, wi::min_value (prec, sgn),
				      wi::minus_one (prec));
	    break;
	  // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
	  case VREL_LE:
	    rel_range = int_range<2> (type, wi::min_value (prec, sgn),
				      wi::zero (prec));
	    break;
	  default:
	    return false;
	}
    }
  lhs_range.intersect (rel_range);
  return true;
}
| |
// Apply the effect of the OP1/OP2 relation REL to LHS_RANGE by
// deferring to the shared MINUS/POINTER_DIFF helper.

bool
operator_minus::op1_op2_relation_effect (irange &lhs_range, tree type,
					 const irange &op1_range,
					 const irange &op2_range,
					 relation_kind rel) const
{
  return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
					rel);
}
| |
| bool |
| operator_minus::op1_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op2, |
| relation_trio trio) const |
| { |
| if (lhs.undefined_p ()) |
| return false; |
| // Start with the default operation. |
| range_op_handler minus (PLUS_EXPR); |
| if (!minus) |
| return false; |
| bool res = minus.fold_range (r, type, lhs, op2); |
| relation_kind rel = trio.lhs_op1 (); |
| if (res) |
| adjust_op1_for_overflow (r, op2, rel, false /* PLUS_EXPR */); |
| return res; |
| |
| } |
| |
// Compute the range of OP2 given the LHS of OP1 - OP2 and the range of
// OP1: since lhs = op1 - op2, we have op2 = op1 - lhs, which is the
// forward fold with LHS in the subtrahend position.

bool
operator_minus::op2_range (irange &r, tree type,
			   const irange &lhs,
			   const irange &op1,
			   relation_trio) const
{
  if (lhs.undefined_p ())
    return false;
  return fold_range (r, type, op1, lhs);
}
| |
// Refine the known-bits (bitmask) information of R, the result range
// of MIN (LH, RH).

void
operator_min::update_bitmask (irange &r, const irange &lh,
			      const irange &rh) const
{
  update_known_bitmask (r, MIN_EXPR, lh, rh);
}
| |
| void |
| operator_min::wi_fold (irange &r, tree type, |
| const wide_int &lh_lb, const wide_int &lh_ub, |
| const wide_int &rh_lb, const wide_int &rh_ub) const |
| { |
| signop s = TYPE_SIGN (type); |
| wide_int new_lb = wi::min (lh_lb, rh_lb, s); |
| wide_int new_ub = wi::min (lh_ub, rh_ub, s); |
| value_range_with_overflow (r, type, new_lb, new_ub); |
| } |
| |
| |
// Refine the known-bits (bitmask) information of R, the result range
// of MAX (LH, RH).

void
operator_max::update_bitmask (irange &r, const irange &lh,
			      const irange &rh) const
{
  update_known_bitmask (r, MAX_EXPR, lh, rh);
}
| |
| void |
| operator_max::wi_fold (irange &r, tree type, |
| const wide_int &lh_lb, const wide_int &lh_ub, |
| const wide_int &rh_lb, const wide_int &rh_ub) const |
| { |
| signop s = TYPE_SIGN (type); |
| wide_int new_lb = wi::max (lh_lb, rh_lb, s); |
| wide_int new_ub = wi::max (lh_ub, rh_ub, s); |
| value_range_with_overflow (r, type, new_lb, new_ub); |
| } |
| |
| |
| // Calculate the cross product of two sets of ranges and return it. |
| // |
| // Multiplications, divisions and shifts are a bit tricky to handle, |
| // depending on the mix of signs we have in the two ranges, we need to |
| // operate on different values to get the minimum and maximum values |
| // for the new range. One approach is to figure out all the |
| // variations of range combinations and do the operations. |
| // |
| // However, this involves several calls to compare_values and it is |
| // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP |
// MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1) and then
| // figure the smallest and largest values to form the new range. |
| |
void
cross_product_operator::wi_cross_product (irange &r, tree type,
					  const wide_int &lh_lb,
					  const wide_int &lh_ub,
					  const wide_int &rh_lb,
					  const wide_int &rh_ub) const
{
  wide_int cp1, cp2, cp3, cp4;
  // Default to varying.
  r.set_varying (type);

  // Compute the 4 cross operations, bailing if we get an overflow we
  // can't handle.  Singleton bounds let us reuse an already computed
  // product and skip its overflow check.
  if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp3 = cp1;
  else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
    return;
  if (wi::eq_p (rh_lb, rh_ub))
    cp2 = cp1;
  else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp4 = cp2;
  else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
    return;

  // Order pairs.
  signop sign = TYPE_SIGN (type);
  if (wi::gt_p (cp1, cp2, sign))
    std::swap (cp1, cp2);
  if (wi::gt_p (cp3, cp4, sign))
    std::swap (cp3, cp4);

  // Choose min and max from the ordered pairs.
  wide_int res_lb = wi::min (cp1, cp3, sign);
  wide_int res_ub = wi::max (cp2, cp4, sign);
  value_range_with_overflow (r, type, res_lb, res_ub);
}
| |
| |
// Refine the known-bits (bitmask) information of R, the result range
// of LH * RH.

void
operator_mult::update_bitmask (irange &r, const irange &lh,
			       const irange &rh) const
{
  update_known_bitmask (r, MULT_EXPR, lh, rh);
}
| |
// Compute the range of OP1 given the LHS of OP1 * OP2 and the range of
// OP2.  Only possible for non-wrapping types with a nonzero constant
// OP2, where op1 = lhs / op2.

bool
operator_mult::op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio) const
{
  if (lhs.undefined_p ())
    return false;

  // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
  // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
  // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
  if (TYPE_OVERFLOW_WRAPS (type))
    return false;

  // With undefined overflow, solve by dividing by a nonzero constant.
  wide_int offset;
  if (op2.singleton_p (offset) && offset != 0)
    return range_op_handler (TRUNC_DIV_EXPR).fold_range (r, type, lhs, op2);
  return false;
}
| |
| bool |
| operator_mult::op2_range (irange &r, tree type, |
| const irange &lhs, const irange &op1, |
| relation_trio rel) const |
| { |
| return operator_mult::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ()); |
| } |
| |
// Compute RES = W0 * W1 and return true if the multiplication
// overflowed.  For types with undefined overflow, saturate RES to the
// appropriate type extreme and report no overflow, so callers keep the
// saturated bound.

bool
operator_mult::wi_op_overflows (wide_int &res, tree type,
				const wide_int &w0, const wide_int &w1) const
{
  wi::overflow_type overflow = wi::OVF_NONE;
  signop sign = TYPE_SIGN (type);
  res = wi::mul (w0, w1, sign, &overflow);
  if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
    {
      // For multiplication, the sign of the overflow is given
      // by the comparison of the signs of the operands.
      if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
	res = wi::max_value (w0.get_precision (), sign);
      else
	res = wi::min_value (w0.get_precision (), sign);
      return false;
    }
  return overflow;
}
| |
void
operator_mult::wi_fold (irange &r, tree type,
			const wide_int &lh_lb, const wide_int &lh_ub,
			const wide_int &rh_lb, const wide_int &rh_ub) const
{
  // With undefined overflow the plain cross product suffices.
  if (TYPE_OVERFLOW_UNDEFINED (type))
    {
      wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
      return;
    }

  // Multiply the ranges when overflow wraps.  This is basically fancy
  // code so we don't drop to varying with an unsigned
  // [-3,-1]*[-3,-1].
  //
  // This test requires 2*prec bits if both operands are signed and
  // 2*prec + 2 bits if either is not.  Therefore, extend the values
  // using the sign of the result to PREC2.  From here on out,
  // everything is just signed math no matter what the input types
  // were.

  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);
  widest2_int min0 = widest2_int::from (lh_lb, sign);
  widest2_int max0 = widest2_int::from (lh_ub, sign);
  widest2_int min1 = widest2_int::from (rh_lb, sign);
  widest2_int max1 = widest2_int::from (rh_ub, sign);
  widest2_int sizem1 = wi::mask <widest2_int> (prec, false);
  widest2_int size = sizem1 + 1;

  // Canonicalize the intervals.  If an unsigned interval straddles the
  // wrap point, shift it down by the type size so the math below can
  // treat it as a signed interval.
  if (sign == UNSIGNED)
    {
      if (wi::ltu_p (size, min0 + max0))
	{
	  min0 -= size;
	  max0 -= size;
	}
      if (wi::ltu_p (size, min1 + max1))
	{
	  min1 -= size;
	  max1 -= size;
	}
    }

  // Sort the 4 products so that min is in prod0 and max is in
  // prod3.
  widest2_int prod0 = min0 * min1;
  widest2_int prod1 = min0 * max1;
  widest2_int prod2 = max0 * min1;
  widest2_int prod3 = max0 * max1;

  // min0min1 > max0max1
  if (prod0 > prod3)
    std::swap (prod0, prod3);

  // min0max1 > max0min1
  if (prod1 > prod2)
    std::swap (prod1, prod2);

  if (prod0 > prod1)
    std::swap (prod0, prod1);

  if (prod2 > prod3)
    std::swap (prod2, prod3);

  // diff = max - min
  prod2 = prod3 - prod0;
  if (wi::geu_p (prod2, sizem1))
    {
      // A result span of at least the type size wraps; drop to varying
      // unless the power-of-2 special case below applies.
      // Multiplying by X, where X is a power of 2 is [0,0][X,+INF].
      if (TYPE_UNSIGNED (type) && rh_lb == rh_ub
	  && wi::exact_log2 (rh_lb) != -1 && prec > 1)
	{
	  r.set (type, rh_lb, wi::max_value (prec, sign));
	  int_range<2> zero;
	  zero.set_zero (type);
	  r.union_ (zero);
	}
      else
	// The range covers all values.
	r.set_varying (type);
    }
  else
    {
      // Truncate the doubled-precision bounds back to the type and let
      // create_possibly_reversed_range build the (possibly wrapped)
      // result.
      wide_int new_lb = wide_int::from (prod0, prec, sign);
      wide_int new_ub = wide_int::from (prod3, prec, sign);
      create_possibly_reversed_range (r, type, new_lb, new_ub);
    }
}
| |
// Operator for a signed widening multiplication: the result type has
// twice the precision of the first operand.
class operator_widen_mult_signed : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub)
    const;
} op_widen_mult_signed;
| |
| void |
| operator_widen_mult_signed::wi_fold (irange &r, tree type, |
| const wide_int &lh_lb, |
| const wide_int &lh_ub, |
| const wide_int &rh_lb, |
| const wide_int &rh_ub) const |
| { |
| signop s = TYPE_SIGN (type); |
| |
| wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED); |
| wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED); |
| wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s); |
| wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s); |
| |
| /* We don't expect a widening multiplication to be able to overflow but range |
| calculations for multiplications are complicated. After widening the |
| operands lets call the base class. */ |
| return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub); |
| } |
| |
| |
// Range folder for an unsigned widening multiply.  Operand 1 is
// treated as unsigned; operand 2 is extended per the sign of the
// (widened) result TYPE.  NOTE(review): wi_fold doubles the operand
// precision, which presumes TYPE is exactly twice as wide as the
// operands -- confirm against WIDEN_MULT_EXPR's gimple contract.
class operator_widen_mult_unsigned : public range_operator
{
public:
  // Fold [LH_LB, LH_UB] * [RH_LB, RH_UB] into R in the widened TYPE.
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub)
    const;
} op_widen_mult_unsigned;
| |
| void |
| operator_widen_mult_unsigned::wi_fold (irange &r, tree type, |
| const wide_int &lh_lb, |
| const wide_int &lh_ub, |
| const wide_int &rh_lb, |
| const wide_int &rh_ub) const |
| { |
| signop s = TYPE_SIGN (type); |
| |
| wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED); |
| wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED); |
| wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s); |
| wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s); |
| |
| /* We don't expect a widening multiplication to be able to overflow but range |
| calculations for multiplications are complicated. After widening the |
| operands lets call the base class. */ |
| return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub); |
| } |
| |
// Range folder shared by all division variants.  The rounding
// behavior (trunc/floor/round/ceil) is selected by the tree code
// passed to the constructor and stored in m_code.
class operator_div : public cross_product_operator
{
public:
  // DIV_KIND is the tree code of the division this instance folds.
  operator_div (tree_code div_kind) { m_code = div_kind; }
  virtual void wi_fold (irange &r, tree type,
		        const wide_int &lh_lb,
		        const wide_int &lh_ub,
		        const wide_int &rh_lb,
		        const wide_int &rh_ub) const final override;
  // Return TRUE if W0 / W1 overflows (or divides by zero); otherwise
  // store the quotient in RES.
  virtual bool wi_op_overflows (wide_int &res, tree type,
				const wide_int &, const wide_int &)
    const final override;
  // Refine R's known-bit mask using the division's bit semantics.
  void update_bitmask (irange &r, const irange &lh, const irange &rh) const
    { update_known_bitmask (r, m_code, lh, rh); }
protected:
  tree_code m_code;
};

// One shared instance per supported division tree code.
static operator_div op_trunc_div (TRUNC_DIV_EXPR);
static operator_div op_floor_div (FLOOR_DIV_EXPR);
static operator_div op_round_div (ROUND_DIV_EXPR);
static operator_div op_ceil_div (CEIL_DIV_EXPR);
| |
| bool |
| operator_div::wi_op_overflows (wide_int &res, tree type, |
| const wide_int &w0, const wide_int &w1) const |
| { |
| if (w1 == 0) |
| return true; |
| |
| wi::overflow_type overflow = wi::OVF_NONE; |
| signop sign = TYPE_SIGN (type); |
| |
| switch (m_code) |
| { |
| case EXACT_DIV_EXPR: |
| case TRUNC_DIV_EXPR: |
| res = wi::div_trunc (w0, w1, sign, &overflow); |
| break; |
| case FLOOR_DIV_EXPR: |
| res = wi::div_floor (w0, w1, sign, &overflow); |
| break; |
| case ROUND_DIV_EXPR: |
| res = wi::div_round (w0, w1, sign, &overflow); |
| break; |
| case CEIL_DIV_EXPR: |
| res = wi::div_ceil (w0, w1, sign, &overflow); |
| break; |
| default: |
| gcc_unreachable (); |
| } |
| |
| if (overflow && TYPE_OVERFLOW_UNDEFINED (type)) |
| { |
| // For division, the only case is -INF / -1 = +INF. |
| res = wi::max_value (w0.get_precision (), sign); |
| return false; |
| } |
| return overflow; |
| } |
| |
| void |
| operator_div::wi_fold (irange &r, tree type, |
| const wide_int &lh_lb, const wide_int &lh_ub, |
| const wide_int &rh_lb, const wide_int &rh_ub) const |
| { |
| const wide_int dividend_min = lh_lb; |
| const wide_int dividend_max = lh_ub; |
| const wide_int divisor_min = rh_lb; |
| const wide_int divisor_max = rh_ub; |
| signop sign = TYPE_SIGN (type); |
| unsigned prec = TYPE_PRECISION (type); |
| wide_int extra_min, extra_max; |
| |
| // If we know we won't divide by zero, just do the division. |
| if (!wi_includes_zero_p (type, divisor_min, divisor_max)) |
| { |
| wi_cross_product (r, type, dividend_min, dividend_max, |
| divisor_min, divisor_max); |
| return; |
| } |
| |
| // If we're definitely dividing by zero, there's nothing to do. |
| if (wi_zero_p (type, divisor_min, divisor_max)) |
| { |
| r.set_undefined (); |
| return; |
| } |
| |
| // Perform the division in 2 parts, [LB, -1] and [1, UB], which will |
| // skip any division by zero. |
| |
| // First divide by the negative numbers, if any. |
| if (wi::neg_p (divisor_min, sign)) |
| wi_cross_product (r, type, dividend_min, dividend_max, |
| divisor_min, wi::minus_one (prec)); |
| else |
| r.set_undefined (); |
| |
| // Then divide by the non-zero positive numbers, if any. |
| if (wi::gt_p (divisor_max, wi::zero (prec), sign)) |
| { |
| int_range_max tmp; |
| wi_cross_product (tmp, type, dividend_min, dividend_max, |
| wi::one (prec), divisor_max); |
| r.union_ (tmp); |
| } |
| // We shouldn't still have undefined here. |
| gcc_checking_assert (!r.undefined_p ()); |
| } |
| |
| |
// EXACT_DIV_EXPR: a division known to leave no remainder.  Shares
// wi_fold with operator_div but can invert precisely via op1_range
// since op1 == LHS * op2 exactly.
class operator_exact_divide : public operator_div
{
  using range_operator::op1_range;
public:
  operator_exact_divide () : operator_div (EXACT_DIV_EXPR) { }
  // Compute op1's range from LHS and OP2 (op1 = LHS * OP2).
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs,
			  const irange &op2,
			  relation_trio) const;

} op_exact_div;
| |
| bool |
| operator_exact_divide::op1_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op2, |
| relation_trio) const |
| { |
| if (lhs.undefined_p ()) |
| return false; |
| wide_int offset; |
| // [2, 4] = op1 / [3,3] since its exact divide, no need to worry about |
| // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12]. |
| // We wont bother trying to enumerate all the in between stuff :-P |
| // TRUE accuracy is [6,6][9,9][12,12]. This is unlikely to matter most of |
| // the time however. |
| // If op2 is a multiple of 2, we would be able to set some non-zero bits. |
| if (op2.singleton_p (offset) && offset != 0) |
| return range_op_handler (MULT_EXPR).fold_range (r, type, lhs, op2); |
| return false; |
| } |
| |
| |
// Left-shift operator.  Constant shift counts are folded as a
// multiply by the corresponding power of two; op1_range attempts to
// reconstruct the shifted operand from the LHS.
class operator_lshift : public cross_product_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
public:
  virtual bool op1_range (irange &r, tree type, const irange &lhs,
			  const irange &op2, relation_trio rel = TRIO_VARYING)
    const final override;
  virtual bool fold_range (irange &r, tree type, const irange &op1,
			   const irange &op2, relation_trio rel = TRIO_VARYING)
    const final override;

  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb, const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const final override;
  // Overflow of the shift itself is ignored; see the implementation.
  virtual bool wi_op_overflows (wide_int &res,
				tree type,
				const wide_int &,
				const wide_int &) const final override;
  void update_bitmask (irange &r, const irange &lh,
		       const irange &rh) const final override
    { update_known_bitmask (r, LSHIFT_EXPR, lh, rh); }
  // Check compatibility of LHS and op1.
  bool operand_check_p (tree t1, tree t2, tree) const final override
    { return range_compatible_p (t1, t2); }
} op_lshift;
| |
// Right-shift operator.  Also provides an LHS/op1 relation: shifting
// a non-negative value right by a non-negative count cannot increase
// it (see lhs_op1_relation).
class operator_rshift : public cross_product_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
  using range_operator::lhs_op1_relation;
public:
  virtual bool fold_range (irange &r, tree type, const irange &op1,
			   const irange &op2, relation_trio rel = TRIO_VARYING)
    const final override;
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const final override;
  // Overflow of the shift itself is ignored; see the implementation.
  virtual bool wi_op_overflows (wide_int &res,
				tree type,
				const wide_int &w0,
				const wide_int &w1) const final override;
  virtual bool op1_range (irange &, tree type, const irange &lhs,
			  const irange &op2, relation_trio rel = TRIO_VARYING)
    const final override;
  virtual relation_kind lhs_op1_relation (const irange &lhs, const irange &op1,
					  const irange &op2, relation_kind rel)
    const final override;
  void update_bitmask (irange &r, const irange &lh,
		       const irange &rh) const final override
    { update_known_bitmask (r, RSHIFT_EXPR, lh, rh); }
  // Check compatibility of LHS and op1.
  bool operand_check_p (tree t1, tree t2, tree) const final override
    { return range_compatible_p (t1, t2); }
} op_rshift;
| |
| |
| relation_kind |
| operator_rshift::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED, |
| const irange &op1, |
| const irange &op2, |
| relation_kind) const |
| { |
| // If both operands range are >= 0, then the LHS <= op1. |
| if (!op1.undefined_p () && !op2.undefined_p () |
| && wi::ge_p (op1.lower_bound (), 0, TYPE_SIGN (op1.type ())) |
| && wi::ge_p (op2.lower_bound (), 0, TYPE_SIGN (op2.type ()))) |
| return VREL_LE; |
| return VREL_VARYING; |
| } |
| |
| bool |
| operator_lshift::fold_range (irange &r, tree type, |
| const irange &op1, |
| const irange &op2, |
| relation_trio rel) const |
| { |
| int_range_max shift_range; |
| if (!get_shift_range (shift_range, type, op2)) |
| { |
| if (op2.undefined_p ()) |
| r.set_undefined (); |
| else |
| r.set_zero (type); |
| return true; |
| } |
| |
| // Transform left shifts by constants into multiplies. |
| if (shift_range.singleton_p ()) |
| { |
| unsigned shift = shift_range.lower_bound ().to_uhwi (); |
| wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type)); |
| int_range<1> mult (type, tmp, tmp); |
| |
| // Force wrapping multiplication. |
| bool saved_flag_wrapv = flag_wrapv; |
| bool saved_flag_wrapv_pointer = flag_wrapv_pointer; |
| flag_wrapv = 1; |
| flag_wrapv_pointer = 1; |
| bool b = op_mult.fold_range (r, type, op1, mult); |
| flag_wrapv = saved_flag_wrapv; |
| flag_wrapv_pointer = saved_flag_wrapv_pointer; |
| return b; |
| } |
| else |
| // Otherwise, invoke the generic fold routine. |
| return range_operator::fold_range (r, type, op1, shift_range, rel); |
| } |
| |
// Fold [LH_LB, LH_UB] << [RH_LB, RH_UB] into R.  The shift is folded
// as a cross product only when no bits can be shifted "out" of the
// value (the result moves monotonically); otherwise R is VARYING.

void
operator_lshift::wi_fold (irange &r, tree type,
			  const wide_int &lh_lb, const wide_int &lh_ub,
			  const wide_int &rh_lb, const wide_int &rh_ub) const
{
  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);
  // For signed types the sign bit must also survive the shift.
  int overflow_pos = sign == SIGNED ? prec - 1 : prec;
  // Number of low bits a value may occupy and still shift safely by
  // the maximum count.  NOTE(review): assumes rh_ub fits in a shwi
  // and is a sensible shift count here -- confirm against callers.
  int bound_shift = overflow_pos - rh_ub.to_shwi ();
  // If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
  // overflow.  However, for that to happen, rh.max needs to be zero,
  // which means rh is a singleton range of zero, which means we simply return
  // [lh_lb, lh_ub] as the range.
  if (wi::eq_p (rh_ub, rh_lb) && wi::eq_p (rh_ub, 0))
    {
      r = int_range<2> (type, lh_lb, lh_ub);
      return;
    }

  // BOUND is the first value whose top bits would be shifted out;
  // COMPLEMENT covers the mirror-image high values.
  wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
  wide_int complement = ~(bound - 1);
  wide_int low_bound, high_bound;
  bool in_bounds = false;

  if (sign == UNSIGNED)
    {
      low_bound = bound;
      high_bound = complement;
      if (wi::ltu_p (lh_ub, low_bound))
	{
	  // [5, 6] << [1, 2] == [10, 24].
	  // We're shifting out only zeroes, the value increases
	  // monotonically.
	  in_bounds = true;
	}
      else if (wi::ltu_p (high_bound, lh_lb))
	{
	  // [0xffffff00, 0xffffffff] << [1, 2]
	  // == [0xfffffc00, 0xfffffffe].
	  // We're shifting out only ones, the value decreases
	  // monotonically.
	  in_bounds = true;
	}
    }
  else
    {
      // [-1, 1] << [1, 2] == [-4, 4]
      low_bound = complement;
      high_bound = bound;
      if (wi::lts_p (lh_ub, high_bound)
	  && wi::lts_p (low_bound, lh_lb))
	{
	  // For non-negative numbers, we're shifting out only zeroes,
	  // the value increases monotonically.  For negative numbers,
	  // we're shifting out only ones, the value decreases
	  // monotonically.
	  in_bounds = true;
	}
    }

  if (in_bounds)
    wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
  else
    r.set_varying (type);
}
| |
| bool |
| operator_lshift::wi_op_overflows (wide_int &res, tree type, |
| const wide_int &w0, const wide_int &w1) const |
| { |
| signop sign = TYPE_SIGN (type); |
| if (wi::neg_p (w1)) |
| { |
| // It's unclear from the C standard whether shifts can overflow. |
| // The following code ignores overflow; perhaps a C standard |
| // interpretation ruling is needed. |
| res = wi::rshift (w0, -w1, sign); |
| } |
| else |
| res = wi::lshift (w0, w1); |
| return false; |
| } |
| |
// Compute the range of op1 from LHS = op1 << OP2.  Only a singleton
// shift count is inverted precisely; otherwise we can at best say
// op1 is nonzero when the LHS excludes zero.

bool
operator_lshift::op1_range (irange &r,
			    tree type,
			    const irange &lhs,
			    const irange &op2,
			    relation_trio) const
{
  if (lhs.undefined_p ())
    return false;

  // Fallback result: a nonzero LHS needs a nonzero op1.
  if (!contains_zero_p (lhs))
    r.set_nonzero (type);
  else
    r.set_varying (type);

  wide_int shift;
  if (op2.singleton_p (shift))
    {
      // Negative or over-wide shift counts cannot be inverted.
      if (wi::lt_p (shift, 0, SIGNED))
	return false;
      if (wi::ge_p (shift, wi::uhwi (TYPE_PRECISION (type),
				     TYPE_PRECISION (op2.type ())),
		    UNSIGNED))
	return false;
      // Shift of zero: op1 is simply the LHS.
      if (shift == 0)
	{
	  r.intersect (lhs);
	  return true;
	}

      // Work completely in unsigned mode to start.
      tree utype = type;
      int_range_max tmp_range;
      if (TYPE_SIGN (type) == SIGNED)
	{
	  int_range_max tmp = lhs;
	  utype = unsigned_type_for (type);
	  range_cast (tmp, utype);
	  op_rshift.fold_range (tmp_range, utype, tmp, op2);
	}
      else
	op_rshift.fold_range (tmp_range, utype, lhs, op2);

      // Start with ranges which can produce the LHS by right shifting the
      // result by the shift amount.
      // ie [0x08, 0xF0] = op1 << 2 will start with
      // [00001000, 11110000] = op1 << 2
      //   [0x02, 0x4C] aka [00000010, 00111100]

      // Then create a range from the LB with the least significant upper bit
      // set, to the upper bound with all the bits set.
      // This would be [0x42, 0xFC] aka [01000010, 11111100].

      // Ideally we do this for each subrange, but just lump them all for now.
      unsigned low_bits = TYPE_PRECISION (utype) - shift.to_uhwi ();
      wide_int up_mask = wi::mask (low_bits, true, TYPE_PRECISION (utype));
      wide_int new_ub = wi::bit_or (up_mask, tmp_range.upper_bound ());
      wide_int new_lb = wi::set_bit (tmp_range.lower_bound (), low_bits);
      int_range<2> fill_range (utype, new_lb, new_ub);
      tmp_range.union_ (fill_range);

      // Cast back to the signed type if we worked in unsigned mode.
      if (utype != type)
	range_cast (tmp_range, type);

      r.intersect (tmp_range);
      return true;
    }

  // Report success only if the fallback narrowed anything.
  return !r.varying_p ();
}
| |
// Compute the range of op1 from LHS = op1 >> OP2.  Only a singleton
// shift count is handled; op1 is reconstructed by shifting the LHS
// back left and widening by the bits lost to the shift.

bool
operator_rshift::op1_range (irange &r,
			    tree type,
			    const irange &lhs,
			    const irange &op2,
			    relation_trio) const
{
  if (lhs.undefined_p ())
    return false;
  wide_int shift;
  if (op2.singleton_p (shift))
    {
      // Ignore nonsensical shifts.
      unsigned prec = TYPE_PRECISION (type);
      if (wi::ge_p (shift,
		    wi::uhwi (prec, TYPE_PRECISION (op2.type ())),
		    UNSIGNED))
	return false;
      // Shift of zero: op1 is simply the LHS.
      if (shift == 0)
	{
	  r = lhs;
	  return true;
	}

      // Folding the original operation may discard some impossible
      // ranges from the LHS.
      int_range_max lhs_refined;
      op_rshift.fold_range (lhs_refined, type, int_range<1> (type), op2);
      lhs_refined.intersect (lhs);
      if (lhs_refined.undefined_p ())
	{
	  r.set_undefined ();
	  return true;
	}
      int_range_max shift_range (op2.type (), shift, shift);
      int_range_max lb, ub;
      op_lshift.fold_range (lb, type, lhs_refined, shift_range);
      //	  LHS
      // 0000 0111 = OP1 >> 3
      //
      // OP1 is anything from 0011 1000 to 0011 1111.  That is, a
      // range from LHS<<3 plus a mask of the 3 bits we shifted on the
      // right hand side (0x07).
      wide_int mask = wi::bit_not (wi::lshift (wi::minus_one (prec), shift));
      int_range_max mask_range (type,
				wi::zero (TYPE_PRECISION (type)),
				mask);
      op_plus.fold_range (ub, type, lb, mask_range);
      r = lb;
      r.union_ (ub);
      // A nonzero LHS means op1's low SHIFT bits cannot all pair with
      // a zero high part; exclude the pure-mask values.
      if (!contains_zero_p (lhs_refined))
	{
	  mask_range.invert ();
	  r.intersect (mask_range);
	}
      return true;
    }
  return false;
}
| |
| bool |
| operator_rshift::wi_op_overflows (wide_int &res, |
| tree type, |
| const wide_int &w0, |
| const wide_int &w1) const |
| { |
| signop sign = TYPE_SIGN (type); |
| if (wi::neg_p (w1)) |
| res = wi::lshift (w0, -w1); |
| else |
| { |
| // It's unclear from the C standard whether shifts can overflow. |
| // The following code ignores overflow; perhaps a C standard |
| // interpretation ruling is needed. |
| res = wi::rshift (w0, w1, sign); |
| } |
| return false; |
| } |
| |
| bool |
| operator_rshift::fold_range (irange &r, tree type, |
| const irange &op1, |
| const irange &op2, |
| relation_trio rel) const |
| { |
| int_range_max shift; |
| if (!get_shift_range (shift, type, op2)) |
| { |
| if (op2.undefined_p ()) |
| r.set_undefined (); |
| else |
| r.set_zero (type); |
| return true; |
| } |
| |
| return range_operator::fold_range (r, type, op1, shift, rel); |
| } |
| |
// Fold [LH_LB, LH_UB] >> [RH_LB, RH_UB] into R by taking the cross
// product of the bound pairs.

void
operator_rshift::wi_fold (irange &r, tree type,
			  const wide_int &lh_lb, const wide_int &lh_ub,
			  const wide_int &rh_lb, const wide_int &rh_ub) const
{
  wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
}
| |
| |
| // Add a partial equivalence between the LHS and op1 for casts. |
| |
relation_kind
operator_cast::lhs_op1_relation (const irange &lhs,
				 const irange &op1,
				 const irange &op2 ATTRIBUTE_UNUSED,
				 relation_kind) const
{
  if (lhs.undefined_p () || op1.undefined_p ())
    return VREL_VARYING;
  unsigned lhs_prec = TYPE_PRECISION (lhs.type ());
  unsigned op1_prec = TYPE_PRECISION (op1.type ());
  // If the result gets sign extended into a larger type check first if this
  // qualifies as a partial equivalence.
  if (TYPE_SIGN (op1.type ()) == SIGNED && lhs_prec > op1_prec)
    {
      // If the result is sign extended, and the LHS is larger than op1,
      // check if op1's range can be negative as the sign extension will
      // cause the upper bits to be 1 instead of 0, invalidating the PE.
      int_range<3> negs = range_negatives (op1.type ());
      negs.intersect (op1);
      // Any possible negative value kills the partial equivalence.
      if (!negs.undefined_p ())
	return VREL_VARYING;
    }

  // The low MIN (lhs_prec, op1_prec) bits are preserved by the cast,
  // which maps to a partial-equivalence relation of that width.
  unsigned prec = MIN (lhs_prec, op1_prec);
  return bits_to_pe (prec);
}
| |
| // Return TRUE if casting from INNER to OUTER is a truncating cast. |
| |
| inline bool |
| operator_cast::truncating_cast_p (const irange &inner, |
| const irange &outer) const |
| { |
| return TYPE_PRECISION (outer.type ()) < TYPE_PRECISION (inner.type ()); |
| } |
| |
| // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type. |
| |
| bool |
| operator_cast::inside_domain_p (const wide_int &min, |
| const wide_int &max, |
| const irange &range) const |
| { |
| wide_int domain_min = irange_val_min (range.type ()); |
| wide_int domain_max = irange_val_max (range.type ()); |
| signop domain_sign = TYPE_SIGN (range.type ()); |
| return (wi::le_p (min, domain_max, domain_sign) |
| && wi::le_p (max, domain_max, domain_sign) |
| && wi::ge_p (min, domain_min, domain_sign) |
| && wi::ge_p (max, domain_min, domain_sign)); |
| } |
| |
| |
// Helper for fold_range which works on one subrange pair at a time.
| |
void
operator_cast::fold_pair (irange &r, unsigned index,
			   const irange &inner,
			   const irange &outer) const
{
  tree inner_type = inner.type ();
  tree outer_type = outer.type ();
  signop inner_sign = TYPE_SIGN (inner_type);
  unsigned outer_prec = TYPE_PRECISION (outer_type);

  // check to see if casting from INNER to OUTER is a conversion that
  // fits in the resulting OUTER type.
  wide_int inner_lb = inner.lower_bound (index);
  wide_int inner_ub = inner.upper_bound (index);
  if (truncating_cast_p (inner, outer))
    {
      // We may be able to accommodate a truncating cast if the
      // resulting range can be represented in the target type...
      // (If UB - LB survives a right-shift by the target's precision,
      // the pair spans more values than the target type can hold.)
      if (wi::rshift (wi::sub (inner_ub, inner_lb),
		      wi::uhwi (outer_prec, TYPE_PRECISION (inner.type ())),
		      inner_sign) != 0)
	{
	  r.set_varying (outer_type);
	  return;
	}
    }
  // ...but we must still verify that the final range fits in the
  // domain.  This catches -fstrict-enum restrictions where the domain
  // range is smaller than what fits in the underlying type.
  wide_int min = wide_int::from (inner_lb, outer_prec, inner_sign);
  wide_int max = wide_int::from (inner_ub, outer_prec, inner_sign);
  if (inside_domain_p (min, max, outer))
    create_possibly_reversed_range (r, outer_type, min, max);
  else
    r.set_varying (outer_type);
}
| |
| |
| bool |
| operator_cast::fold_range (irange &r, tree type ATTRIBUTE_UNUSED, |
| const irange &inner, |
| const irange &outer, |
| relation_trio) const |
| { |
| if (empty_range_varying (r, type, inner, outer)) |
| return true; |
| |
| gcc_checking_assert (outer.varying_p ()); |
| gcc_checking_assert (inner.num_pairs () > 0); |
| |
| // Avoid a temporary by folding the first pair directly into the result. |
| fold_pair (r, 0, inner, outer); |
| |
| // Then process any additional pairs by unioning with their results. |
| for (unsigned x = 1; x < inner.num_pairs (); ++x) |
| { |
| int_range_max tmp; |
| fold_pair (tmp, x, inner, outer); |
| r.union_ (tmp); |
| if (r.varying_p ()) |
| return true; |
| } |
| |
| update_bitmask (r, inner, outer); |
| return true; |
| } |
| |
// Refine R's known-bit mask for a cast using CONVERT_EXPR semantics.

void
operator_cast::update_bitmask (irange &r, const irange &lh,
			       const irange &rh) const
{
  update_known_bitmask (r, CONVERT_EXPR, lh, rh);
}
| |
| bool |
| operator_cast::op1_range (irange &r, tree type, |
| const irange &lhs, |
| const irange &op2, |
| relation_trio) const |
| { |
| if (lhs.undefined_p ()) |
| return false; |
| tree lhs_type = lhs.type (); |
| gcc_checking_assert (types_compatible_p (op2.type(), type)); |
| |
| // If we are calculating a pointer, shortcut to what we really care about. |
| if (POINTER_TYPE_P (type)) |
| { |
| // Conversion from other pointers or a constant (including 0/NULL) |
| // are straightforward. |
| if (POINTER_TYPE_P (lhs.type ()) |
| || (lhs.singleton_p () |
| && TYPE_PRECISION (lhs.type ()) >= TYPE_PRECISION (type))) |
| { |
| r = lhs; |
| range_cast (r, type); |
| } |
| else |
| { |
| // If the LHS is not a pointer nor a singleton, then it is |
| // either VARYING or non-zero. |
| if (!lhs.undefined_p () && !contains_zero_p (lhs)) |
| r.set_nonzero (type); |
| else |
| r.set_varying (type); |
| } |
| r.intersect (op2); |
| return true; |
| } |
| |
| if (truncating_cast_p (op2, lhs)) |
| { |
| if (lhs.varying_p ()) |
| r.set_varying (type); |
| else |
| { |
| // We want to insert the LHS as an unsigned value since it |
| // would not trigger the signed bit of the larger type. |
| int_range_max converted_lhs = lhs; |
| range_cast (converted_lhs, unsigned_type_for (lhs_type)); |
| range_cast (converted_lhs, type); |
| // Start by building the positive signed outer range for the type. |
| wide_int lim = wi::set_bit_in_zero (TYPE_PRECISION (lhs_type), |
| TYPE_PRECISION (type)); |
| create_possibly_reversed_range (r, type, lim, |
| wi::max_value (TYPE_PRECISION (type), |
| SIGNED)); |
| // For the signed part, we need to simply union the 2 ranges now. |
| r.union_ (converted_lhs); |
| |
| // Create maximal negative number outside of LHS bits. |
| lim = wi::mask (TYPE_PRECISION (lhs_type), true, |
| TYPE_PRECISION (type)); |
| // Add this to the unsigned LHS range(s). |
| int_range_max lim_range (type, lim, lim); |
| int_range_max lhs_neg; |
| range_op_handler (PLUS_EXPR).fold_range (lhs_neg, type, |
| converted_lhs, lim_range); |
| // lhs_neg now has all the negative versions of the LHS. |
| // Now union in all the values from SIGNED MIN (0x80000) to |
| // lim-1 in order to fill in all the ranges with the upper |
| // bits set. |
| |
| // PR 97317. If the lhs has only 1 bit less precision than the rhs, |
| // we don't need to create a range from min to lim-1 |
| // calculate neg range traps trying to create [lim, lim - 1]. |
| wide_int min_val = wi::min_value (TYPE_PRECISION (type), SIGNED); |
| if (lim != min_val) |
| { |
| int_range_max neg (type, |
| wi::min_value (TYPE_PRECISION (type), |
| SIGNED), |
| lim - 1); |
| lhs_neg.union_ (neg); |
| } |
| // And finally, munge the signed and unsigned portions. |
| r.union_ (lhs_neg); |
| } |
| // And intersect with any known value passed in the extra operand. |
| r.intersect (op2); |
| return true; |
| } |
| |
| int_range_max tmp; |
| if (TYPE_PRECISION (lhs_type) == TYPE_PRECISION (type)) |
| tmp = lhs; |
| else |
| { |
| // The cast is not truncating, and the range is restricted to |
| |