| /* Fold a constant sub-tree into a single node for C-compiler |
| Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, |
| 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 2, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING. If not, write to the Free |
| Software Foundation, 59 Temple Place - Suite 330, Boston, MA |
| 02111-1307, USA. */ |
| |
| /*@@ This file should be rewritten to use an arbitrary precision |
| @@ representation for "struct tree_int_cst" and "struct tree_real_cst". |
| @@ Perhaps the routines could also be used for bc/dc, and made a lib. |
| @@ The routines that translate from the ap rep should |
| @@ warn if precision et. al. is lost. |
| @@ This would also make life easier when this technology is used |
| @@ for cross-compilers. */ |
| |
| /* The entry points in this file are fold, size_int_wide, size_binop |
| and force_fit_type. |
| |
| fold takes a tree as argument and returns a simplified tree. |
| |
| size_binop takes a tree code for an arithmetic operation |
| and two operands that are trees, and produces a tree for the |
| result, assuming the type comes from `sizetype'. |
| |
| size_int takes an integer value, and creates a tree constant |
| with type from `sizetype'. |
| |
| force_fit_type takes a constant and prior overflow indicator, and |
| forces the value to fit the type. It returns an overflow indicator. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "tm.h" |
| #include "flags.h" |
| #include "tree.h" |
| #include "real.h" |
| #include "rtl.h" |
| #include "expr.h" |
| #include "tm_p.h" |
| #include "toplev.h" |
| #include "ggc.h" |
| #include "hashtab.h" |
| #include "langhooks.h" |
| #include "md5.h" |
| |
| static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT); |
| static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *); |
| static bool negate_mathfn_p (enum built_in_function); |
| static bool negate_expr_p (tree); |
| static tree negate_expr (tree); |
| static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int); |
| static tree associate_trees (tree, tree, enum tree_code, tree); |
| static tree int_const_binop (enum tree_code, tree, tree, int); |
| static tree const_binop (enum tree_code, tree, tree, int); |
| static hashval_t size_htab_hash (const void *); |
| static int size_htab_eq (const void *, const void *); |
| static tree fold_convert_const (enum tree_code, tree, tree); |
| static tree fold_convert (tree, tree); |
| static enum tree_code invert_tree_comparison (enum tree_code); |
| static enum tree_code swap_tree_comparison (enum tree_code); |
| static int comparison_to_compcode (enum tree_code); |
| static enum tree_code compcode_to_comparison (int); |
| static int truth_value_p (enum tree_code); |
| static int operand_equal_for_comparison_p (tree, tree, tree); |
| static int twoval_comparison_p (tree, tree *, tree *, int *); |
| static tree eval_subst (tree, tree, tree, tree, tree); |
| static tree pedantic_omit_one_operand (tree, tree, tree); |
| static tree distribute_bit_expr (enum tree_code, tree, tree, tree); |
| static tree make_bit_field_ref (tree, tree, int, int, int); |
| static tree optimize_bit_field_compare (enum tree_code, tree, tree, tree); |
| static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *, |
| enum machine_mode *, int *, int *, |
| tree *, tree *); |
| static int all_ones_mask_p (tree, int); |
| static tree sign_bit_p (tree, tree); |
| static int simple_operand_p (tree); |
| static tree range_binop (enum tree_code, tree, tree, int, tree, int); |
| static tree make_range (tree, int *, tree *, tree *); |
| static tree build_range_check (tree, tree, int, tree, tree); |
| static int merge_ranges (int *, tree *, tree *, int, tree, tree, int, tree, |
| tree); |
| static tree fold_range_test (tree); |
| static tree unextend (tree, int, int, tree); |
| static tree fold_truthop (enum tree_code, tree, tree, tree); |
| static tree optimize_minmax_comparison (tree); |
| static tree extract_muldiv (tree, tree, enum tree_code, tree); |
| static tree extract_muldiv_1 (tree, tree, enum tree_code, tree); |
| static tree strip_compound_expr (tree, tree); |
| static int multiple_of_p (tree, tree, tree); |
| static tree constant_boolean_node (int, tree); |
| static int count_cond (tree, int); |
| static tree fold_binary_op_with_conditional_arg (enum tree_code, tree, tree, |
| tree, int); |
| static bool fold_real_zero_addition_p (tree, tree, int); |
| static tree fold_mathfn_compare (enum built_in_function, enum tree_code, |
| tree, tree, tree); |
| static tree fold_inf_compare (enum tree_code, tree, tree, tree); |
| static bool reorder_operands_p (tree, tree); |
| static bool tree_swap_operands_p (tree, tree, bool); |
| |
| /* The following constants represent a bit based encoding of GCC's |
| comparison operators. This encoding simplifies transformations |
| on relational comparison operators, such as AND and OR. */ |
| #define COMPCODE_FALSE 0 |
| #define COMPCODE_LT 1 |
| #define COMPCODE_EQ 2 |
| #define COMPCODE_LE 3 |
| #define COMPCODE_GT 4 |
| #define COMPCODE_NE 5 |
| #define COMPCODE_GE 6 |
| #define COMPCODE_TRUE 7 |
| |
| /* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring |
| overflow. Suppose A, B and SUM have the same respective signs as A1, B1, |
| and SUM1. Then this yields nonzero if overflow occurred during the |
| addition. |
| |
| Overflow occurs if A and B have the same sign, but A and SUM differ in |
| sign. Use `^' to test whether signs differ, and `< 0' to isolate the |
| sign. */ |
| #define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0) |
| |
| /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic. |
| We do that by representing the two-word integer in 4 words, with only |
| HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive |
| number. The value of the word is LOWPART + HIGHPART * BASE. */ |
| |
| #define LOWPART(x) \ |
| ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1)) |
| #define HIGHPART(x) \ |
| ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2) |
| #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2) |
| |
| /* Unpack a two-word integer into 4 words. |
| LOW and HI are the integer, as two `HOST_WIDE_INT' pieces. |
| WORDS points to the array of HOST_WIDE_INTs. */ |
| |
| static void |
| encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi) |
| { |
| words[0] = LOWPART (low); |
| words[1] = HIGHPART (low); |
| words[2] = LOWPART (hi); |
| words[3] = HIGHPART (hi); |
| } |
| |
| /* Pack an array of 4 words into a two-word integer. |
| WORDS points to the array of words. |
| The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */ |
| |
| static void |
| decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low, |
| HOST_WIDE_INT *hi) |
| { |
| *low = words[0] + words[1] * BASE; |
| *hi = words[2] + words[3] * BASE; |
| } |
| |
| /* Make the integer constant T valid for its type by setting to 0 or 1 all |
| the bits in the constant that don't belong in the type. |
| |
| Return 1 if a signed overflow occurs, 0 otherwise. If OVERFLOW is |
| nonzero, a signed overflow has already occurred in calculating T, so |
| propagate it. */ |
| |
| int |
| force_fit_type (tree t, int overflow) |
| { |
| unsigned HOST_WIDE_INT low; |
| HOST_WIDE_INT high; |
| unsigned int prec; |
| |
| if (TREE_CODE (t) == REAL_CST) |
| { |
| /* ??? Used to check for overflow here via CHECK_FLOAT_TYPE. |
| Consider doing it via real_convert now. */ |
| return overflow; |
| } |
| |
| else if (TREE_CODE (t) != INTEGER_CST) |
| return overflow; |
| |
| low = TREE_INT_CST_LOW (t); |
| high = TREE_INT_CST_HIGH (t); |
| |
| if (POINTER_TYPE_P (TREE_TYPE (t)) |
| || TREE_CODE (TREE_TYPE (t)) == OFFSET_TYPE) |
| prec = POINTER_SIZE; |
| else |
| prec = TYPE_PRECISION (TREE_TYPE (t)); |
| |
| /* First clear all bits that are beyond the type's precision. */ |
| |
| if (prec == 2 * HOST_BITS_PER_WIDE_INT) |
| ; |
| else if (prec > HOST_BITS_PER_WIDE_INT) |
| TREE_INT_CST_HIGH (t) |
| &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); |
| else |
| { |
| TREE_INT_CST_HIGH (t) = 0; |
| if (prec < HOST_BITS_PER_WIDE_INT) |
| TREE_INT_CST_LOW (t) &= ~((unsigned HOST_WIDE_INT) (-1) << prec); |
| } |
| |
| /* Unsigned types do not suffer sign extension or overflow unless they |
| are a sizetype. */ |
| if (TREE_UNSIGNED (TREE_TYPE (t)) |
| && ! (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE |
| && TYPE_IS_SIZETYPE (TREE_TYPE (t)))) |
| return overflow; |
| |
| /* If the value's sign bit is set, extend the sign. */ |
| if (prec != 2 * HOST_BITS_PER_WIDE_INT |
| && (prec > HOST_BITS_PER_WIDE_INT |
| ? 0 != (TREE_INT_CST_HIGH (t) |
| & ((HOST_WIDE_INT) 1 |
| << (prec - HOST_BITS_PER_WIDE_INT - 1))) |
| : 0 != (TREE_INT_CST_LOW (t) |
| & ((unsigned HOST_WIDE_INT) 1 << (prec - 1))))) |
| { |
| /* Value is negative: |
| set to 1 all the bits that are outside this type's precision. */ |
| if (prec > HOST_BITS_PER_WIDE_INT) |
| TREE_INT_CST_HIGH (t) |
| |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); |
| else |
| { |
| TREE_INT_CST_HIGH (t) = -1; |
| if (prec < HOST_BITS_PER_WIDE_INT) |
| TREE_INT_CST_LOW (t) |= ((unsigned HOST_WIDE_INT) (-1) << prec); |
| } |
| } |
| |
| /* Return nonzero if signed overflow occurred. */ |
| return |
| ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t))) |
| != 0); |
| } |
| |
| /* Add two doubleword integers with doubleword result. |
| Each argument is given as two `HOST_WIDE_INT' pieces. |
| One argument is L1 and H1; the other, L2 and H2. |
| The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| int |
| add_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, |
| unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2, |
| unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv) |
| { |
| unsigned HOST_WIDE_INT l; |
| HOST_WIDE_INT h; |
| |
| l = l1 + l2; |
| h = h1 + h2 + (l < l1); |
| |
| *lv = l; |
| *hv = h; |
| return OVERFLOW_SUM_SIGN (h1, h2, h); |
| } |
| |
| /* Negate a doubleword integer with doubleword result. |
| Return nonzero if the operation overflows, assuming it's signed. |
| The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1. |
| The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| int |
| neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, |
| unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv) |
| { |
| if (l1 == 0) |
| { |
| *lv = 0; |
| *hv = - h1; |
| return (*hv & h1) < 0; |
| } |
| else |
| { |
| *lv = -l1; |
| *hv = ~h1; |
| return 0; |
| } |
| } |
| |
| /* Multiply two doubleword integers with doubleword result. |
| Return nonzero if the operation overflows, assuming it's signed. |
| Each argument is given as two `HOST_WIDE_INT' pieces. |
| One argument is L1 and H1; the other, L2 and H2. |
| The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| int |
| mul_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, |
| unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2, |
| unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv) |
| { |
| HOST_WIDE_INT arg1[4]; |
| HOST_WIDE_INT arg2[4]; |
| HOST_WIDE_INT prod[4 * 2]; |
| unsigned HOST_WIDE_INT carry; |
| int i, j, k; |
| unsigned HOST_WIDE_INT toplow, neglow; |
| HOST_WIDE_INT tophigh, neghigh; |
| |
| encode (arg1, l1, h1); |
| encode (arg2, l2, h2); |
| |
| memset (prod, 0, sizeof prod); |
| |
| for (i = 0; i < 4; i++) |
| { |
| carry = 0; |
| for (j = 0; j < 4; j++) |
| { |
| k = i + j; |
| /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */ |
| carry += arg1[i] * arg2[j]; |
| /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */ |
| carry += prod[k]; |
| prod[k] = LOWPART (carry); |
| carry = HIGHPART (carry); |
| } |
| prod[i + 4] = carry; |
| } |
| |
| decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */ |
| |
| /* Check for overflow by calculating the top half of the answer in full; |
| it should agree with the low half's sign bit. */ |
| decode (prod + 4, &toplow, &tophigh); |
| if (h1 < 0) |
| { |
| neg_double (l2, h2, &neglow, &neghigh); |
| add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh); |
| } |
| if (h2 < 0) |
| { |
| neg_double (l1, h1, &neglow, &neghigh); |
| add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh); |
| } |
| return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0; |
| } |
| |
| /* Shift the doubleword integer in L1, H1 left by COUNT places |
| keeping only PREC bits of result. |
| Shift right if COUNT is negative. |
| ARITH nonzero specifies arithmetic shifting; otherwise use logical shift. |
| Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| void |
| lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, |
| HOST_WIDE_INT count, unsigned int prec, |
| unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith) |
| { |
| unsigned HOST_WIDE_INT signmask; |
| |
| if (count < 0) |
| { |
| rshift_double (l1, h1, -count, prec, lv, hv, arith); |
| return; |
| } |
| |
| #ifdef SHIFT_COUNT_TRUNCATED |
| if (SHIFT_COUNT_TRUNCATED) |
| count %= prec; |
| #endif |
| |
| if (count >= 2 * HOST_BITS_PER_WIDE_INT) |
| { |
| /* Shifting by the host word size is undefined according to the |
| ANSI standard, so we must handle this as a special case. */ |
| *hv = 0; |
| *lv = 0; |
| } |
| else if (count >= HOST_BITS_PER_WIDE_INT) |
| { |
| *hv = l1 << (count - HOST_BITS_PER_WIDE_INT); |
| *lv = 0; |
| } |
| else |
| { |
| *hv = (((unsigned HOST_WIDE_INT) h1 << count) |
| | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1)); |
| *lv = l1 << count; |
| } |
| |
| /* Sign extend all bits that are beyond the precision. */ |
| |
| signmask = -((prec > HOST_BITS_PER_WIDE_INT |
| ? ((unsigned HOST_WIDE_INT) *hv |
| >> (prec - HOST_BITS_PER_WIDE_INT - 1)) |
| : (*lv >> (prec - 1))) & 1); |
| |
| if (prec >= 2 * HOST_BITS_PER_WIDE_INT) |
| ; |
| else if (prec >= HOST_BITS_PER_WIDE_INT) |
| { |
| *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); |
| *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT); |
| } |
| else |
| { |
| *hv = signmask; |
| *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec); |
| *lv |= signmask << prec; |
| } |
| } |
| |
| /* Shift the doubleword integer in L1, H1 right by COUNT places |
| keeping only PREC bits of result. COUNT must be positive. |
| ARITH nonzero specifies arithmetic shifting; otherwise use logical shift. |
| Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| void |
| rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, |
| HOST_WIDE_INT count, unsigned int prec, |
| unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, |
| int arith) |
| { |
| unsigned HOST_WIDE_INT signmask; |
| |
| signmask = (arith |
| ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1)) |
| : 0); |
| |
| #ifdef SHIFT_COUNT_TRUNCATED |
| if (SHIFT_COUNT_TRUNCATED) |
| count %= prec; |
| #endif |
| |
| if (count >= 2 * HOST_BITS_PER_WIDE_INT) |
| { |
| /* Shifting by the host word size is undefined according to the |
| ANSI standard, so we must handle this as a special case. */ |
| *hv = 0; |
| *lv = 0; |
| } |
| else if (count >= HOST_BITS_PER_WIDE_INT) |
| { |
| *hv = 0; |
| *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT); |
| } |
| else |
| { |
| *hv = (unsigned HOST_WIDE_INT) h1 >> count; |
| *lv = ((l1 >> count) |
| | ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1)); |
| } |
| |
| /* Zero / sign extend all bits that are beyond the precision. */ |
| |
| if (count >= (HOST_WIDE_INT)prec) |
| { |
| *hv = signmask; |
| *lv = signmask; |
| } |
| else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT) |
| ; |
| else if ((prec - count) >= HOST_BITS_PER_WIDE_INT) |
| { |
| *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT)); |
| *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT); |
| } |
| else |
| { |
| *hv = signmask; |
| *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count)); |
| *lv |= signmask << (prec - count); |
| } |
| } |
| |
| /* Rotate the doubleword integer in L1, H1 left by COUNT places |
| keeping only PREC bits of result. |
| Rotate right if COUNT is negative. |
| Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| void |
| lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, |
| HOST_WIDE_INT count, unsigned int prec, |
| unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv) |
| { |
| unsigned HOST_WIDE_INT s1l, s2l; |
| HOST_WIDE_INT s1h, s2h; |
| |
| count %= prec; |
| if (count < 0) |
| count += prec; |
| |
| lshift_double (l1, h1, count, prec, &s1l, &s1h, 0); |
| rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0); |
| *lv = s1l | s2l; |
| *hv = s1h | s2h; |
| } |
| |
| /* Rotate the doubleword integer in L1, H1 left by COUNT places |
| keeping only PREC bits of result. COUNT must be positive. |
| Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| void |
| rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, |
| HOST_WIDE_INT count, unsigned int prec, |
| unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv) |
| { |
| unsigned HOST_WIDE_INT s1l, s2l; |
| HOST_WIDE_INT s1h, s2h; |
| |
| count %= prec; |
| if (count < 0) |
| count += prec; |
| |
| rshift_double (l1, h1, count, prec, &s1l, &s1h, 0); |
| lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0); |
| *lv = s1l | s2l; |
| *hv = s1h | s2h; |
| } |
| |
| /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN |
| for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM). |
| CODE is a tree code for a kind of division, one of |
| TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR |
| or EXACT_DIV_EXPR |
| It controls how the quotient is rounded to an integer. |
| Return nonzero if the operation overflows. |
| UNS nonzero says do unsigned division. */ |
| |
| int |
| div_and_round_double (enum tree_code code, int uns, |
| unsigned HOST_WIDE_INT lnum_orig, /* num == numerator == dividend */ |
| HOST_WIDE_INT hnum_orig, |
| unsigned HOST_WIDE_INT lden_orig, /* den == denominator == divisor */ |
| HOST_WIDE_INT hden_orig, |
| unsigned HOST_WIDE_INT *lquo, |
| HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem, |
| HOST_WIDE_INT *hrem) |
| { |
| int quo_neg = 0; |
| HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */ |
| HOST_WIDE_INT den[4], quo[4]; |
| int i, j; |
| unsigned HOST_WIDE_INT work; |
| unsigned HOST_WIDE_INT carry = 0; |
| unsigned HOST_WIDE_INT lnum = lnum_orig; |
| HOST_WIDE_INT hnum = hnum_orig; |
| unsigned HOST_WIDE_INT lden = lden_orig; |
| HOST_WIDE_INT hden = hden_orig; |
| int overflow = 0; |
| |
| if (hden == 0 && lden == 0) |
| overflow = 1, lden = 1; |
| |
| /* Calculate quotient sign and convert operands to unsigned. */ |
| if (!uns) |
| { |
| if (hnum < 0) |
| { |
| quo_neg = ~ quo_neg; |
| /* (minimum integer) / (-1) is the only overflow case. */ |
| if (neg_double (lnum, hnum, &lnum, &hnum) |
| && ((HOST_WIDE_INT) lden & hden) == -1) |
| overflow = 1; |
| } |
| if (hden < 0) |
| { |
| quo_neg = ~ quo_neg; |
| neg_double (lden, hden, &lden, &hden); |
| } |
| } |
| |
| if (hnum == 0 && hden == 0) |
| { /* single precision */ |
| *hquo = *hrem = 0; |
| /* This unsigned division rounds toward zero. */ |
| *lquo = lnum / lden; |
| goto finish_up; |
| } |
| |
| if (hnum == 0) |
| { /* trivial case: dividend < divisor */ |
| /* hden != 0 already checked. */ |
| *hquo = *lquo = 0; |
| *hrem = hnum; |
| *lrem = lnum; |
| goto finish_up; |
| } |
| |
| memset (quo, 0, sizeof quo); |
| |
| memset (num, 0, sizeof num); /* to zero 9th element */ |
| memset (den, 0, sizeof den); |
| |
| encode (num, lnum, hnum); |
| encode (den, lden, hden); |
| |
| /* Special code for when the divisor < BASE. */ |
| if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE) |
| { |
| /* hnum != 0 already checked. */ |
| for (i = 4 - 1; i >= 0; i--) |
| { |
| work = num[i] + carry * BASE; |
| quo[i] = work / lden; |
| carry = work % lden; |
| } |
| } |
| else |
| { |
| /* Full double precision division, |
| with thanks to Don Knuth's "Seminumerical Algorithms". */ |
| int num_hi_sig, den_hi_sig; |
| unsigned HOST_WIDE_INT quo_est, scale; |
| |
| /* Find the highest nonzero divisor digit. */ |
| for (i = 4 - 1;; i--) |
| if (den[i] != 0) |
| { |
| den_hi_sig = i; |
| break; |
| } |
| |
| /* Insure that the first digit of the divisor is at least BASE/2. |
| This is required by the quotient digit estimation algorithm. */ |
| |
| scale = BASE / (den[den_hi_sig] + 1); |
| if (scale > 1) |
| { /* scale divisor and dividend */ |
| carry = 0; |
| for (i = 0; i <= 4 - 1; i++) |
| { |
| work = (num[i] * scale) + carry; |
| num[i] = LOWPART (work); |
| carry = HIGHPART (work); |
| } |
| |
| num[4] = carry; |
| carry = 0; |
| for (i = 0; i <= 4 - 1; i++) |
| { |
| work = (den[i] * scale) + carry; |
| den[i] = LOWPART (work); |
| carry = HIGHPART (work); |
| if (den[i] != 0) den_hi_sig = i; |
| } |
| } |
| |
| num_hi_sig = 4; |
| |
| /* Main loop */ |
| for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) |
| { |
| /* Guess the next quotient digit, quo_est, by dividing the first |
| two remaining dividend digits by the high order quotient digit. |
| quo_est is never low and is at most 2 high. */ |
| unsigned HOST_WIDE_INT tmp; |
| |
| num_hi_sig = i + den_hi_sig + 1; |
| work = num[num_hi_sig] * BASE + num[num_hi_sig - 1]; |
| if (num[num_hi_sig] != den[den_hi_sig]) |
| quo_est = work / den[den_hi_sig]; |
| else |
| quo_est = BASE - 1; |
| |
| /* Refine quo_est so it's usually correct, and at most one high. */ |
| tmp = work - quo_est * den[den_hi_sig]; |
| if (tmp < BASE |
| && (den[den_hi_sig - 1] * quo_est |
| > (tmp * BASE + num[num_hi_sig - 2]))) |
| quo_est--; |
| |
| /* Try QUO_EST as the quotient digit, by multiplying the |
| divisor by QUO_EST and subtracting from the remaining dividend. |
| Keep in mind that QUO_EST is the I - 1st digit. */ |
| |
| carry = 0; |
| for (j = 0; j <= den_hi_sig; j++) |
| { |
| work = quo_est * den[j] + carry; |
| carry = HIGHPART (work); |
| work = num[i + j] - LOWPART (work); |
| num[i + j] = LOWPART (work); |
| carry += HIGHPART (work) != 0; |
| } |
| |
| /* If quo_est was high by one, then num[i] went negative and |
| we need to correct things. */ |
| if (num[num_hi_sig] < (HOST_WIDE_INT) carry) |
| { |
| quo_est--; |
| carry = 0; /* add divisor back in */ |
| for (j = 0; j <= den_hi_sig; j++) |
| { |
| work = num[i + j] + den[j] + carry; |
| carry = HIGHPART (work); |
| num[i + j] = LOWPART (work); |
| } |
| |
| num [num_hi_sig] += carry; |
| } |
| |
| /* Store the quotient digit. */ |
| quo[i] = quo_est; |
| } |
| } |
| |
| decode (quo, lquo, hquo); |
| |
| finish_up: |
| /* If result is negative, make it so. */ |
| if (quo_neg) |
| neg_double (*lquo, *hquo, lquo, hquo); |
| |
| /* compute trial remainder: rem = num - (quo * den) */ |
| mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem); |
| neg_double (*lrem, *hrem, lrem, hrem); |
| add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem); |
| |
| switch (code) |
| { |
| case TRUNC_DIV_EXPR: |
| case TRUNC_MOD_EXPR: /* round toward zero */ |
| case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */ |
| return overflow; |
| |
| case FLOOR_DIV_EXPR: |
| case FLOOR_MOD_EXPR: /* round toward negative infinity */ |
| if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */ |
| { |
| /* quo = quo - 1; */ |
| add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, |
| lquo, hquo); |
| } |
| else |
| return overflow; |
| break; |
| |
| case CEIL_DIV_EXPR: |
| case CEIL_MOD_EXPR: /* round toward positive infinity */ |
| if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */ |
| { |
| add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0, |
| lquo, hquo); |
| } |
| else |
| return overflow; |
| break; |
| |
| case ROUND_DIV_EXPR: |
| case ROUND_MOD_EXPR: /* round to closest integer */ |
| { |
| unsigned HOST_WIDE_INT labs_rem = *lrem; |
| HOST_WIDE_INT habs_rem = *hrem; |
| unsigned HOST_WIDE_INT labs_den = lden, ltwice; |
| HOST_WIDE_INT habs_den = hden, htwice; |
| |
| /* Get absolute values. */ |
| if (*hrem < 0) |
| neg_double (*lrem, *hrem, &labs_rem, &habs_rem); |
| if (hden < 0) |
| neg_double (lden, hden, &labs_den, &habs_den); |
| |
| /* If (2 * abs (lrem) >= abs (lden)) */ |
| mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0, |
| labs_rem, habs_rem, <wice, &htwice); |
| |
| if (((unsigned HOST_WIDE_INT) habs_den |
| < (unsigned HOST_WIDE_INT) htwice) |
| || (((unsigned HOST_WIDE_INT) habs_den |
| == (unsigned HOST_WIDE_INT) htwice) |
| && (labs_den < ltwice))) |
| { |
| if (*hquo < 0) |
| /* quo = quo - 1; */ |
| add_double (*lquo, *hquo, |
| (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo); |
| else |
| /* quo = quo + 1; */ |
| add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0, |
| lquo, hquo); |
| } |
| else |
| return overflow; |
| } |
| break; |
| |
| default: |
| abort (); |
| } |
| |
| /* Compute true remainder: rem = num - (quo * den) */ |
| mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem); |
| neg_double (*lrem, *hrem, lrem, hrem); |
| add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem); |
| return overflow; |
| } |
| |
| /* Return true if built-in mathematical function specified by CODE |
| preserves the sign of it argument, i.e. -f(x) == f(-x). */ |
| |
| static bool |
| negate_mathfn_p (enum built_in_function code) |
| { |
| switch (code) |
| { |
| case BUILT_IN_ASIN: |
| case BUILT_IN_ASINF: |
| case BUILT_IN_ASINL: |
| case BUILT_IN_ATAN: |
| case BUILT_IN_ATANF: |
| case BUILT_IN_ATANL: |
| case BUILT_IN_SIN: |
| case BUILT_IN_SINF: |
| case BUILT_IN_SINL: |
| case BUILT_IN_TAN: |
| case BUILT_IN_TANF: |
| case BUILT_IN_TANL: |
| return true; |
| |
| default: |
| break; |
| } |
| return false; |
| } |
| |
| |
| /* Determine whether an expression T can be cheaply negated using |
| the function negate_expr. */ |
| |
| static bool |
| negate_expr_p (tree t) |
| { |
| unsigned HOST_WIDE_INT val; |
| unsigned int prec; |
| tree type; |
| |
| if (t == 0) |
| return false; |
| |
| type = TREE_TYPE (t); |
| |
| STRIP_SIGN_NOPS (t); |
| switch (TREE_CODE (t)) |
| { |
| case INTEGER_CST: |
| if (TREE_UNSIGNED (type) || ! flag_trapv) |
| return true; |
| |
| /* Check that -CST will not overflow type. */ |
| prec = TYPE_PRECISION (type); |
| if (prec > HOST_BITS_PER_WIDE_INT) |
| { |
| if (TREE_INT_CST_LOW (t) != 0) |
| return true; |
| prec -= HOST_BITS_PER_WIDE_INT; |
| val = TREE_INT_CST_HIGH (t); |
| } |
| else |
| val = TREE_INT_CST_LOW (t); |
| if (prec < HOST_BITS_PER_WIDE_INT) |
| val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1; |
| return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1)); |
| |
| case REAL_CST: |
| case NEGATE_EXPR: |
| return true; |
| |
| case COMPLEX_CST: |
| return negate_expr_p (TREE_REALPART (t)) |
| && negate_expr_p (TREE_IMAGPART (t)); |
| |
| case MINUS_EXPR: |
| /* We can't turn -(A-B) into B-A when we honor signed zeros. */ |
| return (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations) |
| && reorder_operands_p (TREE_OPERAND (t, 0), |
| TREE_OPERAND (t, 1)); |
| |
| case MULT_EXPR: |
| if (TREE_UNSIGNED (TREE_TYPE (t))) |
| break; |
| |
| /* Fall through. */ |
| |
| case RDIV_EXPR: |
| if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t)))) |
| return negate_expr_p (TREE_OPERAND (t, 1)) |
| || negate_expr_p (TREE_OPERAND (t, 0)); |
| break; |
| |
| case NOP_EXPR: |
| /* Negate -((double)float) as (double)(-float). */ |
| if (TREE_CODE (type) == REAL_TYPE) |
| { |
| tree tem = strip_float_extensions (t); |
| if (tem != t) |
| return negate_expr_p (tem); |
| } |
| break; |
| |
| case CALL_EXPR: |
| /* Negate -f(x) as f(-x). */ |
| if (negate_mathfn_p (builtin_mathfn_code (t))) |
| return negate_expr_p (TREE_VALUE (TREE_OPERAND (t, 1))); |
| break; |
| |
| default: |
| break; |
| } |
| return false; |
| } |
| |
| /* Given T, an expression, return the negation of T. Allow for T to be |
| null, in which case return null. */ |
| |
| static tree |
| negate_expr (tree t) |
| { |
| tree type; |
| tree tem; |
| |
| if (t == 0) |
| return 0; |
| |
| type = TREE_TYPE (t); |
| STRIP_SIGN_NOPS (t); |
| |
| switch (TREE_CODE (t)) |
| { |
| case INTEGER_CST: |
| { |
| unsigned HOST_WIDE_INT low; |
| HOST_WIDE_INT high; |
| int overflow = neg_double (TREE_INT_CST_LOW (t), |
| TREE_INT_CST_HIGH (t), |
| &low, &high); |
| tem = build_int_2 (low, high); |
| TREE_TYPE (tem) = type; |
| TREE_OVERFLOW (tem) |
| = (TREE_OVERFLOW (t) |
| | force_fit_type (tem, overflow && !TREE_UNSIGNED (type))); |
| TREE_CONSTANT_OVERFLOW (tem) |
| = TREE_OVERFLOW (tem) | TREE_CONSTANT_OVERFLOW (t); |
| } |
| if (! TREE_OVERFLOW (tem) |
| || TREE_UNSIGNED (type) |
| || ! flag_trapv) |
| return tem; |
| break; |
| |
| case REAL_CST: |
| tem = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (t))); |
| /* Two's complement FP formats, such as c4x, may overflow. */ |
| if (! TREE_OVERFLOW (tem) || ! flag_trapping_math) |
| return fold_convert (type, tem); |
| break; |
| |
| case COMPLEX_CST: |
| { |
| tree rpart = negate_expr (TREE_REALPART (t)); |
| tree ipart = negate_expr (TREE_IMAGPART (t)); |
| |
| if ((TREE_CODE (rpart) == REAL_CST |
| && TREE_CODE (ipart) == REAL_CST) |
| || (TREE_CODE (rpart) == INTEGER_CST |
| && TREE_CODE (ipart) == INTEGER_CST)) |
| return build_complex (type, rpart, ipart); |
| } |
| break; |
| |
| case NEGATE_EXPR: |
| return fold_convert (type, TREE_OPERAND (t, 0)); |
| |
| case MINUS_EXPR: |
| /* - (A - B) -> B - A */ |
| if ((! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations) |
| && reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1))) |
| return fold_convert (type, |
| fold (build (MINUS_EXPR, TREE_TYPE (t), |
| TREE_OPERAND (t, 1), |
| TREE_OPERAND (t, 0)))); |
| break; |
| |
| case MULT_EXPR: |
| if (TREE_UNSIGNED (TREE_TYPE (t))) |
| break; |
| |
| /* Fall through. */ |
| |
| case RDIV_EXPR: |
| if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t)))) |
| { |
| tem = TREE_OPERAND (t, 1); |
| if (negate_expr_p (tem)) |
| return fold_convert (type, |
| fold (build (TREE_CODE (t), TREE_TYPE (t), |
| TREE_OPERAND (t, 0), |
| negate_expr (tem)))); |
| tem = TREE_OPERAND (t, 0); |
| if (negate_expr_p (tem)) |
| return fold_convert (type, |
| fold (build (TREE_CODE (t), TREE_TYPE (t), |
| negate_expr (tem), |
| TREE_OPERAND (t, 1)))); |
| } |
| break; |
| |
| case NOP_EXPR: |
| /* Convert -((double)float) into (double)(-float). */ |
| if (TREE_CODE (type) == REAL_TYPE) |
| { |
| tem = strip_float_extensions (t); |
| if (tem != t && negate_expr_p (tem)) |
| return fold_convert (type, negate_expr (tem)); |
| } |
| break; |
| |
| case CALL_EXPR: |
| /* Negate -f(x) as f(-x). */ |
| if (negate_mathfn_p (builtin_mathfn_code (t)) |
| && negate_expr_p (TREE_VALUE (TREE_OPERAND (t, 1)))) |
| { |
| tree fndecl, arg, arglist; |
| |
| fndecl = get_callee_fndecl (t); |
| arg = negate_expr (TREE_VALUE (TREE_OPERAND (t, 1))); |
| arglist = build_tree_list (NULL_TREE, arg); |
| return build_function_call_expr (fndecl, arglist); |
| } |
| break; |
| |
| default: |
| break; |
| } |
| |
| tem = fold (build1 (NEGATE_EXPR, TREE_TYPE (t), t)); |
| return fold_convert (type, tem); |
| } |
| |
| /* Split a tree IN into a constant, literal and variable parts that could be |
| combined with CODE to make IN. "constant" means an expression with |
| TREE_CONSTANT but that isn't an actual constant. CODE must be a |
| commutative arithmetic operation. Store the constant part into *CONP, |
| the literal in *LITP and return the variable part. If a part isn't |
| present, set it to null. If the tree does not decompose in this way, |
| return the entire tree as the variable part and the other parts as null. |
| |
| If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR. In that |
| case, we negate an operand that was subtracted. Except if it is a |
| literal for which we use *MINUS_LITP instead. |
| |
| If NEGATE_P is true, we are negating all of IN, again except a literal |
| for which we use *MINUS_LITP instead. |
| |
| If IN is itself a literal or constant, return it as appropriate. |
| |
| Note that we do not guarantee that any of the three values will be the |
| same type as IN, but they will have the same signedness and mode. */ |
| |
| static tree |
| split_tree (tree in, enum tree_code code, tree *conp, tree *litp, |
| tree *minus_litp, int negate_p) |
| { |
| tree var = 0; |
| |
| *conp = 0; |
| *litp = 0; |
| *minus_litp = 0; |
| |
| /* Strip any conversions that don't change the machine mode or signedness. */ |
| STRIP_SIGN_NOPS (in); |
| |
| if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST) |
| *litp = in; |
| else if (TREE_CODE (in) == code |
| || (! FLOAT_TYPE_P (TREE_TYPE (in)) |
| /* We can associate addition and subtraction together (even |
| though the C standard doesn't say so) for integers because |
| the value is not affected. For reals, the value might be |
| affected, so we can't. */ |
| && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR) |
| || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR)))) |
| { |
| tree op0 = TREE_OPERAND (in, 0); |
| tree op1 = TREE_OPERAND (in, 1); |
| int neg1_p = TREE_CODE (in) == MINUS_EXPR; |
| int neg_litp_p = 0, neg_conp_p = 0, neg_var_p = 0; |
| |
| /* First see if either of the operands is a literal, then a constant. */ |
| if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST) |
| *litp = op0, op0 = 0; |
| else if (TREE_CODE (op1) == INTEGER_CST || TREE_CODE (op1) == REAL_CST) |
| *litp = op1, neg_litp_p = neg1_p, op1 = 0; |
| |
| if (op0 != 0 && TREE_CONSTANT (op0)) |
| *conp = op0, op0 = 0; |
| else if (op1 != 0 && TREE_CONSTANT (op1)) |
| *conp = op1, neg_conp_p = neg1_p, op1 = 0; |
| |
| /* If we haven't dealt with either operand, this is not a case we can |
| decompose. Otherwise, VAR is either of the ones remaining, if any. */ |
| if (op0 != 0 && op1 != 0) |
| var = in; |
| else if (op0 != 0) |
| var = op0; |
| else |
| var = op1, neg_var_p = neg1_p; |
| |
| /* Now do any needed negations. */ |
| if (neg_litp_p) |
| *minus_litp = *litp, *litp = 0; |
| if (neg_conp_p) |
| *conp = negate_expr (*conp); |
| if (neg_var_p) |
| var = negate_expr (var); |
| } |
| else if (TREE_CONSTANT (in)) |
| *conp = in; |
| else |
| var = in; |
| |
| if (negate_p) |
| { |
| if (*litp) |
| *minus_litp = *litp, *litp = 0; |
| else if (*minus_litp) |
| *litp = *minus_litp, *minus_litp = 0; |
| *conp = negate_expr (*conp); |
| var = negate_expr (var); |
| } |
| |
| return var; |
| } |
| |
| /* Re-associate trees split by the above function. T1 and T2 are either |
| expressions to associate or null. Return the new expression, if any. If |
| we build an operation, do it in TYPE and with CODE. */ |
| |
| static tree |
| associate_trees (tree t1, tree t2, enum tree_code code, tree type) |
| { |
| if (t1 == 0) |
| return t2; |
| else if (t2 == 0) |
| return t1; |
| |
| /* If either input is CODE, a PLUS_EXPR, or a MINUS_EXPR, don't |
| try to fold this since we will have infinite recursion. But do |
| deal with any NEGATE_EXPRs. */ |
| if (TREE_CODE (t1) == code || TREE_CODE (t2) == code |
| || TREE_CODE (t1) == MINUS_EXPR || TREE_CODE (t2) == MINUS_EXPR) |
| { |
| if (code == PLUS_EXPR) |
| { |
| if (TREE_CODE (t1) == NEGATE_EXPR) |
| return build (MINUS_EXPR, type, fold_convert (type, t2), |
| fold_convert (type, TREE_OPERAND (t1, 0))); |
| else if (TREE_CODE (t2) == NEGATE_EXPR) |
| return build (MINUS_EXPR, type, fold_convert (type, t1), |
| fold_convert (type, TREE_OPERAND (t2, 0))); |
| } |
| return build (code, type, fold_convert (type, t1), |
| fold_convert (type, t2)); |
| } |
| |
| return fold (build (code, type, fold_convert (type, t1), |
| fold_convert (type, t2))); |
| } |
| |
| /* Combine two integer constants ARG1 and ARG2 under operation CODE |
| to produce a new constant. |
| |
| If NOTRUNC is nonzero, do not truncate the result to fit the data type. */ |
| |
| static tree |
| int_const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc) |
| { |
| unsigned HOST_WIDE_INT int1l, int2l; |
| HOST_WIDE_INT int1h, int2h; |
| unsigned HOST_WIDE_INT low; |
| HOST_WIDE_INT hi; |
| unsigned HOST_WIDE_INT garbagel; |
| HOST_WIDE_INT garbageh; |
| tree t; |
| tree type = TREE_TYPE (arg1); |
| int uns = TREE_UNSIGNED (type); |
| int is_sizetype |
| = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)); |
| int overflow = 0; |
| int no_overflow = 0; |
| |
| int1l = TREE_INT_CST_LOW (arg1); |
| int1h = TREE_INT_CST_HIGH (arg1); |
| int2l = TREE_INT_CST_LOW (arg2); |
| int2h = TREE_INT_CST_HIGH (arg2); |
| |
| switch (code) |
| { |
| case BIT_IOR_EXPR: |
| low = int1l | int2l, hi = int1h | int2h; |
| break; |
| |
| case BIT_XOR_EXPR: |
| low = int1l ^ int2l, hi = int1h ^ int2h; |
| break; |
| |
| case BIT_AND_EXPR: |
| low = int1l & int2l, hi = int1h & int2h; |
| break; |
| |
| case RSHIFT_EXPR: |
| int2l = -int2l; |
| case LSHIFT_EXPR: |
| /* It's unclear from the C standard whether shifts can overflow. |
| The following code ignores overflow; perhaps a C standard |
| interpretation ruling is needed. */ |
| lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type), |
| &low, &hi, !uns); |
| no_overflow = 1; |
| break; |
| |
| case RROTATE_EXPR: |
| int2l = - int2l; |
| case LROTATE_EXPR: |
| lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type), |
| &low, &hi); |
| break; |
| |
| case PLUS_EXPR: |
| overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi); |
| break; |
| |
| case MINUS_EXPR: |
| neg_double (int2l, int2h, &low, &hi); |
| add_double (int1l, int1h, low, hi, &low, &hi); |
| overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h); |
| break; |
| |
| case MULT_EXPR: |
| overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi); |
| break; |
| |
| case TRUNC_DIV_EXPR: |
| case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR: |
| case EXACT_DIV_EXPR: |
| /* This is a shortcut for a common special case. */ |
| if (int2h == 0 && (HOST_WIDE_INT) int2l > 0 |
| && ! TREE_CONSTANT_OVERFLOW (arg1) |
| && ! TREE_CONSTANT_OVERFLOW (arg2) |
| && int1h == 0 && (HOST_WIDE_INT) int1l >= 0) |
| { |
| if (code == CEIL_DIV_EXPR) |
| int1l += int2l - 1; |
| |
| low = int1l / int2l, hi = 0; |
| break; |
| } |
| |
| /* ... fall through ... */ |
| |
| case ROUND_DIV_EXPR: |
| if (int2h == 0 && int2l == 1) |
| { |
| low = int1l, hi = int1h; |
| break; |
| } |
| if (int1l == int2l && int1h == int2h |
| && ! (int1l == 0 && int1h == 0)) |
| { |
| low = 1, hi = 0; |
| break; |
| } |
| overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h, |
| &low, &hi, &garbagel, &garbageh); |
| break; |
| |
| case TRUNC_MOD_EXPR: |
| case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR: |
| /* This is a shortcut for a common special case. */ |
| if (int2h == 0 && (HOST_WIDE_INT) int2l > 0 |
| && ! TREE_CONSTANT_OVERFLOW (arg1) |
| && ! TREE_CONSTANT_OVERFLOW (arg2) |
| && int1h == 0 && (HOST_WIDE_INT) int1l >= 0) |
| { |
| if (code == CEIL_MOD_EXPR) |
| int1l += int2l - 1; |
| low = int1l % int2l, hi = 0; |
| break; |
| } |
| |
| /* ... fall through ... */ |
| |
| case ROUND_MOD_EXPR: |
| overflow = div_and_round_double (code, uns, |
| int1l, int1h, int2l, int2h, |
| &garbagel, &garbageh, &low, &hi); |
| break; |
| |
| case MIN_EXPR: |
| case MAX_EXPR: |
| if (uns) |
| low = (((unsigned HOST_WIDE_INT) int1h |
| < (unsigned HOST_WIDE_INT) int2h) |
| || (((unsigned HOST_WIDE_INT) int1h |
| == (unsigned HOST_WIDE_INT) int2h) |
| && int1l < int2l)); |
| else |
| low = (int1h < int2h |
| || (int1h == int2h && int1l < int2l)); |
| |
| if (low == (code == MIN_EXPR)) |
| low = int1l, hi = int1h; |
| else |
| low = int2l, hi = int2h; |
| break; |
| |
| default: |
| abort (); |
| } |
| |
| /* If this is for a sizetype, can be represented as one (signed) |
| HOST_WIDE_INT word, and doesn't overflow, use size_int since it caches |
| constants. */ |
| if (is_sizetype |
| && ((hi == 0 && (HOST_WIDE_INT) low >= 0) |
| || (hi == -1 && (HOST_WIDE_INT) low < 0)) |
| && overflow == 0 && ! TREE_OVERFLOW (arg1) && ! TREE_OVERFLOW (arg2)) |
| return size_int_type_wide (low, type); |
| else |
| { |
| t = build_int_2 (low, hi); |
| TREE_TYPE (t) = TREE_TYPE (arg1); |
| } |
| |
| TREE_OVERFLOW (t) |
| = ((notrunc |
| ? (!uns || is_sizetype) && overflow |
| : (force_fit_type (t, (!uns || is_sizetype) && overflow) |
| && ! no_overflow)) |
| | TREE_OVERFLOW (arg1) |
| | TREE_OVERFLOW (arg2)); |
| |
| /* If we're doing a size calculation, unsigned arithmetic does overflow. |
| So check if force_fit_type truncated the value. */ |
| if (is_sizetype |
| && ! TREE_OVERFLOW (t) |
| && (TREE_INT_CST_HIGH (t) != hi |
| || TREE_INT_CST_LOW (t) != low)) |
| TREE_OVERFLOW (t) = 1; |
| |
| TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t) |
| | TREE_CONSTANT_OVERFLOW (arg1) |
| | TREE_CONSTANT_OVERFLOW (arg2)); |
| return t; |
| } |
| |
| /* Combine two constants ARG1 and ARG2 under operation CODE to produce a new |
| constant. We assume ARG1 and ARG2 have the same data type, or at least |
| are the same kind of constant and the same machine mode. |
| |
| If NOTRUNC is nonzero, do not truncate the result to fit the data type. */ |
| |
| static tree |
| const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc) |
| { |
| STRIP_NOPS (arg1); |
| STRIP_NOPS (arg2); |
| |
| if (TREE_CODE (arg1) == INTEGER_CST) |
| return int_const_binop (code, arg1, arg2, notrunc); |
| |
| if (TREE_CODE (arg1) == REAL_CST) |
| { |
| enum machine_mode mode; |
| REAL_VALUE_TYPE d1; |
| REAL_VALUE_TYPE d2; |
| REAL_VALUE_TYPE value; |
| tree t, type; |
| |
| d1 = TREE_REAL_CST (arg1); |
| d2 = TREE_REAL_CST (arg2); |
| |
| type = TREE_TYPE (arg1); |
| mode = TYPE_MODE (type); |
| |
| /* Don't perform operation if we honor signaling NaNs and |
| either operand is a NaN. */ |
| if (HONOR_SNANS (mode) |
| && (REAL_VALUE_ISNAN (d1) || REAL_VALUE_ISNAN (d2))) |
| return NULL_TREE; |
| |
| /* Don't perform operation if it would raise a division |
| by zero exception. */ |
| if (code == RDIV_EXPR |
| && REAL_VALUES_EQUAL (d2, dconst0) |
| && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode))) |
| return NULL_TREE; |
| |
| /* If either operand is a NaN, just return it. Otherwise, set up |
| for floating-point trap; we return an overflow. */ |
| if (REAL_VALUE_ISNAN (d1)) |
| return arg1; |
| else if (REAL_VALUE_ISNAN (d2)) |
| return arg2; |
| |
| REAL_ARITHMETIC (value, code, d1, d2); |
| |
| t = build_real (type, real_value_truncate (mode, value)); |
| |
| TREE_OVERFLOW (t) |
| = (force_fit_type (t, 0) |
| | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2)); |
| TREE_CONSTANT_OVERFLOW (t) |
| = TREE_OVERFLOW (t) |
| | TREE_CONSTANT_OVERFLOW (arg1) |
| | TREE_CONSTANT_OVERFLOW (arg2); |
| return t; |
| } |
| if (TREE_CODE (arg1) == COMPLEX_CST) |
| { |
| tree type = TREE_TYPE (arg1); |
| tree r1 = TREE_REALPART (arg1); |
| tree i1 = TREE_IMAGPART (arg1); |
| tree r2 = TREE_REALPART (arg2); |
| tree i2 = TREE_IMAGPART (arg2); |
| tree t; |
| |
| switch (code) |
| { |
| case PLUS_EXPR: |
| t = build_complex (type, |
| const_binop (PLUS_EXPR, r1, r2, notrunc), |
| const_binop (PLUS_EXPR, i1, i2, notrunc)); |
| break; |
| |
| case MINUS_EXPR: |
| t = build_complex (type, |
| const_binop (MINUS_EXPR, r1, r2, notrunc), |
| const_binop (MINUS_EXPR, i1, i2, notrunc)); |
| break; |
| |
| case MULT_EXPR: |
| t = build_complex (type, |
| const_binop (MINUS_EXPR, |
| const_binop (MULT_EXPR, |
| r1, r2, notrunc), |
| const_binop (MULT_EXPR, |
| i1, i2, notrunc), |
| notrunc), |
| const_binop (PLUS_EXPR, |
| const_binop (MULT_EXPR, |
| r1, i2, notrunc), |
| const_binop (MULT_EXPR, |
| i1, r2, notrunc), |
| notrunc)); |
| break; |
| |
| case RDIV_EXPR: |
| { |
| tree magsquared |
| = const_binop (PLUS_EXPR, |
| const_binop (MULT_EXPR, r2, r2, notrunc), |
| const_binop (MULT_EXPR, i2, i2, notrunc), |
| notrunc); |
| |
| t = build_complex (type, |
| const_binop |
| (INTEGRAL_TYPE_P (TREE_TYPE (r1)) |
| ? TRUNC_DIV_EXPR : RDIV_EXPR, |
| const_binop (PLUS_EXPR, |
| const_binop (MULT_EXPR, r1, r2, |
| notrunc), |
| const_binop (MULT_EXPR, i1, i2, |
| notrunc), |
| notrunc), |
| magsquared, notrunc), |
| const_binop |
| (INTEGRAL_TYPE_P (TREE_TYPE (r1)) |
| ? TRUNC_DIV_EXPR : RDIV_EXPR, |
| const_binop (MINUS_EXPR, |
| const_binop (MULT_EXPR, i1, r2, |
| notrunc), |
| const_binop (MULT_EXPR, r1, i2, |
| notrunc), |
| notrunc), |
| magsquared, notrunc)); |
| } |
| break; |
| |
| default: |
| abort (); |
| } |
| return t; |
| } |
| return 0; |
| } |
| |
| /* These are the hash table functions for the hash table of INTEGER_CST |
| nodes of a sizetype. */ |
| |
| /* Return the hash code code X, an INTEGER_CST. */ |
| |
| static hashval_t |
| size_htab_hash (const void *x) |
| { |
| tree t = (tree) x; |
| |
| return (TREE_INT_CST_HIGH (t) ^ TREE_INT_CST_LOW (t) |
| ^ htab_hash_pointer (TREE_TYPE (t)) |
| ^ (TREE_OVERFLOW (t) << 20)); |
| } |
| |
| /* Return nonzero if the value represented by *X (an INTEGER_CST tree node) |
| is the same as that given by *Y, which is the same. */ |
| |
| static int |
| size_htab_eq (const void *x, const void *y) |
| { |
| tree xt = (tree) x; |
| tree yt = (tree) y; |
| |
| return (TREE_INT_CST_HIGH (xt) == TREE_INT_CST_HIGH (yt) |
| && TREE_INT_CST_LOW (xt) == TREE_INT_CST_LOW (yt) |
| && TREE_TYPE (xt) == TREE_TYPE (yt) |
| && TREE_OVERFLOW (xt) == TREE_OVERFLOW (yt)); |
| } |
| |
| /* Return an INTEGER_CST with value whose low-order HOST_BITS_PER_WIDE_INT |
| bits are given by NUMBER and of the sizetype represented by KIND. */ |
| |
| tree |
| size_int_wide (HOST_WIDE_INT number, enum size_type_kind kind) |
| { |
| return size_int_type_wide (number, sizetype_tab[(int) kind]); |
| } |
| |
| /* Likewise, but the desired type is specified explicitly. */ |
| |
| static GTY (()) tree new_const; |
| static GTY ((if_marked ("ggc_marked_p"), param_is (union tree_node))) |
| htab_t size_htab; |
| |
| tree |
| size_int_type_wide (HOST_WIDE_INT number, tree type) |
| { |
| void **slot; |
| |
| if (size_htab == 0) |
| { |
| size_htab = htab_create_ggc (1024, size_htab_hash, size_htab_eq, NULL); |
| new_const = make_node (INTEGER_CST); |
| } |
| |
| /* Adjust NEW_CONST to be the constant we want. If it's already in the |
| hash table, we return the value from the hash table. Otherwise, we |
| place that in the hash table and make a new node for the next time. */ |
| TREE_INT_CST_LOW (new_const) = number; |
| TREE_INT_CST_HIGH (new_const) = number < 0 ? -1 : 0; |
| TREE_TYPE (new_const) = type; |
| TREE_OVERFLOW (new_const) = TREE_CONSTANT_OVERFLOW (new_const) |
| = force_fit_type (new_const, 0); |
| |
| slot = htab_find_slot (size_htab, new_const, INSERT); |
| if (*slot == 0) |
| { |
| tree t = new_const; |
| |
| *slot = new_const; |
| new_const = make_node (INTEGER_CST); |
| return t; |
| } |
| else |
| return (tree) *slot; |
| } |
| |
| /* Combine operands OP1 and OP2 with arithmetic operation CODE. CODE |
| is a tree code. The type of the result is taken from the operands. |
| Both must be the same type integer type and it must be a size type. |
| If the operands are constant, so is the result. */ |
| |
| tree |
| size_binop (enum tree_code code, tree arg0, tree arg1) |
| { |
| tree type = TREE_TYPE (arg0); |
| |
| if (TREE_CODE (type) != INTEGER_TYPE || ! TYPE_IS_SIZETYPE (type) |
| || type != TREE_TYPE (arg1)) |
| abort (); |
| |
| /* Handle the special case of two integer constants faster. */ |
| if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST) |
| { |
| /* And some specific cases even faster than that. */ |
| if (code == PLUS_EXPR && integer_zerop (arg0)) |
| return arg1; |
| else if ((code == MINUS_EXPR || code == PLUS_EXPR) |
| && integer_zerop (arg1)) |
| return arg0; |
| else if (code == MULT_EXPR && integer_onep (arg0)) |
| return arg1; |
| |
| /* Handle general case of two integer constants. */ |
| return int_const_binop (code, arg0, arg1, 0); |
| } |
| |
| if (arg0 == error_mark_node || arg1 == error_mark_node) |
| return error_mark_node; |
| |
| return fold (build (code, type, arg0, arg1)); |
| } |
| |
| /* Given two values, either both of sizetype or both of bitsizetype, |
| compute the difference between the two values. Return the value |
| in signed type corresponding to the type of the operands. */ |
| |
| tree |
| size_diffop (tree arg0, tree arg1) |
| { |
| tree type = TREE_TYPE (arg0); |
| tree ctype; |
| |
| if (TREE_CODE (type) != INTEGER_TYPE || ! TYPE_IS_SIZETYPE (type) |
| || type != TREE_TYPE (arg1)) |
| abort (); |
| |
| /* If the type is already signed, just do the simple thing. */ |
| if (! TREE_UNSIGNED (type)) |
| return size_binop (MINUS_EXPR, arg0, arg1); |
| |
| ctype = (type == bitsizetype || type == ubitsizetype |
| ? sbitsizetype : ssizetype); |
| |
| /* If either operand is not a constant, do the conversions to the signed |
| type and subtract. The hardware will do the right thing with any |
| overflow in the subtraction. */ |
| if (TREE_CODE (arg0) != INTEGER_CST || TREE_CODE (arg1) != INTEGER_CST) |
| return size_binop (MINUS_EXPR, fold_convert (ctype, arg0), |
| fold_convert (ctype, arg1)); |
| |
| /* If ARG0 is larger than ARG1, subtract and return the result in CTYPE. |
| Otherwise, subtract the other way, convert to CTYPE (we know that can't |
| overflow) and negate (which can't either). Special-case a result |
| of zero while we're here. */ |
| if (tree_int_cst_equal (arg0, arg1)) |
| return fold_convert (ctype, integer_zero_node); |
| else if (tree_int_cst_lt (arg1, arg0)) |
| return fold_convert (ctype, size_binop (MINUS_EXPR, arg0, arg1)); |
| else |
| return size_binop (MINUS_EXPR, fold_convert (ctype, integer_zero_node), |
| fold_convert (ctype, size_binop (MINUS_EXPR, |
| arg1, arg0))); |
| } |
| |
| |
| /* Attempt to fold type conversion operation CODE of expression ARG1 to |
| type TYPE. If no simplification can be done return NULL_TREE. */ |
| |
| static tree |
| fold_convert_const (enum tree_code code ATTRIBUTE_UNUSED, tree type, |
| tree arg1) |
| { |
| int overflow = 0; |
| tree t; |
| |
| if (TREE_TYPE (arg1) == type) |
| return arg1; |
| |
| if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type)) |
| { |
| if (TREE_CODE (arg1) == INTEGER_CST) |
| { |
| /* If we would build a constant wider than GCC supports, |
| leave the conversion unfolded. */ |
| if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT) |
| return NULL_TREE; |
| |
| /* If we are trying to make a sizetype for a small integer, use |
| size_int to pick up cached types to reduce duplicate nodes. */ |
| if (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type) |
| && !TREE_CONSTANT_OVERFLOW (arg1) |
| && compare_tree_int (arg1, 10000) < 0) |
| return size_int_type_wide (TREE_INT_CST_LOW (arg1), type); |
| |
| /* Given an integer constant, make new constant with new type, |
| appropriately sign-extended or truncated. */ |
| t = build_int_2 (TREE_INT_CST_LOW (arg1), |
| TREE_INT_CST_HIGH (arg1)); |
| TREE_TYPE (t) = type; |
| /* Indicate an overflow if (1) ARG1 already overflowed, |
| or (2) force_fit_type indicates an overflow. |
| Tell force_fit_type that an overflow has already occurred |
| if ARG1 is a too-large unsigned value and T is signed. |
| But don't indicate an overflow if converting a pointer. */ |
| TREE_OVERFLOW (t) |
| = ((force_fit_type (t, |
| (TREE_INT_CST_HIGH (arg1) < 0 |
| && (TREE_UNSIGNED (type) |
| < TREE_UNSIGNED (TREE_TYPE (arg1))))) |
| && ! POINTER_TYPE_P (TREE_TYPE (arg1))) |
| || TREE_OVERFLOW (arg1)); |
| TREE_CONSTANT_OVERFLOW (t) |
| = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1); |
| return t; |
| } |
| else if (TREE_CODE (arg1) == REAL_CST) |
| { |
| /* The following code implements the floating point to integer |
| conversion rules required by the Java Language Specification, |
| that IEEE NaNs are mapped to zero and values that overflow |
| the target precision saturate, i.e. values greater than |
| INT_MAX are mapped to INT_MAX, and values less than INT_MIN |
| are mapped to INT_MIN. These semantics are allowed by the |
| C and C++ standards that simply state that the behavior of |
| FP-to-integer conversion is unspecified upon overflow. */ |
| |
| HOST_WIDE_INT high, low; |
| |
| REAL_VALUE_TYPE x = TREE_REAL_CST (arg1); |
| /* If x is NaN, return zero and show we have an overflow. */ |
| if (REAL_VALUE_ISNAN (x)) |
| { |
| overflow = 1; |
| high = 0; |
| low = 0; |
| } |
| |
| /* See if X will be in range after truncation towards 0. |
| To compensate for truncation, move the bounds away from 0, |
| but reject if X exactly equals the adjusted bounds. */ |
| |
| if (! overflow) |
| { |
| tree lt = TYPE_MIN_VALUE (type); |
| REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt); |
| REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1); |
| if (! REAL_VALUES_LESS (l, x)) |
| { |
| overflow = 1; |
| high = TREE_INT_CST_HIGH (lt); |
| low = TREE_INT_CST_LOW (lt); |
| } |
| } |
| |
| if (! overflow) |
| { |
| tree ut = TYPE_MAX_VALUE (type); |
| if (ut) |
| { |
| REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut); |
| REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1); |
| if (! REAL_VALUES_LESS (x, u)) |
| { |
| overflow = 1; |
| high = TREE_INT_CST_HIGH (ut); |
| low = TREE_INT_CST_LOW (ut); |
| } |
| } |
| } |
| |
| if (! overflow) |
| REAL_VALUE_TO_INT (&low, &high, x); |
| |
| t = build_int_2 (low, high); |
| TREE_TYPE (t) = type; |
| TREE_OVERFLOW (t) |
| = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow); |
| TREE_CONSTANT_OVERFLOW (t) |
| = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1); |
| return t; |
| } |
| } |
| else if (TREE_CODE (type) == REAL_TYPE) |
| { |
| if (TREE_CODE (arg1) == INTEGER_CST) |
| return build_real_from_int_cst (type, arg1); |
| if (TREE_CODE (arg1) == REAL_CST) |
| { |
| if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1))) |
| { |
| /* We make a copy of ARG1 so that we don't modify an |
| existing constant tree. */ |
| t = copy_node (arg1); |
| TREE_TYPE (t) = type; |
| return t; |
| } |
| |
| t = build_real (type, |
| real_value_truncate (TYPE_MODE (type), |
| TREE_REAL_CST (arg1))); |
| |
| TREE_OVERFLOW (t) |
| = TREE_OVERFLOW (arg1) | force_fit_type (t, 0); |
| TREE_CONSTANT_OVERFLOW (t) |
| = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1); |
| return t; |
| } |
| } |
| return NULL_TREE; |
| } |
| |
| /* Convert expression ARG to type TYPE. Used by the middle-end for |
| simple conversions in preference to calling the front-end's convert. */ |
| |
| static tree |
| fold_convert (tree type, tree arg) |
| { |
| tree orig = TREE_TYPE (arg); |
| tree tem; |
| |
| if (type == orig) |
| return arg; |
| |
| if (TREE_CODE (arg) == ERROR_MARK |
| || TREE_CODE (type) == ERROR_MARK |
| || TREE_CODE (orig) == ERROR_MARK) |
| return error_mark_node; |
| |
| if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig)) |
| return fold (build1 (NOP_EXPR, type, arg)); |
| |
| if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)) |
| { |
| if (TREE_CODE (arg) == INTEGER_CST) |
| { |
| tem = fold_convert_const (NOP_EXPR, type, arg); |
| if (tem != NULL_TREE) |
| return tem; |
| } |
| if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)) |
| return fold (build1 (NOP_EXPR, type, arg)); |
| if (TREE_CODE (orig) == COMPLEX_TYPE) |
| { |
| tem = fold (build1 (REALPART_EXPR, TREE_TYPE (orig), arg)); |
| return fold_convert (type, tem); |
| } |
| if (TREE_CODE (orig) == VECTOR_TYPE |
| && GET_MODE_SIZE (TYPE_MODE (type)) |
| == GET_MODE_SIZE (TYPE_MODE (orig))) |
| return fold (build1 (NOP_EXPR, type, arg)); |
| } |
| else if (TREE_CODE (type) == REAL_TYPE) |
| { |
| if (TREE_CODE (arg) == INTEGER_CST) |
| { |
| tem = fold_convert_const (FLOAT_EXPR, type, arg); |
| if (tem != NULL_TREE) |
| return tem; |
| } |
| else if (TREE_CODE (arg) == REAL_CST) |
| { |
| tem = fold_convert_const (NOP_EXPR, type, arg); |
| if (tem != NULL_TREE) |
| return tem; |
| } |
| |
| if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)) |
| return fold (build1 (FLOAT_EXPR, type, arg)); |
| if (TREE_CODE (orig) == REAL_TYPE) |
| return fold (build1 (flag_float_store ? CONVERT_EXPR : NOP_EXPR, |
| type, arg)); |
| if (TREE_CODE (orig) == COMPLEX_TYPE) |
| { |
| tem = fold (build1 (REALPART_EXPR, TREE_TYPE (orig), arg)); |
| return fold_convert (type, tem); |
| } |
| } |
| else if (TREE_CODE (type) == COMPLEX_TYPE) |
| { |
| if (INTEGRAL_TYPE_P (orig) |
| || POINTER_TYPE_P (orig) |
| || TREE_CODE (orig) == REAL_TYPE) |
| return build (COMPLEX_EXPR, type, |
| fold_convert (TREE_TYPE (type), arg), |
| fold_convert (TREE_TYPE (type), integer_zero_node)); |
| if (TREE_CODE (orig) == COMPLEX_TYPE) |
| { |
| tree rpart, ipart; |
| |
| if (TREE_CODE (arg) == COMPLEX_EXPR) |
| { |
| rpart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 0)); |
| ipart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 1)); |
| return fold (build (COMPLEX_EXPR, type, rpart, ipart)); |
| } |
| |
| arg = save_expr (arg); |
| rpart = fold (build1 (REALPART_EXPR, TREE_TYPE (orig), arg)); |
| ipart = fold (build1 (IMAGPART_EXPR, TREE_TYPE (orig), arg)); |
| rpart = fold_convert (TREE_TYPE (type), rpart); |
| ipart = fold_convert (TREE_TYPE (type), ipart); |
| return fold (build (COMPLEX_EXPR, type, rpart, ipart)); |
| } |
| } |
| else if (TREE_CODE (type) == VECTOR_TYPE) |
| { |
| if ((INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)) |
| && GET_MODE_SIZE (TYPE_MODE (type)) |
| == GET_MODE_SIZE (TYPE_MODE (orig))) |
| return fold (build1 (NOP_EXPR, type, arg)); |
| if (TREE_CODE (orig) == VECTOR_TYPE |
| && GET_MODE_SIZE (TYPE_MODE (type)) |
| == GET_MODE_SIZE (TYPE_MODE (orig))) |
| return fold (build1 (NOP_EXPR, type, arg)); |
| } |
| else if (VOID_TYPE_P (type)) |
| return fold (build1 (CONVERT_EXPR, type, arg)); |
| abort (); |
| } |
| |
| /* Return an expr equal to X but certainly not valid as an lvalue. */ |
| |
| tree |
| non_lvalue (tree x) |
| { |
| tree result; |
| |
| /* These things are certainly not lvalues. */ |
| if (TREE_CODE (x) == NON_LVALUE_EXPR |
| || TREE_CODE (x) == INTEGER_CST |
| || TREE_CODE (x) == REAL_CST |
| || TREE_CODE (x) == STRING_CST |
| || TREE_CODE (x) == ADDR_EXPR) |
| return x; |
| |
| result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x); |
| TREE_CONSTANT (result) = TREE_CONSTANT (x); |
| return result; |
| } |
| |
| /* Nonzero means lvalues are limited to those valid in pedantic ANSI C. |
| Zero means allow extended lvalues. */ |
| |
| int pedantic_lvalues; |
| |
| /* When pedantic, return an expr equal to X but certainly not valid as a |
| pedantic lvalue. Otherwise, return X. */ |
| |
| tree |
| pedantic_non_lvalue (tree x) |
| { |
| if (pedantic_lvalues) |
| return non_lvalue (x); |
| else |
| return x; |
| } |
| |
| /* Given a tree comparison code, return the code that is the logical inverse |
| of the given code. It is not safe to do this for floating-point |
| comparisons, except for NE_EXPR and EQ_EXPR. */ |
| |
| static enum tree_code |
| invert_tree_comparison (enum tree_code code) |
| { |
| switch (code) |
| { |
| case EQ_EXPR: |
| return NE_EXPR; |
| case NE_EXPR: |
| return EQ_EXPR; |
| case GT_EXPR: |
| return LE_EXPR; |
| case GE_EXPR: |
| return LT_EXPR; |
| case LT_EXPR: |
| return GE_EXPR; |
| case LE_EXPR: |
| return GT_EXPR; |
| default: |
| abort (); |
| } |
| } |
| |
| /* Similar, but return the comparison that results if the operands are |
| swapped. This is safe for floating-point. */ |
| |
| static enum tree_code |
| swap_tree_comparison (enum tree_code code) |
| { |
| switch (code) |
| { |
| case EQ_EXPR: |
| case NE_EXPR: |
| return code; |
| case GT_EXPR: |
| return LT_EXPR; |
| case GE_EXPR: |
| return LE_EXPR; |
| case LT_EXPR: |
| return GT_EXPR; |
| case LE_EXPR: |
| return GE_EXPR; |
| default: |
| abort (); |
| } |
| } |
| |
| |
| /* Convert a comparison tree code from an enum tree_code representation |
| into a compcode bit-based encoding. This function is the inverse of |
| compcode_to_comparison. */ |
| |
| static int |
| comparison_to_compcode (enum tree_code code) |
| { |
| switch (code) |
| { |
| case LT_EXPR: |
| return COMPCODE_LT; |
| case EQ_EXPR: |
| return COMPCODE_EQ; |
| case LE_EXPR: |
| return COMPCODE_LE; |
| case GT_EXPR: |
| return COMPCODE_GT; |
| case NE_EXPR: |
| return COMPCODE_NE; |
| case GE_EXPR: |
| return COMPCODE_GE; |
| default: |
| abort (); |
| } |
| } |
| |
| /* Convert a compcode bit-based encoding of a comparison operator back |
| to GCC's enum tree_code representation. This function is the |
| inverse of comparison_to_compcode. */ |
| |
| static enum tree_code |
| compcode_to_comparison (int code) |
| { |
| switch (code) |
| { |
| case COMPCODE_LT: |
| return LT_EXPR; |
| case COMPCODE_EQ: |
| return EQ_EXPR; |
| case COMPCODE_LE: |
| return LE_EXPR; |
| case COMPCODE_GT: |
| return GT_EXPR; |
| case COMPCODE_NE: |
| return NE_EXPR; |
| case COMPCODE_GE: |
| return GE_EXPR; |
| default: |
| abort (); |
| } |
| } |
| |
| /* Return nonzero if CODE is a tree code that represents a truth value. */ |
| |
| static int |
| truth_value_p (enum tree_code code) |
| { |
| return (TREE_CODE_CLASS (code) == '<' |
| || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR |
| || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR |
| || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR); |
| } |
| |
| /* Return nonzero if two operands (typically of the same tree node) |
| are necessarily equal. If either argument has side-effects this |
| function returns zero. |
| |
| If ONLY_CONST is nonzero, only return nonzero for constants. |
| This function tests whether the operands are indistinguishable; |
| it does not test whether they are equal using C's == operation. |
| The distinction is important for IEEE floating point, because |
| (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and |
| (2) two NaNs may be indistinguishable, but NaN!=NaN. |
| |
| If ONLY_CONST is zero, a VAR_DECL is considered equal to itself |
| even though it may hold multiple values during a function. |
| This is because a GCC tree node guarantees that nothing else is |
| executed between the evaluation of its "operands" (which may often |
| be evaluated in arbitrary order). Hence if the operands themselves |
| don't side-effect, the VAR_DECLs, PARM_DECLs etc... must hold the |
| same value in each operand/subexpression. Hence a zero value for |
| ONLY_CONST assumes isochronic (or instantaneous) tree equivalence. |
| If comparing arbitrary expression trees, such as from different |
| statements, ONLY_CONST must usually be nonzero. */ |
| |
| int |
| operand_equal_p (tree arg0, tree arg1, int only_const) |
| { |
| tree fndecl; |
| |
| /* If both types don't have the same signedness, then we can't consider |
| them equal. We must check this before the STRIP_NOPS calls |
| because they may change the signedness of the arguments. */ |
| if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1))) |
| return 0; |
| |
| STRIP_NOPS (arg0); |
| STRIP_NOPS (arg1); |
| |
| if (TREE_CODE (arg0) != TREE_CODE (arg1) |
| /* This is needed for conversions and for COMPONENT_REF. |
| Might as well play it safe and always test this. */ |
| || TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK |
| || TREE_CODE (TREE_TYPE (arg1)) == ERROR_MARK |
| || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1))) |
| return 0; |
| |
| /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal. |
| We don't care about side effects in that case because the SAVE_EXPR |
| takes care of that for us. In all other cases, two expressions are |
| equal if they have no side effects. If we have two identical |
| expressions with side effects that should be treated the same due |
| to the only side effects being identical SAVE_EXPR's, that will |
| be detected in the recursive calls below. */ |
| if (arg0 == arg1 && ! only_const |
| && (TREE_CODE (arg0) == SAVE_EXPR |
| || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1)))) |
| return 1; |
| |
| /* Next handle constant cases, those for which we can return 1 even |
| if ONLY_CONST is set. */ |
| if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1)) |
| switch (TREE_CODE (arg0)) |
| { |
| case INTEGER_CST: |
| return (! TREE_CONSTANT_OVERFLOW (arg0) |
| && ! TREE_CONSTANT_OVERFLOW (arg1) |
| && tree_int_cst_equal (arg0, arg1)); |
| |
| case REAL_CST: |
| return (! TREE_CONSTANT_OVERFLOW (arg0) |
| && ! TREE_CONSTANT_OVERFLOW (arg1) |
| && REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0), |
| TREE_REAL_CST (arg1))); |
| |
| case VECTOR_CST: |
| { |
| tree v1, v2; |
| |
| if (TREE_CONSTANT_OVERFLOW (arg0) |
| || TREE_CONSTANT_OVERFLOW (arg1)) |
| return 0; |
| |
| v1 = TREE_VECTOR_CST_ELTS (arg0); |
| v2 = TREE_VECTOR_CST_ELTS (arg1); |
| while (v1 && v2) |
| { |
| if (!operand_equal_p (v1, v2, only_const)) |
| return 0; |
| v1 = TREE_CHAIN (v1); |
| v2 = TREE_CHAIN (v2); |
| } |
| |
| return 1; |
| } |
| |
| case COMPLEX_CST: |
| return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1), |
| only_const) |
| && operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1), |
| only_const)); |
| |
| case STRING_CST: |
| return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1) |
| && ! memcmp (TREE_STRING_POINTER (arg0), |
| TREE_STRING_POINTER (arg1), |
| TREE_STRING_LENGTH (arg0))); |
| |
| case ADDR_EXPR: |
| return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), |
| 0); |
| default: |
| break; |
| } |
| |
| if (only_const) |
| return 0; |
| |
| switch (TREE_CODE_CLASS (TREE_CODE (arg0))) |
| { |
| case '1': |
| /* Two conversions are equal only if signedness and modes match. */ |
| switch (TREE_CODE (arg0)) |
| { |
| case NOP_EXPR: |
| case CONVERT_EXPR: |
| case FIX_CEIL_EXPR: |
| case FIX_TRUNC_EXPR: |
| case FIX_FLOOR_EXPR: |
| case FIX_ROUND_EXPR: |
| if (TREE_UNSIGNED (TREE_TYPE (arg0)) |
| != TREE_UNSIGNED (TREE_TYPE (arg1))) |
| return 0; |
| break; |
| default: |
| break; |
| } |
| |
| return operand_equal_p (TREE_OPERAND (arg0, 0), |
| TREE_OPERAND (arg1, 0), 0); |
| |
| case '<': |
| case '2': |
| if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0) |
| && operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), |
| 0)) |
| return 1; |
| |
| /* For commutative ops, allow the other order. */ |
| return ((TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MULT_EXPR |
| || TREE_CODE (arg0) == MIN_EXPR || TREE_CODE (arg0) == MAX_EXPR |
| || TREE_CODE (arg0) == BIT_IOR_EXPR |
| || TREE_CODE (arg0) == BIT_XOR_EXPR |
| || TREE_CODE (arg0) == BIT_AND_EXPR |
| || TREE_CODE (arg0) == NE_EXPR || TREE_CODE (arg0) == EQ_EXPR) |
| && operand_equal_p (TREE_OPERAND (arg0, 0), |
| TREE_OPERAND (arg1, 1), 0) |
| && operand_equal_p (TREE_OPERAND (arg0, 1), |
| TREE_OPERAND (arg1, 0), 0)); |
| |
| case 'r': |
| /* If either of the pointer (or reference) expressions we are |
| dereferencing contain a side effect, these cannot be equal. */ |
| if (TREE_SIDE_EFFECTS (arg0) |
| || TREE_SIDE_EFFECTS (arg1)) |
| return 0; |
| |
| switch (TREE_CODE (arg0)) |
| { |
| case INDIRECT_REF: |
| return operand_equal_p (TREE_OPERAND (arg0, 0), |
| TREE_OPERAND (arg1, 0), 0); |
| |
| case COMPONENT_REF: |
| case ARRAY_REF: |
| case ARRAY_RANGE_REF: |
| return (operand_equal_p (TREE_OPERAND (arg0, 0), |
| TREE_OPERAND (arg1, 0), 0) |
| && operand_equal_p (TREE_OPERAND (arg0, 1), |
| TREE_OPERAND (arg1, 1), 0)); |
| |
| case BIT_FIELD_REF: |
| return (operand_equal_p (TREE_OPERAND (arg0, 0), |
| TREE_OPERAND (arg1, 0), 0) |
| && operand_equal_p (TREE_OPERAND (arg0, 1), |
| TREE_OPERAND (arg1, 1), 0) |
| && operand_equal_p (TREE_OPERAND (arg0, 2), |
| TREE_OPERAND (arg1, 2), 0)); |
| default: |
| return 0; |
| } |
| |
| case 'e': |
| switch (TREE_CODE (arg0)) |
| { |
| case ADDR_EXPR: |
| case TRUTH_NOT_EXPR: |
| return operand_equal_p (TREE_OPERAND (arg0, 0), |
| TREE_OPERAND (arg1, 0), 0); |
| |
| case RTL_EXPR: |
| return rtx_equal_p (RTL_EXPR_RTL (arg0), RTL_EXPR_RTL (arg1)); |
| |
| case CALL_EXPR: |
| /* If the CALL_EXPRs call different functions, then they |
| clearly can not be equal. */ |
| if (! operand_equal_p (TREE_OPERAND (arg0, 0), |
| TREE_OPERAND (arg1, 0), 0)) |
| return 0; |
| |
| /* Only consider const functions equivalent. */ |
| fndecl = get_callee_fndecl (arg0); |
| if (fndecl == NULL_TREE |
| || ! (flags_from_decl_or_type (fndecl) & ECF_CONST)) |
| return 0; |
| |
| /* Now see if all the arguments are the same. operand_equal_p |
| does not handle TREE_LIST, so we walk the operands here |
| feeding them to operand_equal_p. */ |
| arg0 = TREE_OPERAND (arg0, 1); |
| arg1 = TREE_OPERAND (arg1, 1); |
| while (arg0 && arg1) |
| { |
| if (! operand_equal_p (TREE_VALUE (arg0), TREE_VALUE (arg1), 0)) |
| return 0; |
| |
| arg0 = TREE_CHAIN (arg0); |
| arg1 = TREE_CHAIN (arg1); |
| } |
| |
| /* If we get here and both argument lists are exhausted |
| then the CALL_EXPRs are equal. */ |
| return ! (arg0 || arg1); |
| |
| default: |
| return 0; |
| } |
| |
| case 'd': |
| /* Consider __builtin_sqrt equal to sqrt. */ |
| return TREE_CODE (arg0) == FUNCTION_DECL |
| && DECL_BUILT_IN (arg0) && DECL_BUILT_IN (arg1) |
| && DECL_BUILT_IN_CLASS (arg0) == DECL_BUILT_IN_CLASS (arg1) |
| && DECL_FUNCTION_CODE (arg0) == DECL_FUNCTION_CODE (arg1); |
| |
| default: |
| return 0; |
| } |
| } |
| |
| /* Similar to operand_equal_p, but see if ARG0 might have been made by |
| shorten_compare from ARG1 when ARG1 was being compared with OTHER. |
| |
| When in doubt, return 0. */ |
| |
| static int |
| operand_equal_for_comparison_p (tree arg0, tree arg1, tree other) |
| { |
| int unsignedp1, unsignedpo; |
| tree primarg0, primarg1, primother; |
| unsigned int correct_width; |
| |
| if (operand_equal_p (arg0, arg1, 0)) |
| return 1; |
| |
| if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)) |
| || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1))) |
| return 0; |
| |
| /* Discard any conversions that don't change the modes of ARG0 and ARG1 |
| and see if the inner values are the same. This removes any |
| signedness comparison, which doesn't matter here. */ |
| primarg0 = arg0, primarg1 = arg1; |
| STRIP_NOPS (primarg0); |
| STRIP_NOPS (primarg1); |
| if (operand_equal_p (primarg0, primarg1, 0)) |
| return 1; |
| |
| /* Duplicate what shorten_compare does to ARG1 and see if that gives the |
| actual comparison operand, ARG0. |
| |
| First throw away any conversions to wider types |
| already present in the operands. */ |
| |
| primarg1 = get_narrower (arg1, &unsignedp1); |
| primother = get_narrower (other, &unsignedpo); |
| |
| correct_width = TYPE_PRECISION (TREE_TYPE (arg1)); |
| if (unsignedp1 == unsignedpo |
| && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width |
| && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width) |
| { |
| tree type = TREE_TYPE (arg0); |
| |
| /* Make sure shorter operand is extended the right way |
| to match the longer operand. */ |
| primarg1 = fold_convert ((*lang_hooks.types.signed_or_unsigned_type) |
| (unsignedp1, TREE_TYPE (primarg1)), primarg1); |
| |
| if (operand_equal_p (arg0, fold_convert (type, primarg1), 0)) |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
| /* See if ARG is an expression that is either a comparison or is performing |
| arithmetic on comparisons. The comparisons must only be comparing |
| two different values, which will be stored in *CVAL1 and *CVAL2; if |
| they are nonzero it means that some operands have already been found. |
| No variables may be used anywhere else in the expression except in the |
| comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around |
| the expression and save_expr needs to be called with CVAL1 and CVAL2. |
| |
| If this is true, return 1. Otherwise, return zero. */ |
| |
| static int |
| twoval_comparison_p (tree arg, tree *cval1, tree *cval2, int *save_p) |
| { |
| enum tree_code code = TREE_CODE (arg); |
| char class = TREE_CODE_CLASS (code); |
| |
| /* We can handle some of the 'e' cases here. */ |
| if (class == 'e' && code == TRUTH_NOT_EXPR) |
| class = '1'; |
| else if (class == 'e' |
| && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR |
| || code == COMPOUND_EXPR)) |
| class = '2'; |
| |
| else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0 |
| && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg, 0))) |
| { |
| /* If we've already found a CVAL1 or CVAL2, this expression is |
| two complex to handle. */ |
| if (*cval1 || *cval2) |
| return 0; |
| |
| class = '1'; |
| *save_p = 1; |
| } |
| |
| switch (class) |
| { |
| case '1': |
| return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p); |
| |
| case '2': |
| return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p) |
| && twoval_comparison_p (TREE_OPERAND (arg, 1), |
| cval1, cval2, save_p)); |
| |
| case 'c': |
| return 1; |
| |
| case 'e': |
| if (code == COND_EXPR) |
| return (twoval_comparison_p (TREE_OPERAND (arg, 0), |
| cval1, cval2, save_p) |
| && twoval_comparison_p (TREE_OPERAND (arg, 1), |
| cval1, cval2, save_p) |
| && twoval_comparison_p (TREE_OPERAND (arg, 2), |
| cval1, cval2, save_p)); |
| return 0; |
| |
| case '<': |
| /* First see if we can handle the first operand, then the second. For |
| the second operand, we know *CVAL1 can't be zero. It must be that |
| one side of the comparison is each of the values; test for the |
| case where this isn't true by failing if the two operands |
| are the same. */ |
| |
| if (operand_equal_p (TREE_OPERAND (arg, 0), |
| TREE_OPERAND (arg, 1), 0)) |
| return 0; |
| |
| if (*cval1 == 0) |
| *cval1 = TREE_OPERAND (arg, 0); |
| else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0)) |
| ; |
| else if (*cval2 == 0) |
| *cval2 = TREE_OPERAND (arg, 0); |
| else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0)) |
| ; |
| else |
| return 0; |
| |
| if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0)) |
| ; |
| else if (*cval2 == 0) |
| *cval2 = TREE_OPERAND (arg, 1); |
| else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0)) |
| ; |
| else |
| return 0; |
| |
| return 1; |
| |
| default: |
| return 0; |
| } |
| } |
| |
| /* ARG is a tree that is known to contain just arithmetic operations and |
| comparisons. Evaluate the operations in the tree substituting NEW0 for |
| any occurrence of OLD0 as an operand of a comparison and likewise for |
| NEW1 and OLD1. */ |
| |
| static tree |
| eval_subst (tree arg, tree old0, tree new0, tree old1, tree new1) |
| { |
| tree type = TREE_TYPE (arg); |
| enum tree_code code = TREE_CODE (arg); |
| char class = TREE_CODE_CLASS (code); |
| |
| /* We can handle some of the 'e' cases here. */ |
| if (class == 'e' && code == TRUTH_NOT_EXPR) |
| class = '1'; |
| else if (class == 'e' |
| && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR)) |
| class = '2'; |
| |
| switch (class) |
| { |
| case '1': |
| return fold (build1 (code, type, |
| eval_subst (TREE_OPERAND (arg, 0), |
| old0, new0, old1, new1))); |
| |
| case '2': |
| return fold (build (code, type, |
| eval_subst (TREE_OPERAND (arg, 0), |
| old0, new0, old1, new1), |
| eval_subst (TREE_OPERAND (arg, 1), |
| old0, new0, old1, new1))); |
| |
| case 'e': |
| switch (code) |
| { |
| case SAVE_EXPR: |
| return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1); |
| |
| case COMPOUND_EXPR: |
| return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1); |
| |
| case COND_EXPR: |
| return fold (build (code, type, |
| eval_subst (TREE_OPERAND (arg, 0), |
| old0, new0, old1, new1), |
| eval_subst (TREE_OPERAND (arg, 1), |
| old0, new0, old1, new1), |
| eval_subst (TREE_OPERAND (arg, 2), |
| old0, new0, old1, new1))); |
| default: |
| break; |
| } |
| /* Fall through - ??? */ |
| |
| case '<': |
| { |
| tree arg0 = TREE_OPERAND (arg, 0); |
| tree arg1 = TREE_OPERAND (arg, 1); |
| |
| /* We need to check both for exact equality and tree equality. The |
| former will be true if the operand has a side-effect. In that |
| case, we know the operand occurred exactly once. */ |
| |
| if (arg0 == old0 || operand_equal_p (arg0, old0, 0)) |
| arg0 = new0; |
| else if (arg0 == old1 || operand_equal_p (arg0, old1, 0)) |
| arg0 = new1; |
| |
| if (arg1 == old0 || operand_equal_p (arg1, old0, 0)) |
| arg1 = new0; |
| else if (arg1 == old1 || operand_equal_p (arg1, old1, 0)) |
| arg1 = new1; |
| |
| return fold (build (code, type, arg0, arg1)); |
| } |
| |
| default: |
| return arg; |
| } |
| } |
| |
| /* Return a tree for the case when the result of an expression is RESULT |
| converted to TYPE and OMITTED was previously an operand of the expression |
| but is now not needed (e.g., we folded OMITTED * 0). |
| |
| If OMITTED has side effects, we must evaluate it. Otherwise, just do |
| the conversion of RESULT to TYPE. */ |
| |
| tree |
| omit_one_operand (tree type, tree result, tree omitted) |
| { |
| tree t = fold_convert (type, result); |
| |
| if (TREE_SIDE_EFFECTS (omitted)) |
| return build (COMPOUND_EXPR, type, omitted, t); |
| |
| return non_lvalue (t); |
| } |
| |
| /* Similar, but call pedantic_non_lvalue instead of non_lvalue. */ |
| |
| static tree |
| pedantic_omit_one_operand (tree type, tree result, tree omitted) |
| { |
| tree t = fold_convert (type, result); |
| |
| if (TREE_SIDE_EFFECTS (omitted)) |
| return build (COMPOUND_EXPR, type, omitted, t); |
| |
| return pedantic_non_lvalue (t); |
| } |
| |
| /* Return a simplified tree node for the truth-negation of ARG. This |
| never alters ARG itself. We assume that ARG is an operation that |
| returns a truth value (0 or 1). */ |
| |
| tree |
| invert_truthvalue (tree arg) |
| { |
| tree type = TREE_TYPE (arg); |
| enum tree_code code = TREE_CODE (arg); |
| |
| if (code == ERROR_MARK) |
| return arg; |
| |
| /* If this is a comparison, we can simply invert it, except for |
| floating-point non-equality comparisons, in which case we just |
| enclose a TRUTH_NOT_EXPR around what we have. */ |
| |
| if (TREE_CODE_CLASS (code) == '<') |
| { |
| if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0))) |
| && !flag_unsafe_math_optimizations |
| && code != NE_EXPR |
| && code != EQ_EXPR) |
| return build1 (TRUTH_NOT_EXPR, type, arg); |
| else |
| return build (invert_tree_comparison (code), type, |
| TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1)); |
| } |
| |
| switch (code) |
| { |
| case INTEGER_CST: |
| return fold_convert (type, build_int_2 (integer_zerop (arg), 0)); |
| |
| case TRUTH_AND_EXPR: |
| return build (TRUTH_OR_EXPR, type, |
| invert_truthvalue (TREE_OPERAND (arg, 0)), |
| invert_truthvalue (TREE_OPERAND (arg, 1))); |
| |
| case TRUTH_OR_EXPR: |
| return build (TRUTH_AND_EXPR, type, |
| invert_truthvalue (TREE_OPERAND (arg, 0)), |
| invert_truthvalue (TREE_OPERAND (arg, 1))); |
| |
| case TRUTH_XOR_EXPR: |
| /* Here we can invert either operand. We invert the first operand |
| unless the second operand is a TRUTH_NOT_EXPR in which case our |
| result is the XOR of the first operand with the inside of the |
| negation of the second operand. */ |
| |
| if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR) |
| return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0), |
| TREE_OPERAND (TREE_OPERAND (arg, 1), 0)); |
| else |
| return build (TRUTH_XOR_EXPR, type, |
| invert_truthvalue (TREE_OPERAND (arg, 0)), |
| TREE_OPERAND (arg, 1)); |
| |
| case TRUTH_ANDIF_EXPR: |
| return build (TRUTH_ORIF_EXPR, type, |
| invert_truthvalue (TREE_OPERAND (arg, 0)), |
| invert_truthvalue (TREE_OPERAND (arg, 1))); |
| |
| case TRUTH_ORIF_EXPR: |
| return build (TRUTH_ANDIF_EXPR, type, |
| invert_truthvalue (TREE_OPERAND (arg, 0)), |
| invert_truthvalue (TREE_OPERAND (arg, 1))); |
| |
| case TRUTH_NOT_EXPR: |
| return TREE_OPERAND (arg, 0); |
| |
| case COND_EXPR: |
| return build (COND_EXPR, type, TREE_OPERAND (arg, 0), |
| invert_truthvalue (TREE_OPERAND (arg, 1)), |
| invert_truthvalue (TREE_OPERAND (arg, 2))); |
| |
| case COMPOUND_EXPR: |
| return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0), |
| invert_truthvalue (TREE_OPERAND (arg, 1))); |
| |
| case WITH_RECORD_EXPR: |
| return build (WITH_RECORD_EXPR, type, |
| invert_truthvalue (TREE_OPERAND (arg, 0)), |
| TREE_OPERAND (arg, 1)); |
| |
| case NON_LVALUE_EXPR: |
| return invert_truthvalue (TREE_OPERAND (arg, 0)); |
| |
| case NOP_EXPR: |
| case CONVERT_EXPR: |
| case FLOAT_EXPR: |
| return build1 (TREE_CODE (arg), type, |
| invert_truthvalue (TREE_OPERAND (arg, 0))); |
| |
| case BIT_AND_EXPR: |
| if (!integer_onep (TREE_OPERAND (arg, 1))) |
| break; |
| return build (EQ_EXPR, type, arg, |
| fold_convert (type, integer_zero_node)); |
| |
| case SAVE_EXPR: |
| return build1 (TRUTH_NOT_EXPR, type, arg); |
| |
| case CLEANUP_POINT_EXPR: |
| return build1 (CLEANUP_POINT_EXPR, type, |
| invert_truthvalue (TREE_OPERAND (arg, 0))); |
| |
| default: |
| break; |
| } |
| if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE) |
| abort (); |
| return build1 (TRUTH_NOT_EXPR, type, arg); |
| } |
| |
| /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both |
| operands are another bit-wise operation with a common input. If so, |
| distribute the bit operations to save an operation and possibly two if |
| constants are involved. For example, convert |
| (A | B) & (A | C) into A | (B & C) |
| Further simplification will occur if B and C are constants. |
| |
| If this optimization cannot be done, 0 will be returned. */ |
| |
| static tree |
| distribute_bit_expr (enum tree_code code, tree type, tree arg0, tree arg1) |
| { |
| tree common; |
| tree left, right; |
| |
| if (TREE_CODE (arg0) != TREE_CODE (arg1) |
| || TREE_CODE (arg0) == code |
| || (TREE_CODE (arg0) != BIT_AND_EXPR |
| && TREE_CODE (arg0) != BIT_IOR_EXPR)) |
| return 0; |
| |
| if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0)) |
| { |
| common = TREE_OPERAND (arg0, 0); |
| left = TREE_OPERAND (arg0, 1); |
| right = TREE_OPERAND (arg1, 1); |
| } |
| else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0)) |
| { |
| common = TREE_OPERAND (arg0, 0); |
| left = TREE_OPERAND (arg0, 1); |
| right = TREE_OPERAND (arg1, 0); |
| } |
| else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0)) |
| { |
| common = TREE_OPERAND (arg0, 1); |
| left = TREE_OPERAND (arg0, 0); |
| right = TREE_OPERAND (arg1, 1); |
| } |
| else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0)) |
| { |
| common = TREE_OPERAND (arg0, 1); |
| left = TREE_OPERAND (arg0, 0); |
| right = TREE_OPERAND (arg1, 0); |
| } |
| else |
| return 0; |
| |
| return fold (build (TREE_CODE (arg0), type, common, |
| fold (build (code, type, left, right)))); |
| } |
| |
| /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER |
| starting at BITPOS. The field is unsigned if UNSIGNEDP is nonzero. */ |
| |
| static tree |
| make_bit_field_ref (tree inner, tree type, int bitsize, int bitpos, |
| int unsignedp) |
| { |
| tree result = build (BIT_FIELD_REF, type, inner, |
| size_int (bitsize), bitsize_int (bitpos)); |
| |
| TREE_UNSIGNED (result) = unsignedp; |
| |
| return result; |
| } |
| |
| /* Optimize a bit-field compare. |
| |
| There are two cases: First is a compare against a constant and the |
| second is a comparison of two items where the fields are at the same |
| bit position relative to the start of a chunk (byte, halfword, word) |
| large enough to contain it. In these cases we can avoid the shift |
| implicit in bitfield extractions. |
| |
| For constants, we emit a compare of the shifted constant with the |
| BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being |
| compared. For two fields at the same position, we do the ANDs with the |
| similar mask and compare the result of the ANDs. |
| |
| CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR. |
| COMPARE_TYPE is the type of the comparison, and LHS and RHS |
| are the left and right operands of the comparison, respectively. |
| |
| If the optimization described above can be done, we return the resulting |
| tree. Otherwise we return zero. */ |
| |
| static tree |
| optimize_bit_field_compare (enum tree_code code, tree compare_type, |
| tree lhs, tree rhs) |
| { |
| HOST_WIDE_INT lbitpos, lbitsize, rbitpos, rbitsize, nbitpos, nbitsize; |
| tree type = TREE_TYPE (lhs); |
| tree signed_type, unsigned_type; |
| int const_p = TREE_CODE (rhs) == INTEGER_CST; |
| enum machine_mode lmode, rmode, nmode; |
| int lunsignedp, runsignedp; |
| int lvolatilep = 0, rvolatilep = 0; |
| tree linner, rinner = NULL_TREE; |
| tree mask; |
| tree offset; |
| |
| /* Get all the information about the extractions being done. If the bit size |
| if the same as the size of the underlying object, we aren't doing an |
| extraction at all and so can do nothing. We also don't want to |
| do anything if the inner expression is a PLACEHOLDER_EXPR since we |
| then will no longer be able to replace it. */ |
| linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode, |
| &lunsignedp, &lvolatilep); |
| if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0 |
| || offset != 0 || TREE_CODE (linner) == PLACEHOLDER_EXPR) |
| return 0; |
| |
| if (!const_p) |
| { |
| /* If this is not a constant, we can only do something if bit positions, |
| sizes, and signedness are the same. */ |
| rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode, |
| &runsignedp, &rvolatilep); |
| |
| if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize |
| || lunsignedp != runsignedp || offset != 0 |
| || TREE_CODE (rinner) == PLACEHOLDER_EXPR) |
| return 0; |
| } |
| |
| /* See if we can find a mode to refer to this field. We should be able to, |
| but fail if we can't. */ |
| nmode = get_best_mode (lbitsize, lbitpos, |
| const_p ? TYPE_ALIGN (TREE_TYPE (linner)) |
| : MIN (TYPE_ALIGN (TREE_TYPE (linner)), |
| TYPE_ALIGN (TREE_TYPE (rinner))), |
| word_mode, lvolatilep || rvolatilep); |
| if (nmode == VOIDmode) |
| return 0; |
| |
| /* Set signed and unsigned types of the precision of this mode for the |
| shifts below. */ |
| signed_type = (*lang_hooks.types.type_for_mode) (nmode, 0); |
| unsigned_type = (*lang_hooks.types.type_for_mode) (nmode, 1); |
| |
| /* Compute the bit position and size for the new reference and our offset |
| within it. If the new reference is the same size as the original, we |
| won't optimize anything, so return zero. */ |
| nbitsize = GET_MODE_BITSIZE (nmode); |
| nbitpos = lbitpos & ~ (nbitsize - 1); |
| lbitpos -= nbitpos; |
| if (nbitsize == lbitsize) |
| return 0; |
| |
| if (BYTES_BIG_ENDIAN) |
| lbitpos = nbitsize - lbitsize - lbitpos; |
| |
| /* Make the mask to be used against the extracted field. */ |
| mask = build_int_2 (~0, ~0); |
| TREE_TYPE (mask) = unsigned_type; |
| force_fit_type (mask, 0); |
| mask = fold_convert (unsigned_type, mask); |
| mask = const_binop (LSHIFT_EXPR, mask, size_int (nbitsize - lbitsize), 0); |
| mask = const_binop (RSHIFT_EXPR, mask, |
| size_int (nbitsize - lbitsize - lbitpos), 0); |
| |
| if (! const_p) |
| /* If not comparing with constant, just rework the comparison |
| and return. */ |
| return build (code, compare_type, |
| build (BIT_AND_EXPR, unsigned_type, |
| make_bit_field_ref (linner, unsigned_type, |
| nbitsize, nbitpos, 1), |
| mask), |
| build (BIT_AND_EXPR, unsigned_type, |
| make_bit_field_ref (rinner, unsigned_type, |
| nbitsize, nbitpos, 1), |
| mask)); |
| |
| /* Otherwise, we are handling the constant case. See if the constant is too |
| big for the field. Warn and return a tree of for 0 (false) if so. We do |
| this not only for its own sake, but to avoid having to test for this |
| error case below. If we didn't, we might generate wrong code. |
| |
| For unsigned fields, the constant shifted right by the field length should |
| be all zero. For signed fields, the high-order bits should agree with |
| the sign bit. */ |
| |
| if (lunsignedp) |
| { |
| if (! integer_zerop (const_binop (RSHIFT_EXPR, |
| fold_convert (unsigned_type, rhs), |
| size_int (lbitsize), 0))) |
| { |
| warning ("comparison is always %d due to width of bit-field", |
| code == NE_EXPR); |
| return fold_convert (compare_type, |
| (code == NE_EXPR |
| ? integer_one_node : integer_zero_node)); |
| } |
| } |
| else |
| { |
| tree tem = const_binop (RSHIFT_EXPR, fold_convert (signed_type, rhs), |
| size_int (lbitsize - 1), 0); |
| if (! integer_zerop (tem) && ! integer_all_onesp (tem)) |
| { |
| warning ("comparison is always %d due to width of bit-field", |
| code == NE_EXPR); |
| return fold_convert (compare_type, |
| (code == NE_EXPR |
| ? integer_one_node : integer_zero_node)); |
| } |
| } |
| |
| /* Single-bit compares should always be against zero. */ |
| if (lbitsize == 1 && ! integer_zerop (rhs)) |
| { |
| code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR; |
| rhs = fold_convert (type, integer_zero_node); |
| } |
| |
| /* Make a new bitfield reference, shift the constant over the |
| appropriate number of bits and mask it with the computed mask |
| (in case this was a signed field). If we changed it, make a new one. */ |
| lhs = make_bit_field_ref (linner, unsigned_type, nbitsize, nbitpos, 1); |
| if (lvolatilep) |
| { |
| TREE_SIDE_EFFECTS (lhs) = 1; |
| TREE_THIS_VOLATILE (lhs) = 1; |
| } |
| |
| rhs = fold (const_binop (BIT_AND_EXPR, |
| const_binop (LSHIFT_EXPR, |
| fold_convert (unsigned_type, rhs), |
| size_int (lbitpos), 0), |
| mask, 0)); |
| |
| return build (code, compare_type, |
| build (BIT_AND_EXPR, unsigned_type, lhs, mask), |
| rhs); |
| } |
| |
| /* Subroutine for fold_truthop: decode a field reference. |
| |
| If EXP is a comparison reference, we return the innermost reference. |
| |
| *PBITSIZE is set to the number of bits in the reference, *PBITPOS is |
| set to the starting bit number. |
| |
| If the innermost field can be completely contained in a mode-sized |
| unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode. |
| |
| *PVOLATILEP is set to 1 if the any expression encountered is volatile; |
| otherwise it is not changed. |
| |
| *PUNSIGNEDP is set to the signedness of the field. |
| |
| *PMASK is set to the mask used. This is either contained in a |
| BIT_AND_EXPR or derived from the width of the field. |
| |
| *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any. |
| |
| Return 0 if this is not a component reference or is one that we can't |
| do anything with. */ |
| |
| static tree |
| decode_field_reference (tree exp, HOST_WIDE_INT *pbitsize, |
| HOST_WIDE_INT *pbitpos, enum machine_mode *pmode, |
| int *punsignedp, int *pvolatilep, |
| tree *pmask, tree *pand_mask) |
| { |
| tree outer_type = 0; |
| tree and_mask = 0; |
| tree mask, inner, offset; |
| tree unsigned_type; |
| unsigned int precision; |
| |
| /* All the optimizations using this function assume integer fields. |
| There are problems with FP fields since the type_for_size call |
| below can fail for, e.g., XFmode. */ |
| if (! INTEGRAL_TYPE_P (TREE_TYPE (exp))) |
| return 0; |
| |
| /* We are interested in the bare arrangement of bits, so strip everything |
| that doesn't affect the machine mode. However, record the type of the |
| outermost expression if it may matter below. */ |
| if (TREE_CODE (exp) == NOP_EXPR |
| || TREE_CODE (exp) == CONVERT_EXPR |
| || TREE_CODE (exp) == NON_LVALUE_EXPR) |
| outer_type = TREE_TYPE (exp); |
| STRIP_NOPS (exp); |
| |
| if (TREE_CODE (exp) == BIT_AND_EXPR) |
| { |
| and_mask = TREE_OPERAND (exp, 1); |
| exp = TREE_OPERAND (exp, 0); |
| STRIP_NOPS (exp); STRIP_NOPS (and_mask); |
| if (TREE_CODE (and_mask) != INTEGER_CST) |
| return 0; |
| } |
| |
| inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode, |
| punsignedp, pvolatilep); |
| if ((inner == exp && and_mask == 0) |
| || *pbitsize < 0 || offset != 0 |
| || TREE_CODE (inner) == PLACEHOLDER_EXPR) |
| return 0; |
| |
| /* If the number of bits in the reference is the same as the bitsize of |
| the outer type, then the outer type gives the signedness. Otherwise |
| (in case of a small bitfield) the signedness is unchanged. */ |
| if (outer_type && *pbitsize == tree_low_cst (TYPE_SIZE (outer_type), 1)) |
| *punsignedp = TREE_UNSIGNED (outer_type); |
| |
| /* Compute the mask to access the bitfield. */ |
| unsigned_type = (*lang_hooks.types.type_for_size) (*pbitsize, 1); |
| precision = TYPE_PRECISION (unsigned_type); |
| |
| mask = build_int_2 (~0, ~0); |
| TREE_TYPE (mask) = unsigned_type; |
| force_fit_type (mask, 0); |
| mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0); |
| mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0); |
| |
| /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */ |
| if (and_mask != 0) |
| mask = fold (build (BIT_AND_EXPR, unsigned_type, |
<