| /* Fold a constant sub-tree into a single node for C-compiler |
| Copyright (C) 1987, 88, 92-96, 1997 Free Software Foundation, Inc. |
| |
| This file is part of GNU CC. |
| |
| GNU CC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 2, or (at your option) |
| any later version. |
| |
| GNU CC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GNU CC; see the file COPYING. If not, write to |
| the Free Software Foundation, 59 Temple Place - Suite 330, |
| Boston, MA 02111-1307, USA. */ |
| |
| /*@@ This file should be rewritten to use an arbitrary precision |
| @@ representation for "struct tree_int_cst" and "struct tree_real_cst". |
| @@ Perhaps the routines could also be used for bc/dc, and made a lib. |
| @@ The routines that translate from the ap rep should |
| @@ warn if precision et al. is lost. |
| @@ This would also make life easier when this technology is used |
| @@ for cross-compilers. */ |
| |
| |
| /* The entry points in this file are fold, size_int, size_binop |
| and force_fit_type. |
| |
| fold takes a tree as argument and returns a simplified tree. |
| |
| size_binop takes a tree code for an arithmetic operation |
| and two operands that are trees, and produces a tree for the |
| result, assuming the type comes from `sizetype'. |
| |
| size_int takes an integer value, and creates a tree constant |
| with type from `sizetype'. |
| |
| force_fit_type takes a constant and prior overflow indicator, and |
| forces the value to fit the type. It returns an overflow indicator. */ |
| |
| #include <stdio.h> |
| #include <setjmp.h> |
| #include "config.h" |
| #include "flags.h" |
| #include "tree.h" |
| |
| /* Handle floating overflow for `const_binop'. */ |
| static jmp_buf float_error; |
| |
| static void encode PROTO((HOST_WIDE_INT *, |
| HOST_WIDE_INT, HOST_WIDE_INT)); |
| static void decode PROTO((HOST_WIDE_INT *, |
| HOST_WIDE_INT *, HOST_WIDE_INT *)); |
| int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT, |
| HOST_WIDE_INT, HOST_WIDE_INT, |
| HOST_WIDE_INT, HOST_WIDE_INT *, |
| HOST_WIDE_INT *, HOST_WIDE_INT *, |
| HOST_WIDE_INT *)); |
| static int split_tree PROTO((tree, enum tree_code, tree *, |
| tree *, int *)); |
| static tree const_binop PROTO((enum tree_code, tree, tree, int)); |
| static tree fold_convert PROTO((tree, tree)); |
| static enum tree_code invert_tree_comparison PROTO((enum tree_code)); |
| static enum tree_code swap_tree_comparison PROTO((enum tree_code)); |
| static int truth_value_p PROTO((enum tree_code)); |
| static int operand_equal_for_comparison_p PROTO((tree, tree, tree)); |
| static int twoval_comparison_p PROTO((tree, tree *, tree *, int *)); |
| static tree eval_subst PROTO((tree, tree, tree, tree, tree)); |
| static tree omit_one_operand PROTO((tree, tree, tree)); |
| static tree pedantic_omit_one_operand PROTO((tree, tree, tree)); |
| static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree)); |
| static tree make_bit_field_ref PROTO((tree, tree, int, int, int)); |
| static tree optimize_bit_field_compare PROTO((enum tree_code, tree, |
| tree, tree)); |
| static tree decode_field_reference PROTO((tree, int *, int *, |
| enum machine_mode *, int *, |
| int *, tree *, tree *)); |
| static int all_ones_mask_p PROTO((tree, int)); |
| static int simple_operand_p PROTO((tree)); |
| static tree range_binop PROTO((enum tree_code, tree, tree, int, |
| tree, int)); |
| static tree make_range PROTO((tree, int *, tree *, tree *)); |
| static tree build_range_check PROTO((tree, tree, int, tree, tree)); |
| static int merge_ranges PROTO((int *, tree *, tree *, int, tree, tree, |
| int, tree, tree)); |
| static tree fold_range_test PROTO((tree)); |
| static tree unextend PROTO((tree, int, int, tree)); |
| static tree fold_truthop PROTO((enum tree_code, tree, tree, tree)); |
| static tree strip_compound_expr PROTO((tree, tree)); |
| |
| #ifndef BRANCH_COST |
| #define BRANCH_COST 1 |
| #endif |
| |
| /* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow. |
| Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1. |
| Then this yields nonzero if overflow occurred during the addition. |
| Overflow occurs if A and B have the same sign, but A and SUM differ in sign. |
| Use `^' to test whether signs differ, and `< 0' to isolate the sign. */ |
| #define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0) |
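| |
| /* For example, with 8-bit signed words: A = 0x70, B = 0x70, SUM = 0xE0. |
| A ^ B is 0, so ~(A ^ B) has the sign bit set; A ^ SUM is 0x90, which |
| also has the sign bit set, so the AND is negative and the macro |
| reports that adding two positive values wrapped to a negative one. */ |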
| |
| /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic. |
| We do that by representing the two-word integer in 4 words, with only |
| HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number. */ |
| |
| #define LOWPART(x) \ |
| ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1)) |
| #define HIGHPART(x) \ |
| ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2) |
| #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2) |
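| |
| /* For example, on a host where HOST_BITS_PER_WIDE_INT is 32, BASE is |
| 0x10000, LOWPART (0x89ABCDEF) is 0xCDEF and HIGHPART (0x89ABCDEF) |
| is 0x89AB. */ |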
| |
| /* Unpack a two-word integer into 4 words. |
| LOW and HI are the integer, as two `HOST_WIDE_INT' pieces. |
| WORDS points to the array of HOST_WIDE_INTs. */ |
| |
| static void |
| encode (words, low, hi) |
| HOST_WIDE_INT *words; |
| HOST_WIDE_INT low, hi; |
| { |
| words[0] = LOWPART (low); |
| words[1] = HIGHPART (low); |
| words[2] = LOWPART (hi); |
| words[3] = HIGHPART (hi); |
| } |
| |
| /* Pack an array of 4 words into a two-word integer. |
| WORDS points to the array of words. |
| The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */ |
| |
| static void |
| decode (words, low, hi) |
| HOST_WIDE_INT *words; |
| HOST_WIDE_INT *low, *hi; |
| { |
| *low = words[0] | words[1] * BASE; |
| *hi = words[2] | words[3] * BASE; |
| } |
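| |
| /* Illustration (again assuming HOST_BITS_PER_WIDE_INT is 32): encoding |
| low = 0x89ABCDEF, hi = 0x01234567 yields words[0] = 0xCDEF, |
| words[1] = 0x89AB, words[2] = 0x4567, words[3] = 0x0123, and decode |
| reassembles the original pair, since 0xCDEF | 0x89AB * BASE |
| is 0x89ABCDEF again. */ |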
| |
| /* Make the integer constant T valid for its type |
| by setting to 0 or 1 all the bits in the constant |
| that don't belong in the type. |
| Yield 1 if a signed overflow occurs, 0 otherwise. |
| If OVERFLOW is nonzero, a signed overflow has already occurred |
| in calculating T, so propagate it. |
| |
| Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE, |
| if it exists. */ |
| |
| int |
| force_fit_type (t, overflow) |
| tree t; |
| int overflow; |
| { |
| HOST_WIDE_INT low, high; |
| register int prec; |
| |
| if (TREE_CODE (t) == REAL_CST) |
| { |
| #ifdef CHECK_FLOAT_VALUE |
| CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t), |
| overflow); |
| #endif |
| return overflow; |
| } |
| |
| else if (TREE_CODE (t) != INTEGER_CST) |
| return overflow; |
| |
| low = TREE_INT_CST_LOW (t); |
| high = TREE_INT_CST_HIGH (t); |
| |
| if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE) |
| prec = POINTER_SIZE; |
| else |
| prec = TYPE_PRECISION (TREE_TYPE (t)); |
| |
| /* First clear all bits that are beyond the type's precision. */ |
| |
| if (prec == 2 * HOST_BITS_PER_WIDE_INT) |
| ; |
| else if (prec > HOST_BITS_PER_WIDE_INT) |
| { |
| TREE_INT_CST_HIGH (t) |
| &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); |
| } |
| else |
| { |
| TREE_INT_CST_HIGH (t) = 0; |
| if (prec < HOST_BITS_PER_WIDE_INT) |
| TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec); |
| } |
| |
| /* Unsigned types do not suffer sign extension or overflow. */ |
| if (TREE_UNSIGNED (TREE_TYPE (t))) |
| return overflow; |
| |
| /* If the value's sign bit is set, extend the sign. */ |
| if (prec != 2 * HOST_BITS_PER_WIDE_INT |
| && (prec > HOST_BITS_PER_WIDE_INT |
| ? (TREE_INT_CST_HIGH (t) |
| & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1))) |
| : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1)))) |
| { |
| /* Value is negative: |
| set to 1 all the bits that are outside this type's precision. */ |
| if (prec > HOST_BITS_PER_WIDE_INT) |
| { |
| TREE_INT_CST_HIGH (t) |
| |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); |
| } |
| else |
| { |
| TREE_INT_CST_HIGH (t) = -1; |
| if (prec < HOST_BITS_PER_WIDE_INT) |
| TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec); |
| } |
| } |
| |
| /* Yield nonzero if signed overflow occurred. */ |
| return |
| ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t))) |
| != 0); |
| } |
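| |
| /* For example, forcing the value 0x1FF to fit a signed 8-bit type first |
| clears the bits above bit 7, leaving 0xFF; since bit 7 (the sign bit) |
| is now set, sign extension turns the constant into -1, and the |
| function returns nonzero because the stored value no longer matches |
| the original 0x1FF. */ |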
| |
| /* Add two doubleword integers with doubleword result. |
| Each argument is given as two `HOST_WIDE_INT' pieces. |
| One argument is L1 and H1; the other, L2 and H2. |
| The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| int |
| add_double (l1, h1, l2, h2, lv, hv) |
| HOST_WIDE_INT l1, h1, l2, h2; |
| HOST_WIDE_INT *lv, *hv; |
| { |
| HOST_WIDE_INT l, h; |
| |
| l = l1 + l2; |
| h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1); |
| |
| *lv = l; |
| *hv = h; |
| return overflow_sum_sign (h1, h2, h); |
| } |
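| |
| /* For example, adding 1 (L2 == 1, H2 == 0) to the most positive |
| doubleword (L1 all ones, H1 the most positive HOST_WIDE_INT) carries |
| into the high word and wraps to the most negative doubleword; H1 and |
| H2 are both non-negative while the resulting high word is negative, |
| so overflow_sum_sign reports the overflow. */ |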
| |
| /* Negate a doubleword integer with doubleword result. |
| Return nonzero if the operation overflows, assuming it's signed. |
| The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1. |
| The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| int |
| neg_double (l1, h1, lv, hv) |
| HOST_WIDE_INT l1, h1; |
| HOST_WIDE_INT *lv, *hv; |
| { |
| if (l1 == 0) |
| { |
| *lv = 0; |
| *hv = - h1; |
| return (*hv & h1) < 0; |
| } |
| else |
| { |
| *lv = - l1; |
| *hv = ~ h1; |
| return 0; |
| } |
| } |
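| |
| /* The L1 == 0 case above is what makes the borrow work out: negation is |
| complement-and-add-one, and adding 1 to ~L1 carries into the high word |
| exactly when L1 is zero, giving (0, -H1); otherwise the low word |
| absorbs the increment, giving (-L1, ~H1). The only signed overflow is |
| negating the most negative value, detected when -H1 and H1 are both |
| negative. */ |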
| |
| /* Multiply two doubleword integers with doubleword result. |
| Return nonzero if the operation overflows, assuming it's signed. |
| Each argument is given as two `HOST_WIDE_INT' pieces. |
| One argument is L1 and H1; the other, L2 and H2. |
| The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| int |
| mul_double (l1, h1, l2, h2, lv, hv) |
| HOST_WIDE_INT l1, h1, l2, h2; |
| HOST_WIDE_INT *lv, *hv; |
| { |
| HOST_WIDE_INT arg1[4]; |
| HOST_WIDE_INT arg2[4]; |
| HOST_WIDE_INT prod[4 * 2]; |
| register unsigned HOST_WIDE_INT carry; |
| register int i, j, k; |
| HOST_WIDE_INT toplow, tophigh, neglow, neghigh; |
| |
| encode (arg1, l1, h1); |
| encode (arg2, l2, h2); |
| |
| bzero ((char *) prod, sizeof prod); |
| |
| for (i = 0; i < 4; i++) |
| { |
| carry = 0; |
| for (j = 0; j < 4; j++) |
| { |
| k = i + j; |
| /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */ |
| carry += arg1[i] * arg2[j]; |
| /* Since prod[k] < 0xFFFF, this sum <= 0xFFFFFFFF. */ |
| carry += prod[k]; |
| prod[k] = LOWPART (carry); |
| carry = HIGHPART (carry); |
| } |
| prod[i + 4] = carry; |
| } |
| |
| decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */ |
| |
| /* Check for overflow by calculating the top half of the answer in full; |
| it should agree with the low half's sign bit. */ |
| decode (prod+4, &toplow, &tophigh); |
| if (h1 < 0) |
| { |
| neg_double (l2, h2, &neglow, &neghigh); |
| add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh); |
| } |
| if (h2 < 0) |
| { |
| neg_double (l1, h1, &neglow, &neghigh); |
| add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh); |
| } |
| return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0; |
| } |
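| |
| /* The overflow test above works because the partial products computed |
| the full 4-word product treating the operands as unsigned; after |
| correcting the top half for each negative operand, a signed result |
| fits in two words iff the top two words are pure sign extension: |
| all zero bits for a non-negative result, all one bits for a negative |
| one. */ |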
| |
| /* Shift the doubleword integer in L1, H1 left by COUNT places |
| keeping only PREC bits of result. |
| Shift right if COUNT is negative. |
| ARITH nonzero specifies arithmetic shifting; otherwise use logical shift. |
| Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| void |
| lshift_double (l1, h1, count, prec, lv, hv, arith) |
| HOST_WIDE_INT l1, h1, count; |
| int prec; |
| HOST_WIDE_INT *lv, *hv; |
| int arith; |
| { |
| if (count < 0) |
| { |
| rshift_double (l1, h1, - count, prec, lv, hv, arith); |
| return; |
| } |
| |
| #ifdef SHIFT_COUNT_TRUNCATED |
| if (SHIFT_COUNT_TRUNCATED) |
| count %= prec; |
| #endif |
| |
| if (count >= HOST_BITS_PER_WIDE_INT) |
| { |
| *hv = (unsigned HOST_WIDE_INT) l1 << (count - HOST_BITS_PER_WIDE_INT); |
| *lv = 0; |
| } |
| else |
| { |
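| /* A single shift of L1 right by HOST_BITS_PER_WIDE_INT - COUNT would |
| be undefined C when COUNT is zero, since a shift count must be less |
| than the word width; shifting by HOST_BITS_PER_WIDE_INT - COUNT - 1 |
| and then by 1 keeps every count in range. rshift_double below uses |
| the same idiom. */ |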
| *hv = (((unsigned HOST_WIDE_INT) h1 << count) |
| | ((unsigned HOST_WIDE_INT) l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1)); |
| *lv = (unsigned HOST_WIDE_INT) l1 << count; |
| } |
| } |
| |
| /* Shift the doubleword integer in L1, H1 right by COUNT places |
| keeping only PREC bits of result. COUNT must be positive. |
| ARITH nonzero specifies arithmetic shifting; otherwise use logical shift. |
| Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| void |
| rshift_double (l1, h1, count, prec, lv, hv, arith) |
| HOST_WIDE_INT l1, h1, count; |
| int prec; |
| HOST_WIDE_INT *lv, *hv; |
| int arith; |
| { |
| unsigned HOST_WIDE_INT signmask; |
| signmask = (arith |
| ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1)) |
| : 0); |
| |
| #ifdef SHIFT_COUNT_TRUNCATED |
| if (SHIFT_COUNT_TRUNCATED) |
| count %= prec; |
| #endif |
| |
| if (count >= HOST_BITS_PER_WIDE_INT) |
| { |
| *hv = signmask; |
| *lv = ((signmask << (2 * HOST_BITS_PER_WIDE_INT - count - 1) << 1) |
| | ((unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT))); |
| } |
| else |
| { |
| *lv = (((unsigned HOST_WIDE_INT) l1 >> count) |
| | ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1)); |
| *hv = ((signmask << (HOST_BITS_PER_WIDE_INT - count - 1) << 1) |
| | ((unsigned HOST_WIDE_INT) h1 >> count)); |
| } |
| } |
| |
| /* Rotate the doubleword integer in L1, H1 left by COUNT places |
| keeping only PREC bits of result. |
| Rotate right if COUNT is negative. |
| Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| void |
| lrotate_double (l1, h1, count, prec, lv, hv) |
| HOST_WIDE_INT l1, h1, count; |
| int prec; |
| HOST_WIDE_INT *lv, *hv; |
| { |
| HOST_WIDE_INT s1l, s1h, s2l, s2h; |
| |
| count %= prec; |
| if (count < 0) |
| count += prec; |
| |
| lshift_double (l1, h1, count, prec, &s1l, &s1h, 0); |
| rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0); |
| *lv = s1l | s2l; |
| *hv = s1h | s2h; |
| } |
| |
| /* Rotate the doubleword integer in L1, H1 right by COUNT places |
| keeping only PREC bits of result. Rotate left if COUNT is negative. |
| Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ |
| |
| void |
| rrotate_double (l1, h1, count, prec, lv, hv) |
| HOST_WIDE_INT l1, h1, count; |
| int prec; |
| HOST_WIDE_INT *lv, *hv; |
| { |
| HOST_WIDE_INT s1l, s1h, s2l, s2h; |
| |
| count %= prec; |
| if (count < 0) |
| count += prec; |
| |
| rshift_double (l1, h1, count, prec, &s1l, &s1h, 0); |
| lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0); |
| *lv = s1l | s2l; |
| *hv = s1h | s2h; |
| } |
| |
| /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN |
| for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM). |
| CODE is a tree code for a kind of division, one of |
| TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR |
| or EXACT_DIV_EXPR. |
| It controls how the quotient is rounded to an integer. |
| Return nonzero if the operation overflows. |
| UNS nonzero says do unsigned division. */ |
| |
| int |
| div_and_round_double (code, uns, |
| lnum_orig, hnum_orig, lden_orig, hden_orig, |
| lquo, hquo, lrem, hrem) |
| enum tree_code code; |
| int uns; |
| HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */ |
| HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */ |
| HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem; |
| { |
| int quo_neg = 0; |
| HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */ |
| HOST_WIDE_INT den[4], quo[4]; |
| register int i, j; |
| unsigned HOST_WIDE_INT work; |
| register unsigned HOST_WIDE_INT carry = 0; |
| HOST_WIDE_INT lnum = lnum_orig; |
| HOST_WIDE_INT hnum = hnum_orig; |
| HOST_WIDE_INT lden = lden_orig; |
| HOST_WIDE_INT hden = hden_orig; |
| int overflow = 0; |
| |
| if ((hden == 0) && (lden == 0)) |
| abort (); |
| |
| /* calculate quotient sign and convert operands to unsigned. */ |
| if (!uns) |
| { |
| if (hnum < 0) |
| { |
| quo_neg = ~ quo_neg; |
| /* (minimum integer) / (-1) is the only overflow case. */ |
| if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1) |
| overflow = 1; |
| } |
| if (hden < 0) |
| { |
| quo_neg = ~ quo_neg; |
| neg_double (lden, hden, &lden, &hden); |
| } |
| } |
| |
| if (hnum == 0 && hden == 0) |
| { /* single precision */ |
| *hquo = *hrem = 0; |
| /* This unsigned division rounds toward zero. */ |
| *lquo = lnum / (unsigned HOST_WIDE_INT) lden; |
| goto finish_up; |
| } |
| |
| if (hnum == 0) |
| { /* trivial case: dividend < divisor */ |
| /* hden != 0 already checked. */ |
| *hquo = *lquo = 0; |
| *hrem = hnum; |
| *lrem = lnum; |
| goto finish_up; |
| } |
| |
| bzero ((char *) quo, sizeof quo); |
| |
| bzero ((char *) num, sizeof num); /* to zero the fifth (scaling) element */ |
| bzero ((char *) den, sizeof den); |
| |
| encode (num, lnum, hnum); |
| encode (den, lden, hden); |
| |
| /* Special code for when the divisor < BASE. */ |
| if (hden == 0 && lden < BASE) |
| { |
| /* hnum != 0 already checked. */ |
| for (i = 4 - 1; i >= 0; i--) |
| { |
| work = num[i] + carry * BASE; |
| quo[i] = work / (unsigned HOST_WIDE_INT) lden; |
| carry = work % (unsigned HOST_WIDE_INT) lden; |
| } |
| } |
| else |
| { |
| /* Full double precision division, |
| with thanks to Don Knuth's "Seminumerical Algorithms". */ |
| int num_hi_sig, den_hi_sig; |
| unsigned HOST_WIDE_INT quo_est, scale; |
| |
| /* Find the highest non-zero divisor digit. */ |
| for (i = 4 - 1; ; i--) |
| if (den[i] != 0) { |
| den_hi_sig = i; |
| break; |
| } |
| |
| /* Ensure that the first digit of the divisor is at least BASE/2. |
| This is required by the quotient digit estimation algorithm. */ |
| |
| scale = BASE / (den[den_hi_sig] + 1); |
| if (scale > 1) { /* scale divisor and dividend */ |
| carry = 0; |
| for (i = 0; i <= 4 - 1; i++) { |
| work = (num[i] * scale) + carry; |
| num[i] = LOWPART (work); |
| carry = HIGHPART (work); |
| } num[4] = carry; |
| carry = 0; |
| for (i = 0; i <= 4 - 1; i++) { |
| work = (den[i] * scale) + carry; |
| den[i] = LOWPART (work); |
| carry = HIGHPART (work); |
| if (den[i] != 0) den_hi_sig = i; |
| } |
| } |
| |
| num_hi_sig = 4; |
| |
| /* Main loop */ |
| for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) { |
| /* guess the next quotient digit, quo_est, by dividing the first |
| two remaining dividend digits by the high order divisor digit. |
| quo_est is never low and is at most 2 high. */ |
| unsigned HOST_WIDE_INT tmp; |
| |
| num_hi_sig = i + den_hi_sig + 1; |
| work = num[num_hi_sig] * BASE + num[num_hi_sig - 1]; |
| if (num[num_hi_sig] != den[den_hi_sig]) |
| quo_est = work / den[den_hi_sig]; |
| else |
| quo_est = BASE - 1; |
| |
| /* refine quo_est so it's usually correct, and at most one high. */ |
| tmp = work - quo_est * den[den_hi_sig]; |
| if (tmp < BASE |
| && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2])) |
| quo_est--; |
| |
| /* Try QUO_EST as the quotient digit, by multiplying the |
| divisor by QUO_EST and subtracting from the remaining dividend. |
| Keep in mind that QUO_EST is the I - 1st digit. */ |
| |
| carry = 0; |
| for (j = 0; j <= den_hi_sig; j++) |
| { |
| work = quo_est * den[j] + carry; |
| carry = HIGHPART (work); |
| work = num[i + j] - LOWPART (work); |
| num[i + j] = LOWPART (work); |
| carry += HIGHPART (work) != 0; |
| } |
| |
| /* if quo_est was high by one, then num[i] went negative and |
| we need to correct things. */ |
| |
| if (num[num_hi_sig] < carry) |
| { |
| quo_est--; |
| carry = 0; /* add divisor back in */ |
| for (j = 0; j <= den_hi_sig; j++) |
| { |
| work = num[i + j] + den[j] + carry; |
| carry = HIGHPART (work); |
| num[i + j] = LOWPART (work); |
| } |
| num [num_hi_sig] += carry; |
| } |
| |
| /* store the quotient digit. */ |
| quo[i] = quo_est; |
| } |
| } |
| |
| decode (quo, lquo, hquo); |
| |
| finish_up: |
| /* if result is negative, make it so. */ |
| if (quo_neg) |
| neg_double (*lquo, *hquo, lquo, hquo); |
| |
| /* compute trial remainder: rem = num - (quo * den) */ |
| mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem); |
| neg_double (*lrem, *hrem, lrem, hrem); |
| add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem); |
| |
| switch (code) |
| { |
| case TRUNC_DIV_EXPR: |
| case TRUNC_MOD_EXPR: /* round toward zero */ |
| case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */ |
| return overflow; |
| |
| case FLOOR_DIV_EXPR: |
| case FLOOR_MOD_EXPR: /* round toward negative infinity */ |
| if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */ |
| { |
| /* quo = quo - 1; */ |
| add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, |
| lquo, hquo); |
| } |
| else return overflow; |
| break; |
| |
| case CEIL_DIV_EXPR: |
| case CEIL_MOD_EXPR: /* round toward positive infinity */ |
| if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */ |
| { |
| add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0, |
| lquo, hquo); |
| } |
| else return overflow; |
| break; |
| |
| case ROUND_DIV_EXPR: |
| case ROUND_MOD_EXPR: /* round to closest integer */ |
| { |
| HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem; |
| HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice; |
| |
| /* get absolute values */ |
| if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem); |
| if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den); |
| |
| /* if (2 * abs (lrem) >= abs (lden)) */ |
| mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0, |
| labs_rem, habs_rem, <wice, &htwice); |
| if (((unsigned HOST_WIDE_INT) habs_den |
| < (unsigned HOST_WIDE_INT) htwice) |
| || (((unsigned HOST_WIDE_INT) habs_den |
| == (unsigned HOST_WIDE_INT) htwice) |
| && ((unsigned HOST_WIDE_INT) labs_den |
| < (unsigned HOST_WIDE_INT) ltwice))) |
| { |
| if (*hquo < 0) |
| /* quo = quo - 1; */ |
| add_double (*lquo, *hquo, |
| (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo); |
| else |
| /* quo = quo + 1; */ |
| add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0, |
| lquo, hquo); |
| } |
| else return overflow; |
| } |
| break; |
| |
| default: |
| abort (); |
| } |
| |
| /* compute true remainder: rem = num - (quo * den) */ |
| mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem); |
| neg_double (*lrem, *hrem, lrem, hrem); |
| add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem); |
| return overflow; |
| } |
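| |
| /* A usage sketch (illustrative only). Dividing -7 by 2 as signed |
| doublewords with floor rounding: |
| |
| HOST_WIDE_INT lq, hq, lr, hr; |
| div_and_round_double (FLOOR_DIV_EXPR, 0, |
| (HOST_WIDE_INT) -7, (HOST_WIDE_INT) -1, |
| (HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0, |
| &lq, &hq, &lr, &hr); |
| |
| yields quotient -4 and remainder 1. TRUNC_DIV_EXPR would instead give |
| quotient -3 and remainder -1, CEIL_DIV_EXPR gives -3, and |
| ROUND_DIV_EXPR rounds the exact -3.5 away from zero to -4. */ |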
| |
| #ifndef REAL_ARITHMETIC |
| /* Effectively truncate a real value to represent the nearest possible value |
| in a narrower mode. The result is actually represented in the same data |
| type as the argument, but its value is usually different. |
| |
| A trap may occur during the FP operations and it is the responsibility |
| of the calling function to have a handler established. */ |
| |
| REAL_VALUE_TYPE |
| real_value_truncate (mode, arg) |
| enum machine_mode mode; |
| REAL_VALUE_TYPE arg; |
| { |
| return REAL_VALUE_TRUNCATE (mode, arg); |
| } |
| |
| #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT |
| |
| /* Check for infinity in an IEEE double precision number. */ |
| |
| int |
| target_isinf (x) |
| REAL_VALUE_TYPE x; |
| { |
| /* The IEEE 64-bit double format. */ |
| union { |
| REAL_VALUE_TYPE d; |
| struct { |
| unsigned sign : 1; |
| unsigned exponent : 11; |
| unsigned mantissa1 : 20; |
| unsigned mantissa2; |
| } little_endian; |
| struct { |
| unsigned mantissa2; |
| unsigned mantissa1 : 20; |
| unsigned exponent : 11; |
| unsigned sign : 1; |
| } big_endian; |
| } u; |
| |
| u.d = dconstm1; |
| if (u.big_endian.sign == 1) |
| { |
| u.d = x; |
| return (u.big_endian.exponent == 2047 |
| && u.big_endian.mantissa1 == 0 |
| && u.big_endian.mantissa2 == 0); |
| } |
| else |
| { |
| u.d = x; |
| return (u.little_endian.exponent == 2047 |
| && u.little_endian.mantissa1 == 0 |
| && u.little_endian.mantissa2 == 0); |
| } |
| } |
| |
| /* Check whether an IEEE double precision number is a NaN. */ |
| |
| int |
| target_isnan (x) |
| REAL_VALUE_TYPE x; |
| { |
| /* The IEEE 64-bit double format. */ |
| union { |
| REAL_VALUE_TYPE d; |
| struct { |
| unsigned sign : 1; |
| unsigned exponent : 11; |
| unsigned mantissa1 : 20; |
| unsigned mantissa2; |
| } little_endian; |
| struct { |
| unsigned mantissa2; |
| unsigned mantissa1 : 20; |
| unsigned exponent : 11; |
| unsigned sign : 1; |
| } big_endian; |
| } u; |
| |
| u.d = dconstm1; |
| if (u.big_endian.sign == 1) |
| { |
| u.d = x; |
| return (u.big_endian.exponent == 2047 |
| && (u.big_endian.mantissa1 != 0 |
| || u.big_endian.mantissa2 != 0)); |
| } |
| else |
| { |
| u.d = x; |
| return (u.little_endian.exponent == 2047 |
| && (u.little_endian.mantissa1 != 0 |
| || u.little_endian.mantissa2 != 0)); |
| } |
| } |
| |
| /* Check for a negative IEEE double precision number. */ |
| |
| int |
| target_negative (x) |
| REAL_VALUE_TYPE x; |
| { |
| /* The IEEE 64-bit double format. */ |
| union { |
| REAL_VALUE_TYPE d; |
| struct { |
| unsigned sign : 1; |
| unsigned exponent : 11; |
| unsigned mantissa1 : 20; |
| unsigned mantissa2; |
| } little_endian; |
| struct { |
| unsigned mantissa2; |
| unsigned mantissa1 : 20; |
| unsigned exponent : 11; |
| unsigned sign : 1; |
| } big_endian; |
| } u; |
| |
| u.d = dconstm1; |
| if (u.big_endian.sign == 1) |
| { |
| u.d = x; |
| return u.big_endian.sign; |
| } |
| else |
| { |
| u.d = x; |
| return u.little_endian.sign; |
| } |
| } |
| #else /* Target not IEEE */ |
| |
| /* Let's assume other float formats don't have infinity. |
| (This can be overridden by redefining REAL_VALUE_ISINF.) */ |
| |
| int |
| target_isinf (x) |
| REAL_VALUE_TYPE x; |
| { |
| return 0; |
| } |
| |
| /* Let's assume other float formats don't have NaNs. |
| (This can be overridden by redefining REAL_VALUE_ISNAN.) */ |
| |
| int |
| target_isnan (x) |
| REAL_VALUE_TYPE x; |
| { |
| return 0; |
| } |
| |
| /* Let's assume other float formats don't have minus zero. |
| (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */ |
| |
| int |
| target_negative (x) |
| REAL_VALUE_TYPE x; |
| { |
| return x < 0; |
| } |
| #endif /* Target not IEEE */ |
| |
| /* Try to change R into its exact multiplicative inverse in machine mode |
| MODE. Return nonzero function value if successful. */ |
| |
| int |
| exact_real_inverse (mode, r) |
| enum machine_mode mode; |
| REAL_VALUE_TYPE *r; |
| { |
| union |
| { |
| double d; |
| unsigned short i[4]; |
| }x, t, y; |
| int i; |
| |
| /* Usually disable if bounds checks are not reliable. */ |
| if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT) && !flag_pretend_float) |
| return 0; |
| |
| /* Set array index to the less significant bits in the unions, depending |
| on the endian-ness of the host doubles. |
| Disable if insufficient information on the data structure. */ |
| #if HOST_FLOAT_FORMAT == UNKNOWN_FLOAT_FORMAT |
| return 0; |
| #else |
| #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT |
| #define K 2 |
| #else |
| #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT |
| #define K 2 |
| #else |
| #define K (2 * HOST_FLOAT_WORDS_BIG_ENDIAN) |
| #endif |
| #endif |
| #endif |
| |
| if (setjmp (float_error)) |
| { |
| /* Don't do the optimization if there was an arithmetic error. */ |
| fail: |
| set_float_handler (NULL_PTR); |
| return 0; |
| } |
| set_float_handler (float_error); |
| |
| /* Domain check the argument. */ |
| x.d = *r; |
| if (x.d == 0.0) |
| goto fail; |
| |
| #ifdef REAL_INFINITY |
| if (REAL_VALUE_ISINF (x.d) || REAL_VALUE_ISNAN (x.d)) |
| goto fail; |
| #endif |
| |
| /* Compute the reciprocal and check for numerical exactness. |
| It is unnecessary to check all the significand bits to determine |
| whether X is a power of 2. If X is not, then it is impossible for |
| the bottom half significand of both X and 1/X to be all zero bits. |
| Hence we ignore the data structure of the top half and examine only |
| the low order bits of the two significands. */ |
| t.d = 1.0 / x.d; |
| if (x.i[K] != 0 || x.i[K + 1] != 0 || t.i[K] != 0 || t.i[K + 1] != 0) |
| goto fail; |
| |
| /* Truncate to the required mode and range-check the result. */ |
| y.d = REAL_VALUE_TRUNCATE (mode, t.d); |
| #ifdef CHECK_FLOAT_VALUE |
| i = 0; |
| if (CHECK_FLOAT_VALUE (mode, y.d, i)) |
| goto fail; |
| #endif |
| |
| /* Fail if truncation changed the value. */ |
| if (y.d != t.d || y.d == 0.0) |
| goto fail; |
| |
| #ifdef REAL_INFINITY |
| if (REAL_VALUE_ISINF (y.d) || REAL_VALUE_ISNAN (y.d)) |
| goto fail; |
| #endif |
| |
| /* Output the reciprocal and return success flag. */ |
| set_float_handler (NULL_PTR); |
| *r = y.d; |
| return 1; |
| } |
| #endif /* no REAL_ARITHMETIC */ |
| |
| /* Split a tree IN into a constant and a variable part |
| that could be combined with CODE to make IN. |
| CODE must be a commutative arithmetic operation. |
| Store the constant part into *CONP and the variable in *VARP. |
| Return 1 if this was done; zero means the tree IN did not decompose |
| this way. |
| |
| If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR. |
| Therefore, we must tell the caller whether the variable part |
| was subtracted. We do this by storing 1 or -1 into *VARSIGNP. |
| The value stored is the coefficient for the variable term. |
| The constant term we return should always be added; |
| we negate it if necessary. */ |
| |
| static int |
| split_tree (in, code, varp, conp, varsignp) |
| tree in; |
| enum tree_code code; |
| tree *varp, *conp; |
| int *varsignp; |
| { |
| register tree outtype = TREE_TYPE (in); |
| *varp = 0; |
| *conp = 0; |
| |
| /* Strip any conversions that don't change the machine mode. */ |
| while ((TREE_CODE (in) == NOP_EXPR |
| || TREE_CODE (in) == CONVERT_EXPR) |
| && (TYPE_MODE (TREE_TYPE (in)) |
| == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0))))) |
| in = TREE_OPERAND (in, 0); |
| |
| if (TREE_CODE (in) == code |
| || (! FLOAT_TYPE_P (TREE_TYPE (in)) |
| /* We can associate addition and subtraction together |
| (even though the C standard doesn't say so) |
| for integers because the value is not affected. |
| For reals, the value might be affected, so we can't. */ |
| && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR) |
| || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR)))) |
| { |
| enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0)); |
| if (code == INTEGER_CST) |
| { |
| *conp = TREE_OPERAND (in, 0); |
| *varp = TREE_OPERAND (in, 1); |
| if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype) |
| && TREE_TYPE (*varp) != outtype) |
| *varp = convert (outtype, *varp); |
| *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1; |
| return 1; |
| } |
| if (TREE_CONSTANT (TREE_OPERAND (in, 1))) |
| { |
| *conp = TREE_OPERAND (in, 1); |
| *varp = TREE_OPERAND (in, 0); |
| *varsignp = 1; |
| if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype) |
| && TREE_TYPE (*varp) != outtype) |
| *varp = convert (outtype, *varp); |
| if (TREE_CODE (in) == MINUS_EXPR) |
| { |
| /* If operation is subtraction and constant is second, |
| must negate it to get an additive constant. |
| And this cannot be done unless it is a manifest constant. |
| It could also be the address of a static variable. |
| We cannot negate that, so give up. */ |
| if (TREE_CODE (*conp) == INTEGER_CST) |
| /* Subtracting from integer_zero_node loses for long long. */ |
| *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp)); |
| else |
| return 0; |
| } |
| return 1; |
| } |
| if (TREE_CONSTANT (TREE_OPERAND (in, 0))) |
| { |
| *conp = TREE_OPERAND (in, 0); |
| *varp = TREE_OPERAND (in, 1); |
| if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype) |
| && TREE_TYPE (*varp) != outtype) |
| *varp = convert (outtype, *varp); |
| *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1; |
| return 1; |
| } |
| } |
| return 0; |
| } |
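| |
| /* For example, given IN == `i - 4' and CODE == PLUS_EXPR, split_tree |
| stores the variable `i' in *VARP with *VARSIGNP == 1 and the negated |
| constant -4 in *CONP, so the caller can rebuild the value as |
| `i + (-4)'. */ |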
| |
| /* Combine two constants ARG1 and ARG2 under operation CODE |
| to produce a new constant. |
| We assume ARG1 and ARG2 have the same data type, |
| or at least are the same kind of constant and the same machine mode. |
| |
| If NOTRUNC is nonzero, do not truncate the result to fit the data type. */ |
| |
| static tree |
| const_binop (code, arg1, arg2, notrunc) |
| enum tree_code code; |
| register tree arg1, arg2; |
| int notrunc; |
| { |
| STRIP_NOPS (arg1); STRIP_NOPS (arg2); |
| |
| if (TREE_CODE (arg1) == INTEGER_CST) |
| { |
| register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1); |
| register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1); |
| HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2); |
| HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2); |
| HOST_WIDE_INT low, hi; |
| HOST_WIDE_INT garbagel, garbageh; |
| register tree t; |
| int uns = TREE_UNSIGNED (TREE_TYPE (arg1)); |
| int overflow = 0; |
| int no_overflow = 0; |
| |
| switch (code) |
| { |
| case BIT_IOR_EXPR: |
| low = int1l | int2l, hi = int1h | int2h; |
| break; |
| |
| case BIT_XOR_EXPR: |
| low = int1l ^ int2l, hi = int1h ^ int2h; |
| break; |
| |
| case BIT_AND_EXPR: |
| low = int1l & int2l, hi = int1h & int2h; |
| break; |
| |
| case BIT_ANDTC_EXPR: |
| low = int1l & ~int2l, hi = int1h & ~int2h; |
| break; |
| |
| case RSHIFT_EXPR: |
| int2l = - int2l; |
| case LSHIFT_EXPR: |
| /* It's unclear from the C standard whether shifts can overflow. |
| The following code ignores overflow; perhaps a C standard |
| interpretation ruling is needed. */ |
| lshift_double (int1l, int1h, int2l, |
| TYPE_PRECISION (TREE_TYPE (arg1)), |
| &low, &hi, |
| !uns); |
| no_overflow = 1; |
| break; |
| |
| case RROTATE_EXPR: |
| int2l = - int2l; |
| case LROTATE_EXPR: |
| lrotate_double (int1l, int1h, int2l, |
| TYPE_PRECISION (TREE_TYPE (arg1)), |
| &low, &hi); |
| break; |
| |
| case PLUS_EXPR: |
| overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi); |
| break; |
| |
| case MINUS_EXPR: |
| neg_double (int2l, int2h, &low, &hi); |
| add_double (int1l, int1h, low, hi, &low, &hi); |
| overflow = overflow_sum_sign (hi, int2h, int1h); |
| break; |
| |
| case MULT_EXPR: |
| overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi); |
| break; |
| |
| case TRUNC_DIV_EXPR: |
| case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR: |
| case EXACT_DIV_EXPR: |
| /* This is a shortcut for a common special case. */ |
| if (int2h == 0 && int2l > 0 |
| && ! TREE_CONSTANT_OVERFLOW (arg1) |
| && ! TREE_CONSTANT_OVERFLOW (arg2) |
| && int1h == 0 && int1l >= 0) |
| { |
| if (code == CEIL_DIV_EXPR) |
| int1l += int2l - 1; |
| low = int1l / int2l, hi = 0; |
| break; |
| } |
| |
| /* ... fall through ... */ |
| |
| case ROUND_DIV_EXPR: |
| if (int2h == 0 && int2l == 1) |
| { |
| low = int1l, hi = int1h; |
| break; |
| } |
| if (int1l == int2l && int1h == int2h |
| && ! (int1l == 0 && int1h == 0)) |
| { |
| low = 1, hi = 0; |
| break; |
| } |
| overflow = div_and_round_double (code, uns, |
| int1l, int1h, int2l, int2h, |
| &low, &hi, &garbagel, &garbageh); |
| break; |
| |
| case TRUNC_MOD_EXPR: |
| case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR: |
| /* This is a shortcut for a common special case. */ |
| if (int2h == 0 && int2l > 0 |
| && ! TREE_CONSTANT_OVERFLOW (arg1) |
| && ! TREE_CONSTANT_OVERFLOW (arg2) |
| && int1h == 0 && int1l >= 0) |
| { |
| if (code == CEIL_MOD_EXPR) |
| int1l += int2l - 1; |
| low = int1l % int2l, hi = 0; |
| break; |
| } |
| |
| /* ... fall through ... */ |
| |
| case ROUND_MOD_EXPR: |
| overflow = div_and_round_double (code, uns, |
| int1l, int1h, int2l, int2h, |
| &garbagel, &garbageh, &low, &hi); |
| break; |
| |
| case MIN_EXPR: |
| case MAX_EXPR: |
| if (uns) |
| { |
| low = (((unsigned HOST_WIDE_INT) int1h |
| < (unsigned HOST_WIDE_INT) int2h) |
| || (((unsigned HOST_WIDE_INT) int1h |
| == (unsigned HOST_WIDE_INT) int2h) |
| && ((unsigned HOST_WIDE_INT) int1l |
| < (unsigned HOST_WIDE_INT) int2l))); |
| } |
| else |
| { |
| low = ((int1h < int2h) |
| || ((int1h == int2h) |
| && ((unsigned HOST_WIDE_INT) int1l |
| < (unsigned HOST_WIDE_INT) int2l))); |
| } |
| if (low == (code == MIN_EXPR)) |
| low = int1l, hi = int1h; |
| else |
| low = int2l, hi = int2h; |
| break; |
| |
| default: |
| abort (); |
| } |
| if (TREE_TYPE (arg1) == sizetype && hi == 0 |
| && low >= 0 && low <= TREE_INT_CST_LOW (TYPE_MAX_VALUE (sizetype)) |
| && ! overflow |
| && ! TREE_OVERFLOW (arg1) && ! TREE_OVERFLOW (arg2)) |
| t = size_int (low); |
| else |
| { |
| t = build_int_2 (low, hi); |
| TREE_TYPE (t) = TREE_TYPE (arg1); |
| } |
| |
| TREE_OVERFLOW (t) |
| = ((notrunc ? !uns && overflow |
| : force_fit_type (t, overflow && !uns) && ! no_overflow) |
| | TREE_OVERFLOW (arg1) |
| | TREE_OVERFLOW (arg2)); |
| TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t) |
| | TREE_CONSTANT_OVERFLOW (arg1) |
| | TREE_CONSTANT_OVERFLOW (arg2)); |
| return t; |
| } |
| #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC) |
| if (TREE_CODE (arg1) == REAL_CST) |
| { |
| REAL_VALUE_TYPE d1; |
| REAL_VALUE_TYPE d2; |
| int overflow = 0; |
| REAL_VALUE_TYPE value; |
| tree t; |
| |
| d1 = TREE_REAL_CST (arg1); |
| d2 = TREE_REAL_CST (arg2); |
| |
| /* If either operand is a NaN, just return it. Otherwise, set up |
| for floating-point trap; we return an overflow. */ |
| if (REAL_VALUE_ISNAN (d1)) |
| return arg1; |
| else if (REAL_VALUE_ISNAN (d2)) |
| return arg2; |
| else if (setjmp (float_error)) |
| { |
| t = copy_node (arg1); |
| overflow = 1; |
| goto got_float; |
| } |
| |
| set_float_handler (float_error); |
| |
| #ifdef REAL_ARITHMETIC |
| REAL_ARITHMETIC (value, code, d1, d2); |
| #else |
| switch (code) |
| { |
| case PLUS_EXPR: |
| value = d1 + d2; |
| break; |
| |
| case MINUS_EXPR: |
| value = d1 - d2; |
| break; |
| |
| case MULT_EXPR: |
| value = d1 * d2; |
| break; |
| |
| case RDIV_EXPR: |
| #ifndef REAL_INFINITY |
| if (d2 == 0) |
| abort (); |
| #endif |
| |
| value = d1 / d2; |
| break; |
| |
| case MIN_EXPR: |
| value = MIN (d1, d2); |
| break; |
| |
| case MAX_EXPR: |
| value = MAX (d1, d2); |
| break; |
| |
| default: |
| abort (); |
| } |
| #endif /* no REAL_ARITHMETIC */ |
| t = build_real (TREE_TYPE (arg1), |
| real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value)); |
| got_float: |
| set_float_handler (NULL_PTR); |
| |
| TREE_OVERFLOW (t) |
| = (force_fit_type (t, overflow) |
| | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2)); |
| TREE_CONSTANT_OVERFLOW (t) |
| = TREE_OVERFLOW (t) |
| | TREE_CONSTANT_OVERFLOW (arg1) |
| | TREE_CONSTANT_OVERFLOW (arg2); |
| return t; |
| } |
| #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */ |
| if (TREE_CODE (arg1) == COMPLEX_CST) |
| { |
| register tree type = TREE_TYPE (arg1); |
| register tree r1 = TREE_REALPART (arg1); |
| register tree i1 = TREE_IMAGPART (arg1); |
| register tree r2 = TREE_REALPART (arg2); |
| register tree i2 = TREE_IMAGPART (arg2); |
| register tree t; |
| |
| switch (code) |
| { |
| case PLUS_EXPR: |
| t = build_complex (type, |
| const_binop (PLUS_EXPR, r1, r2, notrunc), |
| const_binop (PLUS_EXPR, i1, i2, notrunc)); |
| break; |
| |
| case MINUS_EXPR: |
| t = build_complex (type, |
| const_binop (MINUS_EXPR, r1, r2, notrunc), |
| const_binop (MINUS_EXPR, i1, i2, notrunc)); |
| break; |
| |
| case MULT_EXPR: |
| t = build_complex (type, |
| const_binop (MINUS_EXPR, |
| const_binop (MULT_EXPR, |
| r1, r2, notrunc), |
| const_binop (MULT_EXPR, |
| i1, i2, notrunc), |
| notrunc), |
| const_binop (PLUS_EXPR, |
| const_binop (MULT_EXPR, |
| r1, i2, notrunc), |
| const_binop (MULT_EXPR, |
| i1, r2, notrunc), |
| notrunc)); |
| break; |
| |
| case RDIV_EXPR: |
| { |
| register tree magsquared |
| = const_binop (PLUS_EXPR, |
| const_binop (MULT_EXPR, r2, r2, notrunc), |
| const_binop (MULT_EXPR, i2, i2, notrunc), |
| notrunc); |
| |
| t = build_complex (type, |
| const_binop |
| (INTEGRAL_TYPE_P (TREE_TYPE (r1)) |
| ? TRUNC_DIV_EXPR : RDIV_EXPR, |
| const_binop (PLUS_EXPR, |
| const_binop (MULT_EXPR, r1, r2, |
| notrunc), |
| const_binop (MULT_EXPR, i1, i2, |
| notrunc), |
| notrunc), |
| magsquared, notrunc), |
| const_binop |
| (INTEGRAL_TYPE_P (TREE_TYPE (r1)) |
| ? TRUNC_DIV_EXPR : RDIV_EXPR, |
| const_binop (MINUS_EXPR, |
| const_binop (MULT_EXPR, i1, r2, |
| notrunc), |
| const_binop (MULT_EXPR, r1, i2, |
| notrunc), |
| notrunc), |
| magsquared, notrunc)); |
| } |
| break; |
| |
| default: |
| abort (); |
| } |
| return t; |
| } |
| return 0; |
| } |
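| |
| /* A typical call is const_binop (PLUS_EXPR, arg1, arg2, 0) on two |
| INTEGER_CSTs of the same type: the sum is folded at compile time, |
| force_fit_type truncates it to the type's precision (NOTRUNC == 0), |
| and TREE_OVERFLOW is set on the result if the signed addition |
| wrapped. */ |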
| |
| /* Return an INTEGER_CST with value NUMBER and type from `sizetype'. */ |
| |
| tree |
| size_int (number) |
| unsigned HOST_WIDE_INT number; |
| { |
| register tree t; |
| /* Type-size nodes already made for small sizes. */ |
| static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1]; |
| |
| if (number < 2*HOST_BITS_PER_WIDE_INT + 1 |
| && size_table[number] != 0) |
| return size_table[number]; |
| if (number < 2*HOST_BITS_PER_WIDE_INT + 1) |
| { |
| push_obstacks_nochange (); |
| /* Make this a permanent node. */ |
| end_temporary_allocation (); |
| t = build_int_2 (number, 0); |
| TREE_TYPE (t) = sizetype; |
| size_table[number] = t; |
| pop_obstacks (); |
| } |
| else |
| { |
| t = build_int_2 (number, 0); |
| TREE_TYPE (t) = sizetype; |
| TREE_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (t) = force_fit_type (t, 0); |
| } |
| return t; |
| } |
| |
| /* Combine operands ARG0 and ARG1 with arithmetic operation CODE. |
| CODE is a tree code. Data type is taken from `sizetype'. |
| If the operands are constant, so is the result. */ |
| |
| tree |
| size_binop (code, arg0, arg1) |
| enum tree_code code; |
| tree arg0, arg1; |
| { |
| /* Handle the special case of two integer constants faster. */ |
| if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST) |
| { |
| /* And some specific cases even faster than that. */ |
| if (code == PLUS_EXPR && integer_zerop (arg0)) |
| return arg1; |
| else if ((code == MINUS_EXPR || code == PLUS_EXPR) |
| && integer_zerop (arg1)) |
| return arg0; |
| else if (code == MULT_EXPR && integer_onep (arg0)) |
| return arg1; |
| |
| /* Handle general case of two integer constants. */ |
| return const_binop (code, arg0, arg1, 0); |
| } |
| |
| if (arg0 == error_mark_node || arg1 == error_mark_node) |
| return error_mark_node; |
| |
| return fold (build (code, sizetype, arg0, arg1)); |
| } |
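| |
| /* For example, size_binop (PLUS_EXPR, size_int (4), size_int (8)) takes |
| the constant path above and, through const_binop and size_int, |
| normally returns the cached permanent node for 12 rather than |
| building a new tree. */ |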
| |
| /* Given T, a tree representing type conversion of ARG1, a constant, |
| return a constant tree representing the result of conversion. */ |
| |
| static tree |
| fold_convert (t, arg1) |
| register tree t; |
| register tree arg1; |
| { |
| register tree type = TREE_TYPE (t); |
| int overflow = 0; |
| |
| if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type)) |
| { |
| if (TREE_CODE (arg1) == INTEGER_CST) |
| { |
| /* If we would build a constant wider than GCC supports, |
| leave the conversion unfolded. */ |
| if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT) |
| return t; |
| |
| /* Given an integer constant, make new constant with new type, |
| appropriately sign-extended or truncated. */ |
| t = build_int_2 (TREE_INT_CST_LOW (arg1), |
| TREE_INT_CST_HIGH (arg1)); |
| TREE_TYPE (t) = type; |
| /* Indicate an overflow if (1) ARG1 already overflowed, |
| or (2) force_fit_type indicates an overflow. |
| Tell force_fit_type that an overflow has already occurred |
| if ARG1 is a too-large unsigned value and T is signed. */ |
| TREE_OVERFLOW (t) |
| = (TREE_OVERFLOW (arg1) |
| | force_fit_type (t, |
| (TREE_INT_CST_HIGH (arg1) < 0 |
| & (TREE_UNSIGNED (type) |
| < TREE_UNSIGNED (TREE_TYPE (arg1)))))); |
| TREE_CONSTANT_OVERFLOW (t) |
| = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1); |
| } |
| #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC) |
| else if (TREE_CODE (arg1) == REAL_CST) |
| { |
| /* Don't initialize these, use assignments. |
| Initialized local aggregates don't work on old compilers. */ |
| REAL_VALUE_TYPE x; |
| REAL_VALUE_TYPE l; |
| REAL_VALUE_TYPE u; |
| tree type1 = TREE_TYPE (arg1); |
| |
| x = TREE_REAL_CST (arg1); |
| l = real_value_from_int_cst (type1, TYPE_MIN_VALUE (type)); |
| u = real_value_from_int_cst (type1, TYPE_MAX_VALUE (type)); |
| /* See if X will be in range after truncation towards 0. |
| To compensate for truncation, move the bounds away from 0, |
| but reject if X exactly equals the adjusted bounds. */ |
| #ifdef REAL_ARITHMETIC |
| REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1); |
| REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1); |
| #else |
| l--; |
| u++; |
| #endif |
| /* If X is a NaN, use zero instead and show we have an overflow. |
| Otherwise, range check. */ |
| if (REAL_VALUE_ISNAN (x)) |
| overflow = 1, x = dconst0; |
| else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u))) |
| overflow = 1; |
| |
| #ifndef REAL_ARITHMETIC |
| { |
| HOST_WIDE_INT low, high; |
| HOST_WIDE_INT half_word |
| = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2); |
| |
| if (x < 0) |
| x = -x; |
| |
| high = (HOST_WIDE_INT) (x / half_word / half_word); |
| x -= (REAL_VALUE_TYPE) high * half_word * half_word; |
| if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2) |
| { |
| low = x - (REAL_VALUE_TYPE) half_word * half_word / 2; |
| low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1); |
| } |
| else |
| low = (HOST_WIDE_INT) x; |
| if (TREE_REAL_CST (arg1) < 0) |
| neg_double (low, high, &low, &high); |
| t = build_int_2 (low, high); |
| } |
| #else |
| { |
| HOST_WIDE_INT low, high; |
| REAL_VALUE_TO_INT (&low, &high, x); |
| t = build_int_2 (low, high); |
| } |
| #endif |
| TREE_TYPE (t) = type; |
| TREE_OVERFLOW (t) |
| = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow); |
| TREE_CONSTANT_OVERFLOW (t) |
| = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1); |
| } |
| #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */ |
| TREE_TYPE (t) = type; |
| } |
| else if (TREE_CODE (type) == REAL_TYPE) |
| { |
| #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC) |
| if (TREE_CODE (arg1) == INTEGER_CST) |
| return build_real_from_int_cst (type, arg1); |
| #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */ |
| if (TREE_CODE (arg1) == REAL_CST) |
| { |
| if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1))) |
| { |
| t = arg1; |
| TREE_TYPE (arg1) = type; |
| return t; |
| } |
| else if (setjmp (float_error)) |
| { |
| overflow = 1; |
| t = copy_node (arg1); |
| goto got_it; |
| } |
| set_float_handler (float_error); |
| |
| t = build_real (type, real_value_truncate (TYPE_MODE (type), |
| TREE_REAL_CST (arg1))); |
| set_float_handler (NULL_PTR); |
| |
| got_it: |
| TREE_OVERFLOW (t) |
| = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow); |
| TREE_CONSTANT_OVERFLOW (t) |
| = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1); |
| return t; |
| } |
| } |
| TREE_CONSTANT (t) = 1; |
| return t; |
| } |
| |
| /* Return an expr equal to X but certainly not valid as an lvalue. |
| Also make sure it is not valid as a null pointer constant. */ |
| |
| tree |
| non_lvalue (x) |
| tree x; |
| { |
| tree result; |
| |
| /* These things are certainly not lvalues. */ |
| if (TREE_CODE (x) == NON_LVALUE_EXPR |
| || TREE_CODE (x) == INTEGER_CST |
| || TREE_CODE (x) == REAL_CST |
| || TREE_CODE (x) == STRING_CST |
| || TREE_CODE (x) == ADDR_EXPR) |
| { |
| if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x)) |
| { |
| /* Use NOP_EXPR instead of NON_LVALUE_EXPR |
| so convert_for_assignment won't strip it. |
| This is so this 0 won't be treated as a null pointer constant. */ |
| result = build1 (NOP_EXPR, TREE_TYPE (x), x); |
| TREE_CONSTANT (result) = TREE_CONSTANT (x); |
| return result; |
| } |
| return x; |
| } |
| |
| result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x); |
| TREE_CONSTANT (result) = TREE_CONSTANT (x); |
| return result; |
| } |
| |
| /* Nonzero means lvalues are limited to those valid in pedantic ANSI C. |
| Zero means allow extended lvalues. */ |
| |
| int pedantic_lvalues; |
| |
| /* When pedantic, return an expr equal to X but certainly not valid as a |
| pedantic lvalue. Otherwise, return X. */ |
| |
| tree |
| pedantic_non_lvalue (x) |
| tree x; |
| { |
| if (pedantic_lvalues) |
| return non_lvalue (x); |
| else |
| return x; |
| } |
| |
| /* Given a tree comparison code, return the code that is the logical inverse |
| of the given code. It is not safe to do this for floating-point |
| comparisons, except for NE_EXPR and EQ_EXPR. */ |
| |
| static enum tree_code |
| invert_tree_comparison (code) |
| enum tree_code code; |
| { |
| switch (code) |
| { |
| case EQ_EXPR: |
| return NE_EXPR; |
| case NE_EXPR: |
| return EQ_EXPR; |
| case GT_EXPR: |
| return LE_EXPR; |
| case GE_EXPR: |
| return LT_EXPR; |
| case LT_EXPR: |
| return GE_EXPR; |
| case LE_EXPR: |
| return GT_EXPR; |
| default: |
| abort (); |
| } |
| } |
| |
| /* Similar, but return the comparison that results if the operands are |
| swapped. This is safe for floating-point. */ |
| |
| static enum tree_code |
| swap_tree_comparison (code) |
| enum tree_code code; |
| { |
| switch (code) |
| { |
| case EQ_EXPR: |
| case NE_EXPR: |
| return code; |
| case GT_EXPR: |
| return LT_EXPR; |
| case GE_EXPR: |
| return LE_EXPR; |
| case LT_EXPR: |
| return GT_EXPR; |
| case LE_EXPR: |
| return GE_EXPR; |
| default: |
| abort (); |
| } |
| } |
| |
| /* Return nonzero if CODE is a tree code that represents a truth value. */ |
| |
| static int |
| truth_value_p (code) |
| enum tree_code code; |
| { |
| return (TREE_CODE_CLASS (code) == '<' |
| || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR |
| || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR |
| || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR); |
| } |
| |
| /* Return nonzero if two operands are necessarily equal. |
| If ONLY_CONST is non-zero, only return non-zero for constants. |
| This function tests whether the operands are indistinguishable; |
| it does not test whether they are equal using C's == operation. |
| The distinction is important for IEEE floating point, because |
| (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and |
| (2) two NaNs may be indistinguishable, but NaN!=NaN. */ |
| |
| int |
| operand_equal_p (arg0, arg1, only_const) |
| tree arg0, arg1; |
| int only_const; |
| { |
| /* If both types don't have the same signedness, then we can't consider |
| them equal. We must check this before the STRIP_NOPS calls |
| because they may change the signedness of the arguments. */ |
| if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1))) |
| return 0; |
| |
| STRIP_NOPS (arg0); |
| STRIP_NOPS (arg1); |
| |
| if (TREE_CODE (arg0) != TREE_CODE (arg1) |
| /* This is needed for conversions and for COMPONENT_REF. |
| Might as well play it safe and always test this. */ |
| || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1))) |
| return 0; |
| |
| /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal. |
| We don't care about side effects in that case because the SAVE_EXPR |
| takes care of that for us. In all other cases, two expressions are |
| equal if they have no side effects. If we have two identical |
| expressions with side effects that should be treated the same due |
| to the only side effects being identical SAVE_EXPR's, that will |
| be detected in the recursive calls below. */ |
| if (arg0 == arg1 && ! only_const |
| && (TREE_CODE (arg0) == SAVE_EXPR |
| || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1)))) |
| return 1; |
| |
| /* Next handle constant cases, those for which we can return 1 even |
| if ONLY_CONST is set. */ |
| if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1)) |
| switch (TREE_CODE (arg0)) |
| { |
| case INTEGER_CST: |
| return (! TREE_CONSTANT_OVERFLOW (arg0) |
| && ! TREE_CONSTANT_OVERFLOW (arg1) |
| && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1) |
| && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1)); |
| |
| case REAL_CST: |
| return (! TREE_CONSTANT_OVERFLOW (arg0) |
| && ! TREE_CONSTANT_OVERFLOW (arg1) |
| && REAL_VALUES_EQUAL (TREE_REAL_CST (arg0), |
| TREE_REAL_CST (arg1))); |
| |
| case COMPLEX_CST: |
| return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1), |
| only_const) |
| && operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1), |
| only_const)); |
| |
| case STRING_CST: |
| return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1) |
| && ! strncmp (TREE_STRING_POINTER (arg0), |
| TREE_STRING_POINTER (arg1), |
| TREE_STRING_LENGTH (arg0))); |
| |
| case ADDR_EXPR: |
| return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), |
| 0); |
| } |
| |
| if (only_const) |
| return 0; |
| |
| switch (TREE_CODE_CLASS (TREE_CODE (arg0))) |
| { |
| case '1': |
| /* Two conversions are equal only if signedness and modes match. */ |
| if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR) |
| && (TREE_UNSIGNED (TREE_TYPE (arg0)) |
| != TREE_UNSIGNED (TREE_TYPE (arg1)))) |
| return 0; |
| |
| return operand_equal_p (TREE_OPERAND (arg0, 0), |
| TREE_OPERAND (arg1, 0), 0); |
| |
| case '<': |
| case '2': |
| if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0) |
| && operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), |
| 0)) |
| return 1; |
| |
| /* For commutative ops, allow the other order. */ |
| return ((TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MULT_EXPR |
| || TREE_CODE (arg0) == MIN_EXPR || TREE_CODE (arg0) == MAX_EXPR |
| || TREE_CODE (arg0) == BIT_IOR_EXPR |
| || TREE_CODE (arg0) == BIT_XOR_EXPR |
| || TREE_CODE (arg0) == BIT_AND_EXPR |
| || TREE_CODE (arg0) == NE_EXPR || TREE_CODE (arg0) == EQ_EXPR) |
| && operand_equal_p (TREE_OPERAND (arg0, 0), |
| TREE_OPERAND (arg1, 1), 0) |
| && operand_equal_p (TREE_OPERAND (arg0, 1), |
| TREE_OPERAND (arg1, 0), 0)); |
| |
| case 'r': |
| switch (TREE_CODE (arg0)) |
| { |
| case INDIRECT_REF: |
| return operand_equal_p (TREE_OPERAND (arg0, 0), |
| TREE_OPERAND (arg1, 0), 0); |
| |
| case COMPONENT_REF: |
| case ARRAY_REF: |
| return (operand_equal_p (TREE_OPERAND (arg0, 0), |
| TREE_OPERAND (arg1, 0), 0) |
| && operand_equal_p (TREE_OPERAND (arg0, 1), |
| TREE_OPERAND (arg1, 1), 0)); |
| |
| case BIT_FIELD_REF: |
| return (operand_equal_p (TREE_OPERAND (arg0, 0), |
| TREE_OPERAND (arg1, 0), 0) |
| && operand_equal_p (TREE_OPERAND (arg0, 1), |
| TREE_OPERAND (arg1, 1), 0) |
| && operand_equal_p (TREE_OPERAND (arg0, 2), |
| TREE_OPERAND (arg1, 2), 0)); |
| } |
| break; |
| } |
| |
| return 0; |
| } |
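| |
| /* For example, `a + b' and `b + a' compare equal through the |
| commutative case above, and two references to the same SAVE_EXPR |
| compare equal despite having side effects, since the SAVE_EXPR |
| guarantees the value is computed only once. */ |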
| |
| /* Similar to operand_equal_p, but see if ARG0 might have been made by |
| shorten_compare from ARG1 when ARG1 was being compared with OTHER. |
| |
| When in doubt, return 0. */ |
| |
| static int |
| operand_equal_for_comparison_p (arg0, arg1, other) |
| tree arg0, arg1; |
| tree other; |
| { |
| int unsignedp1, unsignedpo; |
| tree primarg1, primother; |
| unsigned correct_width; |
| |
| if (operand_equal_p (arg0, arg1, 0)) |
| return 1; |
| |
| if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)) |
| || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1))) |
| return 0; |
| |
| /* Duplicate what shorten_compare does to ARG1 and see if that gives the |
| actual comparison operand, ARG0. |
| |
| First throw away any conversions to wider types |
| already present in the operands. */ |
| |
| primarg1 = get_narrower (arg1, &unsignedp1); |
| primother = get_narrower (other, &unsignedpo); |
| |
| correct_width = TYPE_PRECISION (TREE_TYPE (arg1)); |
| if (unsignedp1 == unsignedpo |
| && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width |
| && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width) |
| { |
| tree type = TREE_TYPE (arg0); |
| |
| /* Make sure shorter operand is extended the right way |
| to match the longer operand. */ |
| primarg1 = convert (signed_or_unsigned_type (unsignedp1, |
| TREE_TYPE (primarg1)), |
| primarg1); |
| |
| if (operand_equal_p (arg0, convert (type, primarg1), 0)) |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
| /* See if ARG is an expression that is either a comparison or is performing |
| arithmetic on comparisons. The comparisons must only be comparing |
| two different values, which will be stored in *CVAL1 and *CVAL2; if |
| they are non-zero it means that some operands have already been found. |
| No variables may be used anywhere else in the expression except in the |
| comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around |
| the expression and save_expr needs to be called with CVAL1 and CVAL2. |
| |
| If this is true, return 1. Otherwise, return zero. */ |
| |
| static int |
| twoval_comparison_p (arg, cval1, cval2, save_p) |
| tree arg; |
| tree *cval1, *cval2; |
| int *save_p; |
| { |
| enum tree_code code = TREE_CODE (arg); |
| char class = TREE_CODE_CLASS (code); |
| |
| /* We can handle some of the 'e' cases here. */ |
| if (class == 'e' && code == TRUTH_NOT_EXPR) |
| class = '1'; |
| else if (class == 'e' |
| && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR |
| || code == COMPOUND_EXPR)) |
| class = '2'; |
| |
| /* ??? Disable this since the SAVE_EXPR might already be in use outside |
| the expression. There may be no way to make this work, but it needs |
| to be looked at again for 2.6. */ |
| #if 0 |
| else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0) |
| { |
| /* If we've already found a CVAL1 or CVAL2, this expression is |
	 too complex to handle.  */
| if (*cval1 || *cval2) |
| return 0; |
| |
| class = '1'; |
| *save_p = 1; |
| } |
| #endif |
| |
| switch (class) |
| { |
| case '1': |
| return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p); |
| |
| case '2': |
| return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p) |
| && twoval_comparison_p (TREE_OPERAND (arg, 1), |
| cval1, cval2, save_p)); |
| |
| case 'c': |
| return 1; |
| |
| case 'e': |
| if (code == COND_EXPR) |
| return (twoval_comparison_p (TREE_OPERAND (arg, 0), |
| cval1, cval2, save_p) |
| && twoval_comparison_p (TREE_OPERAND (arg, 1), |
| cval1, cval2, save_p) |
| && twoval_comparison_p (TREE_OPERAND (arg, 2), |
| cval1, cval2, save_p)); |
| return 0; |
| |
| case '<': |
| /* First see if we can handle the first operand, then the second. For |
| the second operand, we know *CVAL1 can't be zero. It must be that |
| one side of the comparison is each of the values; test for the |
| case where this isn't true by failing if the two operands |
| are the same. */ |
| |
| if (operand_equal_p (TREE_OPERAND (arg, 0), |
| TREE_OPERAND (arg, 1), 0)) |
| return 0; |
| |
| if (*cval1 == 0) |
| *cval1 = TREE_OPERAND (arg, 0); |
| else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0)) |
| ; |
| else if (*cval2 == 0) |
| *cval2 = TREE_OPERAND (arg, 0); |
| else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0)) |
| ; |
| else |
| return 0; |
| |
| if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0)) |
| ; |
| else if (*cval2 == 0) |
| *cval2 = TREE_OPERAND (arg, 1); |
| else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0)) |
| ; |
| else |
| return 0; |
| |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
| /* ARG is a tree that is known to contain just arithmetic operations and |
| comparisons. Evaluate the operations in the tree substituting NEW0 for |
| any occurrence of OLD0 as an operand of a comparison and likewise for |
| NEW1 and OLD1. */ |
| |
| static tree |
| eval_subst (arg, old0, new0, old1, new1) |
| tree arg; |
| tree old0, new0, old1, new1; |
| { |
| tree type = TREE_TYPE (arg); |
| enum tree_code code = TREE_CODE (arg); |
| char class = TREE_CODE_CLASS (code); |
| |
| /* We can handle some of the 'e' cases here. */ |
| if (class == 'e' && code == TRUTH_NOT_EXPR) |
| class = '1'; |
| else if (class == 'e' |
| && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR)) |
| class = '2'; |
| |
| switch (class) |
| { |
| case '1': |
| return fold (build1 (code, type, |
| eval_subst (TREE_OPERAND (arg, 0), |
| old0, new0, old1, new1))); |
| |
| case '2': |
| return fold (build (code, type, |
| eval_subst (TREE_OPERAND (arg, 0), |
| old0, new0, old1, new1), |
| eval_subst (TREE_OPERAND (arg, 1), |
| old0, new0, old1, new1))); |
| |
| case 'e': |
| switch (code) |
| { |
| case SAVE_EXPR: |
| return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1); |
| |
| case COMPOUND_EXPR: |
| return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1); |
| |
| case COND_EXPR: |
| return fold (build (code, type, |
| eval_subst (TREE_OPERAND (arg, 0), |
| old0, new0, old1, new1), |
| eval_subst (TREE_OPERAND (arg, 1), |
| old0, new0, old1, new1), |
| eval_subst (TREE_OPERAND (arg, 2), |
| old0, new0, old1, new1))); |
| } |
| |
| case '<': |
| { |
| tree arg0 = TREE_OPERAND (arg, 0); |
| tree arg1 = TREE_OPERAND (arg, 1); |
| |
| /* We need to check both for exact equality and tree equality. The |
| former will be true if the operand has a side-effect. In that |
| case, we know the operand occurred exactly once. */ |
| |
| if (arg0 == old0 || operand_equal_p (arg0, old0, 0)) |
| arg0 = new0; |
| else if (arg0 == old1 || operand_equal_p (arg0, old1, 0)) |
| arg0 = new1; |
| |
| if (arg1 == old0 || operand_equal_p (arg1, old0, 0)) |
| arg1 = new0; |
| else if (arg1 == old1 || operand_equal_p (arg1, old1, 0)) |
| arg1 = new1; |
| |
| return fold (build (code, type, arg0, arg1)); |
| } |
| } |
| |
| return arg; |
| } |
| |
| /* Return a tree for the case when the result of an expression is RESULT |
| converted to TYPE and OMITTED was previously an operand of the expression |
| but is now not needed (e.g., we folded OMITTED * 0). |
| |
| If OMITTED has side effects, we must evaluate it. Otherwise, just do |
| the conversion of RESULT to TYPE. */ |
| |
| static tree |
| omit_one_operand (type, result, omitted) |
| tree type, result, omitted; |
| { |
| tree t = convert (type, result); |
| |
| if (TREE_SIDE_EFFECTS (omitted)) |
| return build (COMPOUND_EXPR, type, omitted, t); |
| |
| return non_lvalue (t); |
| } |
| |
| /* Similar, but call pedantic_non_lvalue instead of non_lvalue. */ |
| |
| static tree |
| pedantic_omit_one_operand (type, result, omitted) |
| tree type, result, omitted; |
| { |
| tree t = convert (type, result); |
| |
| if (TREE_SIDE_EFFECTS (omitted)) |
| return build (COMPOUND_EXPR, type, omitted, t); |
| |
| return pedantic_non_lvalue (t); |
| } |
| |
| |
| |
| /* Return a simplified tree node for the truth-negation of ARG. This |
| never alters ARG itself. We assume that ARG is an operation that |
| returns a truth value (0 or 1). */ |
| |
| tree |
| invert_truthvalue (arg) |
| tree arg; |
| { |
| tree type = TREE_TYPE (arg); |
| enum tree_code code = TREE_CODE (arg); |
| |
| if (code == ERROR_MARK) |
| return arg; |
| |
| /* If this is a comparison, we can simply invert it, except for |
| floating-point non-equality comparisons, in which case we just |
| enclose a TRUTH_NOT_EXPR around what we have. */ |
| |
| if (TREE_CODE_CLASS (code) == '<') |
| { |
| if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0))) |
| && code != NE_EXPR && code != EQ_EXPR) |
| return build1 (TRUTH_NOT_EXPR, type, arg); |
| else |
| return build (invert_tree_comparison (code), type, |
| TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1)); |
| } |
| |
| switch (code) |
| { |
| case INTEGER_CST: |
| return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0 |
| && TREE_INT_CST_HIGH (arg) == 0, 0)); |
| |
| case TRUTH_AND_EXPR: |
| return build (TRUTH_OR_EXPR, type, |
| invert_truthvalue (TREE_OPERAND (arg, 0)), |
| invert_truthvalue (TREE_OPERAND (arg, 1))); |
| |
| case TRUTH_OR_EXPR: |
| return build (TRUTH_AND_EXPR, type, |
| invert_truthvalue (TREE_OPERAND (arg, 0)), |
| invert_truthvalue (TREE_OPERAND (arg, 1))); |
| |
| case TRUTH_XOR_EXPR: |
| /* Here we can invert either operand. We invert the first operand |
| unless the second operand is a TRUTH_NOT_EXPR in which case our |
| result is the XOR of the first operand with the inside of the |
| negation of the second operand. */ |
| |
| if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR) |
| return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0), |
| TREE_OPERAND (TREE_OPERAND (arg, 1), 0)); |
| else |
| return build (TRUTH_XOR_EXPR, type, |
| invert_truthvalue (TREE_OPERAND (arg, 0)), |
| TREE_OPERAND (arg, 1)); |
| |
| case TRUTH_ANDIF_EXPR: |
| return build (TRUTH_ORIF_EXPR, type, |
| invert_truthvalue (TREE_OPERAND (arg, 0)), |
| invert_truthvalue (TREE_OPERAND (arg, 1))); |
| |
| case TRUTH_ORIF_EXPR: |
| return build (TRUTH_ANDIF_EXPR, type, |
| invert_truthvalue (TREE_OPERAND (arg, 0)), |
| invert_truthvalue (TREE_OPERAND (arg, 1))); |
| |
| case TRUTH_NOT_EXPR: |
| return TREE_OPERAND (arg, 0); |
| |
| case COND_EXPR: |
| return build (COND_EXPR, type, TREE_OPERAND (arg, 0), |
| invert_truthvalue (TREE_OPERAND (arg, 1)), |
| invert_truthvalue (TREE_OPERAND (arg, 2))); |
| |
| case COMPOUND_EXPR: |
| return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0), |
| invert_truthvalue (TREE_OPERAND (arg, 1))); |
| |
| case NON_LVALUE_EXPR: |
| return invert_truthvalue (TREE_OPERAND (arg, 0)); |
| |
| case NOP_EXPR: |
| case CONVERT_EXPR: |
| case FLOAT_EXPR: |
| return build1 (TREE_CODE (arg), type, |
| invert_truthvalue (TREE_OPERAND (arg, 0))); |
| |
| case BIT_AND_EXPR: |
| if (!integer_onep (TREE_OPERAND (arg, 1))) |
| break; |
| return build (EQ_EXPR, type, arg, convert (type, integer_zero_node)); |
| |
| case SAVE_EXPR: |
| return build1 (TRUTH_NOT_EXPR, type, arg); |
| |
| case CLEANUP_POINT_EXPR: |
| return build1 (CLEANUP_POINT_EXPR, type, |
| invert_truthvalue (TREE_OPERAND (arg, 0))); |
| } |
| if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE) |
| abort (); |
| return build1 (TRUTH_NOT_EXPR, type, arg); |
| } |
| |
| /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both |
| operands are another bit-wise operation with a common input. If so, |
| distribute the bit operations to save an operation and possibly two if |
| constants are involved. For example, convert |
| (A | B) & (A | C) into A | (B & C) |
| Further simplification will occur if B and C are constants. |
| |
| If this optimization cannot be done, 0 will be returned. */ |
| |
| static tree |
| distribute_bit_expr (code, type, arg0, arg1) |
| enum tree_code code; |
| tree type; |
| tree arg0, arg1; |
| { |
| tree common; |
| tree left, right; |
| |
| if (TREE_CODE (arg0) != TREE_CODE (arg1) |
| || TREE_CODE (arg0) == code |
| || (TREE_CODE (arg0) != BIT_AND_EXPR |
| && TREE_CODE (arg0) != BIT_IOR_EXPR)) |
| return 0; |
| |
| if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0)) |
| { |
| common = TREE_OPERAND (arg0, 0); |
| left = TREE_OPERAND (arg0, 1); |
| right = TREE_OPERAND (arg1, 1); |
| } |
| else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0)) |
| { |
| common = TREE_OPERAND (arg0, 0); |
| left = TREE_OPERAND (arg0, 1); |
| right = TREE_OPERAND (arg1, 0); |
| } |
| else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0)) |
| { |
| common = TREE_OPERAND (arg0, 1); |
| left = TREE_OPERAND (arg0, 0); |
| right = TREE_OPERAND (arg1, 1); |
| } |
| else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0)) |
| { |
| common = TREE_OPERAND (arg0, 1); |
| left = TREE_OPERAND (arg0, 0); |
| right = TREE_OPERAND (arg1, 0); |
| } |
| else |
| return 0; |
| |
| return fold (build (TREE_CODE (arg0), type, common, |
| fold (build (code, type, left, right)))); |
| } |
| |
| /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER |
| starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */ |
| |
| static tree |
| make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp) |
| tree inner; |
| tree type; |
| int bitsize, bitpos; |
| int unsignedp; |
| { |
| tree result = build (BIT_FIELD_REF, type, inner, |
| size_int (bitsize), size_int (bitpos)); |
| |
| TREE_UNSIGNED (result) = unsignedp; |
| |
| return result; |
| } |
| |
| /* Optimize a bit-field compare. |
| |
| There are two cases: First is a compare against a constant and the |
| second is a comparison of two items where the fields are at the same |
| bit position relative to the start of a chunk (byte, halfword, word) |
| large enough to contain it. In these cases we can avoid the shift |
| implicit in bitfield extractions. |
| |
| For constants, we emit a compare of the shifted constant with the |
| BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being |
| compared. For two fields at the same position, we do the ANDs with the |
| similar mask and compare the result of the ANDs. |
| |
| CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR. |
| COMPARE_TYPE is the type of the comparison, and LHS and RHS |
| are the left and right operands of the comparison, respectively. |
| |
| If the optimization described above can be done, we return the resulting |
| tree. Otherwise we return zero. */ |
| |
| static tree |
| optimize_bit_field_compare (code, compare_type, lhs, rhs) |
| enum tree_code code; |
| tree compare_type; |
| tree lhs, rhs; |
| { |
| int lbitpos, lbitsize, rbitpos, rbitsize; |
| int lnbitpos, lnbitsize, rnbitpos, rnbitsize; |
| tree type = TREE_TYPE (lhs); |
| tree signed_type, unsigned_type; |
| int const_p = TREE_CODE (rhs) == INTEGER_CST; |
| enum machine_mode lmode, rmode, lnmode, rnmode; |
| int lunsignedp, runsignedp; |
| int lvolatilep = 0, rvolatilep = 0; |
| int alignment; |
| tree linner, rinner; |
| tree mask; |
| tree offset; |
| |
| /* Get all the information about the extractions being done. If the bit size |
     is the same as the size of the underlying object, we aren't doing an
| extraction at all and so can do nothing. */ |
| linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode, |
| &lunsignedp, &lvolatilep, &alignment); |
| if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0 |
| || offset != 0) |
| return 0; |
| |
| if (!const_p) |
| { |
| /* If this is not a constant, we can only do something if bit positions, |
| sizes, and signedness are the same. */ |
| rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode, |
| &runsignedp, &rvolatilep, &alignment); |
| |
| if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize |
| || lunsignedp != runsignedp || offset != 0) |
| return 0; |
| } |
| |
| /* See if we can find a mode to refer to this field. We should be able to, |
| but fail if we can't. */ |
| lnmode = get_best_mode (lbitsize, lbitpos, |
| TYPE_ALIGN (TREE_TYPE (linner)), word_mode, |
| lvolatilep); |
| if (lnmode == VOIDmode) |
| return 0; |
| |
| /* Set signed and unsigned types of the precision of this mode for the |
| shifts below. */ |
| signed_type = type_for_mode (lnmode, 0); |
| unsigned_type = type_for_mode (lnmode, 1); |
| |
| if (! const_p) |
| { |
| rnmode = get_best_mode (rbitsize, rbitpos, |
| TYPE_ALIGN (TREE_TYPE (rinner)), word_mode, |
| rvolatilep); |
| if (rnmode == VOIDmode) |
| return 0; |
| } |
| |
| /* Compute the bit position and size for the new reference and our offset |
| within it. If the new reference is the same size as the original, we |
| won't optimize anything, so return zero. */ |
| lnbitsize = GET_MODE_BITSIZE (lnmode); |
| lnbitpos = lbitpos & ~ (lnbitsize - 1); |
| lbitpos -= lnbitpos; |
| if (lnbitsize == lbitsize) |
| return 0; |
| |
| if (! const_p) |
| { |
| rnbitsize = GET_MODE_BITSIZE (rnmode); |
| rnbitpos = rbitpos & ~ (rnbitsize - 1); |
| rbitpos -= rnbitpos; |
| if (rnbitsize == rbitsize) |
| return 0; |
| } |
| |
| if (BYTES_BIG_ENDIAN) |
| lbitpos = lnbitsize - lbitsize - lbitpos; |
| |
| /* Make the mask to be used against the extracted field. */ |
| mask = build_int_2 (~0, ~0); |
| TREE_TYPE (mask) = unsigned_type; |
| force_fit_type (mask, 0); |
| mask = convert (unsigned_type, mask); |
| mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0); |
| mask = const_binop (RSHIFT_EXPR, mask, |
| size_int (lnbitsize - lbitsize - lbitpos), 0); |
| |
| if (! const_p) |
| /* If not comparing with constant, just rework the comparison |
| and return. */ |
| return build (code, compare_type, |
| build (BIT_AND_EXPR, unsigned_type, |
| make_bit_field_ref (linner, unsigned_type, |
| lnbitsize, lnbitpos, 1), |
| mask), |
| build (BIT_AND_EXPR, unsigned_type, |
| make_bit_field_ref (rinner, unsigned_type, |
| rnbitsize, rnbitpos, 1), |
| mask)); |
| |
| /* Otherwise, we are handling the constant case. See if the constant is too |
     big for the field.  Warn and return a tree for 0 (false) if so.  We do
| this not only for its own sake, but to avoid having to test for this |
| error case below. If we didn't, we might generate wrong code. |
| |
| For unsigned fields, the constant shifted right by the field length should |
| be all zero. For signed fields, the high-order bits should agree with |
| the sign bit. */ |
| |
| if (lunsignedp) |
| { |
| if (! integer_zerop (const_binop (RSHIFT_EXPR, |
| convert (unsigned_type, rhs), |
| size_int (lbitsize), 0))) |
| { |
| warning ("comparison is always %s due to width of bitfield", |
| code == NE_EXPR ? "one" : "zero"); |
| return convert (compare_type, |
| (code == NE_EXPR |
| ? integer_one_node : integer_zero_node)); |
| } |
| } |
| else |
| { |
| tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs), |
| size_int (lbitsize - 1), 0); |
| if (! integer_zerop (tem) && ! integer_all_onesp (tem)) |
| { |
| warning ("comparison is always %s due to width of bitfield", |
| code == NE_EXPR ? "one" : "zero"); |
| return convert (compare_type, |
| (code == NE_EXPR |
| ? integer_one_node : integer_zero_node)); |
| } |
| } |
| |
| /* Single-bit compares should always be against zero. */ |
| if (lbitsize == 1 && ! integer_zerop (rhs)) |
| { |
| code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR; |
| rhs = convert (type, integer_zero_node); |
| } |
| |
| /* Make a new bitfield reference, shift the constant over the |
| appropriate number of bits and mask it with the computed mask |
| (in case this was a signed field). If we changed it, make a new one. */ |
| lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1); |
| if (lvolatilep) |
| { |
| TREE_SIDE_EFFECTS (lhs) = 1; |
| TREE_THIS_VOLATILE (lhs) = 1; |
| } |
| |
| rhs = fold (const_binop (BIT_AND_EXPR, |
| const_binop (LSHIFT_EXPR, |
| convert (unsigned_type, rhs), |
| size_int (lbitpos), 0), |
| mask, 0)); |
| |
| return build (code, compare_type, |
| build (BIT_AND_EXPR, unsigned_type, lhs, mask), |
| rhs); |
| } |
| |
| /* Subroutine for fold_truthop: decode a field reference. |
| |
   If EXP is a component reference, we return the innermost reference.
| |
| *PBITSIZE is set to the number of bits in the reference, *PBITPOS is |
| set to the starting bit number. |
| |
| If the innermost field can be completely contained in a mode-sized |
| unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode. |
| |
   *PVOLATILEP is set to 1 if any expression encountered is volatile;
| otherwise it is not changed. |
| |
| *PUNSIGNEDP is set to the signedness of the field. |
| |
| *PMASK is set to the mask used. This is either contained in a |
| BIT_AND_EXPR or derived from the width of the field. |
| |
   *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.
| |
| Return 0 if this is not a component reference or is one that we can't |
| do anything with. */ |
| |
| static tree |
| decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp, |
| pvolatilep, pmask, pand_mask) |
| tree exp; |
| int *pbitsize, *pbitpos; |
| enum machine_mode *pmode; |
| int *punsignedp, *pvolatilep; |
| tree *pmask; |
| tree *pand_mask; |
| { |
| tree and_mask = 0; |
| tree mask, inner, offset; |
| tree unsigned_type; |
| int precision; |
| int alignment; |
| |
| /* All the optimizations using this function assume integer fields. |
| There are problems with FP fields since the type_for_size call |
| below can fail for, e.g., XFmode. */ |
| if (! INTEGRAL_TYPE_P (TREE_TYPE (exp))) |
| return 0; |
| |
| STRIP_NOPS (exp); |
| |
| if (TREE_CODE (exp) == BIT_AND_EXPR) |
| { |
| and_mask = TREE_OPERAND (exp, 1); |
| exp = TREE_OPERAND (exp, 0); |
| STRIP_NOPS (exp); STRIP_NOPS (and_mask); |
| if (TREE_CODE (and_mask) != INTEGER_CST) |
| return 0; |
| } |
| |
| inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode, |
| punsignedp, pvolatilep, &alignment); |
| if ((inner == exp && and_mask == 0) |
| || *pbitsize < 0 || offset != 0) |
| return 0; |
| |
| /* Compute the mask to access the bitfield. */ |
| unsigned_type = type_for_size (*pbitsize, 1); |
| precision = TYPE_PRECISION (unsigned_type); |
| |
| mask = build_int_2 (~0, ~0); |
| TREE_TYPE (mask) = unsigned_type; |
| force_fit_type (mask, 0); |
| mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0); |
| mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0); |
| |
| /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */ |
| if (and_mask != 0) |
| mask = fold (build (BIT_AND_EXPR, unsigned_type, |
| convert (unsigned_type, and_mask), mask)); |
| |
| *pmask = mask; |
| *pand_mask = and_mask; |
| return inner; |
| } |
| |
| /* Return non-zero if MASK represents a mask of SIZE ones in the low-order |
| bit positions. */ |
| |
| static int |
| all_ones_mask_p (mask, size) |
| tree mask; |
| int size; |
| { |
| tree type = TREE_TYPE (mask); |
| int precision = TYPE_PRECISION (type); |
| tree tmask; |
| |
| tmask = build_int_2 (~0, ~0); |
| TREE_TYPE (tmask) = signed_type (type); |
| force_fit_type (tmask, 0); |
| return |
| tree_int_cst_equal (mask, |
| const_binop (RSHIFT_EXPR, |
| const_binop (LSHIFT_EXPR, tmask, |
| size_int (precision - size), |
| 0), |
| size_int (precision - size), 0)); |
| } |
| |
| /* Subroutine for fold_truthop: determine if an operand is simple enough |
| to be evaluated unconditionally. */ |
| |
| static int |
| simple_operand_p (exp) |
| tree exp; |
| { |
| /* Strip any conversions that don't change the machine mode. */ |
| while ((TREE_CODE (exp) == NOP_EXPR |
| || TREE_CODE (exp) == CONVERT_EXPR) |
| && (TYPE_MODE (TREE_TYPE (exp)) |
| == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))) |
| exp = TREE_OPERAND (exp, 0); |
| |
| return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c' |
| || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd' |
| && ! TREE_ADDRESSABLE (exp) |
| && ! TREE_THIS_VOLATILE (exp) |
| && ! DECL_NONLOCAL (exp) |
| /* Don't regard global variables as simple. They may be |
| allocated in ways unknown to the compiler (shared memory, |
| #pragma weak, etc). */ |
| && ! TREE_PUBLIC (exp) |
| && ! DECL_EXTERNAL (exp) |
| /* Loading a static variable is unduly expensive, but global |
| registers aren't expensive. */ |
| && (! TREE_STATIC (exp) || DECL_REGISTER (exp)))); |
| } |
| |
| /* The following functions are subroutines to fold_range_test and allow it to |
| try to change a logical combination of comparisons into a range test. |
| |
| For example, both |
	X == 2 || X == 3 || X == 4 || X == 5
| and |
| X >= 2 && X <= 5 |
| are converted to |
| (unsigned) (X - 2) <= 3 |
| |
   We describe each set of comparisons as being either inside or outside
| a range, using a variable named like IN_P, and then describe the |
| range with a lower and upper bound. If one of the bounds is omitted, |
| it represents either the highest or lowest value of the type. |
| |
| In the comments below, we represent a range by two numbers in brackets |
   preceded by a "+" to designate being inside that range, or a "-" to
| designate being outside that range, so the condition can be inverted by |
| flipping the prefix. An omitted bound is represented by a "-". For |
| example, "- [-, 10]" means being outside the range starting at the lowest |
| possible value and ending at 10, in other words, being greater than 10. |
| The range "+ [-, -]" is always true and hence the range "- [-, -]" is |
| always false. |
| |
| We set up things so that the missing bounds are handled in a consistent |
| manner so neither a missing bound nor "true" and "false" need to be |
| handled using a special case. */ |
| |
| /* Return the result of applying CODE to ARG0 and ARG1, but handle the case |
| of ARG0 and/or ARG1 being omitted, meaning an unlimited range. UPPER0_P |
| and UPPER1_P are nonzero if the respective argument is an upper bound |
| and zero for a lower. TYPE, if nonzero, is the type of the result; it |
| must be specified for a comparison. ARG1 will be converted to ARG0's |
| type if both are specified. */ |
| |
| static tree |
| range_binop (code, type, arg0, upper0_p, arg1, upper1_p) |
| enum tree_code code; |
| tree type; |
| tree arg0, arg1; |
| int upper0_p, upper1_p; |
| { |
| tree tem; |
| int result; |
| int sgn0, sgn1; |
| |
| /* If neither arg represents infinity, do the normal operation. |
| Else, if not a comparison, return infinity. Else handle the special |
| comparison rules. Note that most of the cases below won't occur, but |
| are handled for consistency. */ |
| |
| if (arg0 != 0 && arg1 != 0) |
| { |
| tem = fold (build (code, type != 0 ? type : TREE_TYPE (arg0), |
| arg0, convert (TREE_TYPE (arg0), arg1))); |
| STRIP_NOPS (tem); |
| return TREE_CODE (tem) == INTEGER_CST ? tem : 0; |
| } |
| |
| if (TREE_CODE_CLASS (code) != '<') |
| return 0; |
| |
| /* Set SGN[01] to -1 if ARG[01] is a lower bound, 1 for upper, and 0 |
| for neither. Then compute our result treating them as never equal |
| and comparing bounds to non-bounds as above. */ |
| sgn0 = arg0 != 0 ? 0 : (upper0_p ? 1 : -1); |
| sgn1 = arg1 != 0 ? 0 : (upper1_p ? 1 : -1); |
| switch (code) |
| { |
| case EQ_EXPR: case NE_EXPR: |
| result = (code == NE_EXPR); |
| break; |
| case LT_EXPR: case LE_EXPR: |
| result = sgn0 < sgn1; |
| break; |
| case GT_EXPR: case GE_EXPR: |
| result = sgn0 > sgn1; |
| break; |
| } |
| |
| return convert (type, result ? integer_one_node : integer_zero_node); |
| } |
| |
| /* Given EXP, a logical expression, set the range it is testing into |
| variables denoted by PIN_P, PLOW, and PHIGH. Return the expression |
   actually being tested.  *PLOW and *PHIGH will be made the same type
| as the returned expression. If EXP is not a comparison, we will most |
| likely not be returning a useful value and range. */ |
| |
| static tree |
| make_range (exp, pin_p, plow, phigh) |
| tree exp; |
| int *pin_p; |
| tree *plow, *phigh; |
| { |
| enum tree_code code; |
| tree arg0, arg1, type; |
| int in_p, n_in_p; |
| tree low, high, n_low, n_high; |
| |
| /* Start with simply saying "EXP != 0" and then look at the code of EXP |
| and see if we can refine the range. Some of the cases below may not |
| happen, but it doesn't seem worth worrying about this. We "continue" |
| the outer loop when we've changed something; otherwise we "break" |
| the switch, which will "break" the while. */ |
| |
| in_p = 0, low = high = convert (TREE_TYPE (exp), integer_zero_node); |
| |
| while (1) |
| { |
| code = TREE_CODE (exp); |
| arg0 = TREE_OPERAND (exp, 0), arg1 = TREE_OPERAND (exp, 1); |
| if (TREE_CODE_CLASS (code) == '<' || TREE_CODE_CLASS (code) == '1' |
| || TREE_CODE_CLASS (code) == '2') |
| type = TREE_TYPE (arg0); |
| |
| switch (code) |
| { |
| case TRUTH_NOT_EXPR: |
| in_p = ! in_p, exp = arg0; |
| continue; |
| |
| case EQ_EXPR: case NE_EXPR: |
| case LT_EXPR: case LE_EXPR: case GE_EXPR: case GT_EXPR: |
| /* We can only do something if the range is testing for zero |
| and if the second operand is an integer constant. Note that |
| saying something is "in" the range we make is done by |
	     complementing IN_P, since it is set in the initial case of
| being not equal to zero; "out" is leaving it alone. */ |
| if (low == 0 || high == 0 |
| || ! integer_zerop (low) || ! integer_zerop (high) |
| || TREE_CODE (arg1) != INTEGER_CST) |
| break; |
| |
| switch (code) |
| { |
| case NE_EXPR: /* - [c, c] */ |
| low = high = arg1; |
| break; |
| case EQ_EXPR: /* + [c, c] */ |
| in_p = ! in_p, low = high = arg1; |
| break; |
| case GT_EXPR: /* - [-, c] */ |
| low = 0, high = arg1; |
| break; |
| case GE_EXPR: /* + [c, -] */ |
| in_p = ! in_p, low = arg1, high = 0; |
| break; |
| case LT_EXPR: /* - [c, -] */ |
| low = arg1, high = 0; |
| break; |
| case LE_EXPR: /* + [-, c] */ |
| in_p = ! in_p, low = 0, high = arg1; |
| break; |
| } |
| |
| exp = arg0; |
| |
| /* If this is an unsigned comparison, we also know that EXP is |
| greater than or equal to zero. We base the range tests we make |
| on that fact, so we record it here so we can parse existing |
| range tests. */ |
| if (TREE_UNSIGNED (type) && (low == 0 || high == 0)) |
| { |
| if (! merge_ranges (&n_in_p, &n_low, &n_high, in_p, low, high, |
| 1, convert (type, integer_zero_node), |
| NULL_TREE)) |
| break; |
| |
| in_p = n_in_p, low = n_low, high = n_high; |
| |
| /* If the high bound is missing, reverse the range so it |
| goes from zero to the low bound minus 1. */ |
| if (high == 0) |
| { |
| in_p = ! in_p; |
| high = range_binop (MINUS_EXPR, NULL_TREE, low, 0, |
| integer_one_node, 0); |
| low = convert (type, integer_zero_node); |
| } |
| } |
| continue; |
| |
| case NEGATE_EXPR: |
| /* (-x) IN [a,b] -> x in [-b, -a] */ |
| n_low = range_binop (MINUS_EXPR, type, |
| convert (type, integer_zero_node), 0, high, 1); |
| n_high = range_binop (MINUS_EXPR, type, |
| convert (type, integer_zero_node), 0, low, 0); |
| low = n_low, high = n_high; |
| exp = arg0; |
| continue; |
| |
| case BIT_NOT_EXPR: |
| /* ~ X -> -X - 1 */ |
| exp = build (MINUS_EXPR, type, build1 (NEGATE_EXPR, type, arg0), |
| convert (type, integer_one_node)); |
| continue; |
| |
| case PLUS_EXPR: case MINUS_EXPR: |
| if (TREE_CODE (arg1) != INTEGER_CST) |
| break; |
| |
| /* If EXP is signed, any overflow in the computation is undefined, |
| so we don't worry about it so long as our computations on |
| the bounds don't overflow. For unsigned, overflow is defined |
| and this is exactly the right thing. */ |
| n_low = range_binop (code == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR, |
| type, low, 0, arg1, 0); |
| n_high = range_binop (code == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR, |
| type, high, 1, arg1, 0); |
| if ((n_low != 0 && TREE_OVERFLOW (n_low)) |
| || (n_high != 0 && TREE_OVERFLOW (n_high))) |
| break; |
| |
| /* Check for an unsigned range which has wrapped around the maximum |
| value thus making n_high < n_low, and normalize it. */ |
| if (n_low && n_high && tree_int_cst_lt (n_high, n_low)) |
| { |
| low = range_binop (PLUS_EXPR, type, n_high, 0, |
| integer_one_node, 0); |
| high = range_binop (MINUS_EXPR, type, n_low, 0, |
| integer_one_node, 0); |
| in_p = ! in_p; |
| } |
| else |
| low = n_low, high = n_high; |
| |
| exp = arg0; |
| continue; |
| |
| case NOP_EXPR: case NON_LVALUE_EXPR: case CONVERT_EXPR: |
| if (! INTEGRAL_TYPE_P (type) |
| || (low != 0 && ! int_fits_type_p (low, type)) |
| || (high != 0 && ! int_fits_type_p (high, type))) |
| break; |
| |
| if (low != 0) |
| low = convert (type, low); |
| |
| if (high != 0) |
| high = convert (type, high); |
| |
| exp = arg0; |
| continue; |
| } |
| |
| break; |
| } |
| |
| /* If EXP is a constant, we can evaluate whether this is true or false. */ |
| if (TREE_CODE (exp) == INTEGER_CST) |
| { |
| in_p = in_p == (integer_onep (range_binop (GE_EXPR, integer_type_node, |
| exp, 0, low, 0)) |
| && integer_onep (range_binop (LE_EXPR, integer_type_node, |
| exp, 1, high, 1))); |
| low = high = 0; |
| exp = 0; |
| } |
| |
| *pin_p = in_p, *plow = low, *phigh = high; |
| return exp; |
| } |
| |
| /* Given a range, LOW, HIGH, and IN_P, an expression, EXP, and a result |
| type, TYPE, return an expression to test if EXP is in (or out of, depending |
| on IN_P) the range. */ |
| |
| static tree |
| build_range_check (type, exp, in_p, low, high) |
| tree type; |
| tree exp; |
| int in_p; |
| tree low, high; |
| { |
| tree etype = TREE_TYPE (exp); |
| tree utype, value; |
| |
| if (! in_p |
| && (0 != (value = build_range_check (type, exp, 1, low, high)))) |
| return invert_truthvalue (value); |
| |
| else if (low == 0 && high == 0) |
| return convert (type, integer_one_node); |
| |
| else if (low == 0) |
| return fold (build (LE_EXPR, type, exp, high)); |
| |
| else if (high == 0) |
| return fold (build (GE_EXPR, type, exp, low)); |
| |
| else if (operand_equal_p (low, high, 0)) |
| return fold (build (EQ_EXPR, type, exp, low)); |
| |
| else if (TREE_UNSIGNED (etype) && integer_zerop (low)) |
| return build_range_check (type, exp, 1, 0, high); |
| |
| else if (integer_zerop (low)) |
| { |
| utype = unsigned_type (etype); |
| return build_range_check (type, convert (utype, exp), 1, 0, |
| convert (utype, high)); |
| } |
| |
| else if (0 != (value = const_binop (MINUS_EXPR, high, low, 0)) |
| && ! TREE_OVERFLOW (value)) |
| return build_range_check (type, |
| fold (build (MINUS_EXPR, etype, exp, low)), |
| 1, convert (etype, integer_zero_node), value); |
| else |
| return 0; |
| } |
| |
| /* Given two ranges, see if we can merge them into one. Return 1 if we |
| can, 0 if we can't. Set the output range into the specified parameters. */ |
| |
| static int |
| merge_ranges (pin_p, plow, phigh, in0_p, low0, high0, in1_p, low1, high1) |
| int *pin_p; |
| tree *plow, *phigh; |
| int in0_p, in1_p; |
| tree low0, high0, low1, high1; |
| { |
| int no_overlap; |
| int subset; |
| int temp; |
| tree tem; |
| int in_p; |
| tree low, high; |
| |
| /* Make range 0 be the range that starts first. Swap them if it isn't. */ |
| if (integer_onep (range_binop (GT_EXPR, integer_type_node, |
| low0, 0, low1, 0)) |
| || (((low0 == 0 && low1 == 0) |
| || integer_onep (range_binop (EQ_EXPR, integer_type_node, |
| low0, 0, low1, 0))) |
| && integer_onep (range_binop (GT_EXPR, integer_type_node, |
| high0, 1, high1, 1)))) |
| { |
| temp = in0_p, in0_p = in1_p, in1_p = temp; |
| tem = low0, low0 = low1, low1 = tem; |
| tem = high0, high0 = high1, high1 = tem; |
| } |
| |
| /* Now flag two cases, whether the ranges are disjoint or whether the |
| second range is totally subsumed in the first. Note that the tests |
| below are simplified by the ones above. */ |
| no_overlap = integer_onep (range_binop (LT_EXPR, integer_type_node, |
| high0, 1, low1, 0)); |
| subset = integer_onep (range_binop (LE_EXPR, integer_type_node, |
| high1, 1, high0, 1)); |
| |
| /* We now have four cases, depending on whether we are including or |
| excluding the two ranges. */ |
| if (in0_p && in1_p) |
| { |
| /* If they don't overlap, the result is false. If the second range |
| is a subset it is the result. Otherwise, the range is from the start |
| of the second to the end of the first. */ |
| if (no_overlap) |
| in_p = 0, low = high = 0; |
| else if (subset) |
| in_p = 1, low = low1, high = high1; |
| else |
| in_p = 1, low = low1, high = high0; |
| } |
| |
| else if (in0_p && ! in1_p) |
| { |
| /* If they don't overlap, the result is the first range. If the |
| second range is a subset of the first, we can't describe this as |
| a single range unless both ranges end at the same place. If both |
| ranges start in the same place, then the result is false. |
| Otherwise, we go from the start of the first range to just before |
| the start of the second. */ |
| if (no_overlap) |
| in_p = 1, low = low0, high = high0; |
| else if (subset |
| && integer_zerop (range_binop (EQ_EXPR, integer_type_node, |
| high0, 1, high1, 0))) |
| return 0; |
| else if (integer_onep (range_binop (EQ_EXPR, integer_type_node, |
| low0, 0, low1, 0))) |
| in_p = 0, low = high = 0; |
| else |
| { |
| in_p = 1, low = low0; |
| high = range_binop (MINUS_EXPR, NULL_TREE, low1, 0, |
| integer_one_node, 0); |
| } |
| } |
| |
| else if (! in0_p && in1_p) |
| { |
| /* If they don't overlap, the result is the second range. If the second |
| is a subset of the first, the result is false. Otherwise, |
| the range starts just after the first range and ends at the |
| end of the second. */ |
| if (no_overlap) |
| in_p = 1, low = low1, high = high1; |
| else if (subset) |
| in_p = 0, low = high = 0; |
| else |
| { |
| in_p = 1, high = high1; |
| low = range_binop (PLUS_EXPR, NULL_TREE, high0, 1, |
| integer_one_node, 0); |
| } |
| } |
| |
|