| /* RTL simplification functions for GNU compiler. |
| Copyright (C) 1987-2021 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "predict.h" |
| #include "memmodel.h" |
| #include "optabs.h" |
| #include "emit-rtl.h" |
| #include "recog.h" |
| #include "diagnostic-core.h" |
| #include "varasm.h" |
| #include "flags.h" |
| #include "selftest.h" |
| #include "selftest-rtl.h" |
| #include "rtx-vector-builder.h" |
| |
| /* Simplification and canonicalization of RTL. */ |
| |
| /* Much code operates on (low, high) pairs; the low value is an |
| unsigned wide int, the high value a signed wide int. We |
| occasionally need to sign extend from low to high as if low were a |
| signed wide int. */ |
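| /* For example, if LOW has its most significant bit set when viewed as a |
| signed HOST_WIDE_INT, the sign-extended high value is HOST_WIDE_INT_M1 |
| (all ones); otherwise it is HOST_WIDE_INT_0. */ |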
| #define HWI_SIGN_EXTEND(low) \ |
| ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0) |
| |
| static bool plus_minus_operand_p (const_rtx); |
| |
| /* Negate I, which satisfies poly_int_rtx_p. MODE is the mode of I. */ |
| |
| static rtx |
| neg_poly_int_rtx (machine_mode mode, const_rtx i) |
| { |
| return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode); |
| } |
| |
| /* Test whether expression X is an immediate constant that represents |
| the most significant bit of machine mode MODE. */ |
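| /* For example, for SImode this returns true only for an integer constant |
| whose low 32 bits are 0x80000000, such as (const_int -2147483648). */ |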
| |
| bool |
| mode_signbit_p (machine_mode mode, const_rtx x) |
| { |
| unsigned HOST_WIDE_INT val; |
| unsigned int width; |
| scalar_int_mode int_mode; |
| |
| if (!is_int_mode (mode, &int_mode)) |
| return false; |
| |
| width = GET_MODE_PRECISION (int_mode); |
| if (width == 0) |
| return false; |
| |
| if (width <= HOST_BITS_PER_WIDE_INT |
| && CONST_INT_P (x)) |
| val = INTVAL (x); |
| #if TARGET_SUPPORTS_WIDE_INT |
| else if (CONST_WIDE_INT_P (x)) |
| { |
| unsigned int i; |
| unsigned int elts = CONST_WIDE_INT_NUNITS (x); |
| if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) |
| return false; |
| for (i = 0; i < elts - 1; i++) |
| if (CONST_WIDE_INT_ELT (x, i) != 0) |
| return false; |
| val = CONST_WIDE_INT_ELT (x, elts - 1); |
| width %= HOST_BITS_PER_WIDE_INT; |
| if (width == 0) |
| width = HOST_BITS_PER_WIDE_INT; |
| } |
| #else |
| else if (width <= HOST_BITS_PER_DOUBLE_INT |
| && CONST_DOUBLE_AS_INT_P (x) |
| && CONST_DOUBLE_LOW (x) == 0) |
| { |
| val = CONST_DOUBLE_HIGH (x); |
| width -= HOST_BITS_PER_WIDE_INT; |
| } |
| #endif |
| else |
| /* X is not an integer constant. */ |
| return false; |
| |
| if (width < HOST_BITS_PER_WIDE_INT) |
| val &= (HOST_WIDE_INT_1U << width) - 1; |
| return val == (HOST_WIDE_INT_1U << (width - 1)); |
| } |
| |
| /* Test whether VAL is equal to the most significant bit of mode MODE |
| (after masking with the mode mask of MODE). Returns false if the |
| precision of MODE is too large to handle. */ |
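| /* For example, val_signbit_p (QImode, 0x80) and val_signbit_p (QImode, 0x180) |
| both return true, since 0x180 masked to QImode is 0x80, i.e. bit 7. */ |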
| |
| bool |
| val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val) |
| { |
| unsigned int width; |
| scalar_int_mode int_mode; |
| |
| if (!is_int_mode (mode, &int_mode)) |
| return false; |
| |
| width = GET_MODE_PRECISION (int_mode); |
| if (width == 0 || width > HOST_BITS_PER_WIDE_INT) |
| return false; |
| |
| val &= GET_MODE_MASK (int_mode); |
| return val == (HOST_WIDE_INT_1U << (width - 1)); |
| } |
| |
| /* Test whether the most significant bit of mode MODE is set in VAL. |
| Returns false if the precision of MODE is too large to handle. */ |
| bool |
| val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val) |
| { |
| unsigned int width; |
| |
| scalar_int_mode int_mode; |
| if (!is_int_mode (mode, &int_mode)) |
| return false; |
| |
| width = GET_MODE_PRECISION (int_mode); |
| if (width == 0 || width > HOST_BITS_PER_WIDE_INT) |
| return false; |
| |
| val &= HOST_WIDE_INT_1U << (width - 1); |
| return val != 0; |
| } |
| |
| /* Test whether the most significant bit of mode MODE is clear in VAL. |
| Returns false if the precision of MODE is too large to handle. */ |
| bool |
| val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val) |
| { |
| unsigned int width; |
| |
| scalar_int_mode int_mode; |
| if (!is_int_mode (mode, &int_mode)) |
| return false; |
| |
| width = GET_MODE_PRECISION (int_mode); |
| if (width == 0 || width > HOST_BITS_PER_WIDE_INT) |
| return false; |
| |
| val &= HOST_WIDE_INT_1U << (width - 1); |
| return val == 0; |
| } |
| |
| /* Make a binary operation by properly ordering the operands and |
| seeing if the expression folds. */ |
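| /* For example, simplify_gen_binary (PLUS, SImode, X, const0_rtx) returns |
| just X; if nothing folds, a constant first operand of a commutative code |
| is swapped into the second position before the new rtx is generated. */ |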
| |
| rtx |
| simplify_context::simplify_gen_binary (rtx_code code, machine_mode mode, |
| rtx op0, rtx op1) |
| { |
| rtx tem; |
| |
| /* If this simplifies, do it. */ |
| tem = simplify_binary_operation (code, mode, op0, op1); |
| if (tem) |
| return tem; |
| |
| /* Put complex operands first and constants second if commutative. */ |
| if (GET_RTX_CLASS (code) == RTX_COMM_ARITH |
| && swap_commutative_operands_p (op0, op1)) |
| std::swap (op0, op1); |
| |
| return gen_rtx_fmt_ee (code, mode, op0, op1); |
| } |
| |
| /* If X is a MEM referencing the constant pool, return the real value. |
| Otherwise return X. */ |
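| /* For example, if X is a DFmode MEM whose address is a constant-pool |
| SYMBOL_REF for (const_double:DF 1.0), the CONST_DOUBLE itself is returned. */ |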
| rtx |
| avoid_constant_pool_reference (rtx x) |
| { |
| rtx c, tmp, addr; |
| machine_mode cmode; |
| poly_int64 offset = 0; |
| |
| switch (GET_CODE (x)) |
| { |
| case MEM: |
| break; |
| |
| case FLOAT_EXTEND: |
| /* Handle float extensions of constant pool references. */ |
| tmp = XEXP (x, 0); |
| c = avoid_constant_pool_reference (tmp); |
| if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c)) |
| return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c), |
| GET_MODE (x)); |
| return x; |
| |
| default: |
| return x; |
| } |
| |
| if (GET_MODE (x) == BLKmode) |
| return x; |
| |
| addr = XEXP (x, 0); |
| |
| /* Call the target hook to avoid the effects of -fpic etc. */ |
| addr = targetm.delegitimize_address (addr); |
| |
| /* Split the address into a base and integer offset. */ |
| addr = strip_offset (addr, &offset); |
| |
| if (GET_CODE (addr) == LO_SUM) |
| addr = XEXP (addr, 1); |
| |
| /* If this is a constant pool reference, we can turn it into its |
| constant and hope that simplifications happen. */ |
| if (GET_CODE (addr) == SYMBOL_REF |
| && CONSTANT_POOL_ADDRESS_P (addr)) |
| { |
| c = get_pool_constant (addr); |
| cmode = get_pool_mode (addr); |
| |
| /* If we're accessing the constant in a different mode than it was |
| originally stored, attempt to fix that up via subreg simplifications. |
| If that fails we have no choice but to return the original memory. */ |
| if (known_eq (offset, 0) && cmode == GET_MODE (x)) |
| return c; |
| else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode))) |
| { |
| rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset); |
| if (tem && CONSTANT_P (tem)) |
| return tem; |
| } |
| } |
| |
| return x; |
| } |
| |
| /* Simplify a MEM based on its attributes. This is the default |
| delegitimize_address target hook, and it's recommended that every |
| overrider call it. */ |
| |
| rtx |
| delegitimize_mem_from_attrs (rtx x) |
| { |
| /* MEMs without MEM_OFFSETs may have been offset, so we can't just |
| use their base addresses as equivalent. */ |
| if (MEM_P (x) |
| && MEM_EXPR (x) |
| && MEM_OFFSET_KNOWN_P (x)) |
| { |
| tree decl = MEM_EXPR (x); |
| machine_mode mode = GET_MODE (x); |
| poly_int64 offset = 0; |
| |
| switch (TREE_CODE (decl)) |
| { |
| default: |
| decl = NULL; |
| break; |
| |
| case VAR_DECL: |
| break; |
| |
| case ARRAY_REF: |
| case ARRAY_RANGE_REF: |
| case COMPONENT_REF: |
| case BIT_FIELD_REF: |
| case REALPART_EXPR: |
| case IMAGPART_EXPR: |
| case VIEW_CONVERT_EXPR: |
| { |
| poly_int64 bitsize, bitpos, bytepos, toffset_val = 0; |
| tree toffset; |
| int unsignedp, reversep, volatilep = 0; |
| |
| decl |
| = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode, |
| &unsignedp, &reversep, &volatilep); |
| if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode)) |
| || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos) |
| || (toffset && !poly_int_tree_p (toffset, &toffset_val))) |
| decl = NULL; |
| else |
| offset += bytepos + toffset_val; |
| break; |
| } |
| } |
| |
| if (decl |
| && mode == GET_MODE (x) |
| && VAR_P (decl) |
| && (TREE_STATIC (decl) |
| || DECL_THREAD_LOCAL_P (decl)) |
| && DECL_RTL_SET_P (decl) |
| && MEM_P (DECL_RTL (decl))) |
| { |
| rtx newx; |
| |
| offset += MEM_OFFSET (x); |
| |
| newx = DECL_RTL (decl); |
| |
| if (MEM_P (newx)) |
| { |
| rtx n = XEXP (newx, 0), o = XEXP (x, 0); |
| poly_int64 n_offset, o_offset; |
| |
| /* Avoid creating a new MEM needlessly if we already had |
| the same address. We do if there's no OFFSET and the |
| old address X is identical to NEWX, or if X is of the |
| form (plus NEWX OFFSET), or the NEWX is of the form |
| (plus Y (const_int Z)) and X is that with the offset |
| added: (plus Y (const_int Z+OFFSET)). */ |
| n = strip_offset (n, &n_offset); |
| o = strip_offset (o, &o_offset); |
| if (!(known_eq (o_offset, n_offset + offset) |
| && rtx_equal_p (o, n))) |
| x = adjust_address_nv (newx, mode, offset); |
| } |
| else if (GET_MODE (x) == GET_MODE (newx) |
| && known_eq (offset, 0)) |
| x = newx; |
| } |
| } |
| |
| return x; |
| } |
| |
| /* Make a unary operation by first seeing if it folds and otherwise making |
| the specified operation. */ |
| |
| rtx |
| simplify_context::simplify_gen_unary (rtx_code code, machine_mode mode, rtx op, |
| machine_mode op_mode) |
| { |
| rtx tem; |
| |
| /* If this simplifies, use it. */ |
| if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0) |
| return tem; |
| |
| return gen_rtx_fmt_e (code, mode, op); |
| } |
| |
| /* Likewise for ternary operations. */ |
| |
| rtx |
| simplify_context::simplify_gen_ternary (rtx_code code, machine_mode mode, |
| machine_mode op0_mode, |
| rtx op0, rtx op1, rtx op2) |
| { |
| rtx tem; |
| |
| /* If this simplifies, use it. */ |
| if ((tem = simplify_ternary_operation (code, mode, op0_mode, |
| op0, op1, op2)) != 0) |
| return tem; |
| |
| return gen_rtx_fmt_eee (code, mode, op0, op1, op2); |
| } |
| |
| /* Likewise, for relational operations. |
| CMP_MODE specifies mode comparison is done in. */ |
| |
| rtx |
| simplify_context::simplify_gen_relational (rtx_code code, machine_mode mode, |
| machine_mode cmp_mode, |
| rtx op0, rtx op1) |
| { |
| rtx tem; |
| |
| if ((tem = simplify_relational_operation (code, mode, cmp_mode, |
| op0, op1)) != 0) |
| return tem; |
| |
| return gen_rtx_fmt_ee (code, mode, op0, op1); |
| } |
| |
| /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA) |
| and simplify the result. If FN is non-NULL, call this callback on each |
| X; if it returns non-NULL, replace X with its return value and simplify the |
| result. */ |
| |
| rtx |
| simplify_replace_fn_rtx (rtx x, const_rtx old_rtx, |
| rtx (*fn) (rtx, const_rtx, void *), void *data) |
| { |
| enum rtx_code code = GET_CODE (x); |
| machine_mode mode = GET_MODE (x); |
| machine_mode op_mode; |
| const char *fmt; |
| rtx op0, op1, op2, newx, op; |
| rtvec vec, newvec; |
| int i, j; |
| |
| if (__builtin_expect (fn != NULL, 0)) |
| { |
| newx = fn (x, old_rtx, data); |
| if (newx) |
| return newx; |
| } |
| else if (rtx_equal_p (x, old_rtx)) |
| return copy_rtx ((rtx) data); |
| |
| switch (GET_RTX_CLASS (code)) |
| { |
| case RTX_UNARY: |
| op0 = XEXP (x, 0); |
| op_mode = GET_MODE (op0); |
| op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data); |
| if (op0 == XEXP (x, 0)) |
| return x; |
| return simplify_gen_unary (code, mode, op0, op_mode); |
| |
| case RTX_BIN_ARITH: |
| case RTX_COMM_ARITH: |
| op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data); |
| op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data); |
| if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1)) |
| return x; |
| return simplify_gen_binary (code, mode, op0, op1); |
| |
| case RTX_COMPARE: |
| case RTX_COMM_COMPARE: |
| op0 = XEXP (x, 0); |
| op1 = XEXP (x, 1); |
| op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1); |
| op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data); |
| op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data); |
| if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1)) |
| return x; |
| return simplify_gen_relational (code, mode, op_mode, op0, op1); |
| |
| case RTX_TERNARY: |
| case RTX_BITFIELD_OPS: |
| op0 = XEXP (x, 0); |
| op_mode = GET_MODE (op0); |
| op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data); |
| op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data); |
| op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data); |
| if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2)) |
| return x; |
| if (op_mode == VOIDmode) |
| op_mode = GET_MODE (op0); |
| return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2); |
| |
| case RTX_EXTRA: |
| if (code == SUBREG) |
| { |
| op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data); |
| if (op0 == SUBREG_REG (x)) |
| return x; |
| op0 = simplify_gen_subreg (GET_MODE (x), op0, |
| GET_MODE (SUBREG_REG (x)), |
| SUBREG_BYTE (x)); |
| return op0 ? op0 : x; |
| } |
| break; |
| |
| case RTX_OBJ: |
| if (code == MEM) |
| { |
| op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data); |
| if (op0 == XEXP (x, 0)) |
| return x; |
| return replace_equiv_address_nv (x, op0); |
| } |
| else if (code == LO_SUM) |
| { |
| op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data); |
| op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data); |
| |
| /* (lo_sum (high x) y) -> y where x and y have the same base. */ |
| if (GET_CODE (op0) == HIGH) |
| { |
| rtx base0, base1, offset0, offset1; |
| split_const (XEXP (op0, 0), &base0, &offset0); |
| split_const (op1, &base1, &offset1); |
| if (rtx_equal_p (base0, base1)) |
| return op1; |
| } |
| |
| if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1)) |
| return x; |
| return gen_rtx_LO_SUM (mode, op0, op1); |
| } |
| break; |
| |
| default: |
| break; |
| } |
| |
| newx = x; |
| fmt = GET_RTX_FORMAT (code); |
| for (i = 0; fmt[i]; i++) |
| switch (fmt[i]) |
| { |
| case 'E': |
| vec = XVEC (x, i); |
| newvec = XVEC (newx, i); |
| for (j = 0; j < GET_NUM_ELEM (vec); j++) |
| { |
| op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j), |
| old_rtx, fn, data); |
| if (op != RTVEC_ELT (vec, j)) |
| { |
| if (newvec == vec) |
| { |
| newvec = shallow_copy_rtvec (vec); |
| if (x == newx) |
| newx = shallow_copy_rtx (x); |
| XVEC (newx, i) = newvec; |
| } |
| RTVEC_ELT (newvec, j) = op; |
| } |
| } |
| break; |
| |
| case 'e': |
| if (XEXP (x, i)) |
| { |
| op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data); |
| if (op != XEXP (x, i)) |
| { |
| if (x == newx) |
| newx = shallow_copy_rtx (x); |
| XEXP (newx, i) = op; |
| } |
| } |
| break; |
| } |
| return newx; |
| } |
| |
| /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the |
| resulting RTX. Return a new RTX which is as simplified as possible. */ |
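| /* For example, replacing (reg:SI 1) with (const_int 8) in |
| (plus:SI (reg:SI 1) (const_int 4)) simplifies to (const_int 12). */ |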
| |
| rtx |
| simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx) |
| { |
| return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx); |
| } |
| |
| /* Try to simplify a MODE truncation of OP, which has OP_MODE. |
| Only handle cases where the truncated value is inherently an rvalue. |
| |
| RTL provides two ways of truncating a value: |
| |
| 1. a lowpart subreg. This form is only a truncation when both |
| the outer and inner modes (here MODE and OP_MODE respectively) |
| are scalar integers, and only then when the subreg is used as |
| an rvalue. |
| |
| It is only valid to form such truncating subregs if the |
| truncation requires no action by the target. The onus for |
| proving this is on the creator of the subreg -- e.g. the |
| caller to simplify_subreg or simplify_gen_subreg -- and typically |
| involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode. |
| |
| 2. a TRUNCATE. This form handles both scalar and compound integers. |
| |
| The first form is preferred where valid. However, the TRUNCATE |
| handling in simplify_unary_operation turns the second form into the |
| first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow, |
| so it is generally safe to form rvalue truncations using: |
| |
| simplify_gen_unary (TRUNCATE, ...) |
| |
| and leave simplify_unary_operation to work out which representation |
| should be used. |
| |
| Because of the proof requirements on (1), simplify_truncation must |
| also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP, |
| regardless of whether the outer truncation came from a SUBREG or a |
| TRUNCATE. For example, if the caller has proven that an SImode |
| truncation of: |
| |
| (and:DI X Y) |
| |
| is a no-op and can be represented as a subreg, it does not follow |
| that SImode truncations of X and Y are also no-ops. On a target |
| like 64-bit MIPS that requires SImode values to be stored in |
| sign-extended form, an SImode truncation of: |
| |
| (and:DI (reg:DI X) (const_int 63)) |
| |
| is trivially a no-op because only the lower 6 bits can be set. |
| However, X is still an arbitrary 64-bit number and so we cannot |
| assume that truncating it too is a no-op. */ |
| |
| rtx |
| simplify_context::simplify_truncation (machine_mode mode, rtx op, |
| machine_mode op_mode) |
| { |
| unsigned int precision = GET_MODE_UNIT_PRECISION (mode); |
| unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode); |
| scalar_int_mode int_mode, int_op_mode, subreg_mode; |
| |
| gcc_assert (precision <= op_precision); |
| |
| /* Optimize truncations of zero and sign extended values. */ |
| if (GET_CODE (op) == ZERO_EXTEND |
| || GET_CODE (op) == SIGN_EXTEND) |
| { |
| /* There are three possibilities. If MODE is the same as the |
| origmode, we can omit both the extension and the subreg. |
| If MODE is not larger than the origmode, we can apply the |
| truncation without the extension. Finally, if the outermode |
| is larger than the origmode, we can just extend to the appropriate |
| mode. */ |
| machine_mode origmode = GET_MODE (XEXP (op, 0)); |
| if (mode == origmode) |
| return XEXP (op, 0); |
| else if (precision <= GET_MODE_UNIT_PRECISION (origmode)) |
| return simplify_gen_unary (TRUNCATE, mode, |
| XEXP (op, 0), origmode); |
| else |
| return simplify_gen_unary (GET_CODE (op), mode, |
| XEXP (op, 0), origmode); |
| } |
| |
| /* If the machine can perform operations in the truncated mode, distribute |
| the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into |
| (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */ |
| if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD) |
| && (GET_CODE (op) == PLUS |
| || GET_CODE (op) == MINUS |
| || GET_CODE (op) == MULT)) |
| { |
| rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode); |
| if (op0) |
| { |
| rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode); |
| if (op1) |
| return simplify_gen_binary (GET_CODE (op), mode, op0, op1); |
| } |
| } |
| |
| /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into |
| (ashiftrt:QI (x:QI) C), where C is a suitable small constant and |
| the outer subreg is effectively a truncation to the original mode. */ |
| if ((GET_CODE (op) == LSHIFTRT |
| || GET_CODE (op) == ASHIFTRT) |
| /* Ensure that OP_MODE is at least twice as wide as MODE |
| to avoid the possibility that an outer LSHIFTRT shifts by more |
| than the sign extension's sign_bit_copies and introduces zeros |
| into the high bits of the result. */ |
| && 2 * precision <= op_precision |
| && CONST_INT_P (XEXP (op, 1)) |
| && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND |
| && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode |
| && UINTVAL (XEXP (op, 1)) < precision) |
| return simplify_gen_binary (ASHIFTRT, mode, |
| XEXP (XEXP (op, 0), 0), XEXP (op, 1)); |
| |
| /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into |
| (lshiftrt:QI (x:QI) C), where C is a suitable small constant and |
| the outer subreg is effectively a truncation to the original mode. */ |
| if ((GET_CODE (op) == LSHIFTRT |
| || GET_CODE (op) == ASHIFTRT) |
| && CONST_INT_P (XEXP (op, 1)) |
| && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND |
| && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode |
| && UINTVAL (XEXP (op, 1)) < precision) |
| return simplify_gen_binary (LSHIFTRT, mode, |
| XEXP (XEXP (op, 0), 0), XEXP (op, 1)); |
| |
| /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into |
| (ashift:QI (x:QI) C), where C is a suitable small constant and |
| the outer subreg is effectively a truncation to the original mode. */ |
| if (GET_CODE (op) == ASHIFT |
| && CONST_INT_P (XEXP (op, 1)) |
| && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND |
| || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND) |
| && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode |
| && UINTVAL (XEXP (op, 1)) < precision) |
| return simplify_gen_binary (ASHIFT, mode, |
| XEXP (XEXP (op, 0), 0), XEXP (op, 1)); |
| |
| /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into |
| (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C |
| and C2. */ |
| if (GET_CODE (op) == AND |
| && (GET_CODE (XEXP (op, 0)) == LSHIFTRT |
| || GET_CODE (XEXP (op, 0)) == ASHIFTRT) |
| && CONST_INT_P (XEXP (XEXP (op, 0), 1)) |
| && CONST_INT_P (XEXP (op, 1))) |
| { |
| rtx op0 = (XEXP (XEXP (op, 0), 0)); |
| rtx shift_op = XEXP (XEXP (op, 0), 1); |
| rtx mask_op = XEXP (op, 1); |
| unsigned HOST_WIDE_INT shift = UINTVAL (shift_op); |
| unsigned HOST_WIDE_INT mask = UINTVAL (mask_op); |
| |
| if (shift < precision |
| /* If doing this transform works for an X with all bits set, |
| it works for any X. */ |
| && ((GET_MODE_MASK (mode) >> shift) & mask) |
| == ((GET_MODE_MASK (op_mode) >> shift) & mask) |
| && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode)) |
| && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op))) |
| { |
| mask_op = GEN_INT (trunc_int_for_mode (mask, mode)); |
| return simplify_gen_binary (AND, mode, op0, mask_op); |
| } |
| } |
| |
| /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into |
| (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without |
| changing len. */ |
| if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT) |
| && REG_P (XEXP (op, 0)) |
| && GET_MODE (XEXP (op, 0)) == GET_MODE (op) |
| && CONST_INT_P (XEXP (op, 1)) |
| && CONST_INT_P (XEXP (op, 2))) |
| { |
| rtx op0 = XEXP (op, 0); |
| unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1)); |
| unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2)); |
| if (BITS_BIG_ENDIAN && pos >= op_precision - precision) |
| { |
| op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0)); |
| if (op0) |
| { |
| pos -= op_precision - precision; |
| return simplify_gen_ternary (GET_CODE (op), mode, mode, op0, |
| XEXP (op, 1), GEN_INT (pos)); |
| } |
| } |
| else if (!BITS_BIG_ENDIAN && precision >= len + pos) |
| { |
| op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0)); |
| if (op0) |
| return simplify_gen_ternary (GET_CODE (op), mode, mode, op0, |
| XEXP (op, 1), XEXP (op, 2)); |
| } |
| } |
| |
| /* Recognize a word extraction from a multi-word subreg. */ |
| if ((GET_CODE (op) == LSHIFTRT |
| || GET_CODE (op) == ASHIFTRT) |
| && SCALAR_INT_MODE_P (mode) |
| && SCALAR_INT_MODE_P (op_mode) |
| && precision >= BITS_PER_WORD |
| && 2 * precision <= op_precision |
| && CONST_INT_P (XEXP (op, 1)) |
| && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0 |
| && UINTVAL (XEXP (op, 1)) < op_precision) |
| { |
| poly_int64 byte = subreg_lowpart_offset (mode, op_mode); |
| int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT; |
| return simplify_gen_subreg (mode, XEXP (op, 0), op_mode, |
| (WORDS_BIG_ENDIAN |
| ? byte - shifted_bytes |
| : byte + shifted_bytes)); |
| } |
| |
| /* If we have a TRUNCATE of a right shift of MEM, make a new MEM |
| and try replacing the TRUNCATE and shift with it. Don't do this |
| if the MEM has a mode-dependent address. */ |
| if ((GET_CODE (op) == LSHIFTRT |
| || GET_CODE (op) == ASHIFTRT) |
| && is_a <scalar_int_mode> (mode, &int_mode) |
| && is_a <scalar_int_mode> (op_mode, &int_op_mode) |
| && MEM_P (XEXP (op, 0)) |
| && CONST_INT_P (XEXP (op, 1)) |
| && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0 |
| && INTVAL (XEXP (op, 1)) > 0 |
| && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode) |
| && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0), |
| MEM_ADDR_SPACE (XEXP (op, 0))) |
| && ! MEM_VOLATILE_P (XEXP (op, 0)) |
| && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD |
| || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN)) |
| { |
| poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode); |
| int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT; |
| return adjust_address_nv (XEXP (op, 0), int_mode, |
| (WORDS_BIG_ENDIAN |
| ? byte - shifted_bytes |
| : byte + shifted_bytes)); |
| } |
| |
| /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is |
| (OP:SI foo:SI) if OP is NEG or ABS. */ |
| if ((GET_CODE (op) == ABS |
| || GET_CODE (op) == NEG) |
| && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND |
| || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND) |
| && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode) |
| return simplify_gen_unary (GET_CODE (op), mode, |
| XEXP (XEXP (op, 0), 0), mode); |
| |
| /* (truncate:A (subreg:B (truncate:C X) 0)) is |
| (truncate:A X). */ |
| if (GET_CODE (op) == SUBREG |
| && is_a <scalar_int_mode> (mode, &int_mode) |
| && SCALAR_INT_MODE_P (op_mode) |
| && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode) |
| && GET_CODE (SUBREG_REG (op)) == TRUNCATE |
| && subreg_lowpart_p (op)) |
| { |
| rtx inner = XEXP (SUBREG_REG (op), 0); |
| if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode)) |
| return simplify_gen_unary (TRUNCATE, int_mode, inner, |
| GET_MODE (inner)); |
| else |
| /* If subreg above is paradoxical and C is narrower |
| than A, return (subreg:A (truncate:C X) 0). */ |
| return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0); |
| } |
| |
| /* (truncate:A (truncate:B X)) is (truncate:A X). */ |
| if (GET_CODE (op) == TRUNCATE) |
| return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), |
| GET_MODE (XEXP (op, 0))); |
| |
| /* (truncate:A (ior X C)) is (const_int -1) if C, truncated to mode A, |
| is already all ones. */ |
| if (GET_CODE (op) == IOR |
| && SCALAR_INT_MODE_P (mode) |
| && SCALAR_INT_MODE_P (op_mode) |
| && CONST_INT_P (XEXP (op, 1)) |
| && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1) |
| return constm1_rtx; |
| |
| return NULL_RTX; |
| } |
| |
| /* Try to simplify a unary operation CODE whose output mode is to be |
| MODE with input operand OP whose mode was originally OP_MODE. |
| Return zero if no simplification can be made. */ |
| rtx |
| simplify_context::simplify_unary_operation (rtx_code code, machine_mode mode, |
| rtx op, machine_mode op_mode) |
| { |
| rtx trueop, tem; |
| |
| trueop = avoid_constant_pool_reference (op); |
| |
| tem = simplify_const_unary_operation (code, mode, trueop, op_mode); |
| if (tem) |
| return tem; |
| |
| return simplify_unary_operation_1 (code, mode, op); |
| } |
| |
| /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known |
| to be exact. */ |
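| /* For example, (float:SF (reg:SI R)) is exact if nonzero_bits and |
| num_sign_bit_copies show that R has at most 24 significant bits, the |
| size of the SFmode significand for IEEE single precision. */ |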
| |
| static bool |
| exact_int_to_float_conversion_p (const_rtx op) |
| { |
| int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op))); |
| machine_mode op0_mode = GET_MODE (XEXP (op, 0)); |
| /* Constants shouldn't reach here. */ |
| gcc_assert (op0_mode != VOIDmode); |
| int in_prec = GET_MODE_UNIT_PRECISION (op0_mode); |
| int in_bits = in_prec; |
| if (HWI_COMPUTABLE_MODE_P (op0_mode)) |
| { |
| unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode); |
| if (GET_CODE (op) == FLOAT) |
| in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode); |
| else if (GET_CODE (op) == UNSIGNED_FLOAT) |
| in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED); |
| else |
| gcc_unreachable (); |
| in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec)); |
| } |
| return in_bits <= out_bits; |
| } |
| |
| /* Perform some simplifications we can do even if the operands |
| aren't constant. */ |
| rtx |
| simplify_context::simplify_unary_operation_1 (rtx_code code, machine_mode mode, |
| rtx op) |
| { |
| enum rtx_code reversed; |
| rtx temp, elt, base, step; |
| scalar_int_mode inner, int_mode, op_mode, op0_mode; |
| |
| switch (code) |
| { |
| case NOT: |
| /* (not (not X)) == X. */ |
| if (GET_CODE (op) == NOT) |
| return XEXP (op, 0); |
| |
| /* (not (eq X Y)) == (ne X Y), etc. if the mode is BImode or if |
| STORE_FLAG_VALUE is -1, i.e. the result of the comparison is all ones. */ |
| if (COMPARISON_P (op) |
| && (mode == BImode || STORE_FLAG_VALUE == -1) |
| && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN)) |
| return simplify_gen_relational (reversed, mode, VOIDmode, |
| XEXP (op, 0), XEXP (op, 1)); |
| |
| /* (not (plus X -1)) can become (neg X). */ |
| if (GET_CODE (op) == PLUS |
| && XEXP (op, 1) == constm1_rtx) |
| return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode); |
| |
| /* Similarly, (not (neg X)) is (plus X -1). Only do this for |
| modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT |
| and MODE_VECTOR_INT. */ |
| if (GET_CODE (op) == NEG && CONSTM1_RTX (mode)) |
| return simplify_gen_binary (PLUS, mode, XEXP (op, 0), |
| CONSTM1_RTX (mode)); |
| |
| /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */ |
| if (GET_CODE (op) == XOR |
| && CONST_INT_P (XEXP (op, 1)) |
| && (temp = simplify_unary_operation (NOT, mode, |
| XEXP (op, 1), mode)) != 0) |
| return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp); |
| |
| /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */ |
| if (GET_CODE (op) == PLUS |
| && CONST_INT_P (XEXP (op, 1)) |
| && mode_signbit_p (mode, XEXP (op, 1)) |
| && (temp = simplify_unary_operation (NOT, mode, |
| XEXP (op, 1), mode)) != 0) |
| return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp); |
| |
| |
| /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for |
| operands other than 1, but that is not valid. We could do a |
| similar simplification for (not (lshiftrt C X)) where C is |
| just the sign bit, but this doesn't seem common enough to |
| bother with. */ |
| if (GET_CODE (op) == ASHIFT |
| && XEXP (op, 0) == const1_rtx) |
| { |
| temp = simplify_gen_unary (NOT, mode, const1_rtx, mode); |
| return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1)); |
| } |
| |
| /* (not (ashiftrt foo C)) where C is the number of bits in FOO |
| minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1, |
| so we can perform the above simplification. */ |
| if (STORE_FLAG_VALUE == -1 |
| && is_a <scalar_int_mode> (mode, &int_mode) |
| && GET_CODE (op) == ASHIFTRT |
| && CONST_INT_P (XEXP (op, 1)) |
| && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1) |
| return simplify_gen_relational (GE, int_mode, VOIDmode, |
| XEXP (op, 0), const0_rtx); |
| |
| |
| if (partial_subreg_p (op) |
| && subreg_lowpart_p (op) |
| && GET_CODE (SUBREG_REG (op)) == ASHIFT |
| && XEXP (SUBREG_REG (op), 0) == const1_rtx) |
| { |
| machine_mode inner_mode = GET_MODE (SUBREG_REG (op)); |
| rtx x; |
| |
| x = gen_rtx_ROTATE (inner_mode, |
| simplify_gen_unary (NOT, inner_mode, const1_rtx, |
| inner_mode), |
| XEXP (SUBREG_REG (op), 1)); |
| temp = rtl_hooks.gen_lowpart_no_emit (mode, x); |
| if (temp) |
| return temp; |
| } |
| |
| /* Apply De Morgan's laws to reduce number of patterns for machines |
| with negating logical insns (and-not, nand, etc.). If result has |
| only one NOT, put it first, since that is how the patterns are |
| coded. */ |
| if (GET_CODE (op) == IOR || GET_CODE (op) == AND) |
| { |
| rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1); |
| machine_mode op_mode; |
| |
| op_mode = GET_MODE (in1); |
| in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode); |
| |
| op_mode = GET_MODE (in2); |
| if (op_mode == VOIDmode) |
| op_mode = mode; |
| in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode); |
| |
| if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT) |
| std::swap (in1, in2); |
| |
| return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR, |
| mode, in1, in2); |
| } |
| |
| /* (not (bswap x)) -> (bswap (not x)). */ |
| if (GET_CODE (op) == BSWAP) |
| { |
| rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode); |
| return simplify_gen_unary (BSWAP, mode, x, mode); |
| } |
| break; |
| |
| case NEG: |
| /* (neg (neg X)) == X. */ |
| if (GET_CODE (op) == NEG) |
| return XEXP (op, 0); |
| |
| /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y. |
| If the comparison is not reversible, use |
| x ? y : (neg y). */ |
| if (GET_CODE (op) == IF_THEN_ELSE) |
| { |
| rtx cond = XEXP (op, 0); |
| rtx true_rtx = XEXP (op, 1); |
| rtx false_rtx = XEXP (op, 2); |
| |
| if ((GET_CODE (true_rtx) == NEG |
| && rtx_equal_p (XEXP (true_rtx, 0), false_rtx)) |
| || (GET_CODE (false_rtx) == NEG |
| && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))) |
| { |
| if (reversed_comparison_code (cond, NULL) != UNKNOWN) |
| temp = reversed_comparison (cond, mode); |
| else |
| { |
| temp = cond; |
| std::swap (true_rtx, false_rtx); |
| } |
| return simplify_gen_ternary (IF_THEN_ELSE, mode, |
| mode, temp, true_rtx, false_rtx); |
| } |
| } |
| |
| /* (neg (plus X 1)) can become (not X). */ |
| if (GET_CODE (op) == PLUS |
| && XEXP (op, 1) == const1_rtx) |
| return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode); |
| |
| /* Similarly, (neg (not X)) is (plus X 1). */ |
| if (GET_CODE (op) == NOT) |
| return simplify_gen_binary (PLUS, mode, XEXP (op, 0), |
| CONST1_RTX (mode)); |
| |
| /* (neg (minus X Y)) can become (minus Y X). This transformation |
| isn't safe for modes with signed zeros, since if X and Y are |
| both +0, (minus Y X) is +0 whereas (neg (minus X Y)) is -0. If the |
| rounding mode is towards +infinity (or -infinity) then the two |
| expressions will be rounded differently. */ |
| if (GET_CODE (op) == MINUS |
| && !HONOR_SIGNED_ZEROS (mode) |
| && !HONOR_SIGN_DEPENDENT_ROUNDING (mode)) |
| return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0)); |
| |
| if (GET_CODE (op) == PLUS |
| && !HONOR_SIGNED_ZEROS (mode) |
| && !HONOR_SIGN_DEPENDENT_ROUNDING (mode)) |
| { |
| /* (neg (plus A C)) is simplified to (minus -C A). */ |
| if (CONST_SCALAR_INT_P (XEXP (op, 1)) |
| || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1))) |
| { |
| temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode); |
| if (temp) |
| return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0)); |
| } |
| |
| /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */ |
| temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode); |
| return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1)); |
| } |
| |
| /* (neg (mult A B)) becomes (mult A (neg B)). |
| This works even for floating-point values. */ |
| if (GET_CODE (op) == MULT |
| && !HONOR_SIGN_DEPENDENT_ROUNDING (mode)) |
| { |
| temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode); |
| return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp); |
| } |
| |
| /* NEG commutes with ASHIFT since it is multiplication. Only do |
| this if we can then eliminate the NEG (e.g., if the operand |
| is a constant). */ |
| if (GET_CODE (op) == ASHIFT) |
| { |
| temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode); |
| if (temp) |
| return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1)); |
| } |
| |
| /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when |
| C is equal to the width of MODE minus 1. */ |
| if (GET_CODE (op) == ASHIFTRT |
| && CONST_INT_P (XEXP (op, 1)) |
| && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1) |
| return simplify_gen_binary (LSHIFTRT, mode, |
| XEXP (op, 0), XEXP (op, 1)); |
| |
| /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when |
| C is equal to the width of MODE minus 1. */ |
| if (GET_CODE (op) == LSHIFTRT |
| && CONST_INT_P (XEXP (op, 1)) |
| && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1) |
| return simplify_gen_binary (ASHIFTRT, mode, |
| XEXP (op, 0), XEXP (op, 1)); |
| |
| /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */ |
| if (GET_CODE (op) == XOR |
| && XEXP (op, 1) == const1_rtx |
| && nonzero_bits (XEXP (op, 0), mode) == 1) |
| return plus_constant (mode, XEXP (op, 0), -1); |
| |
| /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */ |
| /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */ |
| if (GET_CODE (op) == LT |
| && XEXP (op, 1) == const0_rtx |
| && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner)) |
| { |
| int_mode = as_a <scalar_int_mode> (mode); |
| int isize = GET_MODE_PRECISION (inner); |
| if (STORE_FLAG_VALUE == 1) |
| { |
| temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0), |
| gen_int_shift_amount (inner, |
| isize - 1)); |
| if (int_mode == inner) |
| return temp; |
| if (GET_MODE_PRECISION (int_mode) > isize) |
| return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner); |
| return simplify_gen_unary (TRUNCATE, int_mode, temp, inner); |
| } |
| else if (STORE_FLAG_VALUE == -1) |
| { |
| temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0), |
| gen_int_shift_amount (inner, |
| isize - 1)); |
| if (int_mode == inner) |
| return temp; |
| if (GET_MODE_PRECISION (int_mode) > isize) |
| return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner); |
| return simplify_gen_unary (TRUNCATE, int_mode, temp, inner); |
| } |
| } |
| |
| if (vec_series_p (op, &base, &step)) |
| { |
| /* Only create a new series if we can simplify both parts. In other |
| cases this isn't really a simplification, and it's not necessarily |
| a win to replace a vector operation with a scalar operation. */ |
| scalar_mode inner_mode = GET_MODE_INNER (mode); |
| base = simplify_unary_operation (NEG, inner_mode, base, inner_mode); |
| if (base) |
| { |
| step = simplify_unary_operation (NEG, inner_mode, |
| step, inner_mode); |
| if (step) |
| return gen_vec_series (mode, base, step); |
| } |
| } |
| break; |
| |
| case TRUNCATE: |
| /* Don't optimize (lshiftrt (mult ...)) as it would interfere |
| with the umulXi3_highpart patterns. */ |
| if (GET_CODE (op) == LSHIFTRT |
| && GET_CODE (XEXP (op, 0)) == MULT) |
| break; |
| |
| if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT) |
| { |
| if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))) |
| { |
| temp = rtl_hooks.gen_lowpart_no_emit (mode, op); |
| if (temp) |
| return temp; |
| } |
| /* We can't handle truncation to a partial integer mode here |
| because we don't know the real bitsize of the partial |
| integer mode. */ |
| break; |
| } |
| |
| if (GET_MODE (op) != VOIDmode) |
| { |
| temp = simplify_truncation (mode, op, GET_MODE (op)); |
| if (temp) |
| return temp; |
| } |
| |
| /* If we know that the value is already truncated, we can |
| replace the TRUNCATE with a SUBREG. */ |
| if (known_eq (GET_MODE_NUNITS (mode), 1) |
| && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)) |
| || truncated_to_mode (mode, op))) |
| { |
| temp = rtl_hooks.gen_lowpart_no_emit (mode, op); |
| if (temp) |
| return temp; |
| } |
| |
| /* A truncate of a comparison can be replaced with a subreg if |
| STORE_FLAG_VALUE permits. This is like the previous test, |
| but it works even if the comparison is done in a mode larger |
| than HOST_BITS_PER_WIDE_INT. */ |
| if (HWI_COMPUTABLE_MODE_P (mode) |
| && COMPARISON_P (op) |
| && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0) |
| { |
| temp = rtl_hooks.gen_lowpart_no_emit (mode, op); |
| if (temp) |
| return temp; |
| } |
| |
| /* A truncate of a memory is just loading the low part of the memory |
| if we are not changing the meaning of the address. */ |
| if (GET_CODE (op) == MEM |
| && !VECTOR_MODE_P (mode) |
| && !MEM_VOLATILE_P (op) |
| && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))) |
| { |
| temp = rtl_hooks.gen_lowpart_no_emit (mode, op); |
| if (temp) |
| return temp; |
| } |
| |
| break; |
| |
| case FLOAT_TRUNCATE: |
| if (DECIMAL_FLOAT_MODE_P (mode)) |
| break; |
| |
| /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */ |
| if (GET_CODE (op) == FLOAT_EXTEND |
| && GET_MODE (XEXP (op, 0)) == mode) |
| return XEXP (op, 0); |
| |
| /* (float_truncate:SF (float_truncate:DF foo:XF)) |
| = (float_truncate:SF foo:XF). |
| This may eliminate double rounding, so it is unsafe. |
| |
| (float_truncate:SF (float_extend:XF foo:DF)) |
| = (float_truncate:SF foo:DF). |
| |
| (float_truncate:DF (float_extend:XF foo:SF)) |
| = (float_extend:DF foo:SF). */ |
| if ((GET_CODE (op) == FLOAT_TRUNCATE |
| && flag_unsafe_math_optimizations) |
| || GET_CODE (op) == FLOAT_EXTEND) |
| return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0))) |
| > GET_MODE_UNIT_SIZE (mode) |
| ? FLOAT_TRUNCATE : FLOAT_EXTEND, |
| mode, |
| XEXP (op, 0), mode); |
| |
| /* (float_truncate (float x)) is (float x) */ |
| if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT) |
| && (flag_unsafe_math_optimizations |
| || exact_int_to_float_conversion_p (op))) |
| return simplify_gen_unary (GET_CODE (op), mode, |
| XEXP (op, 0), |
| GET_MODE (XEXP (op, 0))); |
| |
| /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is |
| (OP:SF foo:SF) if OP is NEG or ABS. */ |
| if ((GET_CODE (op) == ABS |
| || GET_CODE (op) == NEG) |
| && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND |
| && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode) |
| return simplify_gen_unary (GET_CODE (op), mode, |
| XEXP (XEXP (op, 0), 0), mode); |
| |
| /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0)) |
| is (float_truncate:SF x). */ |
| if (GET_CODE (op) == SUBREG |
| && subreg_lowpart_p (op) |
| && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE) |
| return SUBREG_REG (op); |
| break; |
| |
| case FLOAT_EXTEND: |
| if (DECIMAL_FLOAT_MODE_P (mode)) |
| break; |
| |
| /* (float_extend (float_extend x)) is (float_extend x) |
| |
| (float_extend (float x)) is (float x) assuming that double |
| rounding can't happen. |
| */ |
| if (GET_CODE (op) == FLOAT_EXTEND |
| || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT) |
| && exact_int_to_float_conversion_p (op))) |
| return simplify_gen_unary (GET_CODE (op), mode, |
| XEXP (op, 0), |
| GET_MODE (XEXP (op, 0))); |
| |
| break; |
| |
| case ABS: |
| /* (abs (neg <foo>)) -> (abs <foo>) */ |
| if (GET_CODE (op) == NEG) |
| return simplify_gen_unary (ABS, mode, XEXP (op, 0), |
| GET_MODE (XEXP (op, 0))); |
| |
| /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS), |
| do nothing. */ |
| if (GET_MODE (op) == VOIDmode) |
| break; |
| |
| /* If operand is something known to be positive, ignore the ABS. */ |
| if (GET_CODE (op) == FFS || GET_CODE (op) == ABS |
| || val_signbit_known_clear_p (GET_MODE (op), |
| nonzero_bits (op, GET_MODE (op)))) |
| return op; |
| |
| /* If operand is known to be only -1 or 0, convert ABS to NEG. */ |
| if (is_a <scalar_int_mode> (mode, &int_mode) |
| && (num_sign_bit_copies (op, int_mode) |
| == GET_MODE_PRECISION (int_mode))) |
| return gen_rtx_NEG (int_mode, op); |
| |
| break; |
| |
| case FFS: |
| /* (ffs (*_extend <X>)) = (ffs <X>) */ |
| if (GET_CODE (op) == SIGN_EXTEND |
| || GET_CODE (op) == ZERO_EXTEND) |
| return simplify_gen_unary (FFS, mode, XEXP (op, 0), |
| GET_MODE (XEXP (op, 0))); |
| break; |
| |
| case POPCOUNT: |
| switch (GET_CODE (op)) |
| { |
| case BSWAP: |
| case ZERO_EXTEND: |
| /* (popcount (zero_extend <X>)) = (popcount <X>) */ |
| return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0), |
| GET_MODE (XEXP (op, 0))); |
| |
| case ROTATE: |
| case ROTATERT: |
| /* Rotations don't affect popcount. */ |
| if (!side_effects_p (XEXP (op, 1))) |
| return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0), |
| GET_MODE (XEXP (op, 0))); |
| break; |
| |
| default: |
| break; |
| } |
| break; |
| |
| case PARITY: |
| switch (GET_CODE (op)) |
| { |
| case NOT: |
| case BSWAP: |
| case ZERO_EXTEND: |
| case SIGN_EXTEND: |
| return simplify_gen_unary (PARITY, mode, XEXP (op, 0), |
| GET_MODE (XEXP (op, 0))); |
| |
| case ROTATE: |
| case ROTATERT: |
| /* Rotations don't affect parity. */ |
| if (!side_effects_p (XEXP (op, 1))) |
| return simplify_gen_unary (PARITY, mode, XEXP (op, 0), |
| GET_MODE (XEXP (op, 0))); |
| break; |
| |
| case PARITY: |
| /* (parity (parity x)) -> parity (x). */ |
| return op; |
| |
| default: |
| break; |
| } |
| break; |
| |
| case BSWAP: |
| /* (bswap (bswap x)) -> x. */ |
| if (GET_CODE (op) == BSWAP) |
| return XEXP (op, 0); |
| break; |
| |
| case FLOAT: |
| /* (float (sign_extend <X>)) = (float <X>). */ |
| if (GET_CODE (op) == SIGN_EXTEND) |
| return simplify_gen_unary (FLOAT, mode, XEXP (op, 0), |
| GET_MODE (XEXP (op, 0))); |
| break; |
| |
| case SIGN_EXTEND: |
| /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2)))) |
| becomes just the MINUS if its mode is MODE. This allows |
| folding switch statements on machines using casesi (such as |
| the VAX). */ |
| if (GET_CODE (op) == TRUNCATE |
| && GET_MODE (XEXP (op, 0)) == mode |
| && GET_CODE (XEXP (op, 0)) == MINUS |
| && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF |
| && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF) |
| return XEXP (op, 0); |
| |
| /* Extending a widening multiplication should be canonicalized to |
| a wider widening multiplication. */ |
| if (GET_CODE (op) == MULT) |
| { |
| rtx lhs = XEXP (op, 0); |
| rtx rhs = XEXP (op, 1); |
| enum rtx_code lcode = GET_CODE (lhs); |
| enum rtx_code rcode = GET_CODE (rhs); |
| |
| /* Widening multiplies usually extend both operands, but sometimes |
| they use a shift to extract a portion of a register. */ |
| if ((lcode == SIGN_EXTEND |
| || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1)))) |
| && (rcode == SIGN_EXTEND |
| || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1))))) |
| { |
| machine_mode lmode = GET_MODE (lhs); |
| machine_mode rmode = GET_MODE (rhs); |
| int bits; |
| |
| if (lcode == ASHIFTRT) |
| /* Number of bits not shifted off the end. */ |
| bits = (GET_MODE_UNIT_PRECISION (lmode) |
| - INTVAL (XEXP (lhs, 1))); |
| else /* lcode == SIGN_EXTEND */ |
| /* Size of inner mode. */ |
| bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0))); |
| |
| if (rcode == ASHIFTRT) |
| bits += (GET_MODE_UNIT_PRECISION (rmode) |
| - INTVAL (XEXP (rhs, 1))); |
| else /* rcode == SIGN_EXTEND */ |
| bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0))); |
| |
| /* We can only widen multiplies if the result is mathematically |
| equivalent, i.e. if overflow was impossible. */ |
| if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op))) |
| return simplify_gen_binary |
| (MULT, mode, |
| simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode), |
| simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode)); |
| } |
| } |
| |
| /* Check for a sign extension of a subreg of a promoted |
| variable, where the promotion is sign-extended, and the |
| target mode is the same as the variable's promotion. */ |
| if (GET_CODE (op) == SUBREG |
| && SUBREG_PROMOTED_VAR_P (op) |
| && SUBREG_PROMOTED_SIGNED_P (op) |
| && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op)))) |
| { |
| temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op)); |
| if (temp) |
| return temp; |
| } |
| |
| /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>). |
| (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */ |
| if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND) |
| { |
| gcc_assert (GET_MODE_UNIT_PRECISION (mode) |
| > GET_MODE_UNIT_PRECISION (GET_MODE (op))); |
| return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0), |
| GET_MODE (XEXP (op, 0))); |
| } |
| |
| /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I))) |
| is (sign_extend:M (subreg:O <X>)) if there is mode with |
| GET_MODE_BITSIZE (N) - I bits. |
| (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I))) |
| is similarly (zero_extend:M (subreg:O <X>)). */ |
| if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT) |
| && GET_CODE (XEXP (op, 0)) == ASHIFT |
| && is_a <scalar_int_mode> (mode, &int_mode) |
| && CONST_INT_P (XEXP (op, 1)) |
| && XEXP (XEXP (op, 0), 1) == XEXP (op, 1) |
| && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)), |
| GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1)))) |
| { |
| scalar_int_mode tmode; |
| gcc_assert (GET_MODE_PRECISION (int_mode) |
| > GET_MODE_PRECISION (op_mode)); |
| if (int_mode_for_size (GET_MODE_PRECISION (op_mode) |
| - INTVAL (XEXP (op, 1)), 1).exists (&tmode)) |
| { |
| rtx inner = |
| rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0)); |
| if (inner) |
| return simplify_gen_unary (GET_CODE (op) == ASHIFTRT |
| ? SIGN_EXTEND : ZERO_EXTEND, |
| int_mode, inner, tmode); |
| } |
| } |
| |
| /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as |
| (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */ |
| if (GET_CODE (op) == LSHIFTRT |
| && CONST_INT_P (XEXP (op, 1)) |
| && XEXP (op, 1) != const0_rtx) |
| return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op)); |
| |
| /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where |
| I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to |
| (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and |
| (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than |
| O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is |
| wider than O. */ |
| if (GET_CODE (op) == TRUNCATE |
| && GET_CODE (XEXP (op, 0)) == LSHIFTRT |
| && CONST_INT_P (XEXP (XEXP (op, 0), 1))) |
| { |
| scalar_int_mode m_mode, n_mode, o_mode; |
| rtx old_shift = XEXP (op, 0); |
| if (is_a <scalar_int_mode> (mode, &m_mode) |
| && is_a <scalar_int_mode> (GET_MODE (op), &n_mode) |
| && is_a <scalar_int_mode> (GET_MODE (old_shift), &o_mode) |
| && GET_MODE_PRECISION (o_mode) - GET_MODE_PRECISION (n_mode) |
| == INTVAL (XEXP (old_shift, 1))) |
| { |
| rtx new_shift = simplify_gen_binary (ASHIFTRT, |
| GET_MODE (old_shift), |
| XEXP (old_shift, 0), |
| XEXP (old_shift, 1)); |
| if (GET_MODE_PRECISION (m_mode) > GET_MODE_PRECISION (o_mode)) |
| return simplify_gen_unary (SIGN_EXTEND, mode, new_shift, |
| GET_MODE (new_shift)); |
| if (mode != GET_MODE (new_shift)) |
| return simplify_gen_unary (TRUNCATE, mode, new_shift, |
| GET_MODE (new_shift)); |
| return new_shift; |
| } |
| } |
| |
| #if defined(POINTERS_EXTEND_UNSIGNED) |
| /* As we do not know which address space the pointer is referring to, |
| we can do this only if the target does not support different pointer |
| or address modes depending on the address space. */ |
| if (target_default_pointer_address_modes_p () |
| && ! POINTERS_EXTEND_UNSIGNED |
| && mode == Pmode && GET_MODE (op) == ptr_mode |
| && (CONSTANT_P (op) |
| || (GET_CODE (op) == SUBREG |
| && REG_P (SUBREG_REG (op)) |
| && REG_POINTER (SUBREG_REG (op)) |
| && GET_MODE (SUBREG_REG (op)) == Pmode)) |
| && !targetm.have_ptr_extend ()) |
| { |
| temp |
| = convert_memory_address_addr_space_1 (Pmode, op, |
| ADDR_SPACE_GENERIC, false, |
| true); |
| if (temp) |
| return temp; |
| } |
| #endif |
| break; |
| |
| case ZERO_EXTEND: |
| /* Check for a zero extension of a subreg of a promoted |
| variable, where the promotion is zero-extended, and the |
| target mode is the same as the variable's promotion. */ |
| if (GET_CODE (op) == SUBREG |
| && SUBREG_PROMOTED_VAR_P (op) |
| && SUBREG_PROMOTED_UNSIGNED_P (op) |
| && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op)))) |
| { |
| temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op)); |
| if (temp) |
| return temp; |
| } |
| |
| /* Extending a widening multiplication should be canonicalized to |
| a wider widening multiplication. */ |
| if (GET_CODE (op) == MULT) |
| { |
| rtx lhs = XEXP (op, 0); |
| rtx rhs = XEXP (op, 1); |
| enum rtx_code lcode = GET_CODE (lhs); |
| enum rtx_code rcode = GET_CODE (rhs); |
| |
| /* Widening multiplies usually extend both operands, but sometimes |
| they use a shift to extract a portion of a register. */ |
| if ((lcode == ZERO_EXTEND |
| || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1)))) |
| && (rcode == ZERO_EXTEND |
| || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1))))) |
| { |
| machine_mode lmode = GET_MODE (lhs); |
| machine_mode rmode = GET_MODE (rhs); |
| int bits; |
| |
| if (lcode == LSHIFTRT) |
| /* Number of bits not shifted off the end. */ |
| bits = (GET_MODE_UNIT_PRECISION (lmode) |
| - INTVAL (XEXP (lhs, 1))); |
| else /* lcode == ZERO_EXTEND */ |
| /* Size of inner mode. */ |
| bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0))); |
| |
| if (rcode == LSHIFTRT) |
| bits += (GET_MODE_UNIT_PRECISION (rmode) |
| - INTVAL (XEXP (rhs, 1))); |
| else /* rcode == ZERO_EXTEND */ |
| bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0))); |
| |
| /* We can only widen multiplies if the result is mathematically |
| equivalent, i.e. if overflow was impossible. */ |
| if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op))) |
| return simplify_gen_binary |
| (MULT, mode, |
| simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode), |
| simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode)); |
| } |
| } |
| |
| /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */ |
| if (GET_CODE (op) == ZERO_EXTEND) |
| return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0), |
| GET_MODE (XEXP (op, 0))); |
| |
| /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I))) |
| is (zero_extend:M (subreg:O <X>)) if there is mode with |
| GET_MODE_PRECISION (N) - I bits. */ |
| if (GET_CODE (op) == LSHIFTRT |
| && GET_CODE (XEXP (op, 0)) == ASHIFT |
| && is_a <scalar_int_mode> (mode, &int_mode) |
| && CONST_INT_P (XEXP (op, 1)) |
| && XEXP (XEXP (op, 0), 1) == XEXP (op, 1) |
| && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)), |
| GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1)))) |
| { |
| scalar_int_mode tmode; |
| if (int_mode_for_size (GET_MODE_PRECISION (op_mode) |
| - INTVAL (XEXP (op, 1)), 1).exists (&tmode)) |
| { |
| rtx inner = |
| rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0)); |
| if (inner) |
| return simplify_gen_unary (ZERO_EXTEND, int_mode, |
| inner, tmode); |
| } |
| } |
| |
| /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or |
| (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside |
| of mode N. E.g. |
| (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is |
| (and:SI (reg:SI) (const_int 63)). */ |
| if (partial_subreg_p (op) |
| && is_a <scalar_int_mode> (mode, &int_mode) |
| && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode) |
| && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT |
| && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode) |
| && subreg_lowpart_p (op) |
| && (nonzero_bits (SUBREG_REG (op), op0_mode) |
| & ~GET_MODE_MASK (GET_MODE (op))) == 0) |
| { |
| if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode)) |
| return SUBREG_REG (op); |
| return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op), |
| op0_mode); |
| } |
| |
| #if defined(POINTERS_EXTEND_UNSIGNED) |
| /* As we do not know which address space the pointer is referring to, |
| we can do this only if the target does not support different pointer |
| or address modes depending on the address space. */ |
| if (target_default_pointer_address_modes_p () |
| && POINTERS_EXTEND_UNSIGNED > 0 |
| && mode == Pmode && GET_MODE (op) == ptr_mode |
| && (CONSTANT_P (op) |
| || (GET_CODE (op) == SUBREG |
| && REG_P (SUBREG_REG (op)) |
| && REG_POINTER (SUBREG_REG (op)) |
| && GET_MODE (SUBREG_REG (op)) == Pmode)) |
| && !targetm.have_ptr_extend ()) |
| { |
| temp |
| = convert_memory_address_addr_space_1 (Pmode, op, |
| ADDR_SPACE_GENERIC, false, |
| true); |
| if (temp) |
| return temp; |
| } |
| #endif |
| break; |
| |
| default: |
| break; |
| } |
| |
| if (VECTOR_MODE_P (mode) |
| && vec_duplicate_p (op, &elt) |
| && code != VEC_DUPLICATE) |
| { |
| /* Try applying the operator to ELT and see if that simplifies. |
| We can duplicate the result if so. |
| |
| The reason we don't use simplify_gen_unary is that it isn't |
| necessarily a win to convert things like: |
| |
| (neg:V (vec_duplicate:V (reg:S R))) |
| |
| to: |
| |
| (vec_duplicate:V (neg:S (reg:S R))) |
| |
| The first might be done entirely in vector registers while the |
| second might need a move between register files. */ |
| temp = simplify_unary_operation (code, GET_MODE_INNER (mode), |
| elt, GET_MODE_INNER (GET_MODE (op))); |
| if (temp) |
| return gen_vec_duplicate (mode, temp); |
| } |
| |
| return 0; |
| } |
| |
| /* Try to compute the value of a unary operation CODE whose output mode is to |
| be MODE with input operand OP whose mode was originally OP_MODE. |
| Return zero if the value cannot be computed. */ |
| rtx |
| simplify_const_unary_operation (enum rtx_code code, machine_mode mode, |
| rtx op, machine_mode op_mode) |
| { |
| scalar_int_mode result_mode; |
| |
| if (code == VEC_DUPLICATE) |
| { |
| gcc_assert (VECTOR_MODE_P (mode)); |
| if (GET_MODE (op) != VOIDmode) |
| { |
| if (!VECTOR_MODE_P (GET_MODE (op))) |
| gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op)); |
| else |
| gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER |
| (GET_MODE (op))); |
| } |
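| /* For example, (vec_duplicate:V4SI (const_int 10)) folds to a |
| CONST_VECTOR of four (const_int 10) elements. */ |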
| if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)) |
| return gen_const_vec_duplicate (mode, op); |
| if (GET_CODE (op) == CONST_VECTOR |
| && (CONST_VECTOR_DUPLICATE_P (op) |
| || CONST_VECTOR_NUNITS (op).is_constant ())) |
| { |
| unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op) |
| ? CONST_VECTOR_NPATTERNS (op) |
| : CONST_VECTOR_NUNITS (op).to_constant ()); |
| gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns)); |
| rtx_vector_builder builder (mode, npatterns, 1); |
| for (unsigned i = 0; i < npatterns; i++) |
| builder.quick_push (CONST_VECTOR_ELT (op, i)); |
| return builder.build (); |
| } |
| } |
| |
| if (VECTOR_MODE_P (mode) |
| && GET_CODE (op) == CONST_VECTOR |
| && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op))) |
| { |
| gcc_assert (GET_MODE (op) == op_mode); |
| |
| rtx_vector_builder builder; |
| if (!builder.new_unary_operation (mode, op, false)) |
| return 0; |
| |
| unsigned int count = builder.encoded_nelts (); |
| for (unsigned int i = 0; i < count; i++) |
| { |
| rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode), |
| CONST_VECTOR_ELT (op, i), |
| GET_MODE_INNER (op_mode)); |
| if (!x || !valid_for_const_vector_p (mode, x)) |
| return 0; |
| builder.quick_push (x); |
| } |
| return builder.build (); |
| } |
| |
| /* The order of these tests is critical so that, for example, we don't |
| check the wrong mode (input vs. output) for a conversion operation, |
| such as FIX. At some point, this should be simplified. */ |
| |
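| /* For instance, (float:DF (const_int 3)) folds to the DFmode |
| constant 3.0. */ |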
| if (code == FLOAT && CONST_SCALAR_INT_P (op)) |
| { |
| REAL_VALUE_TYPE d; |
| |
| if (op_mode == VOIDmode) |
| { |
| /* CONST_INTs have VOIDmode as their mode. We assume that all |
| the bits of the constant are significant, though this is a |
| dangerous assumption: CONST_INTs are often created and used |
| with garbage in the bits outside of the precision of their |
| implied mode. */ |
| op_mode = MAX_MODE_INT; |
| } |
| |
| real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED); |
| |
| /* Avoid the folding if flag_signaling_nans is on and |
| operand is a signaling NaN. */ |
| if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)) |
| return 0; |
| |
| d = real_value_truncate (mode, d); |
| return const_double_from_real_value (d, mode); |
| } |
| else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op)) |
| { |
| REAL_VALUE_TYPE d; |
| |
| if (op_mode == VOIDmode) |
| { |
| /* CONST_INTs have VOIDmode as their mode. We assume that all |
| the bits of the constant are significant, though this is a |
| dangerous assumption: CONST_INTs are often created and used |
| with garbage in the bits outside of the precision of their |
| implied mode. */ |
| op_mode = MAX_MODE_INT; |
| } |
| |
| real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED); |
| |
| /* Avoid the folding if flag_signaling_nans is on and |
| operand is a signaling NaN. */ |
| if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)) |
| return 0; |
| |
| d = real_value_truncate (mode, d); |
| return const_double_from_real_value (d, mode); |
| } |
| |
| if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode)) |
| { |
| unsigned int width = GET_MODE_PRECISION (result_mode); |
| if (width > MAX_BITSIZE_MODE_ANY_INT) |
| return 0; |
| |
| wide_int result; |
| scalar_int_mode imode = (op_mode == VOIDmode |
| ? result_mode |
| : as_a <scalar_int_mode> (op_mode)); |
| rtx_mode_t op0 = rtx_mode_t (op, imode); |
| int int_value; |
| |
| #if TARGET_SUPPORTS_WIDE_INT == 0 |
| /* This assert keeps the simplification from producing a result |
| that cannot be represented in a CONST_DOUBLE, but a lot of |
| upstream callers expect that this function never fails to |
| simplify something, so if this check were added to the test |
| above, the code would simply die later anyway. If this assert |
| fires, you just need to make the port support wide ints. */ |
| gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT); |
| #endif |
| |
| switch (code) |
| { |
| case NOT: |
| result = wi::bit_not (op0); |
| break; |
| |
| case NEG: |
| result = wi::neg (op0); |
| break; |
| |
| case ABS: |
| result = wi::abs (op0); |
| break; |
| |
| case FFS: |
| result = wi::shwi (wi::ffs (op0), result_mode); |
| break; |
| |
| case CLZ: |
| if (wi::ne_p (op0, 0)) |
| int_value = wi::clz (op0); |
| else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value)) |
| return NULL_RTX; |
| result = wi::shwi (int_value, result_mode); |
| break; |
| |
| case CLRSB: |
| result = wi::shwi (wi::clrsb (op0), result_mode); |
| break; |
| |
| case CTZ: |
| if (wi::ne_p (op0, 0)) |
| int_value = wi::ctz (op0); |
| else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value)) |
| return NULL_RTX; |
| result = wi::shwi (int_value, result_mode); |
| break; |
| |
| case POPCOUNT: |
| result = wi::shwi (wi::popcount (op0), result_mode); |
| break; |
| |
| case PARITY: |
| result = wi::shwi (wi::parity (op0), result_mode); |
| break; |
| |
| case BSWAP: |
| result = wide_int (op0).bswap (); |
| break; |
| |
| case TRUNCATE: |
| case ZERO_EXTEND: |
| result = wide_int::from (op0, width, UNSIGNED); |
| break; |
| |
| case SIGN_EXTEND: |
| result = wide_int::from (op0, width, SIGNED); |
| break; |
| |
| case SQRT: |
| default: |
| return 0; |
| } |
| |
| return immed_wide_int_const (result, result_mode); |
| } |
| |
| else if (CONST_DOUBLE_AS_FLOAT_P (op) |
| && SCALAR_FLOAT_MODE_P (mode) |
| && SCALAR_FLOAT_MODE_P (GET_MODE (op))) |
| { |
| REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op); |
| switch (code) |
| { |
| case SQRT: |
| return 0; |
| case ABS: |
| d = real_value_abs (&d); |
| break; |
| case NEG: |
| d = real_value_negate (&d); |
| break; |
| case FLOAT_TRUNCATE: |
| /* Don't perform the operation if flag_signaling_nans is on |
| and the operand is a signaling NaN. */ |
| if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)) |
| return NULL_RTX; |
| d = real_value_truncate (mode, d); |
| break; |
| case FLOAT_EXTEND: |
| /* Don't perform the operation if flag_signaling_nans is on |
| and the operand is a signaling NaN. */ |
| if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)) |
| return NULL_RTX; |
| /* All this does is change the mode, unless we are changing |
| the mode class. */ |
| if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op))) |
| real_convert (&d, mode, &d); |
| break; |
| case FIX: |
| /* Don't perform the operation if flag_signaling_nans is on |
| and the operand is a signaling NaN. */ |
| if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)) |
| return NULL_RTX; |
| real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL); |
| break; |
| case NOT: |
| { |
| long tmp[4]; |
| int i; |
| |
| real_to_target (tmp, &d, GET_MODE (op)); |
| for (i = 0; i < 4; i++) |
| tmp[i] = ~tmp[i]; |
| real_from_target (&d, tmp, mode); |
| break; |
| } |
| default: |
| gcc_unreachable (); |
| } |
| return const_double_from_real_value (d, mode); |
| } |
| else if (CONST_DOUBLE_AS_FLOAT_P (op) |
| && SCALAR_FLOAT_MODE_P (GET_MODE (op)) |
| && is_int_mode (mode, &result_mode)) |
| { |
| unsigned int width = GET_MODE_PRECISION (result_mode); |
| if (width > MAX_BITSIZE_MODE_ANY_INT) |
| return 0; |
| |
| /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX |
| operators are intentionally left unspecified (to ease implementation |
| by target backends), for consistency, this routine implements the |
| same semantics for constant folding as used by the middle-end. */ |
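| /* For example, (fix:SI (const_double:DF 3.0e10)) folds to |
| (const_int 0x7fffffff), the saturated SImode maximum, and a |
| NaN operand folds to (const_int 0). */ |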
| |
| /* This was formerly used only for non-IEEE float. |
| eggert@twinsun.com says it is safe for IEEE also. */ |
| REAL_VALUE_TYPE t; |
| const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op); |
| wide_int wmax, wmin; |
| /* This is part of the interface of real_to_integer, but we check |
| things before making this call. */ |
| bool fail; |
| |
| switch (code) |
| { |
| case FIX: |
| if (REAL_VALUE_ISNAN (*x)) |
| return const0_rtx; |
| |
| /* Test against the signed upper bound. */ |
| wmax = wi::max_value (width, SIGNED); |
| real_from_integer (&t, VOIDmode, wmax, SIGNED); |
| if (real_less (&t, x)) |
| return immed_wide_int_const (wmax, mode); |
| |
| /* Test against the signed lower bound. */ |
| wmin = wi::min_value (width, SIGNED); |
| real_from_integer (&t, VOIDmode, wmin, SIGNED); |
| if (real_less (x, &t)) |
| return immed_wide_int_const (wmin, mode); |
| |
| return immed_wide_int_const (real_to_integer (x, &fail, width), |
| mode); |
| |
| case UNSIGNED_FIX: |
| if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x)) |
| return const0_rtx; |
| |
| /* Test against the unsigned upper bound. */ |
| wmax = wi::max_value (width, UNSIGNED); |
| real_from_integer (&t, VOIDmode, wmax, UNSIGNED); |
| if (real_less (&t, x)) |
| return immed_wide_int_const (wmax, mode); |
| |
| return immed_wide_int_const (real_to_integer (x, &fail, width), |
| mode); |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| /* Handle polynomial integers. */ |
| else if (CONST_POLY_INT_P (op)) |
| { |
| poly_wide_int result; |
| switch (code) |
| { |
| case NEG: |
| result = -const_poly_int_value (op); |
| break; |
| |
| case NOT: |
| result = ~const_poly_int_value (op); |
| break; |
| |
| default: |
| return NULL_RTX; |
| } |
| return immed_wide_int_const (result, mode); |
| } |
| |
| return NULL_RTX; |
| } |
| |
| /* Subroutine of simplify_binary_operation to simplify a binary operation |
| CODE that can commute with byte swapping, with result mode MODE and |
| operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR. |
| Return zero if no simplification or canonicalization is possible. */ |
| |
| rtx |
| simplify_context::simplify_byte_swapping_operation (rtx_code code, |
| machine_mode mode, |
| rtx op0, rtx op1) |
| { |
| rtx tem; |
| |
| /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */ |
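| /* E.g. (and:SI (bswap:SI x) (const_int 0xff00)) becomes |
| (bswap:SI (and:SI x (const_int 0xff0000))). */ |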
| if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1)) |
| { |
| tem = simplify_gen_binary (code, mode, XEXP (op0, 0), |
| simplify_gen_unary (BSWAP, mode, op1, mode)); |
| return simplify_gen_unary (BSWAP, mode, tem, mode); |
| } |
| |
| /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */ |
| if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP) |
| { |
| tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0)); |
| return simplify_gen_unary (BSWAP, mode, tem, mode); |
| } |
| |
| return NULL_RTX; |
| } |
| |
| /* Subroutine of simplify_binary_operation to simplify a commutative, |
| associative binary operation CODE with result mode MODE, operating |
| on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR, |
| SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or |
| canonicalization is possible. */ |
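| /* For example, (mult (mult x (const_int 2)) (const_int 3)) is |
| simplified here to (mult x (const_int 6)). */ |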
| |
| rtx |
| simplify_context::simplify_associative_operation (rtx_code code, |
| machine_mode mode, |
| rtx op0, rtx op1) |
| { |
| rtx tem; |
| |
| /* Linearize the operator to the left. */ |
| if (GET_CODE (op1) == code) |
| { |
| /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */ |
| if (GET_CODE (op0) == code) |
| { |
| tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0)); |
| return simplify_gen_binary (code, mode, tem, XEXP (op1, 1)); |
| } |
| |
| /* "a op (b op c)" becomes "(b op c) op a". */ |
| if (! swap_commutative_operands_p (op1, op0)) |
| return simplify_gen_binary (code, mode, op1, op0); |
| |
| std::swap (op0, op1); |
| } |
| |
| if (GET_CODE (op0) == code) |
| { |
| /* Canonicalize "(x op c) op y" as "(x op y) op c". */ |
| if (swap_commutative_operands_p (XEXP (op0, 1), op1)) |
| { |
| tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1); |
| return simplify_gen_binary (code, mode, tem, XEXP (op0, 1)); |
| } |
| |
| /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */ |
| tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1); |
| if (tem != 0) |
| return simplify_gen_binary (code, mode, XEXP (op0, 0), tem); |
| |
| /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */ |
| tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1); |
| if (tem != 0) |
| return simplify_gen_binary (code, mode, tem, XEXP (op0, 1)); |
| } |
| |
| return 0; |
| } |
| |
| /* Return a mask describing the COMPARISON, built from the bits |
| LT = 8, GT = 4, EQ = 2 and UNORDERED = 1; compound comparison |
| codes are the inclusive OR of those bits. */ |
| static int |
| comparison_to_mask (enum rtx_code comparison) |
| { |
| switch (comparison) |
| { |
| case LT: |
| return 8; |
| case GT: |
| return 4; |
| case EQ: |
| return 2; |
| case UNORDERED: |
| return 1; |
| |
| case LTGT: |
| return 12; |
| case LE: |
| return 10; |
| case GE: |
| return 6; |
| case UNLT: |
| return 9; |
| case UNGT: |
| return 5; |
| case UNEQ: |
| return 3; |
| |
| case ORDERED: |
| return 14; |
| case NE: |
| return 13; |
| case UNLE: |
| return 11; |
| case UNGE: |
| return 7; |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| /* Return a comparison corresponding to the MASK. */ |
| static enum rtx_code |
| mask_to_comparison (int mask) |
| { |
| switch (mask) |
| { |
| case 8: |
| return LT; |
| case 4: |
| return GT; |
| case 2: |
| return EQ; |
| case 1: |
| return UNORDERED; |
| |
| case 12: |
| return LTGT; |
| case 10: |
| return LE; |
| case 6: |
| return GE; |
| case 9: |
| return UNLT; |
| case 5: |
| return UNGT; |
| case 3: |
| return UNEQ; |
| |
| case 14: |
| return ORDERED; |
| case 13: |
| return NE; |
| case 11: |
| return UNLE; |
| case 7: |
| return UNGE; |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| /* Return true if CODE is valid for comparisons of mode MODE, false |
| otherwise. |
| |
| It is always safe to return false, even if the code was valid for the |
| given mode as that will merely suppress optimizations. */ |
| |
| static bool |
| comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode) |
| { |
| switch (code) |
| { |
| /* These are valid for integral, floating and vector modes. */ |
| case NE: |
| case EQ: |
| case GE: |
| case GT: |
| case LE: |
| case LT: |
| return (INTEGRAL_MODE_P (mode) |
| || FLOAT_MODE_P (mode) |
| || VECTOR_MODE_P (mode)); |
| |
| /* These are valid for floating point modes. */ |
| case LTGT: |
| case UNORDERED: |
| case ORDERED: |
| case UNEQ: |
| case UNGE: |
| case UNGT: |
| case UNLE: |
| case UNLT: |
| return FLOAT_MODE_P (mode); |
| |
| /* These are filtered out in simplify_logical_relational_operation, |
| but we check for them too as a matter of safety. They are valid |
| for integral and vector modes. */ |
| case GEU: |
| case GTU: |
| case LEU: |
| case LTU: |
| return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode); |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| /* Canonicalize RES, a scalar const0_rtx/const_true_rtx, to the |
| correct false/true value for a comparison producing MODE whose |
| operands have mode CMP_MODE. */ |
| |
| static rtx |
| relational_result (machine_mode mode, machine_mode cmp_mode, rtx res) |
| { |
| if (SCALAR_FLOAT_MODE_P (mode)) |
| { |
| if (res == const0_rtx) |
| return CONST0_RTX (mode); |
| #ifdef FLOAT_STORE_FLAG_VALUE |
| REAL_VALUE_TYPE val = FLOAT_STORE_FLAG_VALUE (mode); |
| return const_double_from_real_value (val, mode); |
| #else |
| return NULL_RTX; |
| #endif |
| } |
| if (VECTOR_MODE_P (mode)) |
| { |
| if (res == const0_rtx) |
| return CONST0_RTX (mode); |
| #ifdef VECTOR_STORE_FLAG_VALUE |
| rtx val = VECTOR_STORE_FLAG_VALUE (mode); |
| if (val == NULL_RTX) |
| return NULL_RTX; |
| if (val == const1_rtx) |
| return CONST1_RTX (mode); |
| |
| return gen_const_vec_duplicate (mode, val); |
| #else |
| return NULL_RTX; |
| #endif |
| } |
| /* For a vector comparison with a scalar int result, we don't know |
| whether the target means an integral bitmask, a result that is |
| const_true_rtx only when all element comparisons are true, or one |
| that is const_true_rtx when any element comparison is true. For |
| const0_rtx all of these cases coincide. */ |
| if (VECTOR_MODE_P (cmp_mode) |
| && SCALAR_INT_MODE_P (mode) |
| && res == const_true_rtx) |
| return NULL_RTX; |
| |
| return res; |
| } |
| |
| /* Simplify a logical operation CODE with result mode MODE, operating on OP0 |
| and OP1, which should be both relational operations. Return 0 if no such |
| simplification is possible. */ |
| rtx |
| simplify_context::simplify_logical_relational_operation (rtx_code code, |
| machine_mode mode, |
| rtx op0, rtx op1) |
| { |
| /* We only handle IOR of two relational operations. */ |
| if (code != IOR) |
| return 0; |
| |
| if (!(COMPARISON_P (op0) && COMPARISON_P (op1))) |
| return 0; |
| |
| if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0)) |
| && rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1)))) |
| return 0; |
| |
| enum rtx_code code0 = GET_CODE (op0); |
| enum rtx_code code1 = GET_CODE (op1); |
| |
| /* We don't handle unsigned comparisons currently. */ |
| if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU) |
| return 0; |
| if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU) |
| return 0; |
| |
| int mask0 = comparison_to_mask (code0); |
| int mask1 = comparison_to_mask (code1); |
| |
| int mask = mask0 | mask1; |
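| /* For instance, LT (mask 8) IORed with EQ (mask 2) gives mask 10, |
| which mask_to_comparison maps back to LE. */ |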
| |
| if (mask == 15) |
| return relational_result (mode, GET_MODE (op0), const_true_rtx); |
| |
| code = mask_to_comparison (mask); |
| |
| /* Many comparison codes are only valid for certain mode classes. */ |
| if (!comparison_code_valid_for_mode (code, mode)) |
| return 0; |
| |
| op0 = XEXP (op1, 0); |
| op1 = XEXP (op1, 1); |
| |
| return simplify_gen_relational (code, mode, VOIDmode, op0, op1); |
| } |
| |
| /* Simplify a binary operation CODE with result mode MODE, operating on OP0 |
| and OP1. Return 0 if no simplification is possible. |
| |
| Don't use this for relational operations such as EQ or LT. |
| Use simplify_relational_operation instead. */ |
| rtx |
| simplify_context::simplify_binary_operation (rtx_code code, machine_mode mode, |
| rtx op0, rtx op1) |
| { |
| rtx trueop0, trueop1; |
| rtx tem; |
| |
| /* Relational operations don't work here. We must know the mode |
| of the operands in order to do the comparison correctly. |
| Assuming a full word can give incorrect results. |
| Consider comparing 128 with -128 in QImode. */ |
| gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE); |
| gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE); |
| |
| /* Make sure the constant is second. */ |
| if (GET_RTX_CLASS (code) == RTX_COMM_ARITH |
| && swap_commutative_operands_p (op0, op1)) |
| std::swap (op0, op1); |
| |
| trueop0 = avoid_constant_pool_reference (op0); |
| trueop1 = avoid_constant_pool_reference (op1); |
| |
| tem = simplify_const_binary_operation (code, mode, trueop0, trueop1); |
| if (tem) |
| return tem; |
| tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1); |
| |
| if (tem) |
| return tem; |
| |
| /* If the above steps did not result in a simplification and op0 or op1 |
| were constant pool references, use the referenced constants directly. */ |
| if (trueop0 != op0 || trueop1 != op1) |
| return simplify_gen_binary (code, mode, trueop0, trueop1); |
| |
| return NULL_RTX; |
| } |
| |
| /* Subroutine of simplify_binary_operation_1 that looks for cases in |
| which OP0 and OP1 are both vector series or vector duplicates |
| (which are really just series with a step of 0). If so, try to |
| form a new series by applying CODE to the bases and to the steps. |
| Return null if no simplification is possible. |
| |
| MODE is the mode of the operation and is known to be a vector |
| integer mode. */ |
| |
| rtx |
| simplify_context::simplify_binary_operation_series (rtx_code code, |
| machine_mode mode, |
| rtx op0, rtx op1) |
| { |
| rtx base0, step0; |
| if (vec_duplicate_p (op0, &base0)) |
| step0 = const0_rtx; |
| else if (!vec_series_p (op0, &base0, &step0)) |
| return NULL_RTX; |
| |
| rtx base1, step1; |
| if (vec_duplicate_p (op1, &base1)) |
| step1 = const0_rtx; |
| else if (!vec_series_p (op1, &base1, &step1)) |
| return NULL_RTX; |
| |
| /* Only create a new series if we can simplify both parts. In other |
| cases this isn't really a simplification, and it's not necessarily |
| a win to replace a vector operation with a scalar operation. */ |
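| /* For example, (plus (vec_series (const_int 1) (const_int 2)) |
| (vec_duplicate (const_int 3))) simplifies to |
| (vec_series (const_int 4) (const_int 2)). */ |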
| scalar_mode inner_mode = GET_MODE_INNER (mode); |
| rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1); |
| if (!new_base) |
| return NULL_RTX; |
| |
| rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1); |
| if (!new_step) |
| return NULL_RTX; |
| |
| return gen_vec_series (mode, new_base, new_step); |
| } |
| |
| /* Subroutine of simplify_binary_operation_1. Un-distribute a binary |
| operation CODE with result mode MODE, operating on OP0 and OP1. |
| e.g. simplify (xor (and A C) (and B C)) to (and (xor A B) C). |
| Returns NULL_RTX if no simplification is possible. */ |
| |
| rtx |
| simplify_context::simplify_distributive_operation (rtx_code code, |
| machine_mode mode, |
| rtx op0, rtx op1) |
| { |
| enum rtx_code op = GET_CODE (op0); |
| gcc_assert (GET_CODE (op1) == op); |
| |
| if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1)) |
| && ! side_effects_p (XEXP (op0, 1))) |
| return simplify_gen_binary (op, mode, |
| simplify_gen_binary (code, mode, |
| XEXP (op0, 0), |
| XEXP (op1, 0)), |
| XEXP (op0, 1)); |
| |
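| /* If the inner operation is commutative, the shared operand may |
| appear in either position, e.g. (ior (and C A) (and B C)) |
| becomes (and (ior A B) C). */ |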
| if (GET_RTX_CLASS (op) == RTX_COMM_ARITH) |
| { |
| if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0)) |
| && ! side_effects_p (XEXP (op0, 0))) |
| return simplify_gen_binary (op, mode, |
| simplify_gen_binary (code, mode, |
| XEXP (op0, 1), |
| XEXP (op1, 1)), |
| XEXP (op0, 0)); |
| if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 1)) |
| && ! side_effects_p (XEXP (op0, 0))) |
| return simplify_gen_binary (op, mode, |
| simplify_gen_binary (code, mode, |
| XEXP (op0, 1), |
| XEXP (op1, 0)), |
| XEXP (op0, 0)); |
| if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 0)) |
| && ! side_effects_p (XEXP (op0, 1))) |
| return simplify_gen_binary (op, mode, |
| simplify_gen_binary (code, mode, |
| XEXP (op0, 0), |
| XEXP (op1, 1)), |
| XEXP (op0, 1)); |
| } |
| |
| return NULL_RTX; |
| } |
| |
| /* Subroutine of simplify_binary_operation. Simplify a binary operation |
| CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or |
| OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the |
| actual constants. */ |
| |
| rtx |
| simplify_context::simplify_binary_operation_1 (rtx_code code, |
| machine_mode mode, |
| rtx op0, rtx op1, |
| rtx trueop0, rtx trueop1) |
| { |
| rtx tem, reversed, opleft, opright, elt0, elt1; |
| HOST_WIDE_INT val; |
| scalar_int_mode int_mode, inner_mode; |
| poly_int64 offset; |
| |
| /* Even if we can't compute a constant result, |
| there are some cases worth simplifying. */ |
| |
| switch (code) |
| { |
| case PLUS: |
| /* Maybe simplify x + 0 to x. The two expressions are equivalent |
| when x is NaN, infinite, or finite and nonzero. They aren't |
| when x is -0 and the rounding mode is not towards -infinity, |
| since (-0) + 0 is then 0. */ |
| if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode)) |
| return op0; |
| |
| /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These |
| transformations are safe even for IEEE. */ |
| if (GET_CODE (op0) == NEG) |
| return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0)); |
| else if (GET_CODE (op1) == NEG) |
| return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0)); |
| |
| /* (~a) + 1 -> -a */ |
| if (INTEGRAL_MODE_P (mode) |
| && GET_CODE (op0) == NOT |
| && trueop1 == const1_rtx) |
| return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode); |
| |
| /* Handle both-operands-constant cases. We can only add |
| CONST_INTs to constants since the sum of relocatable symbols |
| can't be handled by most assemblers. Don't add CONST_INT |
| to CONST_INT since overflow won't be computed properly if wider |
| than HOST_BITS_PER_WIDE_INT. */ |
| |
| if ((GET_CODE (op0) == CONST |
| || GET_CODE (op0) == SYMBOL_REF |
| || GET_CODE (op0) == LABEL_REF) |
| && poly_int_rtx_p (op1, &offset)) |
| return plus_constant (mode, op0, offset); |
| else if ((GET_CODE (op1) == CONST |
| || GET_CODE (op1) == SYMBOL_REF |
| || GET_CODE (op1) == LABEL_REF) |
| && poly_int_rtx_p (op0, &offset)) |
| return plus_constant (mode, op1, offset); |
| |
| /* See if this is something like X * C + X or vice versa or |
| if the multiplication is written as a shift or a negation. If |
| so, we can distribute and combine into a single multiply or |
| shift. But don't make something more expensive than we had |
| before. */ |
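| /* E.g. (plus (mult x (const_int 3)) x) can become |
| (mult x (const_int 4)), and (plus (ashift x (const_int 2)) (neg x)) |
| can become (mult x (const_int 3)), subject to the cost check |
| below. */ |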
| |
| if (is_a <scalar_int_mode> (mode, &int_mode)) |
| { |
| rtx lhs = op0, rhs = op1; |
| |
| wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode)); |
| wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode)); |
| |
| if (GET_CODE (lhs) == NEG) |
| { |
| coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode)); |
| lhs = XEXP (lhs, 0); |
| } |
| else if (GET_CODE (lhs) == MULT |
| && CONST_SCALAR_INT_P (XEXP (lhs, 1))) |
| { |
| coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode); |
| lhs = XEXP (lhs, 0); |
| } |
| else if (GET_CODE (lhs) == ASHIFT |
| && CONST_INT_P (XEXP (lhs, 1)) |
| && INTVAL (XEXP (lhs, 1)) >= 0 |
| && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode)) |
| { |
| coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)), |
| GET_MODE_PRECISION (int_mode)); |
| lhs = XEXP (lhs, 0); |
| } |
| |
| if (GET_CODE (rhs) == NEG) |
| { |
| coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode)); |
| rhs = XEXP (rhs, 0); |
| } |
| else if (GET_CODE (rhs) == MULT |
| && CONST_INT_P (XEXP (rhs, 1))) |
| { |
| coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode); |
| rhs = XEXP (rhs, 0); |
| } |
| else if (GET_CODE (rhs) == ASHIFT |
| && CONST_INT_P (XEXP (rhs, 1)) |
| && INTVAL (XEXP (rhs, 1)) >= 0 |
| && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode)) |
| { |
| coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)), |
| GET_MODE_PRECISION (int_mode)); |
| rhs = XEXP (rhs, 0); |
| } |
| |
| if (rtx_equal_p (lhs, rhs)) |
| { |
| rtx orig = gen_rtx_PLUS (int_mode, op0, op1); |
| rtx coeff; |
| bool speed = optimize_function_for_speed_p (cfun); |
| |
| coeff = immed_wide_int_const (coeff0 + coeff1, int_mode); |
| |
| tem = simplify_gen_binary (MULT, int_mode, lhs, coeff); |
| return (set_src_cost (tem, int_mode, speed) |
| <= set_src_cost (orig, int_mode, speed) ? tem : 0); |
| } |
| |
| /* Optimize (X - 1) * Y + Y to X * Y. */ |
| lhs = op0; |
| rhs = op1; |
| if (GET_CODE (op0) == MULT) |
| { |
| if (((GET_CODE (XEXP (op0, 0)) == PLUS |
| && XEXP (XEXP (op0, 0), 1) == constm1_rtx) |
| || (GET_CODE (XEXP (op0, 0)) == MINUS |
| && XEXP (XEXP (op0, 0), 1) == const1_rtx)) |
| && rtx_equal_p (XEXP (op0, 1), op1)) |
| lhs = XEXP (XEXP (op0, 0), 0); |
| else if (((GET_CODE (XEXP (op0, 1)) == PLUS |
| && XEXP (XEXP (op0, 1), 1) == constm1_rtx) |
| || (GET_CODE (XEXP (op0, 1)) == MINUS |
| && XEXP (XEXP (op0, 1), 1) == const1_rtx)) |
| && rtx_equal_p (XEXP (op0, 0), op1)) |
| lhs = XEXP (XEXP (op0, 1), 0); |
| } |
| else if (GET_CODE (op1) == MULT) |
| { |
| if (((GET_CODE (XEXP (op1, 0)) == PLUS |
| && XEXP (XEXP (op1, 0), 1) == constm1_rtx) |
| || (GET_CODE (XEXP (op1, 0)) == MINUS |
| && XEXP (XEXP (op1, 0), 1) == const1_rtx)) |
| && rtx_equal_p (XEXP (op1, 1), op0)) |
| rhs = XEXP (XEXP (op1, 0), 0); |
| else if (((GET_CODE (XEXP (op1, 1)) == PLUS |
| && XEXP (XEXP (op1, 1), 1) == constm1_rtx) |
| || (GET_CODE (XEXP (op1, 1)) == MINUS |
| && XEXP (XEXP (op1, 1), 1) == const1_rtx)) |
| && rtx_equal_p (XEXP (op1, 0), op0)) |
| rhs = XEXP (XEXP (op1, 1), 0); |
| } |
| if (lhs != op0 || rhs != op1) |
| return simplify_gen_binary (MULT, int_mode, lhs, rhs); |
| } |
| |
| /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */ |
| if (CONST_SCALAR_INT_P (op1) |
| && GET_CODE (op0) == XOR |
| && CONST_SCALAR_INT_P (XEXP (op0, 1)) |
| && mode_signbit_p (mode, op1)) |
| return simplify_gen_binary (XOR, mode, XEXP (op0, 0), |
| simplify_gen_binary (XOR, mode, op1, |
| XEXP (op0, 1))); |
| |
| /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */ |
| if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode) |
| && GET_CODE (op0) == MULT |
| && GET_CODE (XEXP (op0, 0)) == NEG) |
| { |
| rtx in1, in2; |
| |
| in1 = XEXP (XEXP (op0, 0), 0); |
| in2 = XEXP (op0, 1); |
| return simplify_gen_binary (MINUS, mode, op1, |
| simplify_gen_binary (MULT, mode, |
| in1, in2)); |
| } |
| |
| /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if |
| C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE |
| is 1. */ |
| if (COMPARISON_P (op0) |
| && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx) |
| || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx)) |
| && (reversed = reversed_comparison (op0, mode))) |
| return |
| simplify_gen_unary (NEG, mode, reversed, mode); |
| |
| /* If one of the operands is a PLUS or a MINUS, see if we can |
| simplify this by the associative law. |
| Don't use the associative law for floating point. |
| The inaccuracy makes it nonassociative, |
| and subtle programs can break if operations are associated. */ |
| |
| if (INTEGRAL_MODE_P (mode) |
| && (plus_minus_operand_p (op0) |
| || plus_minus_operand_p (op1)) |
| && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0) |
| return tem; |
| |
| /* Reassociate floating point addition only when the user |
| specifies associative math operations. */ |
| if (FLOAT_MODE_P (mode) |
| && flag_associative_math) |
| { |
| tem = simplify_associative_operation (code, mode, op0, op1); |
| if (tem) |
| return tem; |
| } |
| |
| /* Handle vector series. */ |
| if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT) |
| { |
| tem = simplify_binary_operation_series (code, mode, op0, op1); |
| if (tem) |
| return tem; |
| } |
| break; |
| |
| case COMPARE: |
| /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */ |
| if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT) |
| || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU)) |
| && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx) |
| { |
| rtx xop00 = XEXP (op0, 0); |
| rtx xop10 = XEXP (op1, 0); |
| |
| if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0) |
| return xop00; |
| |
| if (REG_P (xop00) && REG_P (xop10) |
| && REGNO (xop00) == REGNO (xop10) |
| && GET_MODE (xop00) == mode |
| && GET_MODE (xop10) == mode |
| && GET_MODE_CLASS (mode) == MODE_CC) |
| return xop00; |
| } |
| break; |
| |
| case MINUS: |
| /* We can't assume x-x is 0 even with non-IEEE floating point, |
| but since it is zero except in very strange circumstances, we |
| will treat it as zero with -ffinite-math-only. */ |
| if (rtx_equal_p (trueop0, trueop1) |
| && ! side_effects_p (op0) |
| && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode))) |
| return CONST0_RTX (mode); |
| |
| /* Change subtraction from zero into negation. (0 - x) is the |
| same as -x when x is NaN, infinite, or finite and nonzero. |
| But if the mode has signed zeros, and does not round towards |
| -infinity, then 0 - 0 is 0, not -0. */ |
| if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode)) |
| return simplify_gen_unary (NEG, mode, op1, mode); |
| |
| /* (-1 - a) is ~a, unless the expression contains symbolic |
| constants, in which case not retaining additions and |
| subtractions could cause invalid assembly to be produced. */ |
| if (trueop0 == constm1_rtx |
| && !contains_symbolic_reference_p (op1)) |
| return simplify_gen_unary (NOT, mode, op1, mode); |
| |
| /* Subtracting 0 has no effect unless the mode has signaling NaNs, |
| or has signed zeros and supports rounding towards -infinity. |
| In such a case, 0 - 0 is -0. */ |
| if (!(HONOR_SIGNED_ZEROS (mode) |
| && HONOR_SIGN_DEPENDENT_ROUNDING (mode)) |
| && !HONOR_SNANS (mode) |
| && trueop1 == CONST0_RTX (mode)) |
| return op0; |
| |
| /* See if this is something like X * C - X or vice versa or |
| if the multiplication is written as a shift. If so, we can |
| distribute and make a new multiply, shift, or maybe just |
| have X (if C is 2 in the example above). But don't make |
| something more expensive than we had before. */ |
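| /* E.g. (minus (mult x (const_int 3)) x) can become |
| (mult x (const_int 2)) subject to the cost check below, and |
| (minus (ashift x (const_int 1)) x) can become just x. */ |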
| |
| if (is_a <scalar_int_mode> (mode, &int_mode)) |
| { |
| rtx lhs = op0, rhs = op1; |
| |
| wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode)); |
| wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode)); |
| |
| if (GET_CODE (lhs) == NEG) |
| { |
| coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode)); |
| lhs = XEXP (lhs, 0); |
| } |
| else if (GET_CODE (lhs) == MULT |
| && CONST_SCALAR_INT_P (XEXP (lhs, 1))) |
| { |
| coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode); |
| lhs = XEXP (lhs, 0); |
| } |
| else if (GET_CODE (lhs) == ASHIFT |
| && CONST_INT_P (XEXP (lhs, 1)) |
| && INTVAL (XEXP (lhs, 1)) >= 0 |
| && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode)) |
| { |
| coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)), |
| GET_MODE_PRECISION (int_mode)); |
| lhs = XEXP (lhs, 0); |
| } |
| |
| if (GET_CODE (rhs) == NEG) |
| { |
| negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode)); |
| rhs = XEXP (rhs, 0); |
| } |
| else if (GET_CODE (rhs) == MULT |
| && CONST_INT_P (XEXP (rhs, 1))) |
| { |
| negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode)); |
| rhs = XEXP (rhs, 0); |
| } |
| else if (GET_CODE (rhs) == ASHIFT |
| && CONST_INT_P (XEXP (rhs, 1)) |
| && INTVAL (XEXP (rhs, 1)) >= 0 |
| && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode)) |
| { |
| negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)), |
| GET_MODE_PRECISION (int_mode)); |
| negcoeff1 = -negcoeff1; |
| rhs = XEXP (rhs, 0); |
| } |
| |
| if (rtx_equal_p (lhs, rhs)) |
| { |
| rtx orig = gen_rtx_MINUS (int_mode, op0, op1); |
| rtx coeff; |
| bool speed = optimize_function_for_speed_p (cfun); |
| |
| coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode); |
| |
| tem = simplify_gen_binary (MULT, int_mode, lhs, coeff); |
| return (set_src_cost (tem, int_mode, speed) |
| <= set_src_cost (orig, int_mode, speed) ? tem : 0); |
| } |
| |
| /* Optimize (X + 1) * Y - Y to X * Y. */ |
| lhs = op0; |
| if (GET_CODE (op0) == MULT) |
| { |
| if (((GET_CODE (XEXP (op0, 0)) == PLUS |
| && XEXP (XEXP (op0, 0), 1) == const1_rtx) |
| || (GET_CODE (XEXP (op0, 0)) == MINUS |
| && XEXP (XEXP (op0, 0), 1) == constm1_rtx)) |
| && rtx_equal_p (XEXP (op0, 1), op1)) |
| lhs = XEXP (XEXP (op0, 0), 0); |
| else if (((GET_CODE (XEXP (op0, 1)) == PLUS |
| && XEXP (XEXP (op0, 1), 1) == const1_rtx) |
| || (GET_CODE (XEXP (op0, 1)) == MINUS |
| && XEXP (XEXP (op0, 1), 1) == constm1_rtx)) |
| && rtx_equal_p (XEXP (op0, 0), op1)) |
| lhs = XEXP (XEXP (op0, 1), 0); |
| } |
| if (lhs != op0) |
| return simplify_gen_binary (MULT, int_mode, lhs, op1); |
| } |
| |
| /* (a - (-b)) -> (a + b). True even for IEEE. */ |
| if (GET_CODE (op1) == NEG) |
| return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0)); |
| |
| /* (-x - c) may be simplified as (-c - x). */ |
| if (GET_CODE (op0) == NEG |
| && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1))) |
| { |
|