| /* Match-and-simplify patterns for shared GENERIC and GIMPLE folding. |
| This file is consumed by genmatch which produces gimple-match.cc |
| and generic-match.cc from it. |
| |
| Copyright (C) 2014-2022 Free Software Foundation, Inc. |
| Contributed by Richard Biener <rguenther@suse.de> |
| and Prathamesh Kulkarni <bilbotheelffriend@gmail.com> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| |
| /* Generic tree predicates we inherit. */ |
| (define_predicates |
| integer_onep integer_zerop integer_all_onesp integer_minus_onep |
| integer_each_onep integer_truep integer_nonzerop |
| real_zerop real_onep real_minus_onep |
| zerop |
| initializer_each_zero_or_onep |
| CONSTANT_CLASS_P |
| tree_expr_nonnegative_p |
| tree_expr_nonzero_p |
| integer_valued_real_p |
| integer_pow2p |
| uniform_integer_cst_p |
| HONOR_NANS |
| uniform_vector_p |
| expand_vec_cmp_expr_p |
| bitmask_inv_cst_vector_p) |
| |
| /* Operator lists. */ |
| (define_operator_list tcc_comparison |
| lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt) |
| (define_operator_list inverted_tcc_comparison |
| ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq) |
| (define_operator_list inverted_tcc_comparison_with_nans |
| unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq) |
| (define_operator_list swapped_tcc_comparison |
| gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt) |
| (define_operator_list simple_comparison lt le eq ne ge gt) |
| (define_operator_list swapped_simple_comparison gt ge eq ne le lt) |
| |
| #include "cfn-operators.pd" |
| |
| /* Define operator lists for the math rounding functions {,i,l,ll}FN,
| where the versions prefixed with "i" return an int, those prefixed with |
| "l" return a long and those prefixed with "ll" return a long long. |
| |
| Also define operator lists:
| |
| X<FN>F for all float functions, in the order i, l, ll |
| X<FN> for all double functions, in the same order |
| X<FN>L for all long double functions, in the same order. */ |
| #define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \ |
| (define_operator_list X##FN##F BUILT_IN_I##FN##F \ |
| BUILT_IN_L##FN##F \ |
| BUILT_IN_LL##FN##F) \ |
| (define_operator_list X##FN BUILT_IN_I##FN \ |
| BUILT_IN_L##FN \ |
| BUILT_IN_LL##FN) \ |
| (define_operator_list X##FN##L BUILT_IN_I##FN##L \ |
| BUILT_IN_L##FN##L \ |
| BUILT_IN_LL##FN##L) |
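| 
| /* For example, DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) defines the operator
| lists XFLOORF (BUILT_IN_{I,L,LL}FLOORF), XFLOOR (BUILT_IN_{I,L,LL}FLOOR)
| and XFLOORL (BUILT_IN_{I,L,LL}FLOORL). */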
| |
| DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) |
| DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL) |
| DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND) |
| DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) |
| |
| /* Unary operations and their associated IFN_COND_* function. */ |
| (define_operator_list UNCOND_UNARY |
| negate) |
| (define_operator_list COND_UNARY |
| IFN_COND_NEG) |
| |
| /* Binary operations and their associated IFN_COND_* function. */ |
| (define_operator_list UNCOND_BINARY |
| plus minus |
| mult trunc_div trunc_mod rdiv |
| min max |
| IFN_FMIN IFN_FMAX |
| bit_and bit_ior bit_xor |
| lshift rshift) |
| (define_operator_list COND_BINARY |
| IFN_COND_ADD IFN_COND_SUB |
| IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV |
| IFN_COND_MIN IFN_COND_MAX |
| IFN_COND_FMIN IFN_COND_FMAX |
| IFN_COND_AND IFN_COND_IOR IFN_COND_XOR |
| IFN_COND_SHL IFN_COND_SHR) |
| |
| /* Same for ternary operations. */ |
| (define_operator_list UNCOND_TERNARY |
| IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS) |
| (define_operator_list COND_TERNARY |
| IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS) |
| |
| /* __atomic_fetch_or_*, __atomic_fetch_xor_*, __atomic_xor_fetch_* */ |
| (define_operator_list ATOMIC_FETCH_OR_XOR_N |
| BUILT_IN_ATOMIC_FETCH_OR_1 BUILT_IN_ATOMIC_FETCH_OR_2 |
| BUILT_IN_ATOMIC_FETCH_OR_4 BUILT_IN_ATOMIC_FETCH_OR_8 |
| BUILT_IN_ATOMIC_FETCH_OR_16 |
| BUILT_IN_ATOMIC_FETCH_XOR_1 BUILT_IN_ATOMIC_FETCH_XOR_2 |
| BUILT_IN_ATOMIC_FETCH_XOR_4 BUILT_IN_ATOMIC_FETCH_XOR_8 |
| BUILT_IN_ATOMIC_FETCH_XOR_16 |
| BUILT_IN_ATOMIC_XOR_FETCH_1 BUILT_IN_ATOMIC_XOR_FETCH_2 |
| BUILT_IN_ATOMIC_XOR_FETCH_4 BUILT_IN_ATOMIC_XOR_FETCH_8 |
| BUILT_IN_ATOMIC_XOR_FETCH_16) |
| /* __sync_fetch_and_or_*, __sync_fetch_and_xor_*, __sync_xor_and_fetch_* */ |
| (define_operator_list SYNC_FETCH_OR_XOR_N |
| BUILT_IN_SYNC_FETCH_AND_OR_1 BUILT_IN_SYNC_FETCH_AND_OR_2 |
| BUILT_IN_SYNC_FETCH_AND_OR_4 BUILT_IN_SYNC_FETCH_AND_OR_8 |
| BUILT_IN_SYNC_FETCH_AND_OR_16 |
| BUILT_IN_SYNC_FETCH_AND_XOR_1 BUILT_IN_SYNC_FETCH_AND_XOR_2 |
| BUILT_IN_SYNC_FETCH_AND_XOR_4 BUILT_IN_SYNC_FETCH_AND_XOR_8 |
| BUILT_IN_SYNC_FETCH_AND_XOR_16 |
| BUILT_IN_SYNC_XOR_AND_FETCH_1 BUILT_IN_SYNC_XOR_AND_FETCH_2 |
| BUILT_IN_SYNC_XOR_AND_FETCH_4 BUILT_IN_SYNC_XOR_AND_FETCH_8 |
| BUILT_IN_SYNC_XOR_AND_FETCH_16) |
| /* __atomic_fetch_and_*. */ |
| (define_operator_list ATOMIC_FETCH_AND_N |
| BUILT_IN_ATOMIC_FETCH_AND_1 BUILT_IN_ATOMIC_FETCH_AND_2 |
| BUILT_IN_ATOMIC_FETCH_AND_4 BUILT_IN_ATOMIC_FETCH_AND_8 |
| BUILT_IN_ATOMIC_FETCH_AND_16) |
| /* __sync_fetch_and_and_*. */ |
| (define_operator_list SYNC_FETCH_AND_AND_N |
| BUILT_IN_SYNC_FETCH_AND_AND_1 BUILT_IN_SYNC_FETCH_AND_AND_2 |
| BUILT_IN_SYNC_FETCH_AND_AND_4 BUILT_IN_SYNC_FETCH_AND_AND_8 |
| BUILT_IN_SYNC_FETCH_AND_AND_16) |
| |
| /* nop_convert? combines convert? and view_convert? into one pattern and
| additionally conditionalizes them on tree_nop_conversion_p conversions. */
| (match (nop_convert @0) |
| (convert @0) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@0))))) |
| (match (nop_convert @0) |
| (view_convert @0) |
| (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0)) |
| && known_eq (TYPE_VECTOR_SUBPARTS (type), |
| TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0))) |
| && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0)))))) |
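| 
| /* For example, a conversion between int and unsigned int of the same
| precision satisfies tree_nop_conversion_p and matches nop_convert, while
| a widening conversion such as int to long does not. */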
| |
| /* Transform the likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>.
| ABSU_EXPR returns the unsigned absolute value of its operand and the
| operand of the ABSU_EXPR will have the corresponding signed type. */
| (simplify (abs (convert @0)) |
| (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && !TYPE_UNSIGNED (TREE_TYPE (@0)) |
| && element_precision (type) > element_precision (TREE_TYPE (@0))) |
| (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); } |
| (convert (absu:utype @0))))) |
| |
| #if GIMPLE |
| /* Optimize (X + (X >> (prec - 1))) ^ (X >> (prec - 1)) into abs (X). */ |
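| /* For X < 0 the shift X >> (prec - 1) yields -1, so the expression
| computes (X - 1) ^ -1 == ~(X - 1) == -X; for X >= 0 the shift yields 0
| and the result is X ^ 0 == X. */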
| (simplify |
| (bit_xor:c (plus:c @0 (rshift@2 @0 INTEGER_CST@1)) @2) |
| (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && !TYPE_UNSIGNED (TREE_TYPE (@0)) |
| && wi::to_widest (@1) == element_precision (TREE_TYPE (@0)) - 1) |
| (abs @0))) |
| #endif |
| |
| /* Simplifications of operations with one constant operand and |
| simplifications to constants or single values. */ |
| |
| (for op (plus pointer_plus minus bit_ior bit_xor) |
| (simplify |
| (op @0 integer_zerop) |
| (non_lvalue @0))) |
| |
| /* 0 +p index -> (type)index */ |
| (simplify |
| (pointer_plus integer_zerop @1) |
| (non_lvalue (convert @1))) |
| |
| /* ptr - 0 -> (type)ptr */ |
| (simplify |
| (pointer_diff @0 integer_zerop) |
| (convert @0)) |
| |
| /* See if ARG1 is zero and X + ARG1 reduces to X. |
| Likewise if the operands are reversed. */ |
| (simplify |
| (plus:c @0 real_zerop@1) |
| (if (fold_real_zero_addition_p (type, @0, @1, 0)) |
| (non_lvalue @0))) |
| |
| /* See if ARG1 is zero and X - ARG1 reduces to X. */ |
| (simplify |
| (minus @0 real_zerop@1) |
| (if (fold_real_zero_addition_p (type, @0, @1, 1)) |
| (non_lvalue @0))) |
| |
| /* Even if fold_real_zero_addition_p can't simplify X + 0.0
| into X, we can optimize (X + 0.0) + 0.0 or (X + 0.0) - 0.0
| or (X - 0.0) + 0.0 into X + 0.0 and (X - 0.0) - 0.0 into X - 0.0
| if not -frounding-math. For sNaNs the first operation would raise an
| exception but turn the result into a qNaN, so the second operation
| would not raise one. */
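| /* For instance, (X - 0.0) + 0.0 becomes X + 0.0 (the outer operation
| survives), while for (X + 0.0) + 0.0 the inner X + 0.0 is kept. */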
| (for inner_op (plus minus) |
| (for outer_op (plus minus) |
| (simplify |
| (outer_op (inner_op@3 @0 REAL_CST@1) REAL_CST@2) |
| (if (real_zerop (@1) |
| && real_zerop (@2) |
| && !HONOR_SIGN_DEPENDENT_ROUNDING (type)) |
| (with { bool inner_plus = ((inner_op == PLUS_EXPR) |
| ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1))); |
| bool outer_plus |
| = ((outer_op == PLUS_EXPR) |
| ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@2))); } |
| (if (outer_plus && !inner_plus) |
| (outer_op @0 @2) |
| @3)))))) |
| |
| /* Simplify x - x. |
| This is unsafe for certain floats even in non-IEEE formats. |
| In IEEE, it is unsafe because it gives the wrong result for NaNs.
| PR middle-end/98420: x - x may be -0.0 with FE_DOWNWARD. |
| Also note that operand_equal_p is always false if an operand |
| is volatile. */ |
| (simplify |
| (minus @0 @0) |
| (if (!FLOAT_TYPE_P (type) |
| || (!tree_expr_maybe_nan_p (@0) |
| && !tree_expr_maybe_infinite_p (@0) |
| && (!HONOR_SIGN_DEPENDENT_ROUNDING (type) |
| || !HONOR_SIGNED_ZEROS (type)))) |
| { build_zero_cst (type); })) |
| (simplify |
| (pointer_diff @@0 @0) |
| { build_zero_cst (type); }) |
| |
| (simplify |
| (mult @0 integer_zerop@1) |
| @1) |
| |
| /* -x == x -> x == 0 */ |
| (for cmp (eq ne) |
| (simplify |
| (cmp:c @0 (negate @0)) |
| (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && !TYPE_OVERFLOW_WRAPS (TREE_TYPE(@0))) |
| (cmp @0 { build_zero_cst (TREE_TYPE(@0)); })))) |
| |
| /* Maybe fold x * 0 to 0. The expressions aren't the same |
| when x is NaN, since x * 0 is also NaN. Nor are they the |
| same in modes with signed zeros, since multiplying a |
| negative value by 0 gives -0, not +0. Nor when x is +-Inf, |
| since x * 0 is NaN. */ |
| (simplify |
| (mult @0 real_zerop@1) |
| (if (!tree_expr_maybe_nan_p (@0) |
| && (!HONOR_NANS (type) || !tree_expr_maybe_infinite_p (@0)) |
| && (!HONOR_SIGNED_ZEROS (type) || tree_expr_nonnegative_p (@0))) |
| @1)) |
| |
| /* In IEEE floating point, x*1 is not equivalent to x for snans. |
| Likewise for complex arithmetic with signed zeros. */ |
| (simplify |
| (mult @0 real_onep) |
| (if (!tree_expr_maybe_signaling_nan_p (@0) |
| && (!HONOR_SIGNED_ZEROS (type) |
| || !COMPLEX_FLOAT_TYPE_P (type))) |
| (non_lvalue @0))) |
| |
| /* Transform x * -1.0 into -x. */ |
| (simplify |
| (mult @0 real_minus_onep) |
| (if (!tree_expr_maybe_signaling_nan_p (@0) |
| && (!HONOR_SIGNED_ZEROS (type) |
| || !COMPLEX_FLOAT_TYPE_P (type))) |
| (negate @0))) |
| |
| /* Transform { 0 or 1 } * { 0 or 1 } into { 0 or 1 } & { 0 or 1 } */ |
| (simplify |
| (mult SSA_NAME@1 SSA_NAME@2) |
| (if (INTEGRAL_TYPE_P (type) |
| && get_nonzero_bits (@1) == 1 |
| && get_nonzero_bits (@2) == 1) |
| (bit_and @1 @2))) |
| |
| /* Transform x * { 0 or 1, 0 or 1, ... } into x & { 0 or -1, 0 or -1, ...}, |
| unless the target has native support for the former but not the latter. */ |
| (simplify |
| (mult @0 VECTOR_CST@1) |
| (if (initializer_each_zero_or_onep (@1) |
| && !HONOR_SNANS (type) |
| && !HONOR_SIGNED_ZEROS (type)) |
| (with { tree itype = FLOAT_TYPE_P (type) ? unsigned_type_for (type) : type; } |
| (if (itype |
| && (!VECTOR_MODE_P (TYPE_MODE (type)) |
| || (VECTOR_MODE_P (TYPE_MODE (itype)) |
| && optab_handler (and_optab, |
| TYPE_MODE (itype)) != CODE_FOR_nothing))) |
| (view_convert (bit_and:itype (view_convert @0) |
| (ne @1 { build_zero_cst (type); }))))))) |
| |
| (for cmp (gt ge lt le) |
| outp (convert convert negate negate) |
| outn (negate negate convert convert) |
| /* Transform X * (X > 0.0 ? 1.0 : -1.0) into abs(X). */ |
| /* Transform X * (X >= 0.0 ? 1.0 : -1.0) into abs(X). */ |
| /* Transform X * (X < 0.0 ? 1.0 : -1.0) into -abs(X). */ |
| /* Transform X * (X <= 0.0 ? 1.0 : -1.0) into -abs(X). */ |
| (simplify |
| (mult:c @0 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)) |
| (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type)) |
| (outp (abs @0)))) |
| /* Transform X * (X > 0.0 ? -1.0 : 1.0) into -abs(X). */ |
| /* Transform X * (X >= 0.0 ? -1.0 : 1.0) into -abs(X). */ |
| /* Transform X * (X < 0.0 ? -1.0 : 1.0) into abs(X). */ |
| /* Transform X * (X <= 0.0 ? -1.0 : 1.0) into abs(X). */ |
| (simplify |
| (mult:c @0 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)) |
| (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type)) |
| (outn (abs @0))))) |
| |
| /* Transform X * copysign (1.0, X) into abs(X). */ |
| (simplify |
| (mult:c @0 (COPYSIGN_ALL real_onep @0)) |
| (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type)) |
| (abs @0))) |
| |
| /* Transform X * copysign (1.0, -X) into -abs(X). */ |
| (simplify |
| (mult:c @0 (COPYSIGN_ALL real_onep (negate @0))) |
| (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type)) |
| (negate (abs @0)))) |
| |
| /* Transform copysign (CST, X) into copysign (ABS(CST), X). */ |
| (simplify |
| (COPYSIGN_ALL REAL_CST@0 @1) |
| (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0))) |
| (COPYSIGN_ALL (negate @0) @1))) |
| |
| /* X * 1, X / 1 -> X. */ |
| (for op (mult trunc_div ceil_div floor_div round_div exact_div) |
| (simplify |
| (op @0 integer_onep) |
| (non_lvalue @0))) |
| |
| /* (A / (1 << B)) -> (A >> B). |
| Only for unsigned A. For signed A, this would not preserve rounding |
| toward zero. |
| For example: (-1 / ( 1 << B)) != -1 >> B. |
| Also allow widening conversions, like:
| (A / (unsigned long long) (1U << B)) -> (A >> B) |
| or |
| (A / (unsigned long long) (1 << B)) -> (A >> B). |
| If the left shift is signed, it can be done only if the upper bits |
| of A starting from shift's type sign bit are zero, as |
| (unsigned long long) (1 << 31) is -2147483648ULL, not 2147483648ULL, |
| so it is valid only if A >> 31 is zero. */ |
| (simplify |
| (trunc_div (convert?@0 @3) (convert2? (lshift integer_onep@1 @2))) |
| (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0)) |
| && (!VECTOR_TYPE_P (type) |
| || target_supports_op_p (type, RSHIFT_EXPR, optab_vector) |
| || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)) |
| && (useless_type_conversion_p (type, TREE_TYPE (@1)) |
| || (element_precision (type) >= element_precision (TREE_TYPE (@1)) |
| && (TYPE_UNSIGNED (TREE_TYPE (@1)) |
| || (element_precision (type) |
| == element_precision (TREE_TYPE (@1))) |
| || (INTEGRAL_TYPE_P (type) |
| && (tree_nonzero_bits (@0) |
| & wi::mask (element_precision (TREE_TYPE (@1)) - 1, |
| true, |
| element_precision (type))) == 0))))) |
| (if (!VECTOR_TYPE_P (type) |
| && useless_type_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)) |
| && element_precision (TREE_TYPE (@3)) < element_precision (type)) |
| (convert (rshift @3 @2)) |
| (rshift @0 @2)))) |
| |
| /* Preserve explicit divisions by 0: the C++ front-end wants to detect |
| undefined behavior in constexpr evaluation, and assuming that the division |
| traps enables better optimizations than these anyway. */ |
| (for div (trunc_div ceil_div floor_div round_div exact_div) |
| /* 0 / X is always zero. */ |
| (simplify |
| (div integer_zerop@0 @1) |
| /* But not for 0 / 0 so that we can get the proper warnings and errors. */ |
| (if (!integer_zerop (@1)) |
| @0)) |
| /* X / -1 is -X. */ |
| (simplify |
| (div @0 integer_minus_onep@1) |
| (if (!TYPE_UNSIGNED (type)) |
| (negate @0))) |
| /* X / Y is X if Y has a boolean range: Y can only be 1 here, since
| dividing by 0 is undefined. */
| (simplify |
| (div @0 SSA_NAME@1) |
| (if (INTEGRAL_TYPE_P (type) |
| && ssa_name_has_boolean_range (@1) |
| && !flag_non_call_exceptions) |
| @0)) |
| /* X / X is one. */ |
| (simplify |
| (div @0 @0) |
| /* But not for 0 / 0 so that we can get the proper warnings and errors. |
| And not for _Fract types where we can't build 1. */ |
| (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)) |
| && !integer_zerop (@0) |
| && (!flag_non_call_exceptions || tree_expr_nonzero_p (@0))) |
| { build_one_cst (type); })) |
| /* X / abs (X) is X < 0 ? -1 : 1. */ |
| (simplify |
| (div:C @0 (abs @0)) |
| (if (INTEGRAL_TYPE_P (type) |
| && TYPE_OVERFLOW_UNDEFINED (type) |
| && !integer_zerop (@0) |
| && (!flag_non_call_exceptions || tree_expr_nonzero_p (@0))) |
| (cond (lt @0 { build_zero_cst (type); }) |
| { build_minus_one_cst (type); } { build_one_cst (type); }))) |
| /* X / -X is -1. */ |
| (simplify |
| (div:C @0 (negate @0)) |
| (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type)) |
| && TYPE_OVERFLOW_UNDEFINED (type) |
| && !integer_zerop (@0) |
| && (!flag_non_call_exceptions || tree_expr_nonzero_p (@0))) |
| { build_minus_one_cst (type); }))) |
| |
| /* For unsigned integral types, FLOOR_DIV_EXPR is the same as |
| TRUNC_DIV_EXPR. Rewrite into the latter in this case. Similarly |
| for MOD instead of DIV. */ |
| (for floor_divmod (floor_div floor_mod) |
| trunc_divmod (trunc_div trunc_mod) |
| (simplify |
| (floor_divmod @0 @1) |
| (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type)) |
| && TYPE_UNSIGNED (type)) |
| (trunc_divmod @0 @1)))) |
| |
| /* 1 / X -> X == 1 for unsigned integer X. |
| 1 / X -> X >= -1 && X <= 1 ? X : 0 for signed integer X. |
| But not for 1 / 0 so that we can get proper warnings and errors, |
| and not for 1-bit integers as they are edge cases better handled |
| elsewhere. */ |
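| /* The signed arm below uses the usual unsigned range trick:
| (unsigned) (X + 1) <= 2 holds exactly for -1 <= X <= 1. */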
| (simplify |
| (trunc_div integer_onep@0 @1) |
| (if (INTEGRAL_TYPE_P (type) |
| && TYPE_PRECISION (type) > 1 |
| && !integer_zerop (@1) |
| && (!flag_non_call_exceptions || tree_expr_nonzero_p (@1))) |
| (if (TYPE_UNSIGNED (type)) |
| (convert (eq:boolean_type_node @1 { build_one_cst (type); })) |
| (with { tree utype = unsigned_type_for (type); } |
| (cond (le (plus (convert:utype @1) { build_one_cst (utype); }) |
| { build_int_cst (utype, 2); }) |
| @1 { build_zero_cst (type); }))))) |
| |
| /* Combine two successive divisions. Note that combining ceil_div |
| and floor_div is trickier and combining round_div even more so. */ |
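| /* For example, (X / 4) / 8 becomes X / 32 when 4 * 8 does not overflow
| in the type of X. */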
| (for div (trunc_div exact_div) |
| (simplify |
| (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2) |
| (with { |
| wi::overflow_type overflow; |
| wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2), |
| TYPE_SIGN (type), &overflow); |
| } |
| (if (div == EXACT_DIV_EXPR |
| || optimize_successive_divisions_p (@2, @3)) |
| (if (!overflow) |
| (div @0 { wide_int_to_tree (type, mul); }) |
| (if (TYPE_UNSIGNED (type) |
| || mul != wi::min_value (TYPE_PRECISION (type), SIGNED)) |
| { build_zero_cst (type); })))))) |
| |
| /* Combine successive multiplications. Similar to above, but handling |
| overflow is different. */ |
| (simplify |
| (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2) |
| (with { |
| wi::overflow_type overflow; |
| wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2), |
| TYPE_SIGN (type), &overflow); |
| } |
| /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN, |
| otherwise undefined overflow implies that @0 must be zero. */ |
| (if (!overflow || TYPE_OVERFLOW_WRAPS (type)) |
| (mult @0 { wide_int_to_tree (type, mul); })))) |
| |
| /* Optimize A / A to 1.0 if we don't care about |
| NaNs or Infinities. */ |
| (simplify |
| (rdiv @0 @0) |
| (if (FLOAT_TYPE_P (type) |
| && ! HONOR_NANS (type) |
| && ! HONOR_INFINITIES (type)) |
| { build_one_cst (type); })) |
| |
| /* Optimize -A / A to -1.0 if we don't care about |
| NaNs or Infinities. */ |
| (simplify |
| (rdiv:C @0 (negate @0)) |
| (if (FLOAT_TYPE_P (type) |
| && ! HONOR_NANS (type) |
| && ! HONOR_INFINITIES (type)) |
| { build_minus_one_cst (type); })) |
| |
| /* PR71078: x / abs(x) -> copysign (1.0, x) */ |
| (simplify |
| (rdiv:C (convert? @0) (convert? (abs @0))) |
| (if (SCALAR_FLOAT_TYPE_P (type) |
| && ! HONOR_NANS (type) |
| && ! HONOR_INFINITIES (type)) |
| (switch |
| (if (types_match (type, float_type_node)) |
| (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0))) |
| (if (types_match (type, double_type_node)) |
| (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0))) |
| (if (types_match (type, long_double_type_node)) |
| (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0)))))) |
| |
| /* In IEEE floating point, x/1 is not equivalent to x for snans. */ |
| (simplify |
| (rdiv @0 real_onep) |
| (if (!tree_expr_maybe_signaling_nan_p (@0)) |
| (non_lvalue @0))) |
| |
| /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */ |
| (simplify |
| (rdiv @0 real_minus_onep) |
| (if (!tree_expr_maybe_signaling_nan_p (@0)) |
| (negate @0))) |
| |
| (if (flag_reciprocal_math) |
| /* Convert (A/B)/C to A/(B*C). */ |
| (simplify |
| (rdiv (rdiv:s @0 @1) @2) |
| (rdiv @0 (mult @1 @2))) |
| |
| /* Canonicalize x / (C1 * y) to (x * C2) / y. */ |
| (simplify |
| (rdiv @0 (mult:s @1 REAL_CST@2)) |
| (with |
| { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); } |
| (if (tem) |
| (rdiv (mult @0 { tem; } ) @1)))) |
| |
| /* Convert A/(B/C) to (A/B)*C */ |
| (simplify |
| (rdiv @0 (rdiv:s @1 @2)) |
| (mult (rdiv @0 @1) @2))) |
| |
| /* Simplify x / (- y) to -x / y. */ |
| (simplify |
| (rdiv @0 (negate @1)) |
| (rdiv (negate @0) @1)) |
| |
| (if (flag_unsafe_math_optimizations) |
| /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/NaN.
| Since C / x may underflow to zero, do this only for unsafe math. */ |
| (for op (lt le gt ge) |
| neg_op (gt ge lt le) |
| (simplify |
| (op (rdiv REAL_CST@0 @1) real_zerop@2) |
| (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1)) |
| (switch |
| (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0))) |
| (op @1 @2)) |
| /* For C < 0, use the inverted operator. */ |
| (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0)) |
| (neg_op @1 @2))))))) |
| |
| /* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */ |
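| /* For example, (X & -16) / 16 becomes X >> 4: the mask clears the low
| four bits, so the division is exact and reduces to a right shift. */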
| (for div (trunc_div ceil_div floor_div round_div exact_div) |
| (simplify |
| (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2) |
| (if (integer_pow2p (@2) |
| && tree_int_cst_sgn (@2) > 0 |
| && tree_nop_conversion_p (type, TREE_TYPE (@0)) |
| && wi::to_wide (@2) + wi::to_wide (@1) == 0) |
| (rshift (convert @0) |
| { build_int_cst (integer_type_node, |
| wi::exact_log2 (wi::to_wide (@2))); })))) |
| |
| /* If ARG1 is a constant, we can convert this to a multiply by the |
| reciprocal. This does not have the same rounding properties, |
| so only do this if -freciprocal-math. We can actually |
| always safely do it if ARG1 is a power of two, but it's hard to |
| tell if it is or not in a portable manner. */ |
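| /* For example, with -freciprocal-math x / 5.0 becomes x * 0.2, while
| x / 2.0 becomes x * 0.5 even without it, since 0.5 is exactly
| representable. */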
| (for cst (REAL_CST COMPLEX_CST VECTOR_CST) |
| (simplify |
| (rdiv @0 cst@1) |
| (if (optimize) |
| (if (flag_reciprocal_math |
| && !real_zerop (@1)) |
| (with |
| { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); } |
| (if (tem) |
| (mult @0 { tem; } ))) |
| (if (cst != COMPLEX_CST) |
| (with { tree inverse = exact_inverse (type, @1); } |
| (if (inverse) |
| (mult @0 { inverse; } )))))))) |
| |
| (for mod (ceil_mod floor_mod round_mod trunc_mod) |
| /* 0 % X is always zero. */ |
| (simplify |
| (mod integer_zerop@0 @1) |
| /* But not for 0 % 0 so that we can get the proper warnings and errors. */ |
| (if (!integer_zerop (@1)) |
| @0)) |
| /* X % 1 is always zero. */ |
| (simplify |
| (mod @0 integer_onep) |
| { build_zero_cst (type); }) |
| /* X % -1 is zero. */ |
| (simplify |
| (mod @0 integer_minus_onep@1) |
| (if (!TYPE_UNSIGNED (type)) |
| { build_zero_cst (type); })) |
| /* X % X is zero. */ |
| (simplify |
| (mod @0 @0) |
| /* But not for 0 % 0 so that we can get the proper warnings and errors. */ |
| (if (!integer_zerop (@0)) |
| { build_zero_cst (type); })) |
| /* (X % Y) % Y is just X % Y. */ |
| (simplify |
| (mod (mod@2 @0 @1) @1) |
| @2) |
| /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */ |
| (simplify |
| (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2) |
| (if (ANY_INTEGRAL_TYPE_P (type) |
| && TYPE_OVERFLOW_UNDEFINED (type) |
| && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2), |
| TYPE_SIGN (type))) |
| { build_zero_cst (type); })) |
| /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned |
| modulo and comparison, since it is simpler and equivalent. */ |
| (for cmp (eq ne) |
| (simplify |
| (cmp (mod @0 integer_pow2p@2) integer_zerop@1) |
| (if (!TYPE_UNSIGNED (TREE_TYPE (@0))) |
| (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); } |
| (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1))))))) |
| |
| /* X % -C is the same as X % C. */ |
| (simplify |
| (trunc_mod @0 INTEGER_CST@1) |
| (if (TYPE_SIGN (type) == SIGNED |
| && !TREE_OVERFLOW (@1) |
| && wi::neg_p (wi::to_wide (@1)) |
| && !TYPE_OVERFLOW_TRAPS (type) |
| /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */ |
| && !sign_bit_p (@1, @1)) |
| (trunc_mod @0 (negate @1)))) |
| |
| /* X % -Y is the same as X % Y. */ |
| (simplify |
| (trunc_mod @0 (convert? (negate @1))) |
| (if (INTEGRAL_TYPE_P (type) |
| && !TYPE_UNSIGNED (type) |
| && !TYPE_OVERFLOW_TRAPS (type) |
| && tree_nop_conversion_p (type, TREE_TYPE (@1)) |
| /* Avoid this transformation if X might be INT_MIN or |
| Y might be -1, because we would then change valid |
| INT_MIN % -(-1) into invalid INT_MIN % -1. */ |
| && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type))) |
| || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION |
| (TREE_TYPE (@1)))))) |
| (trunc_mod @0 (convert @1)))) |
| |
| /* X - (X / Y) * Y is the same as X % Y. */ |
| (simplify |
| (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1))) |
| (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type)) |
| (convert (trunc_mod @0 @1)))) |
| |
| /* x * (1 + y / x) - y -> x - y % x */ |
| (simplify |
| (minus (mult:cs @0 (plus:s (trunc_div:s @1 @0) integer_onep)) @1) |
| (if (INTEGRAL_TYPE_P (type)) |
| (minus @0 (trunc_mod @1 @0)))) |
| |
| /* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR, |
| i.e. "X % C" into "X & (C - 1)", if X and C are positive. |
| Also optimize A % (C << N) where C is a power of 2, |
| to A & ((C << N) - 1). |
| Also optimize "A shift (B % C)", if C is a power of 2, to |
| "A shift (B & (C - 1))". SHIFT operation include "<<" and ">>" |
| and assume (B % C) is nonnegative as shifts negative values would |
| be UB. */ |
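| /* For example, unsigned X % 16 becomes X & 15, and X << (B % 8) becomes
| X << (B & 7). */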
| (match (power_of_two_cand @1) |
| INTEGER_CST@1) |
| (match (power_of_two_cand @1) |
| (lshift INTEGER_CST@1 @2)) |
| (for mod (trunc_mod floor_mod) |
| (for shift (lshift rshift) |
| (simplify |
| (shift @0 (mod @1 (power_of_two_cand@2 @3))) |
| (if (integer_pow2p (@3) && tree_int_cst_sgn (@3) > 0) |
| (shift @0 (bit_and @1 (minus @2 { build_int_cst (TREE_TYPE (@2), |
| 1); })))))) |
| (simplify |
| (mod @0 (convert? (power_of_two_cand@1 @2))) |
| (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0)) |
| /* Allow any integral conversions of the divisor, except |
| conversion from narrower signed to wider unsigned type |
| where if @1 would be negative power of two, the divisor |
| would not be a power of two. */ |
| && INTEGRAL_TYPE_P (type) |
| && INTEGRAL_TYPE_P (TREE_TYPE (@1)) |
| && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1)) |
| || TYPE_UNSIGNED (TREE_TYPE (@1)) |
| || !TYPE_UNSIGNED (type)) |
| && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0) |
| (with { tree utype = TREE_TYPE (@1); |
| if (!TYPE_OVERFLOW_WRAPS (utype)) |
| utype = unsigned_type_for (utype); } |
| (bit_and @0 (convert (minus (convert:utype @1) |
| { build_one_cst (utype); }))))))) |
| |
| /* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */ |
| (simplify |
| (trunc_div (mult @0 integer_pow2p@1) @1) |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && TYPE_UNSIGNED (TREE_TYPE (@0))) |
| (bit_and @0 { wide_int_to_tree |
| (type, wi::mask (TYPE_PRECISION (type) |
| - wi::exact_log2 (wi::to_wide (@1)), |
| false, TYPE_PRECISION (type))); }))) |
| |
| /* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */ |
| (simplify |
| (mult (trunc_div @0 integer_pow2p@1) @1) |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && TYPE_UNSIGNED (TREE_TYPE (@0))) |
| (bit_and @0 (negate @1)))) |
| |
| /* Simplify (t * 2) / 2 -> t. */
| (for div (trunc_div ceil_div floor_div round_div exact_div) |
| (simplify |
| (div (mult:c @0 @1) @1) |
| (if (ANY_INTEGRAL_TYPE_P (type)) |
| (if (TYPE_OVERFLOW_UNDEFINED (type)) |
| @0 |
| #if GIMPLE |
| (with |
| { |
| bool overflowed = true; |
| value_range vr0, vr1; |
| if (INTEGRAL_TYPE_P (type) |
| && get_global_range_query ()->range_of_expr (vr0, @0) |
| && get_global_range_query ()->range_of_expr (vr1, @1) |
| && vr0.kind () == VR_RANGE |
| && vr1.kind () == VR_RANGE) |
| { |
| wide_int wmin0 = vr0.lower_bound (); |
| wide_int wmax0 = vr0.upper_bound (); |
| wide_int wmin1 = vr1.lower_bound (); |
| wide_int wmax1 = vr1.upper_bound (); |
| /* If the multiplication can't overflow/wrap around, then |
| it can be optimized too. */ |
| wi::overflow_type min_ovf, max_ovf; |
| wi::mul (wmin0, wmin1, TYPE_SIGN (type), &min_ovf); |
| wi::mul (wmax0, wmax1, TYPE_SIGN (type), &max_ovf); |
| if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE) |
| { |
| wi::mul (wmin0, wmax1, TYPE_SIGN (type), &min_ovf); |
| wi::mul (wmax0, wmin1, TYPE_SIGN (type), &max_ovf); |
| if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE) |
| overflowed = false; |
| } |
| } |
| } |
| (if (!overflowed) |
| @0)) |
| #endif |
| )))) |
| |
| (for op (negate abs) |
| /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */ |
| (for coss (COS COSH) |
| (simplify |
| (coss (op @0)) |
| (coss @0))) |
| /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */ |
| (for pows (POW) |
| (simplify |
| (pows (op @0) REAL_CST@1) |
| (with { HOST_WIDE_INT n; } |
| (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0) |
| (pows @0 @1))))) |
| /* Likewise for powi. */ |
| (for pows (POWI) |
| (simplify |
| (pows (op @0) INTEGER_CST@1) |
| (if ((wi::to_wide (@1) & 1) == 0) |
| (pows @0 @1)))) |
| /* Strip negate and abs from both operands of hypot. */ |
| (for hypots (HYPOT) |
| (simplify |
| (hypots (op @0) @1) |
| (hypots @0 @1)) |
| (simplify |
| (hypots @0 (op @1)) |
| (hypots @0 @1))) |
| /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */ |
| (for copysigns (COPYSIGN_ALL) |
| (simplify |
| (copysigns (op @0) @1) |
| (copysigns @0 @1)))) |
| |
| /* abs(x)*abs(x) -> x*x. Should be valid for all types. */ |
| (simplify |
| (mult (abs@1 @0) @1) |
| (mult @0 @0)) |
| |
| /* Convert absu(x)*absu(x) -> x*x. */ |
| (simplify |
| (mult (absu@1 @0) @1) |
| (mult (convert@2 @0) @2)) |
| |
| /* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */ |
| (for coss (COS COSH) |
| copysigns (COPYSIGN) |
| (simplify |
| (coss (copysigns @0 @1)) |
| (coss @0))) |
| |
| /* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */ |
| (for pows (POW) |
| copysigns (COPYSIGN) |
| (simplify |
| (pows (copysigns @0 @2) REAL_CST@1) |
| (with { HOST_WIDE_INT n; } |
| (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0) |
| (pows @0 @1))))) |
| /* Likewise for powi. */ |
| (for pows (POWI) |
| copysigns (COPYSIGN) |
| (simplify |
| (pows (copysigns @0 @2) INTEGER_CST@1) |
| (if ((wi::to_wide (@1) & 1) == 0) |
| (pows @0 @1)))) |
| |
| (for hypots (HYPOT) |
| copysigns (COPYSIGN) |
| /* hypot(copysign(x, y), z) -> hypot(x, z). */ |
| (simplify |
| (hypots (copysigns @0 @1) @2) |
| (hypots @0 @2)) |
| /* hypot(x, copysign(y, z)) -> hypot(x, y). */ |
| (simplify |
| (hypots @0 (copysigns @1 @2)) |
| (hypots @0 @1))) |
| |
| /* copysign(x, CST) -> [-]abs (x). */ |
| (for copysigns (COPYSIGN_ALL) |
| (simplify |
| (copysigns @0 REAL_CST@1) |
| (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1))) |
| (negate (abs @0)) |
| (abs @0)))) |
| |
| /* copysign(copysign(x, y), z) -> copysign(x, z). */ |
| (for copysigns (COPYSIGN_ALL) |
| (simplify |
| (copysigns (copysigns @0 @1) @2) |
| (copysigns @0 @2))) |
| |
| /* copysign(x,y)*copysign(x,y) -> x*x. */ |
| (for copysigns (COPYSIGN_ALL) |
| (simplify |
| (mult (copysigns@2 @0 @1) @2) |
| (mult @0 @0))) |
| |
| /* ccos(-x) -> ccos(x). Similarly for ccosh. */ |
| (for ccoss (CCOS CCOSH) |
| (simplify |
| (ccoss (negate @0)) |
| (ccoss @0))) |
| |
| /* cabs(-x) and cabs(conj(x)) -> cabs(x). */
| (for ops (conj negate) |
| (for cabss (CABS) |
| (simplify |
| (cabss (ops @0)) |
| (cabss @0)))) |
| |
| /* Fold (a * (1 << b)) into (a << b) */ |
| (simplify |
| (mult:c @0 (convert? (lshift integer_onep@1 @2))) |
| (if (! FLOAT_TYPE_P (type) |
| && tree_nop_conversion_p (type, TREE_TYPE (@1))) |
| (lshift @0 @2))) |
| |
| /* Fold (1 << (C - x)) where C = precision(type) - 1 |
| into ((1 << C) >> x). */ |
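| /* For example, in a 32-bit unsigned type, 1 << (31 - x) becomes
| (1 << 31) >> x. */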
| (simplify |
| (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3)) |
| (if (INTEGRAL_TYPE_P (type) |
| && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1) |
| && single_use (@1)) |
| (if (TYPE_UNSIGNED (type)) |
| (rshift (lshift @0 @2) @3) |
| (with |
| { tree utype = unsigned_type_for (type); } |
| (convert (rshift (lshift (convert:utype @0) @2) @3)))))) |
| |
| /* Fold ((type)(a<0)) << SIGNBITOFA into ((type)a) & signbit. */ |
| (simplify |
| (lshift (convert (lt @0 integer_zerop@1)) INTEGER_CST@2) |
| (if (TYPE_SIGN (TREE_TYPE (@0)) == SIGNED |
| && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (TREE_TYPE (@0)) - 1)) |
| (with { wide_int wone = wi::one (TYPE_PRECISION (type)); } |
| (bit_and (convert @0) |
| { wide_int_to_tree (type, |
| wi::lshift (wone, wi::to_wide (@2))); })))) |
| |
| /* Fold (-x >> C) into -(x > 0) where C = precision(type) - 1. */ |
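| /* E.g. for a 32-bit int, -x >> 31 is -1 when x > 0 and 0 when x <= 0;
| x == INT_MIN is excluded because negating it would overflow. */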
| (for cst (INTEGER_CST VECTOR_CST) |
| (simplify |
| (rshift (negate:s @0) cst@1) |
| (if (!TYPE_UNSIGNED (type) |
| && TYPE_OVERFLOW_UNDEFINED (type)) |
| (with { tree stype = TREE_TYPE (@1); |
| tree bt = truth_type_for (type); |
| tree zeros = build_zero_cst (type); |
| tree cst = NULL_TREE; } |
| (switch |
| /* Handle scalar case. */ |
| (if (INTEGRAL_TYPE_P (type) |
| /* If we apply the rule to the scalar type before vectorization
| we will enforce the result of the comparison being a bool,
| which requires an extra AND on the result that is
| indistinguishable from when the user actually wanted 0
| or 1 as the result, so it can't be removed. */
| && canonicalize_math_after_vectorization_p () |
| && wi::eq_p (wi::to_wide (@1), TYPE_PRECISION (type) - 1)) |
| (negate (convert (gt @0 { zeros; })))) |
| /* Handle vector case. */ |
| (if (VECTOR_INTEGER_TYPE_P (type) |
| /* First check whether the target has the same mode for vector |
| comparison results as its operands do. */
| && TYPE_MODE (bt) == TYPE_MODE (type) |
| /* Then check to see if the target is able to expand the comparison |
| with the given type later on, otherwise we may ICE. */ |
| && expand_vec_cmp_expr_p (type, bt, GT_EXPR) |
| && (cst = uniform_integer_cst_p (@1)) != NULL |
| && wi::eq_p (wi::to_wide (cst), element_precision (type) - 1)) |
| (view_convert (gt:bt @0 { zeros; })))))))) |
| |
| /* Fold (C1/X)*C2 into (C1*C2)/X. */ |
| (simplify |
| (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2) |
| (if (flag_associative_math |
| && single_use (@3)) |
| (with |
| { tree tem = const_binop (MULT_EXPR, type, @0, @2); } |
| (if (tem) |
| (rdiv { tem; } @1))))) |
| |
| /* Simplify ~X & X as zero. */ |
| (simplify |
| (bit_and:c (convert? @0) (convert? (bit_not @0))) |
| { build_zero_cst (type); }) |
| |
| /* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */ |
| (simplify |
| (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep)) |
| (if (TYPE_UNSIGNED (type)) |
| (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1))))) |
| |
| (for bitop (bit_and bit_ior) |
| cmp (eq ne) |
| /* PR35691: Transform |
| (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0. |
| (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */ |
| (simplify |
| (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop)) |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && INTEGRAL_TYPE_P (TREE_TYPE (@1)) |
| && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))) |
| (cmp (bit_ior @0 (convert @1)) @2))) |
| /* Transform: |
| (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1. |
| (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1. */ |
| (simplify |
| (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp)) |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && INTEGRAL_TYPE_P (TREE_TYPE (@1)) |
| && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))) |
| (cmp (bit_and @0 (convert @1)) @2)))) |
| |
| /* Fold (A & ~B) - (A & B) into (A ^ B) - B. */ |
| (simplify |
| (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1)) |
| (minus (bit_xor @0 @1) @1)) |
| (simplify |
| (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1)) |
| (if (~wi::to_wide (@2) == wi::to_wide (@1)) |
| (minus (bit_xor @0 @1) @1))) |
| |
| /* Fold (A & B) - (A & ~B) into B - (A ^ B). */ |
| (simplify |
| (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1))) |
| (minus @1 (bit_xor @0 @1))) |
| |
| /* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */ |
| (for op (bit_ior bit_xor plus) |
| (simplify |
| (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1)) |
| (bit_xor @0 @1)) |
| (simplify |
| (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1)) |
| (if (~wi::to_wide (@2) == wi::to_wide (@1)) |
| (bit_xor @0 @1)))) |
| |
| /* PR53979: Transform ((a ^ b) | a) -> (a | b) */ |
| (simplify |
| (bit_ior:c (bit_xor:c @0 @1) @0) |
| (bit_ior @0 @1)) |
| |
| /* (a & ~b) | (a ^ b) --> a ^ b */ |
| (simplify |
| (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1)) |
| @2) |
| |
| /* (a & ~b) ^ ~a --> ~(a & b) */ |
| (simplify |
| (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0)) |
| (bit_not (bit_and @0 @1))) |
| |
| /* (~a & b) ^ a --> (a | b) */ |
| (simplify |
| (bit_xor:c (bit_and:cs (bit_not @0) @1) @0) |
| (bit_ior @0 @1)) |
| |
| /* (a | b) & ~(a ^ b) --> a & b */ |
| (simplify |
| (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1))) |
| (bit_and @0 @1)) |
| |
| /* a | ~(a ^ b) --> a | ~b */ |
| (simplify |
| (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1))) |
| (bit_ior @0 (bit_not @1))) |
| |
| /* (a | b) | (a &^ b) --> a | b */ |
| (for op (bit_and bit_xor) |
| (simplify |
| (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1)) |
| @2)) |
| |
| /* (a & b) | ~(a ^ b) --> ~(a ^ b) */ |
| (simplify |
| (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1))) |
| @2) |
| |
| /* ~(~a & b) --> a | ~b */ |
| (simplify |
| (bit_not (bit_and:cs (bit_not @0) @1)) |
| (bit_ior @0 (bit_not @1))) |
| |
| /* ~(~a | b) --> a & ~b */ |
| (simplify |
| (bit_not (bit_ior:cs (bit_not @0) @1)) |
| (bit_and @0 (bit_not @1))) |
| |
| /* (a ^ b) & ((b ^ c) ^ a) --> (a ^ b) & ~c */ |
| (simplify |
| (bit_and:c (bit_xor:c@3 @0 @1) (bit_xor:cs (bit_xor:cs @1 @2) @0)) |
| (bit_and @3 (bit_not @2))) |
| |
| /* (a ^ b) | ((b ^ c) ^ a) --> (a ^ b) | c */ |
| (simplify |
| (bit_ior:c (bit_xor:c@3 @0 @1) (bit_xor:c (bit_xor:c @1 @2) @0)) |
| (bit_ior @3 @2)) |
| |
| #if GIMPLE |
| /* (~X | C) ^ D -> (X | C) ^ (~D ^ C) if (~D ^ C) can be simplified. */ |
| (simplify |
| (bit_xor:c (bit_ior:cs (bit_not:s @0) @1) @2) |
| (bit_xor (bit_ior @0 @1) (bit_xor! (bit_not! @2) @1))) |
| |
| /* (~X & C) ^ D -> (X & C) ^ (D ^ C) if (D ^ C) can be simplified. */ |
| (simplify |
| (bit_xor:c (bit_and:cs (bit_not:s @0) @1) @2) |
| (bit_xor (bit_and @0 @1) (bit_xor! @2 @1))) |
| |
| /* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */ |
| (simplify |
| (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1) |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0) |
| (bit_xor @0 @1))) |
| #endif |
| |
| /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M, |
| ((A & N) + B) & M -> (A + B) & M |
| Similarly if (N & M) == 0, |
| ((A | N) + B) & M -> (A + B) & M |
| and for - instead of + (or unary - instead of +) |
| and/or ^ instead of |. |
| If B is constant and (B & M) == 0, fold into A & M. */ |
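| /* For example, ((A & 3) + B) & 3 -> (A + B) & 3 (here M == N == 3), and
| ((A | 4) + B) & 3 -> (A + B) & 3 since 4 & 3 == 0. */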
| (for op (plus minus) |
| (for bitop (bit_and bit_ior bit_xor) |
| (simplify |
| (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2) |
| (with |
| { tree pmop[2]; |
| tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop, |
| @3, @4, @1, ERROR_MARK, NULL_TREE, |
| NULL_TREE, pmop); } |
| (if (utype) |
| (convert (bit_and (op (convert:utype { pmop[0]; }) |
| (convert:utype { pmop[1]; })) |
| (convert:utype @2)))))) |
| (simplify |
| (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2) |
| (with |
| { tree pmop[2]; |
| tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK, |
| NULL_TREE, NULL_TREE, @1, bitop, @3, |
| @4, pmop); } |
| (if (utype) |
| (convert (bit_and (op (convert:utype { pmop[0]; }) |
| (convert:utype { pmop[1]; })) |
| (convert:utype @2))))))) |
| (simplify |
| (bit_and (op:s @0 @1) INTEGER_CST@2) |
| (with |
| { tree pmop[2]; |
| tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK, |
| NULL_TREE, NULL_TREE, @1, ERROR_MARK, |
| NULL_TREE, NULL_TREE, pmop); } |
| (if (utype) |
| (convert (bit_and (op (convert:utype { pmop[0]; }) |
| (convert:utype { pmop[1]; })) |
| (convert:utype @2))))))) |
| (for bitop (bit_and bit_ior bit_xor) |
| (simplify |
| (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1) |
| (with |
| { tree pmop[2]; |
| tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0, |
| bitop, @2, @3, NULL_TREE, ERROR_MARK, |
| NULL_TREE, NULL_TREE, pmop); } |
| (if (utype) |
| (convert (bit_and (negate (convert:utype { pmop[0]; })) |
| (convert:utype @1))))))) |
| |
| /* X % Y is smaller than Y. */ |
| (for cmp (lt ge) |
| (simplify |
| (cmp (trunc_mod @0 @1) @1) |
| (if (TYPE_UNSIGNED (TREE_TYPE (@0))) |
| { constant_boolean_node (cmp == LT_EXPR, type); }))) |
| (for cmp (gt le) |
| (simplify |
| (cmp @1 (trunc_mod @0 @1)) |
| (if (TYPE_UNSIGNED (TREE_TYPE (@0))) |
| { constant_boolean_node (cmp == GT_EXPR, type); }))) |
| |
| /* x | ~0 -> ~0 */ |
| (simplify |
| (bit_ior @0 integer_all_onesp@1) |
| @1) |
| |
| /* x | 0 -> x */ |
| (simplify |
| (bit_ior @0 integer_zerop) |
| @0) |
| |
| /* x & 0 -> 0 */ |
| (simplify |
| (bit_and @0 integer_zerop@1) |
| @1) |
| |
| /* ~x | x -> -1 */ |
| /* ~x ^ x -> -1 */ |
| /* ~x + x -> -1 */ |
| (for op (bit_ior bit_xor plus) |
| (simplify |
| (op:c (convert? @0) (convert? (bit_not @0))) |
| (convert { build_all_ones_cst (TREE_TYPE (@0)); }))) |
| |
| /* x ^ x -> 0 */ |
| (simplify |
| (bit_xor @0 @0) |
| { build_zero_cst (type); }) |
| |
| /* Canonicalize X ^ ~0 to ~X. */ |
| (simplify |
| (bit_xor @0 integer_all_onesp@1) |
| (bit_not @0)) |
| |
| /* x & ~0 -> x */ |
| (simplify |
| (bit_and @0 integer_all_onesp) |
| (non_lvalue @0)) |
| |
| /* x & x -> x, x | x -> x */ |
| (for bitop (bit_and bit_ior) |
| (simplify |
| (bitop @0 @0) |
| (non_lvalue @0))) |
| |
| /* x & C -> x if we know that x & ~C == 0. */ |
| #if GIMPLE |
| (simplify |
| (bit_and SSA_NAME@0 INTEGER_CST@1) |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0) |
| @0)) |
| #endif |
| |
| /* ~(~X - Y) -> X + Y and ~(~X + Y) -> X - Y. */ |
| (simplify |
| (bit_not (minus (bit_not @0) @1)) |
| (plus @0 @1)) |
| (simplify |
| (bit_not (plus:c (bit_not @0) @1)) |
| (minus @0 @1)) |
| |
| /* ~(X - Y) -> ~X + Y. */ |
| (simplify |
| (bit_not (minus:s @0 @1)) |
| (plus (bit_not @0) @1)) |
| (simplify |
| (bit_not (plus:s @0 INTEGER_CST@1)) |
| (if ((INTEGRAL_TYPE_P (type) |
| && TYPE_UNSIGNED (type)) |
| || (!TYPE_OVERFLOW_SANITIZED (type) |
| && may_negate_without_overflow_p (@1))) |
| (plus (bit_not @0) { const_unop (NEGATE_EXPR, type, @1); }))) |
| |
| #if GIMPLE |
| /* ~X + Y -> (Y - X) - 1. */ |
| (simplify |
| (plus:c (bit_not @0) @1) |
| (if (ANY_INTEGRAL_TYPE_P (type) |
| && TYPE_OVERFLOW_WRAPS (type) |
| /* -1 - X is folded to ~X, so we'd recurse endlessly. */ |
| && !integer_all_onesp (@1)) |
| (plus (minus @1 @0) { build_minus_one_cst (type); }) |
| (if (INTEGRAL_TYPE_P (type) |
| && TREE_CODE (@1) == INTEGER_CST |
| && wi::to_wide (@1) != wi::min_value (TYPE_PRECISION (type), |
| SIGNED)) |
| (minus (plus @1 { build_minus_one_cst (type); }) @0)))) |
| |
| /* ~(X >> Y) -> ~X >> Y if ~X can be simplified. */ |
| (simplify |
| (bit_not (rshift:s @0 @1)) |
| (if (!TYPE_UNSIGNED (TREE_TYPE (@0))) |
| (rshift (bit_not! @0) @1) |
| /* For logical right shifts, this is possible only if @0 doesn't
| have its MSB set, in which case the logical right shift can be
| changed into an arithmetic one. */
| (if (!wi::neg_p (tree_nonzero_bits (@0))) |
| (with { tree stype = signed_type_for (TREE_TYPE (@0)); } |
| (convert (rshift (bit_not! (convert:stype @0)) @1)))))) |
| #endif |
| |
| /* x + (x & 1) -> (x + 1) & ~1 */ |
| (simplify |
| (plus:c @0 (bit_and:s @0 integer_onep@1)) |
| (bit_and (plus @0 @1) (bit_not @1))) |
| |
| /* x & ~(x & y) -> x & ~y */ |
| /* x | ~(x | y) -> x | ~y */ |
| (for bitop (bit_and bit_ior) |
| (simplify |
| (bitop:c @0 (bit_not (bitop:cs @0 @1))) |
| (bitop @0 (bit_not @1)))) |
| |
| /* (~x & y) | ~(x | y) -> ~x */ |
| (simplify |
| (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1))) |
| @2) |
| |
| /* (x | y) ^ (x | ~y) -> ~x */ |
| (simplify |
| (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1))) |
| (bit_not @0)) |
| |
| /* (x & y) | ~(x | y) -> ~(x ^ y) */ |
| (simplify |
| (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1))) |
| (bit_not (bit_xor @0 @1))) |
| |
| /* (~x | y) ^ (x ^ y) -> x | ~y */ |
| (simplify |
| (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1)) |
| (bit_ior @0 (bit_not @1))) |
| |
| /* (x ^ y) | ~(x | y) -> ~(x & y) */ |
| (simplify |
| (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1))) |
| (bit_not (bit_and @0 @1))) |
| |
| /* (x | y) & ~x -> y & ~x */ |
| /* (x & y) | ~x -> y | ~x */ |
| (for bitop (bit_and bit_ior) |
| rbitop (bit_ior bit_and) |
| (simplify |
| (bitop:c (rbitop:c @0 @1) (bit_not@2 @0)) |
| (bitop @1 @2))) |
| |
| /* (x & y) ^ (x | y) -> x ^ y */ |
| (simplify |
| (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1)) |
| (bit_xor @0 @1)) |
| |
| /* (x ^ y) ^ (x | y) -> x & y */ |
| (simplify |
| (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1)) |
| (bit_and @0 @1)) |
| |
| /* (x & y) + (x ^ y) -> x | y */ |
| /* (x & y) | (x ^ y) -> x | y */ |
| /* (x & y) ^ (x ^ y) -> x | y */ |
| (for op (plus bit_ior bit_xor) |
| (simplify |
| (op:c (bit_and @0 @1) (bit_xor @0 @1)) |
| (bit_ior @0 @1))) |
| |
| /* (x & y) + (x | y) -> x + y */ |
| (simplify |
| (plus:c (bit_and @0 @1) (bit_ior @0 @1)) |
| (plus @0 @1)) |
| |
| /* (x + y) - (x | y) -> x & y */ |
| (simplify |
| (minus (plus @0 @1) (bit_ior @0 @1)) |
| (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type) |
| && !TYPE_SATURATING (type)) |
| (bit_and @0 @1))) |
| |
| /* (x + y) - (x & y) -> x | y */ |
| (simplify |
| (minus (plus @0 @1) (bit_and @0 @1)) |
| (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type) |
| && !TYPE_SATURATING (type)) |
| (bit_ior @0 @1))) |
| |
| /* (x | y) - y -> (x & ~y) */ |
| (simplify |
| (minus (bit_ior:cs @0 @1) @1) |
| (bit_and @0 (bit_not @1))) |
| |
| /* (x | y) - (x ^ y) -> x & y */ |
| (simplify |
| (minus (bit_ior @0 @1) (bit_xor @0 @1)) |
| (bit_and @0 @1)) |
| |
| /* (x | y) - (x & y) -> x ^ y */ |
| (simplify |
| (minus (bit_ior @0 @1) (bit_and @0 @1)) |
| (bit_xor @0 @1)) |
| |
| /* (x | y) & ~(x & y) -> x ^ y */ |
| (simplify |
| (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1))) |
| (bit_xor @0 @1)) |
| |
| /* (x | y) & (~x ^ y) -> x & y */ |
| (simplify |
| (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0))) |
| (bit_and @0 @1)) |
| |
| /* (~x | y) & (x | ~y) -> ~(x ^ y) */ |
| (simplify |
| (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1))) |
| (bit_not (bit_xor @0 @1))) |
| |
| /* (~x | y) ^ (x | ~y) -> x ^ y */ |
| (simplify |
| (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1))) |
| (bit_xor @0 @1)) |
| |
| /* ((x & y) - (x | y)) - 1 -> ~(x ^ y) */ |
| (simplify |
| (plus (nop_convert1? (minus@2 (nop_convert2? (bit_and:c @0 @1)) |
| (nop_convert2? (bit_ior @0 @1)))) |
| integer_all_onesp) |
| (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type) |
| && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)) |
| && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2)) |
| && !TYPE_SATURATING (TREE_TYPE (@2))) |
| (bit_not (convert (bit_xor @0 @1))))) |
| (simplify |
| (minus (nop_convert1? (plus@2 (nop_convert2? (bit_and:c @0 @1)) |
| integer_all_onesp)) |
| (nop_convert3? (bit_ior @0 @1))) |
| (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type) |
| && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)) |
| && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2)) |
| && !TYPE_SATURATING (TREE_TYPE (@2))) |
| (bit_not (convert (bit_xor @0 @1))))) |
| (simplify |
| (minus (nop_convert1? (bit_and @0 @1)) |
| (nop_convert2? (plus@2 (nop_convert3? (bit_ior:c @0 @1)) |
| integer_onep))) |
| (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type) |
| && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)) |
| && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2)) |
| && !TYPE_SATURATING (TREE_TYPE (@2))) |
| (bit_not (convert (bit_xor @0 @1))))) |
| |
| /* ~x & ~y -> ~(x | y) |
| ~x | ~y -> ~(x & y) */ |
| (for op (bit_and bit_ior) |
| rop (bit_ior bit_and) |
| (simplify |
| (op (convert1? (bit_not @0)) (convert2? (bit_not @1))) |
| (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) |
| && element_precision (type) <= element_precision (TREE_TYPE (@1))) |
| (bit_not (rop (convert @0) (convert @1)))))) |
| |
| /* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing |
| with a constant, and the two constants have no bits in common, |
| we should treat this as a BIT_IOR_EXPR since this may produce more |
| simplifications. */ |
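| /* For example, (X & 4) + (Y & 3) and (X & 4) ^ (Y & 3) both become
| (X & 4) | (Y & 3), as the masked values have no bits in common. */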
| (for op (bit_xor plus) |
| (simplify |
| (op (convert1? (bit_and@4 @0 INTEGER_CST@1)) |
| (convert2? (bit_and@5 @2 INTEGER_CST@3))) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) |
| && tree_nop_conversion_p (type, TREE_TYPE (@2)) |
| && (wi::to_wide (@1) & wi::to_wide (@3)) == 0) |
| (bit_ior (convert @4) (convert @5))))) |
| |
| /* (X | Y) ^ X -> Y & ~X. */
| (simplify |
| (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0)) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) |
| (convert (bit_and @1 (bit_not @0))))) |
| |
| /* Convert ~X ^ ~Y to X ^ Y. */ |
| (simplify |
| (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1))) |
| (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) |
| && element_precision (type) <= element_precision (TREE_TYPE (@1))) |
| (bit_xor (convert @0) (convert @1)))) |
| |
| /* Convert ~X ^ C to X ^ ~C. */ |
| (simplify |
| (bit_xor (convert? (bit_not @0)) INTEGER_CST@1) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) |
| (bit_xor (convert @0) (bit_not @1)))) |
| |
| /* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */ |
| (for opo (bit_and bit_xor) |
| opi (bit_xor bit_and) |
| (simplify |
| (opo:c (opi:cs @0 @1) @1) |
| (bit_and (bit_not @0) @1))) |
| |
| /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both |
| operands are another bit-wise operation with a common input. If so, |
| distribute the bit operations to save an operation and possibly two if |
| constants are involved. For example, convert |
| (A | B) & (A | C) into A | (B & C) |
| Further simplification will occur if B and C are constants. */ |
| (for op (bit_and bit_ior bit_xor) |
| rop (bit_ior bit_and bit_and) |
| (simplify |
| (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2))) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) |
| && tree_nop_conversion_p (type, TREE_TYPE (@2))) |
| (rop (convert @0) (op (convert @1) (convert @2)))))) |
| |
| /* Some simple reassociation for bit operations, also handled in reassoc. */ |
| /* (X & Y) & Y -> X & Y |
| (X | Y) | Y -> X | Y */ |
| (for op (bit_and bit_ior) |
| (simplify |
| (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1)) |
| @2)) |
| /* (X ^ Y) ^ Y -> X */ |
| (simplify |
| (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1)) |
| (convert @0)) |
| /* (X & Y) & (X & Z) -> (X & Y) & Z |
| (X | Y) | (X | Z) -> (X | Y) | Z */ |
| (for op (bit_and bit_ior) |
| (simplify |
| (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2))) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) |
| && tree_nop_conversion_p (type, TREE_TYPE (@2))) |
| (if (single_use (@5) && single_use (@6)) |
| (op @3 (convert @2)) |
| (if (single_use (@3) && single_use (@4)) |
| (op (convert @1) @5)))))) |
| /* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */ |
| (simplify |
| (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2))) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) |
| && tree_nop_conversion_p (type, TREE_TYPE (@2))) |
| (bit_xor (convert @1) (convert @2)))) |
| |
| /* Convert abs (abs (X)) into abs (X).
| Also absu (absu (X)) into absu (X). */
| (simplify |
| (abs (abs@1 @0)) |
| @1) |
| |
| (simplify |
| (absu (convert@2 (absu@1 @0))) |
| (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1))) |
| @1)) |
| |
| /* Convert abs[u] (-X) -> abs[u] (X). */ |
| (simplify |
| (abs (negate @0)) |
| (abs @0)) |
| |
| (simplify |
| (absu (negate @0)) |
| (absu @0)) |
| |
| /* Convert abs[u] (X) where X is nonnegative -> (X). */ |
| (simplify |
| (abs tree_expr_nonnegative_p@0) |
| @0) |
| |
| (simplify |
| (absu tree_expr_nonnegative_p@0) |
| (convert @0)) |
| |
| /* Simplify (-(X < 0) | 1) * X into abs (X) or absu(X). */ |
| (simplify |
| (mult:c (nop_convert1? |
| (bit_ior (nop_convert2? (negate (convert? (lt @0 integer_zerop)))) |
| integer_onep)) |
| (nop_convert3? @0)) |
| (if (INTEGRAL_TYPE_P (type) |
| && INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && !TYPE_UNSIGNED (TREE_TYPE (@0))) |
| (if (TYPE_UNSIGNED (type))
| (absu @0)
| (abs @0))))
| |
| /* A few cases of fold-const.cc negate_expr_p predicate. */ |
| (match negate_expr_p |
| INTEGER_CST |
| (if ((INTEGRAL_TYPE_P (type) |
| && TYPE_UNSIGNED (type)) |
| || (!TYPE_OVERFLOW_SANITIZED (type) |
| && may_negate_without_overflow_p (t))))) |
| (match negate_expr_p |
| FIXED_CST) |
| (match negate_expr_p |
| (negate @0) |
| (if (!TYPE_OVERFLOW_SANITIZED (type)))) |
| (match negate_expr_p |
| REAL_CST |
| (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t))))) |
| /* VECTOR_CST handling of non-wrapping types would recurse in unsupported |
| ways. */ |
| (match negate_expr_p |
| VECTOR_CST |
| (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type)))) |
| (match negate_expr_p |
| (minus @0 @1) |
| (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)) |
| || (FLOAT_TYPE_P (type) |
| && !HONOR_SIGN_DEPENDENT_ROUNDING (type) |
| && !HONOR_SIGNED_ZEROS (type))))) |
| |
| /* (-A) * (-B) -> A * B */ |
| (simplify |
| (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1)) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) |
| && tree_nop_conversion_p (type, TREE_TYPE (@1))) |
| (mult (convert @0) (convert (negate @1))))) |
| |
| /* -(A + B) -> (-B) - A. */ |
| (simplify |
| (negate (plus:c @0 negate_expr_p@1)) |
| (if (!HONOR_SIGN_DEPENDENT_ROUNDING (type) |
| && !HONOR_SIGNED_ZEROS (type)) |
| (minus (negate @1) @0))) |
| |
| /* -(A - B) -> B - A. */ |
| (simplify |
| (negate (minus @0 @1)) |
| (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type)) |
| || (FLOAT_TYPE_P (type) |
| && !HONOR_SIGN_DEPENDENT_ROUNDING (type) |
| && !HONOR_SIGNED_ZEROS (type))) |
| (minus @1 @0))) |
| (simplify |
| (negate (pointer_diff @0 @1)) |
| (if (TYPE_OVERFLOW_UNDEFINED (type)) |
| (pointer_diff @1 @0))) |
| |
| /* A - B -> A + (-B) if B is easily negatable. */ |
| (simplify |
| (minus @0 negate_expr_p@1) |
| (if (!FIXED_POINT_TYPE_P (type)) |
| (plus @0 (negate @1)))) |
| |
| /* Other simplifications of negation (c.f. fold_negate_expr_1). */ |
| (simplify |
| (negate (mult:c@0 @1 negate_expr_p@2)) |
| (if (! TYPE_UNSIGNED (type) |
| && ! HONOR_SIGN_DEPENDENT_ROUNDING (type) |
| && single_use (@0)) |
| (mult @1 (negate @2)))) |
| |
| (simplify |
| (negate (rdiv@0 @1 negate_expr_p@2)) |
| (if (! HONOR_SIGN_DEPENDENT_ROUNDING (type) |
| && single_use (@0)) |
| (rdiv @1 (negate @2)))) |
| |
| (simplify |
| (negate (rdiv@0 negate_expr_p@1 @2)) |
| (if (! HONOR_SIGN_DEPENDENT_ROUNDING (type) |
| && single_use (@0)) |
| (rdiv (negate @1) @2))) |
| |
| /* Fold -((int)x >> (prec - 1)) into (unsigned)x >> (prec - 1). */ |
| (simplify |
| (negate (convert? (rshift @0 INTEGER_CST@1))) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) |
| && wi::to_wide (@1) == element_precision (type) - 1) |
| (with { tree stype = TREE_TYPE (@0); |
| tree ntype = TYPE_UNSIGNED (stype) ? signed_type_for (stype) |
| : unsigned_type_for (stype); } |
| (if (VECTOR_TYPE_P (type)) |
| (view_convert (rshift (view_convert:ntype @0) @1)) |
| (convert (rshift (convert:ntype @0) @1)))))) |
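| |
| /* Hedged check of the fold above for 32-bit int x: x >> 31 is -1 for |
| negative x and 0 otherwise, so -((int) x >> 31) is 1 or 0, which is |
| exactly (unsigned) x >> 31. */ |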
| |
| /* Try to fold (type) X op CST -> (type) (X op ((type-of-X) CST)) |
| when profitable. |
| For bitwise binary operations apply operand conversions to the |
| binary operation result instead of to the operands. This allows |
| to combine successive conversions and bitwise binary operations. |
| We combine the above two cases by using a conditional convert. */ |
| (for bitop (bit_and bit_ior bit_xor) |
| (simplify |
| (bitop (convert@2 @0) (convert?@3 @1)) |
| (if (((TREE_CODE (@1) == INTEGER_CST |
| && INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && (int_fits_type_p (@1, TREE_TYPE (@0)) |
| || tree_nop_conversion_p (TREE_TYPE (@0), type))) |
| || types_match (@0, @1)) |
| /* ??? This transform conflicts with fold-const.cc doing |
| Convert (T)(x & c) into (T)x & (T)c, if c is an integer |
| constant (if x has signed type, the sign bit cannot be set |
| in c). This folds the extension into the BIT_AND_EXPR. |
| Restrict it to GIMPLE to avoid endless recursions. */ |
| && (bitop != BIT_AND_EXPR || GIMPLE) |
| && (/* That's a good idea if the conversion widens the operand, thus |
| after hoisting the conversion the operation will be narrower. |
| It is also a good idea if the conversion is a nop, as it moves |
| the conversion to one side, allowing the conversions to be combined. */ |
| TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type) |
| /* The conversion check for being a nop can only be done at the gimple |
| level as fold_binary has some re-association code which can conflict |
| with this if there is a "constant" which is not a full INTEGER_CST. */ |
| || (GIMPLE && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type)) |
| /* It's also a good idea if the conversion is to a non-integer |
| mode. */ |
| || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT |
| /* Or if the precision of TO is not the same as the precision |
| of its mode. */ |
| || !type_has_mode_precision_p (type) |
| /* In GIMPLE, replacing two conversions with one new one results |
| in smaller IL. */ |
| || (GIMPLE |
| && TREE_CODE (@1) != INTEGER_CST |
| && tree_nop_conversion_p (type, TREE_TYPE (@0)) |
| && single_use (@2) |
| && single_use (@3)))) |
| (convert (bitop @0 (convert @1))))) |
| /* In GIMPLE, replacing two conversions with one new one results |
| in smaller IL. */ |
| (simplify |
| (convert (bitop:cs@2 (nop_convert:s @0) @1)) |
| (if (GIMPLE |
| && TREE_CODE (@1) != INTEGER_CST |
| && tree_nop_conversion_p (type, TREE_TYPE (@2)) |
| && types_match (type, @0)) |
| (bitop @0 (convert @1))))) |
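| |
| /* Hedged example of the hoisting above: with short a and b, |
| (int) a & (int) b -> (int) (a & b) |
| performs the bitwise operation in the narrower type and applies the |
| widening conversion once, to the result. */ |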
| |
| (for bitop (bit_and bit_ior) |
| rbitop (bit_ior bit_and) |
| /* (x | y) & x -> x */ |
| /* (x & y) | x -> x */ |
| (simplify |
| (bitop:c (rbitop:c @0 @1) @0) |
| @0) |
| /* (~x | y) & x -> x & y */ |
| /* (~x & y) | x -> x | y */ |
| (simplify |
| (bitop:c (rbitop:c (bit_not @0) @1) @0) |
| (bitop @0 @1))) |
| |
| /* ((x | y) & z) | x -> (z & y) | x */ |
| (simplify |
| (bit_ior:c (bit_and:cs (bit_ior:cs @0 @1) @2) @0) |
| (bit_ior (bit_and @2 @1) @0)) |
| |
| /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */ |
| (simplify |
| (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2) |
| (bit_ior (bit_and @0 @2) (bit_and @1 @2))) |
| |
| /* Combine successive equal operations with constants. */ |
| (for bitop (bit_and bit_ior bit_xor) |
| (simplify |
| (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2) |
| (if (!CONSTANT_CLASS_P (@0)) |
| /* This is the canonical form regardless of whether (bitop @1 @2) can be |
| folded to a constant. */ |
| (bitop @0 (bitop @1 @2)) |
| /* In this case we have three constants and (bitop @0 @1) doesn't fold |
| to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if |
| the values involved are such that the operation can't be decided at |
| compile time. Try folding one of @0 or @1 with @2 to see whether |
| that combination can be decided at compile time. |
| |
| Keep the existing form if both folds fail, to avoid endless |
| oscillation. */ |
| (with { tree cst1 = const_binop (bitop, type, @0, @2); } |
| (if (cst1) |
| (bitop @1 { cst1; }) |
| (with { tree cst2 = const_binop (bitop, type, @1, @2); } |
| (if (cst2) |
| (bitop @0 { cst2; })))))))) |
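| |
| /* Hedged example: (x & 0xF0) & 0x0F becomes x & (0xF0 & 0x0F), i.e. |
| x & 0, which subsequently folds to 0; likewise for | and ^. */ |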
| |
| /* Try simple folding for X op !X, and X op X with the help |
| of the truth_valued_p and logical_inverted_value predicates. */ |
| (match truth_valued_p |
| @0 |
| (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))) |
| (for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor) |
| (match truth_valued_p |
| (op @0 @1))) |
| (match truth_valued_p |
| (truth_not @0)) |
| |
| (match (logical_inverted_value @0) |
| (truth_not @0)) |
| (match (logical_inverted_value @0) |
| (bit_not truth_valued_p@0)) |
| (match (logical_inverted_value @0) |
| (eq @0 integer_zerop)) |
| (match (logical_inverted_value @0) |
| (ne truth_valued_p@0 integer_truep)) |
| (match (logical_inverted_value @0) |
| (bit_xor truth_valued_p@0 integer_truep)) |
| |
| /* X & !X -> 0. */ |
| (simplify |
| (bit_and:c @0 (logical_inverted_value @0)) |
| { build_zero_cst (type); }) |
| /* X | !X and X ^ !X -> 1 if X is truth-valued. */ |
| (for op (bit_ior bit_xor) |
| (simplify |
| (op:c truth_valued_p@0 (logical_inverted_value @0)) |
| { constant_boolean_node (true, type); })) |
| /* X ==/!= !X is false/true. */ |
| (for op (eq ne) |
| (simplify |
| (op:c truth_valued_p@0 (logical_inverted_value @0)) |
| { constant_boolean_node (op == NE_EXPR, type); })) |
| |
| /* ~~x -> x */ |
| (simplify |
| (bit_not (bit_not @0)) |
| @0) |
| |
| /* Convert ~ (-A) to A - 1. */ |
| (simplify |
| (bit_not (convert? (negate @0))) |
| (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) |
| || !TYPE_UNSIGNED (TREE_TYPE (@0))) |
| (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); })))) |
| |
| /* Convert - (~A) to A + 1. */ |
| (simplify |
| (negate (nop_convert? (bit_not @0))) |
| (plus (view_convert @0) { build_each_one_cst (type); })) |
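| |
| /* Both follow from two's complement, where -A == ~A + 1: hence |
| ~(-A) == A - 1 and -(~A) == A + 1. */ |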
| |
| /* (a & b) ^ (a == b) -> !(a | b) */ |
| /* (a & b) == (a ^ b) -> !(a | b) */ |
| (for first_op (bit_xor eq) |
| second_op (eq bit_xor) |
| (simplify |
| (first_op:c (bit_and:c truth_valued_p@0 truth_valued_p@1) (second_op:c @0 @1)) |
| (bit_not (bit_ior @0 @1)))) |
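| |
| /* Hedged truth-table check of the two patterns above, for truth-valued |
| a and b: a & b is 1 only at (1,1), where a == b and a ^ b make the |
| whole expression 0; at (0,0) it is 1; at mixed inputs it is 0. That |
| is exactly !(a | b). */ |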
| |
| /* Convert ~ (A - 1) or ~ (A + -1) to -A. */ |
| (simplify |
| (bit_not (convert? (minus @0 integer_each_onep))) |
| (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) |
| || !TYPE_UNSIGNED (TREE_TYPE (@0))) |
| (convert (negate @0)))) |
| (simplify |
| (bit_not (convert? (plus @0 integer_all_onesp))) |
| (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) |
| || !TYPE_UNSIGNED (TREE_TYPE (@0))) |
| (convert (negate @0)))) |
| |
| /* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */ |
| (simplify |
| (bit_not (convert? (bit_xor @0 INTEGER_CST@1))) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) |
| (convert (bit_xor @0 (bit_not @1))))) |
| (simplify |
| (bit_not (convert? (bit_xor:c (bit_not @0) @1))) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) |
| (convert (bit_xor @0 @1)))) |
| |
| /* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */ |
| (simplify |
| (bit_xor:c (nop_convert?:s (bit_not:s @0)) @1) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) |
| (bit_not (bit_xor (view_convert @0) @1)))) |
| |
| /* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */ |
| (simplify |
| (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2)) |
| (bit_xor (bit_and (bit_xor @0 @1) @2) @0)) |
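| |
| /* Hedged per-bit check of the masked merge above: where m is 1, |
| ((x ^ y) & m) ^ x gives x ^ y ^ x == y; where m is 0 it gives |
| 0 ^ x == x. The result thus merges y under the mask into x with one |
| operation fewer than (x & ~m) | (y & m). */ |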
| |
| /* Fold A - (A & B) into ~B & A. */ |
| (simplify |
| (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1))) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) |
| && tree_nop_conversion_p (type, TREE_TYPE (@1))) |
| (convert (bit_and (bit_not @1) @0)))) |
| |
| /* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */ |
| (if (!canonicalize_math_p ()) |
| (for cmp (gt lt ge le) |
| (simplify |
| (mult (convert (cmp @0 @1)) @2) |
| (cond (cmp @0 @1) @2 { build_zero_cst (type); })))) |
| |
| /* For integral types with undefined overflow and C != 0 fold |
| x * C EQ/NE y * C into x EQ/NE y. */ |
| (for cmp (eq ne) |
| (simplify |
| (cmp (mult:c @0 @1) (mult:c @2 @1)) |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) |
| && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) |
| && tree_expr_nonzero_p (@1)) |
| (cmp @0 @2)))) |
| |
| /* For integral types with wrapping overflow and C odd fold |
| x * C EQ/NE y * C into x EQ/NE y. */ |
| (for cmp (eq ne) |
| (simplify |
| (cmp (mult @0 INTEGER_CST@1) (mult @2 @1)) |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) |
| && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)) |
| && (TREE_INT_CST_LOW (@1) & 1) != 0) |
| (cmp @0 @2)))) |
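| |
| /* Hedged rationale: an odd C is a unit modulo 2**prec, so multiplying |
| by it is a bijection on a wrapping type and the common factor can be |
| cancelled; e.g. for unsigned char, x * 3 == y * 3 iff x == y, since |
| 3 * 171 == 1 (mod 256). */ |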
| |
| /* For integral types with undefined overflow and C != 0 fold |
| x * C RELOP y * C into: |
| |
| x RELOP y for nonnegative C |
| y RELOP x for negative C */ |
| (for cmp (lt gt le ge) |
| (simplify |
| (cmp (mult:c @0 @1) (mult:c @2 @1)) |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) |
| && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) |
| (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1)) |
| (cmp @0 @2) |
| (if (TREE_CODE (@1) == INTEGER_CST |
| && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1)))) |
| (cmp @2 @0)))))) |
| |
| /* (X - 1U) <= INT_MAX-1U into (int) X > 0. */ |
| (for cmp (le gt) |
| icmp (gt le) |
| (simplify |
| (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2) |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && TYPE_UNSIGNED (TREE_TYPE (@0)) |
| && TYPE_PRECISION (TREE_TYPE (@0)) > 1 |
| && (wi::to_wide (@2) |
| == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1)) |
| (with { tree stype = signed_type_for (TREE_TYPE (@0)); } |
| (icmp (convert:stype @0) { build_int_cst (stype, 0); }))))) |
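| |
| /* Hedged check for 32-bit unsigned X: X - 1U wraps 0 to 0xFFFFFFFF, so |
| (X - 1U) <= 0x7FFFFFFE holds exactly when X is in [1, 0x7FFFFFFF], |
| i.e. when (int) X > 0. */ |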
| |
| /* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */ |
| (for cmp (simple_comparison) |
| (simplify |
| (cmp (convert?@3 (exact_div @0 INTEGER_CST@2)) (convert? (exact_div @1 @2))) |
| (if (element_precision (@3) >= element_precision (@0) |
| && types_match (@0, @1)) |
| (if (wi::lt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))) |
| (if (!TYPE_UNSIGNED (TREE_TYPE (@3))) |
| (cmp @1 @0) |
| (if (tree_expr_nonzero_p (@0) && tree_expr_nonzero_p (@1)) |
| (with |
| { |
| tree utype = unsigned_type_for (TREE_TYPE (@0)); |
| } |
| (cmp (convert:utype @1) (convert:utype @0))))) |
| (if (wi::gt_p (wi::to_wide (@2), 1, TYPE_SIGN (TREE_TYPE (@2)))) |
| (if (TYPE_UNSIGNED (TREE_TYPE (@0)) || !TYPE_UNSIGNED (TREE_TYPE (@3))) |
| (cmp @0 @1) |
| (with |
| { |
| tree utype = unsigned_type_for (TREE_TYPE (@0)); |
| } |
| (cmp (convert:utype @0) (convert:utype @1))))))))) |
| |
| /* X / C1 op C2 into a simple range test. */ |
| (for cmp (simple_comparison) |
| (simplify |
| (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2) |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && integer_nonzerop (@1) |
| && !TREE_OVERFLOW (@1) |
| && !TREE_OVERFLOW (@2)) |
| (with { tree lo, hi; bool neg_overflow; |
| enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi, |
| &neg_overflow); } |
| (switch |
| (if (code == LT_EXPR || code == GE_EXPR) |
| (if (TREE_OVERFLOW (lo)) |
| { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); } |
| (if (code == LT_EXPR) |
| (lt @0 { lo; }) |
| (ge @0 { lo; })))) |
| (if (code == LE_EXPR || code == GT_EXPR) |
| (if (TREE_OVERFLOW (hi)) |
| { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); } |
| (if (code == LE_EXPR) |
| (le @0 { hi; }) |
| (gt @0 { hi; })))) |
| (if (!lo && !hi) |
| { build_int_cst (type, code == NE_EXPR); }) |
| (if (code == EQ_EXPR && !hi) |
| (ge @0 { lo; })) |
| (if (code == EQ_EXPR && !lo) |
| (le @0 { hi; })) |
| (if (code == NE_EXPR && !hi) |
| (lt @0 { lo; })) |
| (if (code == NE_EXPR && !lo) |
| (gt @0 { hi; })) |
| (if (GENERIC) |
| { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR, |
| lo, hi); }) |
| (with |
| { |
| tree etype = range_check_type (TREE_TYPE (@0)); |
| if (etype) |
| { |
| hi = fold_convert (etype, hi); |
| lo = fold_convert (etype, lo); |
| hi = const_binop (MINUS_EXPR, etype, hi, lo); |
| } |
| } |
| (if (etype && hi && !TREE_OVERFLOW (hi)) |
| (if (code == EQ_EXPR) |
| (le (minus (convert:etype @0) { lo; }) { hi; }) |
| (gt (minus (convert:etype @0) { lo; }) { hi; }))))))))) |
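| |
| /* Hedged example of the range test above: for int x, x / 10 == 3 is |
| equivalent to 30 <= x && x <= 39, which the final branch can emit as |
| the single test (unsigned) (x - 30) <= 9. */ |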
| |
| /* X + Z < Y + Z is the same as X < Y when there is no overflow. */ |
| (for op (lt le ge gt) |
| (simplify |
| (op (plus:c @0 @2) (plus:c @1 @2)) |
| (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) |
| (op @0 @1)))) |
| /* For equality and subtraction, this is also true with wrapping overflow. */ |
| (for op (eq ne minus) |
| (simplify |
| (op (plus:c @0 @2) (plus:c @1 @2)) |
| (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) |
| || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) |
| (op @0 @1)))) |
| |
| /* X - Z < Y - Z is the same as X < Y when there is no overflow. */ |
| (for op (lt le ge gt) |
| (simplify |
| (op (minus @0 @2) (minus @1 @2)) |
| (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) |
| (op @0 @1)))) |
| /* For equality and subtraction, this is also true with wrapping overflow. */ |
| (for op (eq ne minus) |
| (simplify |
| (op (minus @0 @2) (minus @1 @2)) |
| (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) |
| || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) |
| (op @0 @1)))) |
| /* And for pointers... */ |
| (for op (simple_comparison) |
| (simplify |
| (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2)) |
| (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) |
| (op @0 @1)))) |
| (simplify |
| (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2)) |
| (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3)) |
| && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) |
| (pointer_diff @0 @1))) |
| |
| /* Z - X < Z - Y is the same as Y < X when there is no overflow. */ |
| (for op (lt le ge gt) |
| (simplify |
| (op (minus @2 @0) (minus @2 @1)) |
| (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) |
| (op @1 @0)))) |
| /* For equality and subtraction, this is also true with wrapping overflow. */ |
| (for op (eq ne minus) |
| (simplify |
| (op (minus @2 @0) (minus @2 @1)) |
| (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) |
| || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) |
| (op @1 @0)))) |
| /* And for pointers... */ |
| (for op (simple_comparison) |
| (simplify |
| (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1)) |
| (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) |
| (op @1 @0)))) |
| (simplify |
| (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1)) |
| (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3)) |
| && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) |
| (pointer_diff @1 @0))) |
| |
| /* X + Y < Y is the same as X < 0 when there is no overflow. */ |
| (for op (lt le gt ge) |
| (simplify |
| (op:c (plus:c@2 @0 @1) @1) |
| (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) |
| && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)) |
| && (CONSTANT_CLASS_P (@0) || single_use (@2))) |
| (op @0 { build_zero_cst (TREE_TYPE (@0)); })))) |
| /* For equality, this is also true with wrapping overflow. */ |
| (for op (eq ne) |
| (simplify |
| (op:c (nop_convert?@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1)) |
| (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) |
| || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) |
| && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3))) |
| && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2)) |
| && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1))) |
| (op @0 { build_zero_cst (TREE_TYPE (@0)); }))) |
| (simplify |
| (op:c (nop_convert?@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0)) |
| (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)) |
| && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)) |
| && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3)))) |
| (op @1 { build_zero_cst (TREE_TYPE (@1)); })))) |
| |
| /* X - Y < X is the same as Y > 0 when there is no overflow. |
| For equality, this is also true with wrapping overflow. */ |
| (for op (simple_comparison) |
| (simplify |
| (op:c @0 (minus@2 @0 @1)) |
| (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) |
| || ((op == EQ_EXPR || op == NE_EXPR) |
| && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) |
| && (CONSTANT_CLASS_P (@1) || single_use (@2))) |
| (op @1 { build_zero_cst (TREE_TYPE (@1)); })))) |
| |
| /* Transform: |
| (X / Y) == 0 -> X < Y if X, Y are unsigned. |
| (X / Y) != 0 -> X >= Y if X, Y are unsigned. */ |
| (for cmp (eq ne) |
| ocmp (lt ge) |
| (simplify |
| (cmp (trunc_div @0 @1) integer_zerop) |
| (if (TYPE_UNSIGNED (TREE_TYPE (@0)) |
| /* Complex ==/!= is allowed, but not </>=. */ |
| && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE |
| && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0)))) |
| (ocmp @0 @1)))) |
| |
| /* X == C - X can never be true if C is odd: it would require |
| 2 * X == C, and 2 * X is always even, also under wrapping |
| arithmetic. */ |
| (for cmp (eq ne) |
| (simplify |
| (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0)))) |
| (if (TREE_INT_CST_LOW (@1) & 1) |
| { constant_boolean_node (cmp == NE_EXPR, type); }))) |
| |
| /* Arguments on which one can call get_nonzero_bits to get the bits |
| possibly set. */ |
| (match with_possible_nonzero_bits |
| INTEGER_CST@0) |
| (match with_possible_nonzero_bits |
| SSA_NAME@0 |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0))))) |
| /* Slightly extended version, do not make it recursive to keep it cheap. */ |
| (match (with_possible_nonzero_bits2 @0) |
| with_possible_nonzero_bits@0) |
| (match (with_possible_nonzero_bits2 @0) |
| (bit_and:c with_possible_nonzero_bits@0 @2)) |
| |
| /* Same for bits that are known to be set, but we do not have |
| an equivalent to get_nonzero_bits yet. */ |
| (match (with_certain_nonzero_bits2 @0) |
| INTEGER_CST@0) |
| (match (with_certain_nonzero_bits2 @0) |
| (bit_ior @1 INTEGER_CST@0)) |
| |
| /* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */ |
| (for cmp (eq ne) |
| (simplify |
| (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1)) |
| (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0) |
| { constant_boolean_node (cmp == NE_EXPR, type); }))) |
| |
| /* ((X inner_op C0) outer_op C1) |
| With X being a tree where value_range has reasoned certain bits to always be |
| zero throughout its computed value range, |
| inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op, |
| where zero_mask has 1's for all bits that are sure to be 0 in X |
| and 0's otherwise. |
| if (inner_op == '^') C0 &= ~C1; |
| if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1)) |
| if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1)) |
| */ |
| (for inner_op (bit_ior bit_xor) |
| outer_op (bit_xor bit_ior) |
| (simplify |
| (outer_op |
| (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1) |
| (with |
| { |
| bool fail = false; |
| wide_int zero_mask_not; |
| wide_int C0; |
| wide_int cst_emit; |
| |
| if (TREE_CODE (@2) == SSA_NAME) |
| zero_mask_not = get_nonzero_bits (@2); |
| else |
| fail = true; |
| |
| if (inner_op == BIT_XOR_EXPR) |
| { |
| C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1)); |
| cst_emit = C0 | wi::to_wide (@1); |
| } |
| else |
| { |
| C0 = wi::to_wide (@0); |
| cst_emit = C0 ^ wi::to_wide (@1); |
| } |
| } |
| (if (!fail && (C0 & zero_mask_not) == 0) |
| (outer_op @2 { wide_int_to_tree (type, cst_emit); }) |
| (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0) |
| (inner_op @2 { wide_int_to_tree (type, cst_emit); })))))) |
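| |
| /* Hedged worked example: if get_nonzero_bits shows the low nibble of X |
| is always zero, then (X | 0x0C) ^ 0x05 -> X ^ 0x09, because C0 = 0x0C |
| lies entirely within the known-zero bits and 0x0C ^ 0x05 == 0x09. */ |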
| |
| /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */ |
| (simplify |
| (pointer_plus (pointer_plus:s @0 @1) @3) |
| (pointer_plus @0 (plus @1 @3))) |
| #if GENERIC |
| (simplify |
| (pointer_plus (convert:s (pointer_plus:s @0 @1)) @3) |
| (convert:type (pointer_plus @0 (plus @1 @3)))) |
| #endif |
| |
| /* Pattern match |
| tem1 = (long) ptr1; |
| tem2 = (long) ptr2; |
| tem3 = tem2 - tem1; |
| tem4 = (unsigned long) tem3; |
| tem5 = ptr1 + tem4; |
| and produce |
| tem5 = ptr2; */ |
| (simplify |
| (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0)))) |
| /* Conditionally look through a sign-changing conversion. */ |
| (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3)) |
| && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1))) |
| || (GENERIC && type == TREE_TYPE (@1)))) |
| @1)) |
| (simplify |
| (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0))) |
| (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3))) |
| (convert @1))) |
| |
| /* Pattern match |
| tem = (sizetype) ptr; |
| tem = tem & algn; |
| tem = -tem; |
| ... = ptr p+ tem; |
| and produce the simpler form, which is easier to analyze with |
| respect to alignment: |
| ... = ptr & ~algn; */ |
| (simplify |
| (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1))) |
| (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); } |
| (bit_and @0 { algn; }))) |
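| |
| /* Hedged example: with algn == 15, ptr p+ -((sizetype) ptr & 15) rounds |
| ptr down to a 16-byte boundary, which the fold expresses directly as |
| ptr & ~15. */ |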
| |
| /* Try folding difference of addresses. */ |
| (simplify |
| (minus (convert ADDR_EXPR@0) (convert @1)) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) |
| (with { poly_int64 diff; } |
| (if (ptr_difference_const (@0, @1, &diff)) |
| { build_int_cst_type (type, diff); })))) |
| (simplify |
| (minus (convert @0) (convert ADDR_EXPR@1)) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) |
| (with { poly_int64 diff; } |
| (if (ptr_difference_const (@0, @1, &diff)) |
| { build_int_cst_type (type, diff); })))) |
| (simplify |
| (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1)) |
| (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)) |
| && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1))) |
| (with { poly_int64 diff; } |
| (if (ptr_difference_const (@0, @1, &diff)) |
| { build_int_cst_type (type, diff); })))) |
| (simplify |
| (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1)) |
| (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)) |
| && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1))) |
| (with { poly_int64 diff; } |
| (if (ptr_difference_const (@0, @1, &diff)) |
| { build_int_cst_type (type, diff); })))) |
| |
| /* (&a+b) - (&a[1] + c) -> (b - c) - sizeof(a[0]) */ |
| (simplify |
| (pointer_diff (pointer_plus ADDR_EXPR@0 @1) (pointer_plus ADDR_EXPR@2 @3)) |
| (with { poly_int64 diff; } |
| (if (ptr_difference_const (@0, @2, &diff)) |
| (plus { build_int_cst_type (type, diff); } (convert (minus @1 @3)))))) |
| |
| /* (&a+b) !=/== (&a[1] + c) -> b - sizeof(a[0]) !=/== c */ |
| (for neeq (ne eq) |
| (simplify |
| (neeq (pointer_plus ADDR_EXPR@0 @1) (pointer_plus ADDR_EXPR@2 @3)) |
| (with { poly_int64 diff; tree inner_type = TREE_TYPE (@1); } |
| (if (ptr_difference_const (@0, @2, &diff)) |
| (neeq (plus { build_int_cst_type (inner_type, diff); } @1) @3))))) |
| |
| /* Canonicalize (T *)(ptr - ptr-cst) to &MEM[ptr + -ptr-cst]. */ |
| (simplify |
| (convert (pointer_diff @0 INTEGER_CST@1)) |
| (if (POINTER_TYPE_P (type)) |
| { build_fold_addr_expr_with_type |
| (build2 (MEM_REF, char_type_node, @0, |
| wide_int_to_tree (ptr_type_node, wi::neg (wi::to_wide (@1)))), |
| type); })) |
| |
| /* If arg0 is derived from the address of an object or function, we may |
| be able to fold this expression using the object or function's |
| alignment. */ |
| (simplify |
| (bit_and (convert? @0) INTEGER_CST@1) |
| (if (POINTER_TYPE_P (TREE_TYPE (@0)) |
| && tree_nop_conversion_p (type, TREE_TYPE (@0))) |
| (with |
| { |
| unsigned int align; |
| unsigned HOST_WIDE_INT bitpos; |
| get_pointer_alignment_1 (@0, &align, &bitpos); |
| } |
| (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT)) |
| { wide_int_to_tree (type, (wi::to_wide (@1) |
| & (bitpos / BITS_PER_UNIT))); })))) |
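| |
| /* Hedged example: if get_pointer_alignment_1 proves @0 is 8-byte |
| aligned with a byte offset of 4, then (ptr & 7) folds to the constant |
| 4, since every bit covered by the mask is fixed by the alignment. */ |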
| |
| (match min_value |
| INTEGER_CST |
| (if (INTEGRAL_TYPE_P (type) |
| && wi::eq_p (wi::to_wide (t), wi::min_value (type))))) |
| |
| (match max_value |
| INTEGER_CST |
| (if (INTEGRAL_TYPE_P (type) |
| && wi::eq_p (wi::to_wide (t), wi::max_value (type))))) |
| |
| /* x > y && x != XXX_MIN --> x > y |
| x > y && x == XXX_MIN --> false. */ |
| (for eqne (eq ne) |
| (simplify |
| (bit_and:c (gt:c@2 @0 @1) (eqne @0 min_value)) |
| (switch |
| (if (eqne == EQ_EXPR) |
| { constant_boolean_node (false, type); }) |
| (if (eqne == NE_EXPR) |
| @2)))) |
| |
| /* x < y && x != XXX_MAX --> x < y |
| x < y && x == XXX_MAX --> false. */ |
| (for eqne (eq ne) |
| (simplify |
| (bit_and:c (lt:c@2 @0 @1) (eqne @0 max_value)) |
| (switch |
| (if (eqne == EQ_EXPR) |
| { constant_boolean_node (false, type); }) |
| (if (eqne == NE_EXPR) |
| @2)))) |
| |
| /* x <= y && x == XXX_MIN --> x == XXX_MIN. */ |
| (simplify |
| (bit_and:c (le:c @0 @1) (eq@2 @0 min_value)) |
| @2) |
| |
| /* x >= y && x == XXX_MAX --> x == XXX_MAX. */ |
| (simplify |
| (bit_and:c (ge:c @0 @1) (eq@2 @0 max_value)) |
| @2) |
| |
| /* x > y || x != XXX_MIN --> x != XXX_MIN. */ |
| (simplify |
| (bit_ior:c (gt:c @0 @1) (ne@2 @0 min_value)) |
| @2) |
| |
| /* x <= y || x != XXX_MIN --> true. */ |
| (simplify |
| (bit_ior:c (le:c @0 @1) (ne @0 min_value)) |
| { constant_boolean_node (true, type); }) |
| |
| /* x <= y || x == XXX_MIN --> x <= y. */ |
| (simplify |
| (bit_ior:c (le:c@2 @0 @1) (eq @0 min_value)) |
| @2) |
| |
| /* x < y || x != XXX_MAX --> x != XXX_MAX. */ |
| (simplify |
| (bit_ior:c (lt:c @0 @1) (ne@2 @0 max_value)) |
| @2) |
| |
| /* x >= y || x != XXX_MAX --> true |
| x >= y || x == XXX_MAX --> x >= y. */ |
| (for eqne (eq ne) |
| (simplify |
| (bit_ior:c (ge:c@2 @0 @1) (eqne @0 max_value)) |
| (switch |
| (if (eqne == EQ_EXPR) |
| @2) |
| (if (eqne == NE_EXPR) |
| { constant_boolean_node (true, type); })))) |
| |
| /* y == XXX_MIN || x < y --> x <= y - 1 */ |
| (simplify |
| (bit_ior:c (eq:s @1 min_value) (lt:s @0 @1)) |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) |
| && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1))) |
| (le @0 (minus @1 { build_int_cst (TREE_TYPE (@1), 1); })))) |
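| |
| /* Hedged check: with wrapping overflow, y - 1 wraps XXX_MIN to XXX_MAX, |
| so x <= y - 1 is trivially true and absorbs y == XXX_MIN; for any |
| other y, x <= y - 1 is exactly x < y. */ |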
| |
| /* y != XXX_MIN && x >= y --> x > y - 1 */ |
| (simplify |
| (bit_and:c (ne:s @1 min_value) (ge:s @0 @1)) |
| (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) |
| && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1))) |
| (gt @0 (minus @1 { build_int_cst (TREE_TYPE (@1), 1); })))) |
| |
| /* Convert (X == CST1) && (X OP2 CST2) to a known value |
| based on CST1 OP2 CST2. Similarly for (X != CST1). */ |
| |
| (for code1 (eq ne) |
| (for code2 (eq ne lt gt le ge) |
| (simplify |
| (bit_and:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2)) |
| (with |
| { |
| int cmp = tree_int_cst_compare (@1, @2); |
| bool val; |
| switch (code2) |
| { |
| case EQ_EXPR: val = (cmp == 0); break; |
| case NE_EXPR: val = (cmp != 0); break; |
| case LT_EXPR: val = (cmp < 0); break; |
| case GT_EXPR: val = (cmp > 0); break; |
| case LE_EXPR: val = (cmp <= 0); break; |
| case GE_EXPR: val = (cmp >= 0); break; |
| default: gcc_unreachable (); |
| } |
| } |
| (switch |
| (if (code1 == EQ_EXPR && val) @3) |
| (if (code1 == EQ_EXPR && !val) { constant_boolean_node (false, type); }) |
| (if (code1 == NE_EXPR && !val) @4)))))) |
| |
| /* Convert (X OP1 CST1) && (X OP2 CST2). */ |
| |
| (for code1 (lt le gt ge) |
| (for code2 (lt le gt ge) |
| (simplify |
| (bit_and (code1:c@3 @0 INTEGER_CST@1) (code2:c@4 @0 INTEGER_CST@2)) |
| (with |
| { |
| int cmp = tree_int_cst_compare (@1, @2); |
| } |
| (switch |
| /* Choose the more restrictive of two < or <= comparisons. */ |
| (if ((code1 == LT_EXPR || code1 == LE_EXPR) |
| && (code2 == LT_EXPR || code2 == LE_EXPR)) |
| (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR)) |
| @3 |
| @4)) |
| /* Likewise choose the more restrictive of two > or >= comparisons. */ |
| (if ((code1 == GT_EXPR || code1 == GE_EXPR) |
| && (code2 == GT_EXPR || code2 == GE_EXPR)) |
| (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR)) |
| @3 |
| @4)) |
| /* Check for singleton ranges. */ |
| (if (cmp == 0 |
| && ((code1 == LE_EXPR && code2 == GE_EXPR) |
| || (code1 == GE_EXPR && code2 == LE_EXPR))) |
| (eq @0 @1)) |
| /* Check for disjoint ranges. */ |
| (if (cmp <= 0 |
| && (code1 == LT_EXPR || code1 == LE_EXPR) |
| && (code2 == GT_EXPR || code2 == GE_EXPR)) |
| { constant_boolean_node (false, type); }) |
| (if (cmp >= 0 |
| && (code1 == GT_EXPR || code1 == GE_EXPR) |
| && (code2 == LT_EXPR || code2 == LE_EXPR)) |
| { constant_boolean_node (false, type); })))))) |
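| |
| /* Hedged examples of the && combinations above: |
| x < 5 && x < 10 --> x < 5 (more restrictive bound) |
| x <= 3 && x >= 3 --> x == 3 (singleton range) |
| x < 2 && x > 7 --> false (disjoint ranges) */ |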
| |
| /* Convert (X == CST1) || (X OP2 CST2) to a known value |
| based on CST1 OP2 CST2. Similarly for (X != CST1). */ |
| |
| (for code1 (eq ne) |
| (for code2 (eq ne lt gt le ge) |
| (simplify |
| (bit_ior:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2)) |
| (with |
| { |
| int cmp = tree_int_cst_compare (@1, @2); |
| bool val; |
| switch (code2) |
| { |
| case EQ_EXPR: val = (cmp == 0); break; |
| case NE_EXPR: val = (cmp != 0); break; |
| case LT_EXPR: val = (cmp < 0); break; |
| case GT_EXPR: val = (cmp > 0); break; |
| case LE_EXPR: val = (cmp <= 0); break; |
| case GE_EXPR: val = (cmp >= 0); break; |
| default: gcc_unreachable (); |
| } |
| } |
| (switch |
| (if (code1 == EQ_EXPR && val) @4) |
| (if (code1 == NE_EXPR && val) { constant_boolean_node (true, type); }) |
| (if (code1 == NE_EXPR && !val) @3)))))) |
| |
| /* Convert (X OP1 CST1) || (X OP2 CST2). */ |
| |
| (for code1 (lt le gt ge) |
| (for code2 (lt le gt ge) |
| (simplify |
| (bit_ior (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2)) |
| (with |
| { |
| int cmp = tree_int_cst_compare (@1, @2); |
| } |
| (switch |
| /* Choose the less restrictive of two < or <= comparisons. */ |
| (if ((code1 == LT_EXPR || code1 == LE_EXPR) |
| && (code2 == LT_EXPR || code2 == LE_EXPR)) |
| (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR)) |
| @4 |
| @3)) |
| /* Likewise choose the less restrictive of two > or >= comparisons. */ |
| (if ((code1 == GT_EXPR || code1 == GE_EXPR) |
| && (code2 == GT_EXPR || code2 == GE_EXPR)) |
| (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR)) |
| @4 |
| @3)) |
| /* Check for singleton ranges. */ |
| (if (cmp == 0 |
| && ((code1 == LT_EXPR && code2 == GT_EXPR) |
| || (code1 == GT_EXPR && code2 == LT_EXPR))) |
| (ne @0 @2)) |
| /* Check for disjoint ranges. */ |
| (if (cmp >= 0 |
| && (code1 == LT_EXPR || code1 == LE_EXPR) |
| && (code2 == GT_EXPR || code2 == GE_EXPR)) |
| { constant_boolean_node (true, type); }) |
| (if (cmp <= 0 |
| && (code1 == GT_EXPR || code1 == GE_EXPR) |
| && (code2 == LT_EXPR || code2 == LE_EXPR)) |
| { constant_boolean_node (true, type); })))))) |
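| |
| /* Hedged examples of the || combinations above: |
| x < 5 || x < 10 --> x < 10 (less restrictive bound) |
| x < 3 || x > 3 --> x != 3 (all but a single value) |
| x < 7 || x > 2 --> true (the ranges cover everything) */ |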
| |
| /* We can't reassociate at all for saturating types. */ |
| (if (!TYPE_SATURATING (type)) |
| |
| /* Contract negates. */ |
| /* A + (-B) -> A - B */ |
| (simplify |
| (plus:c @0 (convert? (negate @1))) |
| /* Apply STRIP_NOPS on the negate. */ |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) |
| && !TYPE_OVERFLOW_SANITIZED (type)) |
| (with |
| { |
| tree t1 = type; |
| if (INTEGRAL_TYPE_P (type) |
| && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1))) |
| t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1); |
| } |
| (convert (minus (convert:t1 @0) (convert:t1 @1)))))) |
| /* A - (-B) -> A + B */ |
| (simplify |
| (minus @0 (convert? (negate @1))) |
| (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) |
| && !TYPE_OVERFLOW_SANITIZED (type)) |
| (with |
| { |
| tree t1 = type; |
| if (INTEGRAL_TYPE_P (type) |
| && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1))) |
| t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1); |
| } |
| (convert (plus (convert:t1 @0) (convert:t1 @1)))))) |
| /* -(T)(-A) -> (T)A |
| Sign-extension is ok except for INT_MIN, which thankfully cannot |
| happen without overflow. */ |
| (simplify |
| (negate (convert (negate @1))) |
| (if (INTEGRAL_TYPE_P (type) |
| && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1)) |
| || (!TYPE_UNSIGNED (TREE_TYPE (@1)) |
| && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1)))) |
| && !TYPE_OVERFLOW_SANITIZED (type) |
| && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1))) |
| (convert @1))) |
| (simplify |
| (negate (convert negate_expr_p@1)) |
| (if (SCALAR_FLOAT_TYPE_P (type) |
| && ((DECIMAL_FLOAT_TYPE_P (type) |
| == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)) |
| && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1))) |
| || !HONOR_SIGN_DEPENDENT_ROUNDING (type))) |
| (convert (negate @1)))) |
| (simplify |
| (negate (nop_convert? (negate @1))) |
| (if (!TYPE_OVERFLOW_SANITIZED (type) |
| && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1))) |
| (view_convert @1))) |
| |
| /* We can't reassociate floating-point math unless -fassociative-math |
| is in effect, nor fixed-point plus or minus because of saturation |
| to +-Inf. */ |
| (if ((!FLOAT_TYPE_P (type) || flag_associative_math) |
| && !FIXED_POINT_TYPE_P (type)) |
| |
| /* Match patterns that allow contracting a plus-minus pair |
| irrespective of overflow issues. */ |
| /* (A +- B) - A -> +- B */ |
| /* (A +- B) -+ B -> A */ |
| /* A - (A +- B) -> -+ B */ |
| /* A +- (B -+ A) -> +- B */ |
| (simplify |
| (minus (nop_convert1? (plus:c (nop_convert2? @0) @1)) @0) |
| (view_convert @1)) |
| (simplify |
| (minus (nop_convert1? (minus (nop_convert2? @0) @1)) @0) |
| (if (!ANY_INTEGRAL_TYPE_P (type) |
| || TYPE_OVERFLOW_WRAPS (type)) |
| (negate (view_convert @1)) |
| (view_convert (negate @1)))) |
| (simplify |
| (plus:c (nop_convert1? (minus @0 (nop_convert2? @1))) @1) |
| (view_convert @0)) |
| (simplify |
| (minus @0 (nop_convert1? (plus:c (nop_convert2? @0) @1))) |
| (if (!ANY_INTEGRAL_TYPE_P (type) |
| || TYPE_OVERFLOW_WRAPS (type)) |
| (negate (view_convert @1)) |
| (view_convert (negate @1)))) |
| (simplify |
| (minus @0 (nop_convert1? (minus (nop_convert2? @0) @1))) |
| (view_convert @1)) |
| /* (A +- B) + (C - A) -> C +- B */ |
| /* (A + B) - (A - C) -> B + C */ |
| /* More cases are handled with comparisons. */ |
| (simplify |
| (plus:c (plus:c @0 @1) (minus @2 @0)) |
| (plus @2 @1)) |
| (simplify |
| (plus:c (minus @0 @1) (minus @2 @0)) |
| (minus @2 @1)) |
| (simplify |
| (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0)) |
| (if (TYPE_OVERFLOW_UNDEFINED (type) |
| && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))) |
| (pointer_diff @2 @1))) |
| (simplify |
| (minus (plus:c @0 @1) (minus @0 @2)) |
| (plus @1 @2)) |
| |
| /* (A +- CST1) +- CST2 -> A + CST3 |
| Use view_convert because it is safe for vectors and equivalent for |
| scalars. */ |
| (for outer_op (plus minus) |
| (for inner_op (plus minus) |
| neg_inner_op (minus plus) |
| (simplify |
| (outer_op (nop_convert? (inner_op @0 CONSTANT_CLASS_P@1)) |
| CONSTANT_CLASS_P@2) |
| /* If one of the types wraps, use that one. */ |
| (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type)) |
| /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse |
| forever if something doesn't simplify into a constant. */ |
| (if (!CONSTANT_CLASS_P (@0)) |
| (if (outer_op == PLUS_EXPR) |
| (plus (view_convert @0) (inner_op @2 (view_convert @1))) |
| (minus (view_convert @0) (neg_inner_op @2 (view_convert @1))))) |
| (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) |
| || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) |
| (if (outer_op == PLUS_EXPR) |
| (view_convert (plus @0 (inner_op (view_convert @2) @1))) |
| (view_convert (minus @0 (neg_inner_op (view_convert @2) @1)))) |
| /* If the constant operation overflows we cannot do the transform |
| directly as we would introduce undefined overflow, for example |
| with (a - 1) + INT_MIN. */ |
| (if (types_match (type, @0)) |
| (with { tree cst = const_binop (outer_op == inner_op |
| ? PLUS_EXPR : MINUS_EXPR, |
| type, @1, @2); } |
| (if (cst && !TREE_OVERFLOW (cst)) |
| (inner_op @0 { cst; } ) |
| /* X+INT_MAX+1 is X-INT_MIN. */ |
| (if (INTEGRAL_TYPE_P (type) && cst |
| && wi::to_wide (cst) == wi::min_value (type)) |
| (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); }) |
| /* Last resort, use some unsigned type. */ |
| (with { tree utype = unsigned_type_for (type); } |
| (if (utype) |
| (view_convert (inner_op |
| |