/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
This file is consumed by genmatch which produces gimple-match.c
and generic-match.c from it.
Copyright (C) 2014-2017 Free Software Foundation, Inc.
Contributed by Richard Biener <rguenther@suse.de>
and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Generic tree predicates we inherit. */
(define_predicates
integer_onep integer_zerop integer_all_onesp integer_minus_onep
integer_each_onep integer_truep integer_nonzerop
real_zerop real_onep real_minus_onep
zerop
CONSTANT_CLASS_P
tree_expr_nonnegative_p
tree_expr_nonzero_p
integer_valued_real_p
integer_pow2p
HONOR_NANS)
/* Operator lists. */
(define_operator_list tcc_comparison
lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
(define_operator_list swapped_tcc_comparison
gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)
#include "cfn-operators.pd"
/* Define operand lists for math rounding functions {,i,l,ll}FN,
where the versions prefixed with "i" return an int, those prefixed with
"l" return a long and those prefixed with "ll" return a long long.
Also define operand lists:
X<FN>F for all float functions, in the order i, l, ll
X<FN> for all double functions, in the same order
X<FN>L for all long double functions, in the same order. */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
(define_operator_list X##FN##F BUILT_IN_I##FN##F \
BUILT_IN_L##FN##F \
BUILT_IN_LL##FN##F) \
(define_operator_list X##FN BUILT_IN_I##FN \
BUILT_IN_L##FN \
BUILT_IN_LL##FN) \
(define_operator_list X##FN##L BUILT_IN_I##FN##L \
BUILT_IN_L##FN##L \
BUILT_IN_LL##FN##L)
DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
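/* E.g. DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) above expands to the
operator lists XFLOORF (BUILT_IN_IFLOORF BUILT_IN_LFLOORF
BUILT_IN_LLFLOORF), XFLOOR (BUILT_IN_IFLOOR BUILT_IN_LFLOOR
BUILT_IN_LLFLOOR) and XFLOORL (BUILT_IN_IFLOORL BUILT_IN_LFLOORL
BUILT_IN_LLFLOORL). */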
/* As opposed to convert?, this still creates a single pattern, so
it is not a suitable replacement for convert? in all cases. */
(match (nop_convert @0)
(convert @0)
(if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
(match (nop_convert @0)
(view_convert @0)
(if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
&& TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0))
&& tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
/* This one has to be last, or it shadows the others. */
(match (nop_convert @0)
@0)
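/* E.g. a conversion between int and unsigned int of the same precision
is a nop_convert, while a widening conversion such as int -> long is
not; the bare @0 case above makes the conversion optional. */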
/* Simplifications of operations with one constant operand and
simplifications to constants or single values. */
(for op (plus pointer_plus minus bit_ior bit_xor)
(simplify
(op @0 integer_zerop)
(non_lvalue @0)))
/* 0 +p index -> (type)index */
(simplify
(pointer_plus integer_zerop @1)
(non_lvalue (convert @1)))
/* ptr - 0 -> (type)ptr */
(simplify
(pointer_diff @0 integer_zerop)
(convert @0))
/* See if ARG1 is zero and X + ARG1 reduces to X.
Likewise if the operands are reversed. */
(simplify
(plus:c @0 real_zerop@1)
(if (fold_real_zero_addition_p (type, @1, 0))
(non_lvalue @0)))
/* See if ARG1 is zero and X - ARG1 reduces to X. */
(simplify
(minus @0 real_zerop@1)
(if (fold_real_zero_addition_p (type, @1, 1))
(non_lvalue @0)))
/* Simplify x - x.
This is unsafe for certain floats even in non-IEEE formats.
In IEEE, it is unsafe because it does wrong for NaNs.
Also note that operand_equal_p is always false if an operand
is volatile. */
(simplify
(minus @0 @0)
(if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
{ build_zero_cst (type); }))
(simplify
(pointer_diff @@0 @0)
{ build_zero_cst (type); })
(simplify
(mult @0 integer_zerop@1)
@1)
/* Maybe fold x * 0 to 0. The expressions aren't the same
when x is NaN, since x * 0 is also NaN. Nor are they the
same in modes with signed zeros, since multiplying a
negative value by 0 gives -0, not +0. */
(simplify
(mult @0 real_zerop@1)
(if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
@1))
/* In IEEE floating point, x*1 is not equivalent to x for snans.
Likewise for complex arithmetic with signed zeros. */
(simplify
(mult @0 real_onep)
(if (!HONOR_SNANS (type)
&& (!HONOR_SIGNED_ZEROS (type)
|| !COMPLEX_FLOAT_TYPE_P (type)))
(non_lvalue @0)))
/* Transform x * -1.0 into -x. */
(simplify
(mult @0 real_minus_onep)
(if (!HONOR_SNANS (type)
&& (!HONOR_SIGNED_ZEROS (type)
|| !COMPLEX_FLOAT_TYPE_P (type)))
(negate @0)))
(for cmp (gt ge lt le)
outp (convert convert negate negate)
outn (negate negate convert convert)
/* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
/* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
/* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
/* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
(simplify
(cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
(if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
&& types_match (type, TREE_TYPE (@0)))
(switch
(if (types_match (type, float_type_node))
(BUILT_IN_COPYSIGNF @1 (outp @0)))
(if (types_match (type, double_type_node))
(BUILT_IN_COPYSIGN @1 (outp @0)))
(if (types_match (type, long_double_type_node))
(BUILT_IN_COPYSIGNL @1 (outp @0))))))
/* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
/* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
/* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
/* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
(simplify
(cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
(if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
&& types_match (type, TREE_TYPE (@0)))
(switch
(if (types_match (type, float_type_node))
(BUILT_IN_COPYSIGNF @1 (outn @0)))
(if (types_match (type, double_type_node))
(BUILT_IN_COPYSIGN @1 (outn @0)))
(if (types_match (type, long_double_type_node))
(BUILT_IN_COPYSIGNL @1 (outn @0)))))))
/* Transform X * copysign (1.0, X) into abs(X). */
(simplify
(mult:c @0 (COPYSIGN real_onep @0))
(if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
(abs @0)))
/* Transform X * copysign (1.0, -X) into -abs(X). */
(simplify
(mult:c @0 (COPYSIGN real_onep (negate @0)))
(if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
(negate (abs @0))))
/* Transform copysign (CST, X) into copysign (ABS(CST), X). */
(simplify
(COPYSIGN REAL_CST@0 @1)
(if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
(COPYSIGN (negate @0) @1)))
/* X * 1, X / 1 -> X. */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
(simplify
(op @0 integer_onep)
(non_lvalue @0)))
/* (A / (1 << B)) -> (A >> B).
Only for unsigned A. For signed A, this would not preserve rounding
toward zero.
For example: (-1 / ( 1 << B)) != -1 >> B. */
(simplify
(trunc_div @0 (lshift integer_onep@1 @2))
(if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
&& (!VECTOR_TYPE_P (type)
|| target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
|| target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
(rshift @0 @2)))
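/* E.g. for unsigned A, A / (1 << 4) becomes A >> 4. For signed A the
transform would break rounding toward zero: -1 / 16 is 0 while
-1 >> 4 is -1. */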
/* Preserve explicit divisions by 0: the C++ front-end wants to detect
undefined behavior in constexpr evaluation, and assuming that the division
traps enables better optimizations than these anyway. */
(for div (trunc_div ceil_div floor_div round_div exact_div)
/* 0 / X is always zero. */
(simplify
(div integer_zerop@0 @1)
/* But not for 0 / 0 so that we can get the proper warnings and errors. */
(if (!integer_zerop (@1))
@0))
/* X / -1 is -X. */
(simplify
(div @0 integer_minus_onep@1)
(if (!TYPE_UNSIGNED (type))
(negate @0)))
/* X / X is one. */
(simplify
(div @0 @0)
/* But not for 0 / 0 so that we can get the proper warnings and errors.
And not for _Fract types where we can't build 1. */
(if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
{ build_one_cst (type); }))
/* X / abs (X) is X < 0 ? -1 : 1. */
(simplify
(div:C @0 (abs @0))
(if (INTEGRAL_TYPE_P (type)
&& TYPE_OVERFLOW_UNDEFINED (type))
(cond (lt @0 { build_zero_cst (type); })
{ build_minus_one_cst (type); } { build_one_cst (type); })))
/* X / -X is -1. */
(simplify
(div:C @0 (negate @0))
(if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
&& TYPE_OVERFLOW_UNDEFINED (type))
{ build_minus_one_cst (type); })))
/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
(simplify
(floor_div @0 @1)
(if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
&& TYPE_UNSIGNED (type))
(trunc_div @0 @1)))
/* Combine two successive divisions. Note that combining ceil_div
and floor_div is trickier and combining round_div even more so. */
(for div (trunc_div exact_div)
(simplify
(div (div @0 INTEGER_CST@1) INTEGER_CST@2)
(with {
bool overflow_p;
wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
TYPE_SIGN (type), &overflow_p);
}
(if (!overflow_p)
(div @0 { wide_int_to_tree (type, mul); })
(if (TYPE_UNSIGNED (type)
|| mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
{ build_zero_cst (type); })))))
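/* E.g. (X / 3) / 5 becomes X / 15. If C1 * C2 overflows, X / C1 is
already too small in magnitude to reach C2, so the expression folds
to zero; the guard above excludes the signed corner case where
C1 * C2 wraps to the minimum value. */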
/* Combine successive multiplications. Similar to above, but handling
overflow is different. */
(simplify
(mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
(with {
bool overflow_p;
wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
TYPE_SIGN (type), &overflow_p);
}
/* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
otherwise undefined overflow implies that @0 must be zero. */
(if (!overflow_p || TYPE_OVERFLOW_WRAPS (type))
(mult @0 { wide_int_to_tree (type, mul); }))))
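/* E.g. (X * 3) * 5 becomes X * 15. With wrapping overflow the
combined constant is simply the wrapped product, which is still
correct modulo the type precision. */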
/* Optimize A / A to 1.0 if we don't care about
NaNs or Infinities. */
(simplify
(rdiv @0 @0)
(if (FLOAT_TYPE_P (type)
&& ! HONOR_NANS (type)
&& ! HONOR_INFINITIES (type))
{ build_one_cst (type); }))
/* Optimize -A / A to -1.0 if we don't care about
NaNs or Infinities. */
(simplify
(rdiv:C @0 (negate @0))
(if (FLOAT_TYPE_P (type)
&& ! HONOR_NANS (type)
&& ! HONOR_INFINITIES (type))
{ build_minus_one_cst (type); }))
/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
(rdiv:C (convert? @0) (convert? (abs @0)))
(if (SCALAR_FLOAT_TYPE_P (type)
&& ! HONOR_NANS (type)
&& ! HONOR_INFINITIES (type))
(switch
(if (types_match (type, float_type_node))
(BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
(if (types_match (type, double_type_node))
(BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
(if (types_match (type, long_double_type_node))
(BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
/* In IEEE floating point, x/1 is not equivalent to x for snans. */
(simplify
(rdiv @0 real_onep)
(if (!HONOR_SNANS (type))
(non_lvalue @0)))
/* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
(simplify
(rdiv @0 real_minus_onep)
(if (!HONOR_SNANS (type))
(negate @0)))
(if (flag_reciprocal_math)
/* Convert (A/B)/C to A/(B*C). */
(simplify
(rdiv (rdiv:s @0 @1) @2)
(rdiv @0 (mult @1 @2)))
/* Canonicalize x / (C1 * y) to (x * C2) / y. */
(simplify
(rdiv @0 (mult:s @1 REAL_CST@2))
(with
{ tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
(if (tem)
(rdiv (mult @0 { tem; } ) @1))))
/* Convert A/(B/C) to (A/B)*C */
(simplify
(rdiv @0 (rdiv:s @1 @2))
(mult (rdiv @0 @1) @2)))
/* Simplify x / (- y) to -x / y. */
(simplify
(rdiv @0 (negate @1))
(rdiv (negate @0) @1))
/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
(for div (trunc_div ceil_div floor_div round_div exact_div)
(simplify
(div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
(if (integer_pow2p (@2)
&& tree_int_cst_sgn (@2) > 0
&& tree_nop_conversion_p (type, TREE_TYPE (@0))
&& wi::to_wide (@2) + wi::to_wide (@1) == 0)
(rshift (convert @0)
{ build_int_cst (integer_type_node,
wi::exact_log2 (wi::to_wide (@2))); }))))
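/* E.g. (X & -16) / 16 becomes X >> 4: the mask clears exactly the
bits the shift would discard, so the division is exact and all the
division variants agree with the shift. */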
/* If ARG1 is a constant, we can convert this to a multiply by the
reciprocal. This does not have the same rounding properties,
so only do this if -freciprocal-math. We can actually
always safely do it if ARG1 is a power of two, but it's hard to
tell if it is or not in a portable manner. */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
(simplify
(rdiv @0 cst@1)
(if (optimize)
(if (flag_reciprocal_math
&& !real_zerop (@1))
(with
{ tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
(if (tem)
(mult @0 { tem; } )))
(if (cst != COMPLEX_CST)
(with { tree inverse = exact_inverse (type, @1); }
(if (inverse)
(mult @0 { inverse; } ))))))))
(for mod (ceil_mod floor_mod round_mod trunc_mod)
/* 0 % X is always zero. */
(simplify
(mod integer_zerop@0 @1)
/* But not for 0 % 0 so that we can get the proper warnings and errors. */
(if (!integer_zerop (@1))
@0))
/* X % 1 is always zero. */
(simplify
(mod @0 integer_onep)
{ build_zero_cst (type); })
/* X % -1 is zero. */
(simplify
(mod @0 integer_minus_onep@1)
(if (!TYPE_UNSIGNED (type))
{ build_zero_cst (type); }))
/* X % X is zero. */
(simplify
(mod @0 @0)
/* But not for 0 % 0 so that we can get the proper warnings and errors. */
(if (!integer_zerop (@0))
{ build_zero_cst (type); }))
/* (X % Y) % Y is just X % Y. */
(simplify
(mod (mod@2 @0 @1) @1)
@2)
/* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
(simplify
(mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
(if (ANY_INTEGRAL_TYPE_P (type)
&& TYPE_OVERFLOW_UNDEFINED (type)
&& wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
TYPE_SIGN (type)))
{ build_zero_cst (type); })))
/* X % -C is the same as X % C. */
(simplify
(trunc_mod @0 INTEGER_CST@1)
(if (TYPE_SIGN (type) == SIGNED
&& !TREE_OVERFLOW (@1)
&& wi::neg_p (wi::to_wide (@1))
&& !TYPE_OVERFLOW_TRAPS (type)
/* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
&& !sign_bit_p (@1, @1))
(trunc_mod @0 (negate @1))))
/* X % -Y is the same as X % Y. */
(simplify
(trunc_mod @0 (convert? (negate @1)))
(if (INTEGRAL_TYPE_P (type)
&& !TYPE_UNSIGNED (type)
&& !TYPE_OVERFLOW_TRAPS (type)
&& tree_nop_conversion_p (type, TREE_TYPE (@1))
/* Avoid this transformation if X might be INT_MIN or
Y might be -1, because we would then change valid
INT_MIN % -(-1) into invalid INT_MIN % -1. */
&& (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
|| expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
(TREE_TYPE (@1))))))
(trunc_mod @0 (convert @1))))
/* X - (X / Y) * Y is the same as X % Y. */
(simplify
(minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
(if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
(convert (trunc_mod @0 @1))))
/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
i.e. "X % C" into "X & (C - 1)", if X and C are positive.
Also optimize A % (C << N) where C is a power of 2,
to A & ((C << N) - 1). */
(match (power_of_two_cand @1)
INTEGER_CST@1)
(match (power_of_two_cand @1)
(lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
(simplify
(mod @0 (convert?@3 (power_of_two_cand@1 @2)))
(if ((TYPE_UNSIGNED (type)
|| tree_expr_nonnegative_p (@0))
&& tree_nop_conversion_p (type, TREE_TYPE (@3))
&& integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
(bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
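/* E.g. for unsigned X, X % 16 becomes X & 15 and X % (4 << N)
becomes X & ((4 << N) - 1). */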
/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
(simplify
(trunc_div (mult @0 integer_pow2p@1) @1)
(if (TYPE_UNSIGNED (TREE_TYPE (@0)))
(bit_and @0 { wide_int_to_tree
(type, wi::mask (TYPE_PRECISION (type)
- wi::exact_log2 (wi::to_wide (@1)),
false, TYPE_PRECISION (type))); })))
/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
(simplify
(mult (trunc_div @0 integer_pow2p@1) @1)
(if (TYPE_UNSIGNED (TREE_TYPE (@0)))
(bit_and @0 (negate @1))))
/* Simplify (t * 2) / 2 -> t. */
(for div (trunc_div ceil_div floor_div round_div exact_div)
(simplify
(div (mult @0 @1) @1)
(if (ANY_INTEGRAL_TYPE_P (type)
&& TYPE_OVERFLOW_UNDEFINED (type))
@0)))
(for op (negate abs)
/* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
(for coss (COS COSH)
(simplify
(coss (op @0))
(coss @0)))
/* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
(for pows (POW)
(simplify
(pows (op @0) REAL_CST@1)
(with { HOST_WIDE_INT n; }
(if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
(pows @0 @1)))))
/* Likewise for powi. */
(for pows (POWI)
(simplify
(pows (op @0) INTEGER_CST@1)
(if ((wi::to_wide (@1) & 1) == 0)
(pows @0 @1))))
/* Strip negate and abs from both operands of hypot. */
(for hypots (HYPOT)
(simplify
(hypots (op @0) @1)
(hypots @0 @1))
(simplify
(hypots @0 (op @1))
(hypots @0 @1)))
/* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
(for copysigns (COPYSIGN)
(simplify
(copysigns (op @0) @1)
(copysigns @0 @1))))
/* abs(x)*abs(x) -> x*x. Should be valid for all types. */
(simplify
(mult (abs@1 @0) @1)
(mult @0 @0))
/* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
(for coss (COS COSH)
copysigns (COPYSIGN)
(simplify
(coss (copysigns @0 @1))
(coss @0)))
/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
(for pows (POW)
copysigns (COPYSIGN)
(simplify
(pows (copysigns @0 @2) REAL_CST@1)
(with { HOST_WIDE_INT n; }
(if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
(pows @0 @1)))))
/* Likewise for powi. */
(for pows (POWI)
copysigns (COPYSIGN)
(simplify
(pows (copysigns @0 @2) INTEGER_CST@1)
(if ((wi::to_wide (@1) & 1) == 0)
(pows @0 @1))))
(for hypots (HYPOT)
copysigns (COPYSIGN)
/* hypot(copysign(x, y), z) -> hypot(x, z). */
(simplify
(hypots (copysigns @0 @1) @2)
(hypots @0 @2))
/* hypot(x, copysign(y, z)) -> hypot(x, y). */
(simplify
(hypots @0 (copysigns @1 @2))
(hypots @0 @1)))
/* copysign(x, CST) -> [-]abs (x). */
(for copysigns (COPYSIGN)
(simplify
(copysigns @0 REAL_CST@1)
(if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
(negate (abs @0))
(abs @0))))
/* copysign(copysign(x, y), z) -> copysign(x, z). */
(for copysigns (COPYSIGN)
(simplify
(copysigns (copysigns @0 @1) @2)
(copysigns @0 @2)))
/* copysign(x,y)*copysign(x,y) -> x*x. */
(for copysigns (COPYSIGN)
(simplify
(mult (copysigns@2 @0 @1) @2)
(mult @0 @0)))
/* ccos(-x) -> ccos(x). Similarly for ccosh. */
(for ccoss (CCOS CCOSH)
(simplify
(ccoss (negate @0))
(ccoss @0)))
/* cabs(-x) and cabs(conj(x)) -> cabs(x). */
(for ops (conj negate)
(for cabss (CABS)
(simplify
(cabss (ops @0))
(cabss @0))))
/* Fold (a * (1 << b)) into (a << b) */
(simplify
(mult:c @0 (convert? (lshift integer_onep@1 @2)))
(if (! FLOAT_TYPE_P (type)
&& tree_nop_conversion_p (type, TREE_TYPE (@1)))
(lshift @0 @2)))
/* Fold (1 << (C - x)) where C = precision(type) - 1
into ((1 << C) >> x). */
(simplify
(lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
(if (INTEGRAL_TYPE_P (type)
&& wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
&& single_use (@1))
(if (TYPE_UNSIGNED (type))
(rshift (lshift @0 @2) @3)
(with
{ tree utype = unsigned_type_for (type); }
(convert (rshift (lshift (convert:utype @0) @2) @3))))))
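/* E.g. for 32-bit int, 1 << (31 - x) becomes (1 << 31) >> x,
performed in the corresponding unsigned type when the type is
signed so the left shift into the sign bit is well defined. */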
/* Fold (C1/X)*C2 into (C1*C2)/X. */
(simplify
(mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
(if (flag_associative_math
&& single_use (@3))
(with
{ tree tem = const_binop (MULT_EXPR, type, @0, @2); }
(if (tem)
(rdiv { tem; } @1)))))
/* Simplify ~X & X as zero. */
(simplify
(bit_and:c (convert? @0) (convert? (bit_not @0)))
{ build_zero_cst (type); })
/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b). */
(simplify
(bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
(if (TYPE_UNSIGNED (type))
(bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
(for bitop (bit_and bit_ior)
cmp (eq ne)
/* PR35691: Transform
(x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
(x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */
(simplify
(bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& INTEGRAL_TYPE_P (TREE_TYPE (@1))
&& TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
(cmp (bit_ior @0 (convert @1)) @2)))
/* Transform:
(x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
(x != -1 | y != -1) -> (x & typeof(x)(y)) != -1. */
(simplify
(bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& INTEGRAL_TYPE_P (TREE_TYPE (@1))
&& TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
(cmp (bit_and @0 (convert @1)) @2))))
/* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
(simplify
(minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
(minus (bit_xor @0 @1) @1))
(simplify
(minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
(if (~wi::to_wide (@2) == wi::to_wide (@1))
(minus (bit_xor @0 @1) @1)))
/* Fold (A & B) - (A & ~B) into B - (A ^ B). */
(simplify
(minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
(minus @1 (bit_xor @0 @1)))
/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */
(for op (bit_ior bit_xor plus)
(simplify
(op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
(bit_xor @0 @1))
(simplify
(op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
(if (~wi::to_wide (@2) == wi::to_wide (@1))
(bit_xor @0 @1))))
/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
(simplify
(bit_ior:c (bit_xor:c @0 @1) @0)
(bit_ior @0 @1))
/* (a & ~b) | (a ^ b) --> a ^ b */
(simplify
(bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
@2)
/* (a & ~b) ^ ~a --> ~(a & b) */
(simplify
(bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
(bit_not (bit_and @0 @1)))
/* (a | b) & ~(a ^ b) --> a & b */
(simplify
(bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
(bit_and @0 @1))
/* a | ~(a ^ b) --> a | ~b */
(simplify
(bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
(bit_ior @0 (bit_not @1)))
/* (a | b) | (a &^ b) --> a | b */
(for op (bit_and bit_xor)
(simplify
(bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
@2))
/* (a & b) | ~(a ^ b) --> ~(a ^ b) */
(simplify
(bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
@2)
/* ~(~a & b) --> a | ~b */
(simplify
(bit_not (bit_and:cs (bit_not @0) @1))
(bit_ior @0 (bit_not @1)))
/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
#if GIMPLE
(simplify
(bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
(bit_xor @0 @1)))
#endif
/* X % Y is smaller than Y. */
(for cmp (lt ge)
(simplify
(cmp (trunc_mod @0 @1) @1)
(if (TYPE_UNSIGNED (TREE_TYPE (@0)))
{ constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
(simplify
(cmp @1 (trunc_mod @0 @1))
(if (TYPE_UNSIGNED (TREE_TYPE (@0)))
{ constant_boolean_node (cmp == GT_EXPR, type); })))
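/* E.g. for unsigned operands, (X % Y) < Y folds to true and
Y <= (X % Y) folds to false (X % Y is undefined for Y == 0
anyway). */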
/* x | ~0 -> ~0 */
(simplify
(bit_ior @0 integer_all_onesp@1)
@1)
/* x | 0 -> x */
(simplify
(bit_ior @0 integer_zerop)
@0)
/* x & 0 -> 0 */
(simplify
(bit_and @0 integer_zerop@1)
@1)
/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
(simplify
(op:c (convert? @0) (convert? (bit_not @0)))
(convert { build_all_ones_cst (TREE_TYPE (@0)); })))
/* x ^ x -> 0 */
(simplify
(bit_xor @0 @0)
{ build_zero_cst (type); })
/* Canonicalize X ^ ~0 to ~X. */
(simplify
(bit_xor @0 integer_all_onesp@1)
(bit_not @0))
/* x & ~0 -> x */
(simplify
(bit_and @0 integer_all_onesp)
(non_lvalue @0))
/* x & x -> x, x | x -> x */
(for bitop (bit_and bit_ior)
(simplify
(bitop @0 @0)
(non_lvalue @0)))
/* x & C -> x if we know that x & ~C == 0. */
#if GIMPLE
(simplify
(bit_and SSA_NAME@0 INTEGER_CST@1)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
@0))
#endif
/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
(plus:c @0 (bit_and:s @0 integer_onep@1))
(bit_and (plus @0 @1) (bit_not @1)))
/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y */
(for bitop (bit_and bit_ior)
(simplify
(bitop:c @0 (bit_not (bitop:cs @0 @1)))
(bitop @0 (bit_not @1))))
/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
(for bitop (bit_and bit_ior)
rbitop (bit_ior bit_and)
(simplify
(bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
(bitop @1 @2)))
/* (x & y) ^ (x | y) -> x ^ y */
(simplify
(bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
(bit_xor @0 @1))
/* (x ^ y) ^ (x | y) -> x & y */
(simplify
(bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
(bit_and @0 @1))
/* (x & y) + (x ^ y) -> x | y */
/* (x & y) | (x ^ y) -> x | y */
/* (x & y) ^ (x ^ y) -> x | y */
(for op (plus bit_ior bit_xor)
(simplify
(op:c (bit_and @0 @1) (bit_xor @0 @1))
(bit_ior @0 @1)))
/* (x & y) + (x | y) -> x + y */
(simplify
(plus:c (bit_and @0 @1) (bit_ior @0 @1))
(plus @0 @1))
/* (x + y) - (x | y) -> x & y */
(simplify
(minus (plus @0 @1) (bit_ior @0 @1))
(if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
&& !TYPE_SATURATING (type))
(bit_and @0 @1)))
/* (x + y) - (x & y) -> x | y */
(simplify
(minus (plus @0 @1) (bit_and @0 @1))
(if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
&& !TYPE_SATURATING (type))
(bit_ior @0 @1)))
/* (x | y) - (x ^ y) -> x & y */
(simplify
(minus (bit_ior @0 @1) (bit_xor @0 @1))
(bit_and @0 @1))
/* (x | y) - (x & y) -> x ^ y */
(simplify
(minus (bit_ior @0 @1) (bit_and @0 @1))
(bit_xor @0 @1))
/* (x | y) & ~(x & y) -> x ^ y */
(simplify
(bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
(bit_xor @0 @1))
/* (x | y) & (~x ^ y) -> x & y */
(simplify
(bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
(bit_and @0 @1))
/* ~x & ~y -> ~(x | y)
~x | ~y -> ~(x & y) */
(for op (bit_and bit_ior)
rop (bit_ior bit_and)
(simplify
(op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
(if (element_precision (type) <= element_precision (TREE_TYPE (@0))
&& element_precision (type) <= element_precision (TREE_TYPE (@1)))
(bit_not (rop (convert @0) (convert @1))))))
/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
with a constant, and the two constants have no bits in common,
we should treat this as a BIT_IOR_EXPR since this may produce more
simplifications. */
(for op (bit_xor plus)
(simplify
(op (convert1? (bit_and@4 @0 INTEGER_CST@1))
(convert2? (bit_and@5 @2 INTEGER_CST@3)))
(if (tree_nop_conversion_p (type, TREE_TYPE (@0))
&& tree_nop_conversion_p (type, TREE_TYPE (@2))
&& (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
(bit_ior (convert @4) (convert @5)))))
/* (X | Y) ^ X -> Y & ~X. */
(simplify
(bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
(if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
(convert (bit_and @1 (bit_not @0)))))
/* Convert ~X ^ ~Y to X ^ Y. */
(simplify
(bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
(if (element_precision (type) <= element_precision (TREE_TYPE (@0))
&& element_precision (type) <= element_precision (TREE_TYPE (@1)))
(bit_xor (convert @0) (convert @1))))
/* Convert ~X ^ C to X ^ ~C. */
(simplify
(bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
(if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
(bit_xor (convert @0) (bit_not @1))))
/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */
(for opo (bit_and bit_xor)
opi (bit_xor bit_and)
(simplify
(opo:c (opi:c @0 @1) @1)
(bit_and (bit_not @0) @1)))
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
operands are another bit-wise operation with a common input. If so,
distribute the bit operations to save an operation and possibly two if
constants are involved. For example, convert
(A | B) & (A | C) into A | (B & C)
Further simplification will occur if B and C are constants. */
(for op (bit_and bit_ior bit_xor)
rop (bit_ior bit_and bit_and)
(simplify
(op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
(if (tree_nop_conversion_p (type, TREE_TYPE (@1))
&& tree_nop_conversion_p (type, TREE_TYPE (@2)))
(rop (convert @0) (op (convert @1) (convert @2))))))
/* Some simple reassociation for bit operations, also handled in reassoc. */
/* (X & Y) & Y -> X & Y
(X | Y) | Y -> X | Y */
(for op (bit_and bit_ior)
(simplify
(op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
@2))
/* (X ^ Y) ^ Y -> X */
(simplify
(bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
(convert @0))
/* (X & Y) & (X & Z) -> (X & Y) & Z
(X | Y) | (X | Z) -> (X | Y) | Z */
(for op (bit_and bit_ior)
(simplify
(op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
(if (tree_nop_conversion_p (type, TREE_TYPE (@1))
&& tree_nop_conversion_p (type, TREE_TYPE (@2)))
(if (single_use (@5) && single_use (@6))
(op @3 (convert @2))
(if (single_use (@3) && single_use (@4))
(op (convert @1) @5))))))
/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */
(simplify
(bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
(if (tree_nop_conversion_p (type, TREE_TYPE (@1))
&& tree_nop_conversion_p (type, TREE_TYPE (@2)))
(bit_xor (convert @1) (convert @2))))
(simplify
(abs (abs@1 @0))
@1)
(simplify
(abs (negate @0))
(abs @0))
(simplify
(abs tree_expr_nonnegative_p@0)
@0)
/* A few cases of fold-const.c negate_expr_p predicate. */
(match negate_expr_p
INTEGER_CST
(if ((INTEGRAL_TYPE_P (type)
&& TYPE_UNSIGNED (type))
|| (!TYPE_OVERFLOW_SANITIZED (type)
&& may_negate_without_overflow_p (t)))))
(match negate_expr_p
FIXED_CST)
(match negate_expr_p
(negate @0)
(if (!TYPE_OVERFLOW_SANITIZED (type))))
(match negate_expr_p
REAL_CST
(if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
ways. */
(match negate_expr_p
VECTOR_CST
(if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
(match negate_expr_p
(minus @0 @1)
(if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
|| (FLOAT_TYPE_P (type)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (type)
&& !HONOR_SIGNED_ZEROS (type)))))
/* (-A) * (-B) -> A * B */
(simplify
(mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
(if (tree_nop_conversion_p (type, TREE_TYPE (@0))
&& tree_nop_conversion_p (type, TREE_TYPE (@1)))
(mult (convert @0) (convert (negate @1)))))
/* -(A + B) -> (-B) - A. */
(simplify
(negate (plus:c @0 negate_expr_p@1))
(if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
&& !HONOR_SIGNED_ZEROS (element_mode (type)))
(minus (negate @1) @0)))
/* -(A - B) -> B - A. */
(simplify
(negate (minus @0 @1))
(if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
|| (FLOAT_TYPE_P (type)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (type)
&& !HONOR_SIGNED_ZEROS (type)))
(minus @1 @0)))
(simplify
(negate (pointer_diff @0 @1))
(if (TYPE_OVERFLOW_UNDEFINED (type))
(pointer_diff @1 @0)))
/* A - B -> A + (-B) if B is easily negatable. */
(simplify
(minus @0 negate_expr_p@1)
(if (!FIXED_POINT_TYPE_P (type))
(plus @0 (negate @1))))
/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
when profitable.
For bitwise binary operations apply operand conversions to the
binary operation result instead of to the operands. This allows
to combine successive conversions and bitwise binary operations.
We combine the above two cases by using a conditional convert. */
(for bitop (bit_and bit_ior bit_xor)
(simplify
(bitop (convert @0) (convert? @1))
(if (((TREE_CODE (@1) == INTEGER_CST
&& INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& int_fits_type_p (@1, TREE_TYPE (@0)))
|| types_match (@0, @1))
/* ??? This transform conflicts with fold-const.c doing
Convert (T)(x & c) into (T)x & (T)c, if c is an integer
constants (if x has signed type, the sign bit cannot be set
in c). This folds extension into the BIT_AND_EXPR.
Restrict it to GIMPLE to avoid endless recursions. */
&& (bitop != BIT_AND_EXPR || GIMPLE)
&& (/* That's a good idea if the conversion widens the operand, thus
after hoisting the conversion the operation will be narrower. */
TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
/* It's also a good idea if the conversion is to a non-integer
mode. */
|| GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
/* Or if the precision of TO is not the same as the precision
of its mode. */
|| !type_has_mode_precision_p (type)))
(convert (bitop @0 (convert @1))))))
(for bitop (bit_and bit_ior)
rbitop (bit_ior bit_and)
/* (x | y) & x -> x */
/* (x & y) | x -> x */
(simplify
(bitop:c (rbitop:c @0 @1) @0)
@0)
/* (~x | y) & x -> x & y */
/* (~x & y) | x -> x | y */
(simplify
(bitop:c (rbitop:c (bit_not @0) @1) @0)
(bitop @0 @1)))
/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
(bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
(bit_ior (bit_and @0 @2) (bit_and @1 @2)))
/* Combine successive equal operations with constants. */
(for bitop (bit_and bit_ior bit_xor)
(simplify
(bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
(bitop @0 (bitop @1 @2))))
/* Try simple folding for X op !X, and X op X with the help
of the truth_valued_p and logical_inverted_value predicates. */
(match truth_valued_p
@0
(if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
(match truth_valued_p
(op @0 @1)))
(match truth_valued_p
(truth_not @0))
(match (logical_inverted_value @0)
(truth_not @0))
(match (logical_inverted_value @0)
(bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
(eq @0 integer_zerop))
(match (logical_inverted_value @0)
(ne truth_valued_p@0 integer_truep))
(match (logical_inverted_value @0)
(bit_xor truth_valued_p@0 integer_truep))
/* X & !X -> 0. */
(simplify
(bit_and:c @0 (logical_inverted_value @0))
{ build_zero_cst (type); })
/* X | !X and X ^ !X -> 1, if X is truth-valued. */
(for op (bit_ior bit_xor)
(simplify
(op:c truth_valued_p@0 (logical_inverted_value @0))
{ constant_boolean_node (true, type); }))
/* X ==/!= !X is false/true. */
(for op (eq ne)
(simplify
(op:c truth_valued_p@0 (logical_inverted_value @0))
{ constant_boolean_node (op == NE_EXPR ? true : false, type); }))
/* ~~x -> x */
(simplify
(bit_not (bit_not @0))
@0)
/* Convert ~ (-A) to A - 1. */
(simplify
(bit_not (convert? (negate @0)))
(if (element_precision (type) <= element_precision (TREE_TYPE (@0))
|| !TYPE_UNSIGNED (TREE_TYPE (@0)))
(convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
/* Convert - (~A) to A + 1. */
(simplify
(negate (nop_convert (bit_not @0)))
(plus (view_convert @0) { build_each_one_cst (type); }))
/* Convert ~ (A - 1) or ~ (A + -1) to -A. */
(simplify
(bit_not (convert? (minus @0 integer_each_onep)))
(if (element_precision (type) <= element_precision (TREE_TYPE (@0))
|| !TYPE_UNSIGNED (TREE_TYPE (@0)))
(convert (negate @0))))
(simplify
(bit_not (convert? (plus @0 integer_all_onesp)))
(if (element_precision (type) <= element_precision (TREE_TYPE (@0))
|| !TYPE_UNSIGNED (TREE_TYPE (@0)))
(convert (negate @0))))
/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
(simplify
(bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
(if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
(convert (bit_xor @0 (bit_not @1)))))
(simplify
(bit_not (convert? (bit_xor:c (bit_not @0) @1)))
(if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
(convert (bit_xor @0 @1))))
/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */
(simplify
(bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
(if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
(bit_not (bit_xor (view_convert @0) @1))))
/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
(simplify
(bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
(bit_xor (bit_and (bit_xor @0 @1) @2) @0))
/* Fold A - (A & B) into ~B & A. */
(simplify
(minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
(if (tree_nop_conversion_p (type, TREE_TYPE (@0))
&& tree_nop_conversion_p (type, TREE_TYPE (@1)))
(convert (bit_and (bit_not @1) @0))))
/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */
(for cmp (gt lt ge le)
(simplify
(mult (convert (cmp @0 @1)) @2)
(cond (cmp @0 @1) @2 { build_zero_cst (type); })))
/* For integral types with undefined overflow and C != 0 fold
x * C EQ/NE y * C into x EQ/NE y. */
(for cmp (eq ne)
(simplify
(cmp (mult:c @0 @1) (mult:c @2 @1))
(if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
&& tree_expr_nonzero_p (@1))
(cmp @0 @2))))
/* For integral types with wrapping overflow and C odd fold
x * C EQ/NE y * C into x EQ/NE y. */
(for cmp (eq ne)
(simplify
(cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
(if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
&& TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
&& (TREE_INT_CST_LOW (@1) & 1) != 0)
(cmp @0 @2))))
/* For integral types with undefined overflow and C != 0 fold
x * C RELOP y * C into:
x RELOP y for nonnegative C
y RELOP x for negative C */
(for cmp (lt gt le ge)
(simplify
(cmp (mult:c @0 @1) (mult:c @2 @1))
(if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
(if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
(cmp @0 @2)
(if (TREE_CODE (@1) == INTEGER_CST
&& wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
(cmp @2 @0))))))
/* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
(for cmp (le gt)
icmp (gt le)
(simplify
(cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& TYPE_UNSIGNED (TREE_TYPE (@0))
&& TYPE_PRECISION (TREE_TYPE (@0)) > 1
&& (wi::to_wide (@2)
== wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
(with { tree stype = signed_type_for (TREE_TYPE (@0)); }
(icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
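/* E.g. for 32-bit unsigned X, X - 1U <= 0x7ffffffe folds to
(int) X > 0: the wrapping subtraction maps exactly the values
1 <= X <= 0x7fffffff into the tested range. */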
/* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
(for cmp (simple_comparison)
(simplify
(cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2))
(if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
(cmp @0 @1))))
/* X / C1 op C2 into a simple range test. */
(for cmp (simple_comparison)
(simplify
(cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& integer_nonzerop (@1)
&& !TREE_OVERFLOW (@1)
&& !TREE_OVERFLOW (@2))
(with { tree lo, hi; bool neg_overflow;
enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
&neg_overflow); }
(switch
(if (code == LT_EXPR || code == GE_EXPR)
(if (TREE_OVERFLOW (lo))
{ build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
(if (code == LT_EXPR)
(lt @0 { lo; })
(ge @0 { lo; }))))
(if (code == LE_EXPR || code == GT_EXPR)
(if (TREE_OVERFLOW (hi))
{ build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
(if (code == LE_EXPR)
(le @0 { hi; })
(gt @0 { hi; }))))
(if (!lo && !hi)
{ build_int_cst (type, code == NE_EXPR); })
(if (code == EQ_EXPR && !hi)
(ge @0 { lo; }))
(if (code == EQ_EXPR && !lo)
(le @0 { hi; }))
(if (code == NE_EXPR && !hi)
(lt @0 { lo; }))
(if (code == NE_EXPR && !lo)
(gt @0 { hi; }))
(if (GENERIC)
{ build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
lo, hi); })
(with
{
tree etype = range_check_type (TREE_TYPE (@0));
if (etype)
{
if (! TYPE_UNSIGNED (etype))
etype = unsigned_type_for (etype);
hi = fold_convert (etype, hi);
lo = fold_convert (etype, lo);
hi = const_binop (MINUS_EXPR, etype, hi, lo);
}
}
(if (etype && hi && !TREE_OVERFLOW (hi))
(if (code == EQ_EXPR)
(le (minus (convert:etype @0) { lo; }) { hi; })
(gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
/* X + Z < Y + Z is the same as X < Y when there is no overflow. */
(for op (lt le ge gt)
(simplify
(op (plus:c @0 @2) (plus:c @1 @2))
(if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
(op @0 @1))))
/* For equality and subtraction, this is also true with wrapping overflow. */
(for op (eq ne minus)
(simplify
(op (plus:c @0 @2) (plus:c @1 @2))
(if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
|| TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
(op @0 @1))))
/* X - Z < Y - Z is the same as X < Y when there is no overflow. */
(for op (lt le ge gt)
(simplify
(op (minus @0 @2) (minus @1 @2))
(if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
(op @0 @1))))
/* For equality and subtraction, this is also true with wrapping overflow. */
(for op (eq ne minus)
(simplify
(op (minus @0 @2) (minus @1 @2))
(if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
|| TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
(op @0 @1))))
/* And for pointers... */
(for op (simple_comparison)
(simplify
(op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
(if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
(op @0 @1))))
(simplify
(minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
(if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
&& !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
(pointer_diff @0 @1)))
/* Z - X < Z - Y is the same as Y < X when there is no overflow. */
(for op (lt le ge gt)
(simplify
(op (minus @2 @0) (minus @2 @1))
(if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
(op @1 @0))))
/* For equality and subtraction, this is also true with wrapping overflow. */
(for op (eq ne minus)
(simplify
(op (minus @2 @0) (minus @2 @1))
(if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
|| TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
(op @1 @0))))
/* And for pointers... */
(for op (simple_comparison)
(simplify
(op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
(if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
(op @1 @0))))
(simplify
(minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
(if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
&& !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
(pointer_diff @1 @0)))
/* X + Y < Y is the same as X < 0 when there is no overflow. */
(for op (lt le gt ge)
(simplify
(op:c (plus:c@2 @0 @1) @1)
(if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
&& (CONSTANT_CLASS_P (@0) || single_use (@2)))
(op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
/* For equality, this is also true with wrapping overflow. */
(for op (eq ne)
(simplify
(op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
(if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
|| TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
&& (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
&& tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
&& tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
(op @0 { build_zero_cst (TREE_TYPE (@0)); })))
(simplify
(op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
(if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
&& tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
&& (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
(op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
/* X - Y < X is the same as Y > 0 when there is no overflow.
For equality, this is also true with wrapping overflow. */
(for op (simple_comparison)
(simplify
(op:c @0 (minus@2 @0 @1))
(if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
|| ((op == EQ_EXPR || op == NE_EXPR)
&& TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
&& (CONSTANT_CLASS_P (@1) || single_use (@2)))
(op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
/* Transform:
* (X / Y) == 0 -> X < Y if X, Y are unsigned.
* (X / Y) != 0 -> X >= Y if X, Y are unsigned.
*/
(for cmp (eq ne)
ocmp (lt ge)
(simplify
(cmp (trunc_div @0 @1) integer_zerop)
(if (TYPE_UNSIGNED (TREE_TYPE (@0))
&& (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
(ocmp @0 @1))))
/* X == C - X can never be true if C is odd. */
(for cmp (eq ne)
(simplify
(cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
(if (TREE_INT_CST_LOW (@1) & 1)
{ constant_boolean_node (cmp == NE_EXPR, type); })))
/* Arguments on which one can call get_nonzero_bits to get the bits
possibly set. */
(match with_possible_nonzero_bits
INTEGER_CST@0)
(match with_possible_nonzero_bits
SSA_NAME@0
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
/* Slightly extended version, do not make it recursive to keep it cheap. */
(match (with_possible_nonzero_bits2 @0)
with_possible_nonzero_bits@0)
(match (with_possible_nonzero_bits2 @0)
(bit_and:c with_possible_nonzero_bits@0 @2))
/* Same for bits that are known to be set, but we do not have
an equivalent to get_nonzero_bits yet. */
(match (with_certain_nonzero_bits2 @0)
INTEGER_CST@0)
(match (with_certain_nonzero_bits2 @0)
(bit_ior @1 INTEGER_CST@0))
/* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
(for cmp (eq ne)
(simplify
(cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
(if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
{ constant_boolean_node (cmp == NE_EXPR, type); })))
/* ((X inner_op C0) outer_op C1)
With X being a tree where value_range has reasoned certain bits to always be
zero throughout its computed value range,
inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
where zero_mask has 1's for all bits that are sure to be 0 in X's
computed value range and 0's otherwise.
if (inner_op == '^') C0 &= ~C1;
if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
*/
(for inner_op (bit_ior bit_xor)
outer_op (bit_xor bit_ior)
(simplify
(outer_op
(inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
(with
{
bool fail = false;
wide_int zero_mask_not;
wide_int C0;
wide_int cst_emit;
if (TREE_CODE (@2) == SSA_NAME)
zero_mask_not = get_nonzero_bits (@2);
else
fail = true;
if (inner_op == BIT_XOR_EXPR)
{
C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
cst_emit = C0 | wi::to_wide (@1);
}
else
{
C0 = wi::to_wide (@0);
cst_emit = C0 ^ wi::to_wide (@1);
}
}
(if (!fail && (C0 & zero_mask_not) == 0)
(outer_op @2 { wide_int_to_tree (type, cst_emit); })
(if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
(inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
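/* E.g. if value_range proves that only the low 8 bits of X can be
set (get_nonzero_bits (X) == 0xff), then (X | 0x100) ^ 0x200
folds to X ^ 0x300. */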
/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
(simplify
(pointer_plus (pointer_plus:s @0 @1) @3)
(pointer_plus @0 (plus @1 @3)))
/* Pattern match
tem1 = (long) ptr1;
tem2 = (long) ptr2;
tem3 = tem2 - tem1;
tem4 = (unsigned long) tem3;
tem5 = ptr1 + tem4;
and produce
tem5 = ptr2; */
(simplify
(pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
/* Conditionally look through a sign-changing conversion. */
(if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
&& ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
|| (GENERIC && type == TREE_TYPE (@1))))
@1))
(simplify
(pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
(if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
(convert @1)))
/* Pattern match
tem = (sizetype) ptr;
tem = tem & algn;
tem = -tem;
... = ptr p+ tem;
and produce the simpler and easier to analyze with respect to alignment
... = ptr & ~algn; */
(simplify
(pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
(with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
(bit_and @0 { algn; })))
/* Try folding difference of addresses. */
(simplify
(minus (convert ADDR_EXPR@0) (convert @1))
(if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
(with { HOST_WIDE_INT diff; }
(if (ptr_difference_const (@0, @1, &diff))
{ build_int_cst_type (type, diff); }))))
(simplify
(minus (convert @0) (convert ADDR_EXPR@1))
(if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
(with { HOST_WIDE_INT diff; }
(if (ptr_difference_const (@0, @1, &diff))
{ build_int_cst_type (type, diff); }))))
(simplify
(pointer_diff (convert?@2 ADDR_EXPR@0) (convert?@3 @1))
(if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
&& tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
(with { HOST_WIDE_INT diff; }
(if (ptr_difference_const (@0, @1, &diff))
{ build_int_cst_type (type, diff); }))))
(simplify
(pointer_diff (convert?@2 @0) (convert?@3 ADDR_EXPR@1))
(if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
&& tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
(with { HOST_WIDE_INT diff; }
(if (ptr_difference_const (@0, @1, &diff))
{ build_int_cst_type (type, diff); }))))
/* If arg0 is derived from the address of an object or function, we may
be able to fold this expression using the object or function's
alignment. */
(simplify
(bit_and (convert? @0) INTEGER_CST@1)
(if (POINTER_TYPE_P (TREE_TYPE (@0))
&& tree_nop_conversion_p (type, TREE_TYPE (@0)))
(with
{
unsigned int align;
unsigned HOST_WIDE_INT bitpos;
get_pointer_alignment_1 (@0, &align, &bitpos);
}
(if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
{ wide_int_to_tree (type, (wi::to_wide (@1)
& (bitpos / BITS_PER_UNIT))); }))))
/* We can't reassociate at all for saturating types. */
(if (!TYPE_SATURATING (type))
/* Contract negates. */
/* A + (-B) -> A - B */
(simplify
(plus:c @0 (convert? (negate @1)))
/* Apply STRIP_NOPS on the negate. */
(if (tree_nop_conversion_p (type, TREE_TYPE (@1))
&& !TYPE_OVERFLOW_SANITIZED (type))
(with
{
tree t1 = type;
if (INTEGRAL_TYPE_P (type)
&& TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
}
(convert (minus (convert:t1 @0) (convert:t1 @1))))))
/* A - (-B) -> A + B */
(simplify
(minus @0 (convert? (negate @1)))
(if (tree_nop_conversion_p (type, TREE_TYPE (@1))
&& !TYPE_OVERFLOW_SANITIZED (type))
(with
{
tree t1 = type;
if (INTEGRAL_TYPE_P (type)
&& TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
}
(convert (plus (convert:t1 @0) (convert:t1 @1))))))
/* -(T)(-A) -> (T)A
Sign-extension is ok except for INT_MIN, which thankfully cannot
happen without overflow. */
(simplify
(negate (convert (negate @1)))
(if (INTEGRAL_TYPE_P (type)
&& (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
|| (!TYPE_UNSIGNED (TREE_TYPE (@1))
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
&& !TYPE_OVERFLOW_SANITIZED (type)
&& !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
(convert @1)))
(simplify
(negate (convert negate_expr_p@1))
(if (SCALAR_FLOAT_TYPE_P (type)
&& ((DECIMAL_FLOAT_TYPE_P (type)
== DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
&& TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
|| !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
(convert (negate @1))))
(simplify
(negate (nop_convert (negate @1)))
(if (!TYPE_OVERFLOW_SANITIZED (type)
&& !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
(view_convert @1)))
/* We can't reassociate floating-point unless -fassociative-math
or fixed-point plus or minus because of saturation to +-Inf. */
(if ((!FLOAT_TYPE_P (type) || flag_associative_math)
&& !FIXED_POINT_TYPE_P (type))
/* Match patterns that allow contracting a plus-minus pair
irrespective of overflow issues. */
/* (A +- B) - A -> +- B */
/* (A +- B) -+ B -> A */
/* A - (A +- B) -> -+ B */
/* A +- (B -+ A) -> +- B */
(simplify
(minus (plus:c @0 @1) @0)
@1)
(simplify
(minus (minus @0 @1) @0)
(negate @1))
(simplify
(plus:c (minus @0 @1) @1)
@0)
(simplify
(minus @0 (plus:c @0 @1))
(negate @1))
(simplify
(minus @0 (minus @0 @1))
@1)
/* (A +- B) + (C - A) -> C +- B */
/* (A + B) - (A - C) -> B + C */
/* More cases are handled with comparisons. */
(simplify
(plus:c (plus:c @0 @1) (minus @2 @0))
(plus @2 @1))
(simplify
(plus:c (minus @0 @1) (minus @2 @0))
(minus @2 @1))
(simplify
(plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
(if (TYPE_OVERFLOW_UNDEFINED (type)
&& !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
(pointer_diff @2 @1)))
(simplify
(minus (plus:c @0 @1) (minus @0 @2))
(plus @1 @2))
/* (A +- CST1) +- CST2 -> A + CST3
Use view_convert because it is safe for vectors and equivalent for
scalars. */
(for outer_op (plus minus)
(for inner_op (plus minus)
neg_inner_op (minus plus)
(simplify
(outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
CONSTANT_CLASS_P@2)
/* If one of the types wraps, use that one. */
(if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
(if (outer_op == PLUS_EXPR)
(plus (view_convert @0) (inner_op @2 (view_convert @1)))
(minus (view_convert @0) (neg_inner_op @2 (view_convert @1))))
(if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
|| TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
(if (outer_op == PLUS_EXPR)
(view_convert (plus @0 (inner_op (view_convert @2) @1)))
(view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
/* If the constant operation overflows we cannot do the transform
directly as we would introduce undefined overflow, for example
with (a - 1) + INT_MIN. */
(if (types_match (type, @0))
(with { tree cst = const_binop (outer_op == inner_op
? PLUS_EXPR : MINUS_EXPR,
type, @1, @2); }
(if (cst && !TREE_OVERFLOW (cst))
(inner_op @0 { cst; } )
/* X+INT_MAX+1 is X-INT_MIN. */
(if (INTEGRAL_TYPE_P (type) && cst
&& wi::to_wide (cst) == wi::min_value (type))
(neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
/* Last resort, use some unsigned type. */
(with { tree utype = unsigned_type_for (type); }
(view_convert (inner_op
(view_convert:utype @0)
(view_convert:utype
{ drop_tree_overflow (cst); })))))))))))))
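/* E.g. (X + 2) + 3 becomes X + 5, and for 32-bit int
(X + INT_MAX) + 1 takes the neg_inner_op path and becomes
X - INT_MIN. */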
/* (CST1 - A) +- CST2 -> CST3 - A */
(for outer_op (plus minus)
(simplify
(outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
(with { tree cst = const_binop (outer_op, type, @1, @2); }
(if (cst && !TREE_OVERFLOW (cst))
(minus { cst; } @0)))))
/* CST1 - (CST2 - A) -> CST3 + A */
(simplify
(minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
(with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
(if (cst && !TREE_OVERFLOW (cst))
(plus { cst; } @0))))
/* ~A + A -> -1 */
(simplify
(plus:c (bit_not @0) @0)
(if (!TYPE_OVERFLOW_TRAPS (type))
{ build_all_ones_cst (type); }))
/* ~A + 1 -> -A */
(simplify
(plus (convert? (bit_not @0)) integer_each_onep)
(if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
(negate (convert @0))))
/* -A - 1 -> ~A */
(simplify
(minus (convert? (negate @0)) integer_each_onep)
(if (!TYPE_OVERFLOW_TRAPS (type)
&& tree_nop_conversion_p (type, TREE_TYPE (@0)))
(bit_not (convert @0))))
/* -1 - A -> ~A */
(simplify
(minus integer_all_onesp @0)
(bit_not @0))
/* (T)(P + A) - (T)P -> (T) A */
(for add (plus pointer_plus)
(simplify
(minus (convert (add @@0 @1))
(convert @0))
(if (element_precision (type) <= element_precision (TREE_TYPE (@1))
/* For integer types, if A has a smaller type
than T the result depends on the possible
overflow in P + A.
E.g. T=size_t, A=(unsigned)4294967295, P>0.
However, if an overflow in P + A would cause
undefined behavior, we can assume that there
is no overflow. */
|| (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
/* For pointer types, if the conversion of A to the
final type requires a sign- or zero-extension,
then we have to punt - it is not defined which
one is correct. */
|| (POINTER_TYPE_P (TREE_TYPE (@0))
&& TREE_CODE (@1) == INTEGER_CST
&& tree_int_cst_sign_bit (@1) == 0))
(convert @1))))
(simplify
(pointer_diff (pointer_plus @@0 @1) @0)
/* The second argument of pointer_plus must be interpreted as signed, and
thus sign-extended if necessary. */
(with { tree stype = signed_type_for (TREE_TYPE (@1)); }
(convert (convert:stype @1))))
/* (T)P - (T)(P + A) -> -(T) A */
(for add (plus pointer_plus)
(simplify
(minus (convert @0)
(convert (add @@0 @1)))
(if (element_precision (type) <= element_precision (TREE_TYPE (@1))
/* For integer types, if A has a smaller type
than T the result depends on the possible
overflow in P + A.
E.g. T=size_t, A=(unsigned)4294967295, P>0.
However, if an overflow in P + A would cause
undefined behavior, we can assume that there
is no overflow. */
|| (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
/* For pointer types, if the conversion of A to the
final type requires a sign- or zero-extension,
then we have to punt - it is not defined which
one is correct. */
|| (POINTER_TYPE_P (TREE_TYPE (@0))
&& TREE_CODE (@1) == INTEGER_CST
&& tree_int_cst_sign_bit (@1) == 0))
(negate (convert @1)))))
(simplify
(pointer_diff @0 (pointer_plus @@0 @1))
/* The second argument of pointer_plus must be interpreted as signed, and
thus sign-extended if necessary. */
(with { tree stype = signed_type_for (TREE_TYPE (@1)); }
(negate (convert (convert:stype @1)))))
/* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
(for add (plus pointer_plus)
(simplify
(minus (convert (add @@0 @1))
(convert (add @0 @2)))
(if (element_precision (type) <= element_precision (TREE_TYPE (@1))
/* For integer types, if A has a smaller type
than T the result depends on the possible
overflow in P + A.
E.g. T=size_t, A=(unsigned)4294967295, P>0.
However, if an overflow in P + A would cause
undefined behavior, we can assume that there
is no overflow. */
|| (INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
/* For pointer types, if the conversion of A to the
final type requires a sign- or zero-extension,
then we have to punt - it is not defined which
one is correct. */
|| (POINTER_TYPE_P (TREE_TYPE (@0))
&& TREE_CODE (@1) == INTEGER_CST
&& tree_int_cst_sign_bit (@1) == 0
&& TREE_CODE (@2) == INTEGER_CST
&& tree_int_cst_sign_bit (@2) == 0))
(minus (convert @1) (convert @2)))))))
(simplify
(pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
/* The second argument of pointer_plus must be interpreted as signed, and
thus sign-extended if necessary. */
(with { tree stype = signed_type_for (TREE_TYPE (@1)); }
(minus (convert (convert:stype @1)) (convert (convert:stype @2)))))
/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
(for minmax (min max FMIN FMIN_FN FMAX FMAX_FN)
(simplify
(minmax @0 @0)
@0))
/* min(max(x,y),y) -> y. */
(simplify
(min:c (max:c @0 @1) @1)
@1)
/* max(min(x,y),y) -> y. */
(simplify
(max:c (min:c @0 @1) @1)
@1)
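/* Illustrative sketch (hypothetical C); applies once the conditionals
   have been recognized as MIN_EXPR/MAX_EXPR:
     int f (int x, int y) { int t = x > y ? x : y; return t < y ? t : y; }
   reduces to "return y;".  */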
/* max(a,-a) -> abs(a). */
(simplify
(max:c @0 (negate @0))
(if (TREE_CODE (type) != COMPLEX_TYPE
&& (! ANY_INTEGRAL_TYPE_P (type)
|| TYPE_OVERFLOW_UNDEFINED (type)))
(abs @0)))
/* min(a,-a) -> -abs(a). */
(simplify
(min:c @0 (negate @0))
(if (TREE_CODE (type) != COMPLEX_TYPE
&& (! ANY_INTEGRAL_TYPE_P (type)
|| TYPE_OVERFLOW_UNDEFINED (type)))
(negate (abs @0))))
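/* Illustrative sketch (hypothetical C); for signed int this is valid
   only because signed overflow is undefined (a == INT_MIN would
   otherwise make -a wrap):
     int f (int a) { return a > -a ? a : -a; }
   becomes ABS_EXPR <a> once the conditional is recognized as a MAX.  */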
(simplify
(min @0 @1)
(switch
(if (INTEGRAL_TYPE_P (type)
&& TYPE_MIN_VALUE (type)
&& operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
@1)
(if (INTEGRAL_TYPE_P (type)
&& TYPE_MAX_VALUE (type)
&& operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
@0)))
(simplify
(max @0 @1)
(switch
(if (INTEGRAL_TYPE_P (type)
&& TYPE_MAX_VALUE (type)
&& operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
@1)
(if (INTEGRAL_TYPE_P (type)
&& TYPE_MIN_VALUE (type)
&& operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
@0)))
/* max (a, a + CST) -> a + CST where CST is positive. */
/* max (a, a + CST) -> a where CST is negative. */
(simplify
(max:c @0 (plus@2 @0 INTEGER_CST@1))
(if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
(if (tree_int_cst_sgn (@1) > 0)
@2
@0)))
/* min (a, a + CST) -> a where CST is positive. */
/* min (a, a + CST) -> a + CST where CST is negative. */
(simplify
(min:c @0 (plus@2 @0 INTEGER_CST@1))
(if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
(if (tree_int_cst_sgn (@1) > 0)
@0
@2)))
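/* Illustrative sketch (hypothetical C); relies on signed overflow
   being undefined, so a + 4 cannot wrap around below a:
     int f (int a) { int b = a + 4; return a < b ? a : b; }
   folds to "return a;".  */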
/* (convert (minmax (convert x) c)) -> (minmax x (convert c)) if x is
   promoted and the outer conversion demotes the result back to x's type.  */
(for minmax (min max)
(simplify
(convert (minmax@0 (convert @1) INTEGER_CST@2))
(if (INTEGRAL_TYPE_P (type)
&& types_match (@1, type) && int_fits_type_p (@2, type)
&& TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
&& TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
(minmax @1 (convert @2)))))
(for minmax (FMIN FMIN_FN FMAX FMAX_FN)
/* If either argument is NaN, return the other one. Avoid the
transformation if we get (and honor) a signalling NaN. */
(simplify
(minmax:c @0 REAL_CST@1)
(if (real_isnan (TREE_REAL_CST_PTR (@1))
&& (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
@0)))
/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
functions to return the numeric arg if the other one is NaN.
MIN and MAX don't honor that, so only transform if -ffinite-math-only
is set. C99 doesn't require -0.0 to be handled, so we don't have to
worry about it either. */
(if (flag_finite_math_only)
(simplify
(FMIN @0 @1)
(min @0 @1))
(simplify
(FMIN_FN @0 @1)
(min @0 @1))
(simplify
(FMAX @0 @1)
(max @0 @1))
(simplify
(FMAX_FN @0 @1)
(max @0 @1)))
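/* Illustrative sketch (hypothetical C): with -ffinite-math-only,
     double f (double x, double y) { return __builtin_fmin (x, y); }
   is turned into MIN_EXPR <x, y>.  */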
/* min (-A, -B) -> -max (A, B) */
(for minmax (min max FMIN FMIN_FN FMAX FMAX_FN)
maxmin (max min FMAX FMAX_FN FMIN FMIN_FN)
(simplify
(minmax (negate:s@2 @0) (negate:s@3 @1))
(if (FLOAT_TYPE_P (TREE_TYPE (@0))
|| (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
(negate (maxmin @0 @1)))))
/* MIN (~X, ~Y) -> ~MAX (X, Y)
MAX (~X, ~Y) -> ~MIN (X, Y) */
(for minmax (min max)
maxmin (max min)
(simplify
(minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
(bit_not (maxmin @0 @1))))
/* MIN (X, Y) == X -> X <= Y, MIN (X, Y) != X -> X > Y,
   MAX (X, Y) == X -> X >= Y, MAX (X, Y) != X -> X < Y */
(for minmax (min min max max)
cmp (eq ne eq ne )
out (le gt ge lt )
(simplify
(cmp:c (minmax:c @0 @1) @0)
(if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
(out @0 @1))))
/* MIN (X, 5) == 0 -> X == 0
MIN (X, 5) == 7 -> false */
(for cmp (eq ne)
(simplify
(cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
(if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
TYPE_SIGN (TREE_TYPE (@0))))
{ constant_boolean_node (cmp == NE_EXPR, type); }
(if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
TYPE_SIGN (TREE_TYPE (@0))))
(cmp @0 @2)))))
(for cmp (eq ne)
(simplify
(cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
(if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
TYPE_SIGN (TREE_TYPE (@0))))
{ constant_boolean_node (cmp == NE_EXPR, type); }
(if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
TYPE_SIGN (TREE_TYPE (@0))))
(cmp @0 @2)))))
/* MIN (X, C1) < C2 -> X < C2 || C1 < C2,
   MIN (X, C1) > C2 -> X > C2 && C1 > C2,
   MAX (X, C1) > C2 -> X > C2 || C1 > C2,
   MAX (X, C1) < C2 -> X < C2 && C1 < C2 (and likewise for <= and >=).  */
(for minmax (min min max max min min max max )
cmp (lt le gt ge gt ge lt le )
comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
(simplify
(cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
(comb (cmp @0 @2) (cmp @1 @2))))
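/* E.g. (illustrative): MIN (x, 5) < 7 becomes (x < 7) | (5 < 7) and
   constant-folds to true, while MIN (x, 5) > 7 becomes
   (x > 7) & (5 > 7), i.e. false.  */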
/* Simplifications of shift and rotates. */
(for rotate (lrotate rrotate)
(simplify
(rotate integer_all_onesp@0 @1)
@0))
/* Optimize -1 >> x for arithmetic right shifts. */
(simplify
(rshift integer_all_onesp@0 @1)
(if (!TYPE_UNSIGNED (type)
&& tree_expr_nonnegative_p (@1))
@0))
/* Optimize (x >> c) << c into x & (-1<<c). */
(simplify
(lshift (rshift @0 INTEGER_CST@1) @1)
(if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
(bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
types. */
(simplify
(rshift (lshift @0 INTEGER_CST@1) @1)
(if (TYPE_UNSIGNED (type)
&& (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
(bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
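/* Illustrative sketch (hypothetical C, 32-bit unsigned):
     unsigned f (unsigned x) { return (x << 8) >> 8; }
   folds to "return x & 0xffffff;".  */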
(for shiftrotate (lrotate rrotate lshift rshift)
(simplify
(shiftrotate @0 integer_zerop)
(non_lvalue @0))
(simplify
(shiftrotate integer_zerop@0 @1)
@0)
/* Prefer vector1 << scalar to vector1 << vector2
if vector2 is uniform. */
(for vec (VECTOR_CST CONSTRUCTOR)
(simplify
(shiftrotate @0 vec@1)
(with { tree tem = uniform_vector_p (@1); }
(if (tem)
(shiftrotate @0 { tem; }))))))
/* Simplify X << Y to X if the low `width' bits of Y are known to be
   zero, as the only valid shift amount with those bits clear is then
   Y == 0.  Similarly for X >> Y.  */
#if GIMPLE
(for shift (lshift rshift)
(simplify
(shift @0 SSA_NAME@1)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
(with {
int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
int prec = TYPE_PRECISION (TREE_TYPE (@1));
}
(if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
@0)))))
#endif
/* Rewrite an LROTATE_EXPR by a constant into an
RROTATE_EXPR by a new constant. */
(simplify
(lrotate @0 INTEGER_CST@1)
(rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
build_int_cst (TREE_TYPE (@1),
element_precision (type)), @1); }))
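/* E.g. (illustrative): a 32-bit rotate-left by 8, as recognized from
   (x << 8) | (x >> 24), is rewritten as a rotate-right by 32 - 8 = 24.  */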
/* Turn (a OP c1) OP c2 into a OP (c1+c2). */
(for op (lrotate rrotate rshift lshift)
(simplify
(op (op @0 INTEGER_CST@1) INTEGER_CST@2)
(with { unsigned int prec = element_precision (type); }
(if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
&& wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
&& wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
&& wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
(with { unsigned int low = (tree_to_uhwi (@1)
+ tree_to_uhwi (@2)); }
/* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
being well defined. */
(if (low >= prec)
(if (op == LROTATE_EXPR || op == RROTATE_EXPR)
(op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
(if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
{ build_zero_cst (type); }
(op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
(op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
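/* Illustrative sketches (hypothetical C, 32-bit unsigned):
     unsigned f (unsigned x) { return (x >> 3) >> 5; }   // x >> 8
     unsigned g (unsigned x) { return (x << 20) << 20; } // 0, as 40 >= 32
   For rotates the combined count instead wraps modulo the precision.  */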
/* ((1 << A) & 1) != 0 -> A == 0
((1 << A) & 1) == 0 -> A != 0 */
(for cmp (ne eq)
icmp (eq ne)
(simplify
(cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
(icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
(CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
if CST2 != 0. */
(for cmp (ne eq)
(simplify
(cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
(with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
(if (cand < 0
|| (!integer_zerop (@2)
&& wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
{ constant_boolean_node (cmp == NE_EXPR, type); }
(if (!integer_zerop (@2)
&& wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
(cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
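/* E.g. (illustrative): (4 << a) == 32 folds to a == 3, because
   ctz (32) - ctz (4) = 3 and 4 << 3 == 32, while (4 << a) == 6 folds
   to false, as no shift count can produce 6 from 4.  */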
/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
(X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
if the new mask might be further optimized. */
(for shift (lshift rshift)
(simplify
(bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
INTEGER_CST@2)
(if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
&& TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
&& tree_fits_uhwi_p (@1)
&& tree_to_uhwi (@1) > 0
&& tree_to_uhwi (@1) < TYPE_PRECISION (type))
(with
{
unsigned int shiftc = tree_to_uhwi (@1);
unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
unsigned HOST_WIDE_INT newmask, zerobits = 0;
tree shift_type = TREE_TYPE (@3);
unsigned int prec;
if (shift == LSHIFT_EXPR)
zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
else if (shift == RSHIFT_EXPR
&& type_has_mode_precision_p (shift_type))
{
prec = TYPE_PRECISION (TREE_TYPE (@3));
tree arg00 = @0;
/* See if more bits can be proven as zero because of
zero extension. */
if (@3 != @0
&& TYPE_UNSIGNED (TREE_TYPE (@0)))
{
tree inner_type = TREE_TYPE (@0);
if (type_has_mode_precision_p (inner_type)
&& TYPE_PRECISION (inner_type) < prec)
{
prec = TYPE_PRECISION (inner_type);
/* See if we can shorten the right shift. */
if (shiftc < prec)
shift_type = inner_type;
/* Otherwise X >> C1 is all zeros, so we'll optimize
it into (X, 0) later on by making sure zerobits
is all ones. */
}
}
zerobits = HOST_WIDE_INT_M1U;
if (shiftc < prec)
{
zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
zerobits <<= prec - shiftc;
}
/* For an arithmetic shift, if the sign bit could be set, zerobits
   can actually contain sign bits, so no transformation is
   possible, unless MASK masks them all away.  In that
   case the shift needs to be converted into a logical shift.  */
if (!TYPE_UNSIGNED (TREE_TYPE (@3))
&& prec == TYPE_PRECISION (TREE_TYPE (@3)))
{
if ((mask & zerobits) == 0)
shift_type = unsigned_type_for (TREE_TYPE (@3));
else
zerobits = 0;
}
}
}
/* ((X << 16) & 0xff00) is (X, 0). */
(if ((mask & zerobits) == mask)
{ build_int_cst (type, 0); }
(with { newmask = mask | zerobits; }
(if (newmask != mask && (newmask & (newmask + 1)) == 0)
(with
{
/* Only do the transformation if NEWMASK is some integer
mode's mask. */
for (prec = BITS_PER_UNIT;
prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
break;
}
(if (prec < HOST_BITS_PER_WIDE_INT
|| newmask == HOST_WIDE_INT_M1U)
(with
{ tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
(if (!tree_int_cst_equal (newmaskt, @2))
(if (shift_type != TREE_TYPE (@3))
(bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
(bit_and @4 { newmaskt; })))))))))))))
/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
(X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
(for shift (lshift rshift)
(for bit_op (bit_and bit_xor bit_ior)
(simplify
(shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
(if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
(with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
(bit_op (shift (convert @0) @1) { mask; }))))))
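/* Illustrative sketch (hypothetical C):
     unsigned f (unsigned x) { return (x & 0xf) << 4; }
   folds to "return (x << 4) & 0xf0;".  */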
/* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
(simplify
(bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
(if (!TYPE_UNSIGNED (TREE_TYPE (@0))
&& (element_precision (TREE_TYPE (@0))
<= element_precision (TREE_TYPE (@1))
|| !TYPE_UNSIGNED (TREE_TYPE (@1))))
(with
{ tree shift_type = TREE_TYPE (@0); }
(convert (rshift (convert:shift_type @1) @2)))))
/* ~(~X >>r Y) -> X >>r Y
~(~X <<r Y) -> X <<r Y */
(for rotate (lrotate rrotate)
(simplify
(bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
(if ((element_precision (TREE_TYPE (@0))
<= element_precision (TREE_TYPE (@1))
|| !TYPE_UNSIGNED (TREE_TYPE (@1)))
&& (element_precision (type) <= element_precision (TREE_TYPE (@0))
|| !TYPE_UNSIGNED (TREE_TYPE (@0))))
(with
{ tree rotate_type = TREE_TYPE (@0); }
(convert (rotate (convert:rotate_type @1) @2))))))
/* Simplifications of conversions. */
/* Basic strip-useless-type-conversions / strip_nops. */
(for cvt (convert view_convert float fix_trunc)
(simplify
(cvt @0)
(if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
|| (GENERIC && type == TREE_TYPE (@0)))
@0)))
/* Contract view-conversions. */
(simplify
(view_convert (view_convert @0))
(view_convert @0))
/* For integral conversions with the same precision or pointer
conversions use a NOP_EXPR instead. */
(simplify
(view_convert @0)
(if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
&& (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
&& TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
(convert @0)))
/* Strip inner integral conversions that do not change precision or size, or
zero-extend while keeping the same size (for bool-to-char). */
(simplify
(view_convert (convert@0 @1))
(if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
&& (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
&& TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
&& (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
|| (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
&& TYPE_UNSIGNED (TREE_TYPE (@1)))))
(view_convert @1)))
/* Re-association barriers around constants and other re-association
barriers can be removed. */
(simplify
(paren CONSTANT_CLASS_P@0)
@0)
(simplify
(paren (paren@1 @0))
@1)
/* Handle cases of two conversions in a row. */
(for ocvt (convert float fix_trunc)
(for icvt (convert float)
(simplify
(ocvt (icvt@1 @0))
(with
{
tree inside_type = TREE_TYPE (@0);
tree inter_type = TREE_TYPE (@1);
int inside_int = INTEGRAL_TYPE_P (inside_type);
int inside_ptr = POINTER_TYPE_P (inside_type);
int inside_float = FLOAT_TYPE_P (inside_type);
int inside_vec = VECTOR_TYPE_P (inside_type);
unsigned int inside_prec = TYPE_PRECISION (inside_type);
int inside_unsignedp = TYPE_UNSIGNED (inside_type);
int inter_int = INTEGRAL_TYPE_P (inter_type);
int inter_ptr = POINTER_TYPE_P (inter_type);
int inter_float = FLOAT_TYPE_P (inter_type);
int inter_vec = VECTOR_TYPE_P (inter_type);
unsigned int inter_prec = TYPE_PRECISION (inter_type);
int inter_unsignedp = TYPE_UNSIGNED (inter_type);
int final_int = INTEGRAL_TYPE_P (type);
int final_ptr = POINTER_TYPE_P (type);
int final_float = FLOAT_TYPE_P (type);
int final_vec = VECTOR_TYPE_P (type);
unsigned int final_prec = TYPE_PRECISION (type);
int final_unsignedp = TYPE_UNSIGNED (type);
}
(switch
/* In addition to the cases of two conversions in a row
handled below, if we are converting something to its own
type via an object of identical or wider precision, neither
conversion is needed. */
(if (((GIMPLE && useless_type_conversion_p (type, inside_type))
|| (GENERIC
&& TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
&& (((inter_int || inter_ptr) && final_int)
|| (inter_float && final_float))
&& inter_prec >= final_prec)
(ocvt @0))
/* Likewise, if the intermediate and initial types are either both
float or both integer, we don't need the middle conversion if the
former is wider than the latter and doesn't change the signedness
(for integers). Avoid this if the final type is a pointer since
then we sometimes need the middle conversion. */
(if (((inter_int && inside_int) || (inter_float && inside_float))
&& (final_int || final_float)
&& inter_prec >= inside_prec
&& (inter_float || inter_unsignedp == inside_unsignedp))
(ocvt @0))
/* If we have a sign-extension of a zero-extended value, we can
replace that by a single zero-extension. Likewise if the
final conversion does not change precision we can drop the
intermediate conversion. */
(if (inside_int && inter_int && final_int
&& ((inside_prec < inter_prec && inter_prec < final_prec
&& inside_unsignedp && !inter_unsignedp)
|| final_prec == inter_prec))
(ocvt @0))
/* Two conversions in a row are not needed unless:
- some conversion is floating-point (overstrict for now), or
- some conversion is a vector (overstrict for now), or
- the intermediate type is narrower than both initial and
final, or
- the intermediate type and innermost type differ in signedness,
and the outermost type is wider than the intermediate, or
- the initial type is a pointer type and the precisions of the
intermediate and final types differ, or
- the final type is a pointer type and the precisions of the
initial and intermediate types differ. */
(if (! inside_float && ! inter_float && ! final_float
&& ! inside_vec && ! inter_vec && ! final_vec
&& (inter_prec >= inside_prec || inter_prec >= final_prec)
&& ! (inside_int && inter_int
&& inter_unsignedp != inside_unsignedp
&& inter_prec < final_prec)
&& ((inter_unsignedp && inter_prec > inside_prec)
== (final_unsignedp && final_prec > inter_prec))
&& ! (inside_ptr && inter_prec != final_prec)
&& ! (final_ptr && inside_prec != inter_prec))
(ocvt @0))
/* A truncation to an unsigned type (a zero-extension) should be
canonicalized as bitwise and of a mask. */
(if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
&& final_int && inter_int && inside_int
&& final_prec == inside_prec
&& final_prec > inter_prec
&& inter_unsignedp)
(convert (bit_and @0 { wide_int_to_tree
(inside_type,
wi::mask (inter_prec, false,
TYPE_PRECISION (inside_type))); })))
/* If we are converting an integer to a floating-point that can
represent it exactly and back to an integer, we can skip the
floating-point conversion. */
(if (GIMPLE /* PR66211 */
&& inside_int && inter_float && final_int &&
(unsigned) significand_size (TYPE_MODE (inter_type))
>= inside_prec - !inside_unsignedp)
(convert @0)))))))
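/* Illustrative sketch (hypothetical C) of the zero-extension
   canonicalization above, on GIMPLE only:
     int f (int x) { return (unsigned char) x; }
   becomes "return x & 255;".  */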
/* If we have a narrowing conversion to an integral type that is fed by a
BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
masks off bits outside the final type (and nothing else). */
(simplify
(convert (bit_and @0 INTEGER_CST@1))
(if (INTEGRAL_TYPE_P (type)
&& INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
&& operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
TYPE_PRECISION (type)), 0))
(convert @0)))
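/* Illustrative sketch (hypothetical C):
     unsigned char f (unsigned int x) { return (unsigned char) (x & 0xff); }
   drops the masking and becomes "return (unsigned char) x;".  */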
/* (X /[ex] A) * A -> X. */
(simplify
(mult (convert1? (exact_div @0 @@1)) (convert2? @1))
(convert @0))
/* Canonicalization of binary operations. */
/* Convert X + -C into X - C. */
(simplify
(plus @0 REAL_CST@1)
(if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
(with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
(if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
(minus @0 { tem; })))))
/* Convert x+x into x*2. */
(simplify
(plus @0 @0)
(if (SCALAR_FLOAT_TYPE_P (type))
(mult @0 { build_real (type, dconst2); })
(if (INTEGRAL_TYPE_P (type))
(mult @0 { build_int_cst (type, 2); }))))
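/* E.g. (illustrative): for double x, x + x becomes x * 2.0; for int x,
   x + x becomes x * 2.  */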
/* 0 - X -> -X. */
(simplify
(minus integer_zerop @1)
(negate @1))
(simplify
(pointer_diff integer_zerop @1)
(negate (convert @1)))
/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
ARG0 is zero and X + ARG0 reduces to X, since that would mean
(-ARG1 + ARG0) reduces to -ARG1. */
(simplify
(minus real_zerop@0 @1)
(if (fold_real_zero_addition_p (type, @0, 0))
(negate @1)))
/* Transform x * -1 into -x. */
(simplify
(mult @0 integer_minus_onep)
(negate @0))
/* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
signed overflow for CST != 0 && CST != -1. */
(simplify
(mult:c (mult:s @0 INTEGER_CST@1) @2)
(if (TREE_CODE (@2) != INTEGER_CST
&& !integer_zerop (@1) && !integer_minus_onep (@1))
(mult (mult @0 @2) @1)))
/* True if we can easily extract the real and imaginary parts of a complex
number. */
(match compositional_complex
(convert? (complex @0 @1)))
/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
(simplify
(complex (realpart @0) (imagpart @0))
@0)
(simplify
(realpart (complex @0 @1))
@0)
(simplify
(imagpart (complex @0 @1))
@1)
/* Sometimes we only care about half of a complex expression. */
(simplify
(realpart (convert?:s (conj:s @0)))
(convert (realpart @0)))
(simplify
(imagpart (convert?:s (conj:s @0)))
(convert (negate (imagpart @0))))
(for part (realpart imagpart)
(for op (plus minus)
(simplify
(part (convert?:s@2 (op:s @0 @1)))
(convert (op (part @0) (part @1))))))
(simplify
(realpart (convert?:s (CEXPI:s @0)))
(convert (COS @0)))
(simplify
(imagpart (convert?:s (CEXPI:s @0)))
(convert (SIN @0)))
/* conj(conj(x)) -> x */
(simplify
(conj (convert? (conj @0)))
(if (tree_nop_conversion_p (TREE_TYPE (@0), type))
(convert @0)))
/* conj({x,y}) -> {x,-y} */
(simplify
(conj (convert?:s (complex:s @0 @1)))
(with { tree itype = TREE_TYPE (type); }
(complex (convert:itype @0) (negate (convert:itype @1)))))
/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
(simplify
(bswap (bswap @0))
@0)
(simplify
(bswap (bit_not (bswap @0)))
(bit_not @0))
(for bitop (bit_xor bit_ior bit_and)
(simplify
(bswap (bitop:c (bswap @0) @1))
(bitop @0 (bswap @1)))))
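/* Illustrative sketches (hypothetical C):
   __builtin_bswap32 (__builtin_bswap32 (x)) folds to x, and
   __builtin_bswap32 (__builtin_bswap32 (x) & y) folds to
   x & __builtin_bswap32 (y).  */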
/* Combine COND_EXPRs and VEC_COND_EXPRs. */
/* Simplify constant conditions.
Only optimize constant conditions when the selected branch
has the same type as the COND_EXPR. This avoids optimizing
away "c ? x : throw", where the throw has a void type.
Note that we cannot drop the fold-const.c variant nor
this one, as we depend on doing this transform before the
A ? B : B -> B transform possibly triggers, and the fold-const.c
variant can optimize 0 ? A : B to B even if A has side-effects,
something genmatch cannot handle.  */
(simplify
(cond INTEGER_CST@0 @1 @2)
(if (integer_zerop (@0))
(if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
@2)
(if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
@1)))
(simplify
(vec_cond VECTOR_CST@0 @1 @2)
(if (integer_all_onesp (@0))
@1
(if (integer_zerop (@0))
@2)))
/* Simplification moved from fold_cond_expr_with_comparison. It may also
be extended. */
/* This pattern implements two kinds of simplification:
   Case 1)
   (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
   1) Conversions are type widening from a smaller type.
   2) Constant c1 equals c2 after canonicalizing the comparison.
   3) The comparison has tree code LT, LE, GT or GE.
   This specific pattern is needed when (cmp (convert x) c) may not
   be simplified by the comparison patterns because of multiple uses of
   x.  It also makes sense here because simplifying across multiply
   referenced variables is always beneficial for complicated cases.
   Case 2)
   (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2).  */
(for cmp (lt le gt ge eq)
(simplify
(cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
(with
{
tree from_type = TREE_TYPE (@1);
tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
enum tree_code code = ERROR_MARK;
if (INTEGRAL_TYPE_P (from_type)
&& int_fits_type_p (@2, from_type)
&& (types_match (c1_type, from_type)
|| (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
&& (TYPE_UNSIGNED (from_type)
|| TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
&& (types_match (c2_type, from_type)
|| (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
&& (TYPE_UNSIGNED (from_type)
|| TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
{
if (cmp != EQ_EXPR)
{
if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
{
/* X <= Y - 1 equals to X < Y. */
if (cmp == LE_EXPR)
code = LT_EXPR;
/* X > Y - 1 equals to X >= Y. */
if (cmp == GT_EXPR)
code = GE_EXPR;
}
if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
{
/* X < Y + 1 equals to X <= Y. */
if (cmp == LT_EXPR)
code = LE_EXPR;
/* X >= Y + 1 equals to X > Y. */
if (cmp == GE_EXPR)
code = GT_EXPR;
}
if (code != ERROR_MARK
|| wi::to_widest (@2) == wi::to_widest (@3))
{
if (cmp == LT_EXPR || cmp == LE_EXPR)
code = MIN_EXPR;
if (cmp == GT_EXPR || cmp == GE_EXPR)
code = MAX_EXPR;
}
}
/* Can we do A == C1 ? A : C2 -> A == C1 ? C1 : C2?  */
else if (int_fits_type_p (@3, from_type))
code = EQ_EXPR;
}
}
(if (code == MAX_EXPR)
(convert (max @1 (convert @2)))
(if (code == MIN_EXPR)
(convert (min @1 (convert @2)))
(if (code == EQ_EXPR)
(convert (cond (eq @1 (convert @3))
(convert:from_type @3) (convert:from_type @2)))))))))
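/* Illustrative sketch (hypothetical C) of Case 1: for
     int f (short x) { return x < 10 ? x : 10; }
   where x is promoted to int in both the comparison and the selected
   arm, the whole expression folds to (int) MIN_EXPR <x, 10>.  */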
/* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
1) OP is PLUS or MINUS.
2) CMP is LT, LE, GT or GE.
3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
This pattern also handles special cases like:
A) Operand x is an unsigned-to-signed type conversion and c1 is
integer zero.  In this case,
(signed type)x < 0 <=> x > MAX_VAL(signed type)
(signed type)x >= 0 <=> x <= MAX_VAL(signed type)
B) Constant c1 may not equal (c3 op' c2).  In this case we also
check equality for (c1+1) and (c1-1) by adjusting the comparison
code.
TODO: Though signed types are handled by this pattern, they cannot be
simplified at the moment, because the C standard requires additional
type promotion.  In order to match & simplify them here, the IR needs
to be cleaned up by other optimizers, e.g. VRP.  */
(for op (plus minus)
(for cmp (lt le gt ge)
(simplify
(cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
(with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
(if (types_match (from_type, to_type)
/* Check if it is special case A). */
|| (TYPE_UNSIGNED (from_type)
&& !TYPE_UNSIGNED (to_type)
&& TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
&& integer_zerop (@1)
&& (cmp == LT_EXPR || cmp == GE_EXPR)))
(with
{
bool overflow = false;
enum tree_code code, cmp_code = cmp;
wide_int real_c1;
wide_int c1 = wi::to_wide (@1);
wide_int c2 = wi::to_wide (@2);
wide_int c3 = wi::to_wide (@3);
signop sgn = TYPE_SIGN (from_type);
/* Handle special case A), given x of unsigned type:
((signed type)x < 0) <=> (x > MAX_VAL(signed type))
((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
if (!types_match (from_type, to_type))
{
if (cmp_code == LT_EXPR)
cmp_code = GT_EXPR;
if (cmp_code == GE_EXPR)
cmp_code = LE_EXPR;
c1 = wi::max_value (to_type);
}
/* To simplify this pattern, we require c3 = (c1 op c2).  Here we
compute (c3 op' c2) and check if it equals c1, with op' being
the inverted operator of op.  Make sure overflow doesn't happen
if it is undefined.  */
if (op == PLUS_EXPR)
real_c1 = wi::sub (c3, c2, sgn, &overflow);
else
real_c1 = wi::add (c3, c2, sgn, &overflow);
code = cmp_code;
if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
{
/* Check if c1 equals to real_c1. Boundary condition is handled