/* Fold a constant sub-tree into a single node for C-compiler
Copyright (C) 1987-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/*@@ This file should be rewritten to use an arbitrary precision
@@ representation for "struct tree_int_cst" and "struct tree_real_cst".
@@ Perhaps the routines could also be used for bc/dc, and made a lib.
@@ The routines that translate from the ap rep should
@@ warn if precision et al. is lost.
@@ This would also make life easier when this technology is used
@@ for cross-compilers. */
/* The entry points in this file are fold, size_int_kind and size_binop.
fold takes a tree as argument and returns a simplified tree.
size_binop takes a tree code for an arithmetic operation
and two operands that are trees, and produces a tree for the
result, assuming the type comes from `sizetype'.
size_int takes an integer value, and creates a tree constant
with type from `sizetype'.
Note: Since the folders get called on non-gimple code as well as
gimple code, we need to handle GIMPLE tuples as well as their
corresponding tree equivalents. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "memmodel.h"
#include "tm_p.h"
#include "tree-ssa-operands.h"
#include "optabs-query.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "fold-const-call.h"
#include "stor-layout.h"
#include "calls.h"
#include "tree-iterator.h"
#include "expr.h"
#include "intl.h"
#include "langhooks.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "tree-dfa.h"
#include "builtins.h"
#include "generic-match.h"
#include "gimple-fold.h"
#include "tree-into-ssa.h"
#include "md5.h"
#include "case-cfn-macros.h"
#include "stringpool.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "selftest.h"
#include "stringpool.h"
#include "attribs.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
#include "asan.h"
#include "gimple-range.h"
/* Nonzero if we are folding constants inside an initializer or a C++
manifestly-constant-evaluated context; zero otherwise.
Should be used when folding in an initializer enables additional
optimizations. */
int folding_initializer = 0;
/* Nonzero if we are folding in a C++ manifestly-constant-evaluated
context; zero otherwise.
Should be used when certain constructs shouldn't be optimized
during folding in that context. */
bool folding_cxx_constexpr = false;
/* The following constants represent a bit based encoding of GCC's
comparison operators. This encoding simplifies transformations
on relational comparison operators, such as AND and OR. */
enum comparison_code {
COMPCODE_FALSE = 0,
COMPCODE_LT = 1,
COMPCODE_EQ = 2,
COMPCODE_LE = 3,
COMPCODE_GT = 4,
COMPCODE_LTGT = 5,
COMPCODE_GE = 6,
COMPCODE_ORD = 7,
COMPCODE_UNORD = 8,
COMPCODE_UNLT = 9,
COMPCODE_UNEQ = 10,
COMPCODE_UNLE = 11,
COMPCODE_UNGT = 12,
COMPCODE_NE = 13,
COMPCODE_UNGE = 14,
COMPCODE_TRUE = 15
};
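/* Editorial note (an explanatory addition, not original source): the
   encoding is LT = 1, EQ = 2, GT = 4, UNORD = 8, and every other value
   is the inclusive OR of those bits.  For example:

     COMPCODE_LE   == COMPCODE_LT | COMPCODE_EQ                   (3)
     COMPCODE_ORD  == COMPCODE_LT | COMPCODE_EQ | COMPCODE_GT     (7)
     COMPCODE_NE   == COMPCODE_LT | COMPCODE_GT | COMPCODE_UNORD  (13)

   This is what lets the AND/OR of two comparisons be computed by
   bitwise AND/OR of their codes.  */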
static bool negate_expr_p (tree);
static tree negate_expr (tree);
static tree associate_trees (location_t, tree, tree, enum tree_code, tree);
static enum comparison_code comparison_to_compcode (enum tree_code);
static enum tree_code compcode_to_comparison (enum comparison_code);
static bool twoval_comparison_p (tree, tree *, tree *);
static tree eval_subst (location_t, tree, tree, tree, tree, tree);
static tree optimize_bit_field_compare (location_t, enum tree_code,
tree, tree, tree);
static bool simple_operand_p (const_tree);
static bool simple_operand_p_2 (tree);
static tree range_binop (enum tree_code, tree, tree, int, tree, int);
static tree range_predecessor (tree);
static tree range_successor (tree);
static tree fold_range_test (location_t, enum tree_code, tree, tree, tree);
static tree fold_cond_expr_with_comparison (location_t, tree, enum tree_code,
tree, tree, tree, tree);
static tree unextend (tree, int, int, tree);
static tree extract_muldiv (tree, tree, enum tree_code, tree, bool *);
static tree extract_muldiv_1 (tree, tree, enum tree_code, tree, bool *);
static tree fold_binary_op_with_conditional_arg (location_t,
enum tree_code, tree,
tree, tree,
tree, tree, int);
static tree fold_negate_const (tree, tree);
static tree fold_not_const (const_tree, tree);
static tree fold_relational_const (enum tree_code, tree, tree, tree);
static tree fold_convert_const (enum tree_code, tree, tree);
static tree fold_view_convert_expr (tree, tree);
static tree fold_negate_expr (location_t, tree);
/* Return EXPR_LOCATION of T if it is not UNKNOWN_LOCATION.
Otherwise, return LOC. */
static location_t
expr_location_or (tree t, location_t loc)
{
location_t tloc = EXPR_LOCATION (t);
return tloc == UNKNOWN_LOCATION ? loc : tloc;
}
/* Similar to protected_set_expr_location, but never modify X in place;
if the location can and needs to be set, unshare X first. */
static inline tree
protected_set_expr_location_unshare (tree x, location_t loc)
{
if (CAN_HAVE_LOCATION_P (x)
&& EXPR_LOCATION (x) != loc
&& !(TREE_CODE (x) == SAVE_EXPR
|| TREE_CODE (x) == TARGET_EXPR
|| TREE_CODE (x) == BIND_EXPR))
{
x = copy_node (x);
SET_EXPR_LOCATION (x, loc);
}
return x;
}
/* If ARG2 divides ARG1 with zero remainder, carries out the exact
division and returns the quotient. Otherwise returns
NULL_TREE. */
tree
div_if_zero_remainder (const_tree arg1, const_tree arg2)
{
widest_int quo;
if (wi::multiple_of_p (wi::to_widest (arg1), wi::to_widest (arg2),
SIGNED, &quo))
return wide_int_to_tree (TREE_TYPE (arg1), quo);
return NULL_TREE;
}
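/* Editorial illustration (an addition, not original source): a minimal
   sketch of div_if_zero_remainder on two INTEGER_CSTs.

     tree a = build_int_cst (sizetype, 12);
     tree b = build_int_cst (sizetype, 4);
     div_if_zero_remainder (a, b);    yields a tree for 3
     div_if_zero_remainder (b, a);    yields NULL_TREE (4 % 12 != 0)  */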
/* This is nonzero if we should defer warnings about undefined
overflow. This facility exists because these warnings are a
special case. The code to estimate loop iterations does not want
to issue any warnings, since it works with expressions which do not
occur in user code. Various bits of cleanup code call fold(), but
only use the result if it has certain characteristics (e.g., is a
constant); that code only wants to issue a warning if the result is
used. */
static int fold_deferring_overflow_warnings;
/* If a warning about undefined overflow is deferred, this is the
warning. Note that this may cause us to turn two warnings into
one, but that is fine since it is sufficient to only give one
warning per expression. */
static const char* fold_deferred_overflow_warning;
/* If a warning about undefined overflow is deferred, this is the
level at which the warning should be emitted. */
static enum warn_strict_overflow_code fold_deferred_overflow_code;
/* Start deferring overflow warnings. We could use a stack here to
permit nested calls, but at present it is not necessary. */
void
fold_defer_overflow_warnings (void)
{
++fold_deferring_overflow_warnings;
}
/* Stop deferring overflow warnings. If there is a pending warning,
and ISSUE is true, then issue the warning if appropriate. STMT is
the statement with which the warning should be associated (used for
location information); STMT may be NULL. CODE is the level of the
warning--a warn_strict_overflow_code value. This function will use
the smaller of CODE and the deferred code when deciding whether to
issue the warning. CODE may be zero to mean to always use the
deferred code. */
void
fold_undefer_overflow_warnings (bool issue, const gimple *stmt, int code)
{
const char *warnmsg;
location_t locus;
gcc_assert (fold_deferring_overflow_warnings > 0);
--fold_deferring_overflow_warnings;
if (fold_deferring_overflow_warnings > 0)
{
if (fold_deferred_overflow_warning != NULL
&& code != 0
&& code < (int) fold_deferred_overflow_code)
fold_deferred_overflow_code = (enum warn_strict_overflow_code) code;
return;
}
warnmsg = fold_deferred_overflow_warning;
fold_deferred_overflow_warning = NULL;
if (!issue || warnmsg == NULL)
return;
if (warning_suppressed_p (stmt, OPT_Wstrict_overflow))
return;
/* Use the smallest code level when deciding to issue the
warning. */
if (code == 0 || code > (int) fold_deferred_overflow_code)
code = fold_deferred_overflow_code;
if (!issue_strict_overflow_warning (code))
return;
if (stmt == NULL)
locus = input_location;
else
locus = gimple_location (stmt);
warning_at (locus, OPT_Wstrict_overflow, "%s", warnmsg);
}
/* Stop deferring overflow warnings, ignoring any deferred
warnings. */
void
fold_undefer_and_ignore_overflow_warnings (void)
{
fold_undefer_overflow_warnings (false, NULL, 0);
}
/* Whether we are deferring overflow warnings. */
bool
fold_deferring_overflow_warnings_p (void)
{
return fold_deferring_overflow_warnings > 0;
}
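/* Editorial illustration (an addition, not original source): the typical
   defer/undefer pattern used by callers such as the loop-iteration
   estimators.  STMT here is a hypothetical statement providing the
   warning location.

     fold_defer_overflow_warnings ();
     tree res = fold_binary (PLUS_EXPR, type, op0, op1);
     bool use_p = res != NULL_TREE && TREE_CODE (res) == INTEGER_CST;
     fold_undefer_overflow_warnings (use_p, stmt,
                                     WARN_STRICT_OVERFLOW_MISC);

   Any warning queued by fold_overflow_warning in between is emitted
   only if the caller decides the folded result is actually used.  */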
/* This is called when we fold something based on the fact that signed
overflow is undefined. */
void
fold_overflow_warning (const char* gmsgid, enum warn_strict_overflow_code wc)
{
if (fold_deferring_overflow_warnings > 0)
{
if (fold_deferred_overflow_warning == NULL
|| wc < fold_deferred_overflow_code)
{
fold_deferred_overflow_warning = gmsgid;
fold_deferred_overflow_code = wc;
}
}
else if (issue_strict_overflow_warning (wc))
warning (OPT_Wstrict_overflow, gmsgid);
}
/* Return true if the built-in mathematical function specified by FN
is odd, i.e. -f(x) == f(-x). */
bool
negate_mathfn_p (combined_fn fn)
{
switch (fn)
{
CASE_CFN_ASIN:
CASE_CFN_ASINH:
CASE_CFN_ATAN:
CASE_CFN_ATANH:
CASE_CFN_CASIN:
CASE_CFN_CASINH:
CASE_CFN_CATAN:
CASE_CFN_CATANH:
CASE_CFN_CBRT:
CASE_CFN_CPROJ:
CASE_CFN_CSIN:
CASE_CFN_CSINH:
CASE_CFN_CTAN:
CASE_CFN_CTANH:
CASE_CFN_ERF:
CASE_CFN_LLROUND:
CASE_CFN_LROUND:
CASE_CFN_ROUND:
CASE_CFN_ROUNDEVEN:
CASE_CFN_ROUNDEVEN_FN:
CASE_CFN_SIN:
CASE_CFN_SINH:
CASE_CFN_TAN:
CASE_CFN_TANH:
CASE_CFN_TRUNC:
return true;
CASE_CFN_LLRINT:
CASE_CFN_LRINT:
CASE_CFN_NEARBYINT:
CASE_CFN_RINT:
return !flag_rounding_math;
default:
break;
}
return false;
}
/* Check whether we may negate an integer constant T without causing
overflow. */
bool
may_negate_without_overflow_p (const_tree t)
{
tree type;
gcc_assert (TREE_CODE (t) == INTEGER_CST);
type = TREE_TYPE (t);
if (TYPE_UNSIGNED (type))
return false;
return !wi::only_sign_bit_p (wi::to_wide (t));
}
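/* Editorial illustration (an addition, not original source): for a
   signed 32-bit type only INT_MIN fails this predicate, since
   -INT_MIN is not representable.

     may_negate_without_overflow_p of INTEGER_CST 7        yields true
     may_negate_without_overflow_p of INTEGER_CST INT_MIN  yields false  */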
/* Determine whether an expression T can be cheaply negated using
the function negate_expr without introducing undefined overflow. */
static bool
negate_expr_p (tree t)
{
tree type;
if (t == 0)
return false;
type = TREE_TYPE (t);
STRIP_SIGN_NOPS (t);
switch (TREE_CODE (t))
{
case INTEGER_CST:
if (INTEGRAL_TYPE_P (type) && TYPE_UNSIGNED (type))
return true;
/* Check that -CST will not overflow type. */
return may_negate_without_overflow_p (t);
case BIT_NOT_EXPR:
return (INTEGRAL_TYPE_P (type)
&& TYPE_OVERFLOW_WRAPS (type));
case FIXED_CST:
return true;
case NEGATE_EXPR:
return !TYPE_OVERFLOW_SANITIZED (type);
case REAL_CST:
/* We want to canonicalize to positive real constants. Pretend
that only negative ones can be easily negated. */
return REAL_VALUE_NEGATIVE (TREE_REAL_CST (t));
case COMPLEX_CST:
return negate_expr_p (TREE_REALPART (t))
&& negate_expr_p (TREE_IMAGPART (t));
case VECTOR_CST:
{
if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))
return true;
/* Steps don't prevent negation. */
unsigned int count = vector_cst_encoded_nelts (t);
for (unsigned int i = 0; i < count; ++i)
if (!negate_expr_p (VECTOR_CST_ENCODED_ELT (t, i)))
return false;
return true;
}
case COMPLEX_EXPR:
return negate_expr_p (TREE_OPERAND (t, 0))
&& negate_expr_p (TREE_OPERAND (t, 1));
case CONJ_EXPR:
return negate_expr_p (TREE_OPERAND (t, 0));
case PLUS_EXPR:
if (HONOR_SIGN_DEPENDENT_ROUNDING (type)
|| HONOR_SIGNED_ZEROS (type)
|| (ANY_INTEGRAL_TYPE_P (type)
&& ! TYPE_OVERFLOW_WRAPS (type)))
return false;
/* -(A + B) -> (-B) - A. */
if (negate_expr_p (TREE_OPERAND (t, 1)))
return true;
/* -(A + B) -> (-A) - B. */
return negate_expr_p (TREE_OPERAND (t, 0));
case MINUS_EXPR:
/* We can't turn -(A-B) into B-A when we honor signed zeros. */
return !HONOR_SIGN_DEPENDENT_ROUNDING (type)
&& !HONOR_SIGNED_ZEROS (type)
&& (! ANY_INTEGRAL_TYPE_P (type)
|| TYPE_OVERFLOW_WRAPS (type));
case MULT_EXPR:
if (TYPE_UNSIGNED (type))
break;
/* INT_MIN/n * n doesn't overflow, but negating one of the operands
makes it overflow if n is a (negative) power of two. */
if (INTEGRAL_TYPE_P (TREE_TYPE (t))
&& ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (t))
&& ! ((TREE_CODE (TREE_OPERAND (t, 0)) == INTEGER_CST
&& (wi::popcount
(wi::abs (wi::to_wide (TREE_OPERAND (t, 0))))) != 1)
|| (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST
&& (wi::popcount
(wi::abs (wi::to_wide (TREE_OPERAND (t, 1))))) != 1)))
break;
/* Fall through. */
case RDIV_EXPR:
if (! HONOR_SIGN_DEPENDENT_ROUNDING (t))
return negate_expr_p (TREE_OPERAND (t, 1))
|| negate_expr_p (TREE_OPERAND (t, 0));
break;
case TRUNC_DIV_EXPR:
case ROUND_DIV_EXPR:
case EXACT_DIV_EXPR:
if (TYPE_UNSIGNED (type))
break;
/* In general we can't negate A in A / B, because if A is INT_MIN and
B is not 1 we change the sign of the result. */
if (TREE_CODE (TREE_OPERAND (t, 0)) == INTEGER_CST
&& negate_expr_p (TREE_OPERAND (t, 0)))
return true;
/* In general we can't negate B in A / B, because if A is INT_MIN and
B is 1, we may turn this into INT_MIN / -1 which is undefined
and actually traps on some architectures. */
if (! ANY_INTEGRAL_TYPE_P (TREE_TYPE (t))
|| TYPE_OVERFLOW_WRAPS (TREE_TYPE (t))
|| (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST
&& ! integer_onep (TREE_OPERAND (t, 1))))
return negate_expr_p (TREE_OPERAND (t, 1));
break;
case NOP_EXPR:
/* Negate -((double)float) as (double)(-float). */
if (TREE_CODE (type) == REAL_TYPE)
{
tree tem = strip_float_extensions (t);
if (tem != t)
return negate_expr_p (tem);
}
break;
case CALL_EXPR:
/* Negate -f(x) as f(-x). */
if (negate_mathfn_p (get_call_combined_fn (t)))
return negate_expr_p (CALL_EXPR_ARG (t, 0));
break;
case RSHIFT_EXPR:
/* Optimize -((int)x >> 31) into (unsigned)x >> 31 for int. */
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
if (wi::to_wide (op1) == element_precision (type) - 1)
return true;
}
break;
default:
break;
}
return false;
}
/* Given T, an expression, return a folded tree for -T or NULL_TREE, if no
simplification is possible.
If negate_expr_p would return true for T, NULL_TREE will never be
returned. */
static tree
fold_negate_expr_1 (location_t loc, tree t)
{
tree type = TREE_TYPE (t);
tree tem;
switch (TREE_CODE (t))
{
/* Convert - (~A) to A + 1. */
case BIT_NOT_EXPR:
if (INTEGRAL_TYPE_P (type))
return fold_build2_loc (loc, PLUS_EXPR, type, TREE_OPERAND (t, 0),
build_one_cst (type));
break;
case INTEGER_CST:
tem = fold_negate_const (t, type);
if (TREE_OVERFLOW (tem) == TREE_OVERFLOW (t)
|| (ANY_INTEGRAL_TYPE_P (type)
&& !TYPE_OVERFLOW_TRAPS (type)
&& TYPE_OVERFLOW_WRAPS (type))
|| (flag_sanitize & SANITIZE_SI_OVERFLOW) == 0)
return tem;
break;
case POLY_INT_CST:
case REAL_CST:
case FIXED_CST:
tem = fold_negate_const (t, type);
return tem;
case COMPLEX_CST:
{
tree rpart = fold_negate_expr (loc, TREE_REALPART (t));
tree ipart = fold_negate_expr (loc, TREE_IMAGPART (t));
if (rpart && ipart)
return build_complex (type, rpart, ipart);
}
break;
case VECTOR_CST:
{
tree_vector_builder elts;
elts.new_unary_operation (type, t, true);
unsigned int count = elts.encoded_nelts ();
for (unsigned int i = 0; i < count; ++i)
{
tree elt = fold_negate_expr (loc, VECTOR_CST_ELT (t, i));
if (elt == NULL_TREE)
return NULL_TREE;
elts.quick_push (elt);
}
return elts.build ();
}
case COMPLEX_EXPR:
if (negate_expr_p (t))
return fold_build2_loc (loc, COMPLEX_EXPR, type,
fold_negate_expr (loc, TREE_OPERAND (t, 0)),
fold_negate_expr (loc, TREE_OPERAND (t, 1)));
break;
case CONJ_EXPR:
if (negate_expr_p (t))
return fold_build1_loc (loc, CONJ_EXPR, type,
fold_negate_expr (loc, TREE_OPERAND (t, 0)));
break;
case NEGATE_EXPR:
if (!TYPE_OVERFLOW_SANITIZED (type))
return TREE_OPERAND (t, 0);
break;
case PLUS_EXPR:
if (!HONOR_SIGN_DEPENDENT_ROUNDING (type)
&& !HONOR_SIGNED_ZEROS (type))
{
/* -(A + B) -> (-B) - A. */
if (negate_expr_p (TREE_OPERAND (t, 1)))
{
tem = negate_expr (TREE_OPERAND (t, 1));
return fold_build2_loc (loc, MINUS_EXPR, type,
tem, TREE_OPERAND (t, 0));
}
/* -(A + B) -> (-A) - B. */
if (negate_expr_p (TREE_OPERAND (t, 0)))
{
tem = negate_expr (TREE_OPERAND (t, 0));
return fold_build2_loc (loc, MINUS_EXPR, type,
tem, TREE_OPERAND (t, 1));
}
}
break;
case MINUS_EXPR:
/* - (A - B) -> B - A */
if (!HONOR_SIGN_DEPENDENT_ROUNDING (type)
&& !HONOR_SIGNED_ZEROS (type))
return fold_build2_loc (loc, MINUS_EXPR, type,
TREE_OPERAND (t, 1), TREE_OPERAND (t, 0));
break;
case MULT_EXPR:
if (TYPE_UNSIGNED (type))
break;
/* Fall through. */
case RDIV_EXPR:
if (! HONOR_SIGN_DEPENDENT_ROUNDING (type))
{
tem = TREE_OPERAND (t, 1);
if (negate_expr_p (tem))
return fold_build2_loc (loc, TREE_CODE (t), type,
TREE_OPERAND (t, 0), negate_expr (tem));
tem = TREE_OPERAND (t, 0);
if (negate_expr_p (tem))
return fold_build2_loc (loc, TREE_CODE (t), type,
negate_expr (tem), TREE_OPERAND (t, 1));
}
break;
case TRUNC_DIV_EXPR:
case ROUND_DIV_EXPR:
case EXACT_DIV_EXPR:
if (TYPE_UNSIGNED (type))
break;
/* In general we can't negate A in A / B, because if A is INT_MIN and
B is not 1 we change the sign of the result. */
if (TREE_CODE (TREE_OPERAND (t, 0)) == INTEGER_CST
&& negate_expr_p (TREE_OPERAND (t, 0)))
return fold_build2_loc (loc, TREE_CODE (t), type,
negate_expr (TREE_OPERAND (t, 0)),
TREE_OPERAND (t, 1));
/* In general we can't negate B in A / B, because if A is INT_MIN and
B is 1, we may turn this into INT_MIN / -1 which is undefined
and actually traps on some architectures. */
if ((! ANY_INTEGRAL_TYPE_P (TREE_TYPE (t))
|| TYPE_OVERFLOW_WRAPS (TREE_TYPE (t))
|| (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST
&& ! integer_onep (TREE_OPERAND (t, 1))))
&& negate_expr_p (TREE_OPERAND (t, 1)))
return fold_build2_loc (loc, TREE_CODE (t), type,
TREE_OPERAND (t, 0),
negate_expr (TREE_OPERAND (t, 1)));
break;
case NOP_EXPR:
/* Convert -((double)float) into (double)(-float). */
if (TREE_CODE (type) == REAL_TYPE)
{
tem = strip_float_extensions (t);
if (tem != t && negate_expr_p (tem))
return fold_convert_loc (loc, type, negate_expr (tem));
}
break;
case CALL_EXPR:
/* Negate -f(x) as f(-x). */
if (negate_mathfn_p (get_call_combined_fn (t))
&& negate_expr_p (CALL_EXPR_ARG (t, 0)))
{
tree fndecl, arg;
fndecl = get_callee_fndecl (t);
arg = negate_expr (CALL_EXPR_ARG (t, 0));
return build_call_expr_loc (loc, fndecl, 1, arg);
}
break;
case RSHIFT_EXPR:
/* Optimize -((int)x >> 31) into (unsigned)x >> 31 for int. */
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
if (wi::to_wide (op1) == element_precision (type) - 1)
{
tree ntype = TYPE_UNSIGNED (type)
? signed_type_for (type)
: unsigned_type_for (type);
tree temp = fold_convert_loc (loc, ntype, TREE_OPERAND (t, 0));
temp = fold_build2_loc (loc, RSHIFT_EXPR, ntype, temp, op1);
return fold_convert_loc (loc, type, temp);
}
}
break;
default:
break;
}
return NULL_TREE;
}
/* A wrapper for fold_negate_expr_1. */
static tree
fold_negate_expr (location_t loc, tree t)
{
tree type = TREE_TYPE (t);
STRIP_SIGN_NOPS (t);
tree tem = fold_negate_expr_1 (loc, t);
if (tem == NULL_TREE)
return NULL_TREE;
return fold_convert_loc (loc, type, tem);
}
/* Like fold_negate_expr, but return a NEGATE_EXPR tree, if T cannot be
negated in a simpler way. Also allow for T to be NULL_TREE, in which case
return NULL_TREE. */
static tree
negate_expr (tree t)
{
tree type, tem;
location_t loc;
if (t == NULL_TREE)
return NULL_TREE;
loc = EXPR_LOCATION (t);
type = TREE_TYPE (t);
STRIP_SIGN_NOPS (t);
tem = fold_negate_expr (loc, t);
if (!tem)
tem = build1_loc (loc, NEGATE_EXPR, TREE_TYPE (t), t);
return fold_convert_loc (loc, type, tem);
}
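/* Editorial illustration (an addition, not original source): assuming a
   type where the checks in negate_expr_p pass, fold_negate_expr performs
   rewrites such as

     -(~a)     =>  a + 1
     -(a - b)  =>  b - a
     -(a / 4)  =>  a / -4

   and negate_expr falls back to wrapping T in a NEGATE_EXPR when no
   simplification applies.  */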
/* Split a tree IN into a constant, literal and variable parts that could be
combined with CODE to make IN. "constant" means an expression with
TREE_CONSTANT but that isn't an actual constant. CODE must be a
commutative arithmetic operation. Store the constant part into *CONP,
the literal in *LITP and return the variable part. If a part isn't
present, set it to null. If the tree does not decompose in this way,
return the entire tree as the variable part and the other parts as null.
If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR. In that
case, we negate an operand that was subtracted, except if it is a
literal, for which we use *MINUS_LITP instead.
If NEGATE_P is true, we are negating all of IN, again except a literal,
for which we use *MINUS_LITP instead. If a variable part is of pointer
type, it is negated after converting to TYPE. This prevents us from
generating an illegal MINUS pointer expression.
If IN is itself a literal or constant, return it as appropriate.
Note that we do not guarantee that any of the three values will be the
same type as IN, but they will have the same signedness and mode. */
static tree
split_tree (tree in, tree type, enum tree_code code,
tree *minus_varp, tree *conp, tree *minus_conp,
tree *litp, tree *minus_litp, int negate_p)
{
tree var = 0;
*minus_varp = 0;
*conp = 0;
*minus_conp = 0;
*litp = 0;
*minus_litp = 0;
/* Strip any conversions that don't change the machine mode or signedness. */
STRIP_SIGN_NOPS (in);
if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST
|| TREE_CODE (in) == FIXED_CST)
*litp = in;
else if (TREE_CODE (in) == code
|| ((! FLOAT_TYPE_P (TREE_TYPE (in)) || flag_associative_math)
&& ! SAT_FIXED_POINT_TYPE_P (TREE_TYPE (in))
/* We can associate addition and subtraction together (even
though the C standard doesn't say so) for integers because
the value is not affected. For reals, the value might be
affected, so we can't. */
&& ((code == PLUS_EXPR && TREE_CODE (in) == POINTER_PLUS_EXPR)
|| (code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
|| (code == MINUS_EXPR
&& (TREE_CODE (in) == PLUS_EXPR
|| TREE_CODE (in) == POINTER_PLUS_EXPR)))))
{
tree op0 = TREE_OPERAND (in, 0);
tree op1 = TREE_OPERAND (in, 1);
int neg1_p = TREE_CODE (in) == MINUS_EXPR;
int neg_litp_p = 0, neg_conp_p = 0, neg_var_p = 0;
/* First see if either of the operands is a literal, then a constant. */
if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST
|| TREE_CODE (op0) == FIXED_CST)
*litp = op0, op0 = 0;
else if (TREE_CODE (op1) == INTEGER_CST || TREE_CODE (op1) == REAL_CST
|| TREE_CODE (op1) == FIXED_CST)
*litp = op1, neg_litp_p = neg1_p, op1 = 0;
if (op0 != 0 && TREE_CONSTANT (op0))
*conp = op0, op0 = 0;
else if (op1 != 0 && TREE_CONSTANT (op1))
*conp = op1, neg_conp_p = neg1_p, op1 = 0;
/* If we haven't dealt with either operand, this is not a case we can
decompose. Otherwise, VAR is either of the ones remaining, if any. */
if (op0 != 0 && op1 != 0)
var = in;
else if (op0 != 0)
var = op0;
else
var = op1, neg_var_p = neg1_p;
/* Now do any needed negations. */
if (neg_litp_p)
*minus_litp = *litp, *litp = 0;
if (neg_conp_p && *conp)
*minus_conp = *conp, *conp = 0;
if (neg_var_p && var)
*minus_varp = var, var = 0;
}
else if (TREE_CONSTANT (in))
*conp = in;
else if (TREE_CODE (in) == BIT_NOT_EXPR
&& code == PLUS_EXPR)
{
/* -1 - X is folded to ~X, undo that here. Do _not_ do this
when IN is constant. */
*litp = build_minus_one_cst (type);
*minus_varp = TREE_OPERAND (in, 0);
}
else
var = in;
if (negate_p)
{
if (*litp)
*minus_litp = *litp, *litp = 0;
else if (*minus_litp)
*litp = *minus_litp, *minus_litp = 0;
if (*conp)
*minus_conp = *conp, *conp = 0;
else if (*minus_conp)
*conp = *minus_conp, *minus_conp = 0;
if (var)
*minus_varp = var, var = 0;
else if (*minus_varp)
var = *minus_varp, *minus_varp = 0;
}
if (*litp
&& TREE_OVERFLOW_P (*litp))
*litp = drop_tree_overflow (*litp);
if (*minus_litp
&& TREE_OVERFLOW_P (*minus_litp))
*minus_litp = drop_tree_overflow (*minus_litp);
return var;
}
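/* Editorial illustration (an addition, not original source): splitting
   the single node IN = x + 4 with CODE = PLUS_EXPR yields

     return value: x   (the variable part)
     *litp:        4   (the literal part)

   while IN = x - 4 yields x with *minus_litp = 4; the remaining output
   parts are set to null.  Note the function only decomposes the
   top-level operation; it does not recurse.  */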
/* Re-associate trees split by the above function. T1 and T2 are
either expressions to associate or null. Return the new
expression, if any. LOC is the location of the new expression. If
we build an operation, do it in TYPE and with CODE. */
static tree
associate_trees (location_t loc, tree t1, tree t2, enum tree_code code, tree type)
{
if (t1 == 0)
{
gcc_assert (t2 == 0 || code != MINUS_EXPR);
return t2;
}
else if (t2 == 0)
return t1;
/* If either input is CODE, a PLUS_EXPR, or a MINUS_EXPR, don't
try to fold this since we will have infinite recursion. But do
deal with any NEGATE_EXPRs. */
if (TREE_CODE (t1) == code || TREE_CODE (t2) == code
|| TREE_CODE (t1) == PLUS_EXPR || TREE_CODE (t2) == PLUS_EXPR
|| TREE_CODE (t1) == MINUS_EXPR || TREE_CODE (t2) == MINUS_EXPR)
{
if (code == PLUS_EXPR)
{
if (TREE_CODE (t1) == NEGATE_EXPR)
return build2_loc (loc, MINUS_EXPR, type,
fold_convert_loc (loc, type, t2),
fold_convert_loc (loc, type,
TREE_OPERAND (t1, 0)));
else if (TREE_CODE (t2) == NEGATE_EXPR)
return build2_loc (loc, MINUS_EXPR, type,
fold_convert_loc (loc, type, t1),
fold_convert_loc (loc, type,
TREE_OPERAND (t2, 0)));
else if (integer_zerop (t2))
return fold_convert_loc (loc, type, t1);
}
else if (code == MINUS_EXPR)
{
if (integer_zerop (t2))
return fold_convert_loc (loc, type, t1);
}
return build2_loc (loc, code, type, fold_convert_loc (loc, type, t1),
fold_convert_loc (loc, type, t2));
}
return fold_build2_loc (loc, code, type, fold_convert_loc (loc, type, t1),
fold_convert_loc (loc, type, t2));
}
/* Check whether TYPE1 and TYPE2 are equivalent integer types, suitable
for use in int_const_binop, size_binop and size_diffop. */
static bool
int_binop_types_match_p (enum tree_code code, const_tree type1, const_tree type2)
{
if (!INTEGRAL_TYPE_P (type1) && !POINTER_TYPE_P (type1))
return false;
if (!INTEGRAL_TYPE_P (type2) && !POINTER_TYPE_P (type2))
return false;
switch (code)
{
case LSHIFT_EXPR:
case RSHIFT_EXPR:
case LROTATE_EXPR:
case RROTATE_EXPR:
return true;
default:
break;
}
return TYPE_UNSIGNED (type1) == TYPE_UNSIGNED (type2)
&& TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
&& TYPE_MODE (type1) == TYPE_MODE (type2);
}
/* Combine two wide ints ARG1 and ARG2 under operation CODE to produce
a new constant in RES. Return FALSE if we don't know how to
evaluate CODE at compile-time. */
bool
wide_int_binop (wide_int &res,
enum tree_code code, const wide_int &arg1, const wide_int &arg2,
signop sign, wi::overflow_type *overflow)
{
wide_int tmp;
*overflow = wi::OVF_NONE;
switch (code)
{
case BIT_IOR_EXPR:
res = wi::bit_or (arg1, arg2);
break;
case BIT_XOR_EXPR:
res = wi::bit_xor (arg1, arg2);
break;
case BIT_AND_EXPR:
res = wi::bit_and (arg1, arg2);
break;
case LSHIFT_EXPR:
if (wi::neg_p (arg2))
return false;
res = wi::lshift (arg1, arg2);
break;
case RSHIFT_EXPR:
if (wi::neg_p (arg2))
return false;
/* It's unclear from the C standard whether shifts can overflow.
The following code ignores overflow; perhaps a C standard
interpretation ruling is needed. */
res = wi::rshift (arg1, arg2, sign);
break;
case RROTATE_EXPR:
case LROTATE_EXPR:
if (wi::neg_p (arg2))
{
tmp = -arg2;
if (code == RROTATE_EXPR)
code = LROTATE_EXPR;
else
code = RROTATE_EXPR;
}
else
tmp = arg2;
if (code == RROTATE_EXPR)
res = wi::rrotate (arg1, tmp);
else
res = wi::lrotate (arg1, tmp);
break;
case PLUS_EXPR:
res = wi::add (arg1, arg2, sign, overflow);
break;
case MINUS_EXPR:
res = wi::sub (arg1, arg2, sign, overflow);
break;
case MULT_EXPR:
res = wi::mul (arg1, arg2, sign, overflow);
break;
case MULT_HIGHPART_EXPR:
res = wi::mul_high (arg1, arg2, sign);
break;
case TRUNC_DIV_EXPR:
case EXACT_DIV_EXPR:
if (arg2 == 0)
return false;
res = wi::div_trunc (arg1, arg2, sign, overflow);
break;
case FLOOR_DIV_EXPR:
if (arg2 == 0)
return false;
res = wi::div_floor (arg1, arg2, sign, overflow);
break;
case CEIL_DIV_EXPR:
if (arg2 == 0)
return false;
res = wi::div_ceil (arg1, arg2, sign, overflow);
break;
case ROUND_DIV_EXPR:
if (arg2 == 0)
return false;
res = wi::div_round (arg1, arg2, sign, overflow);
break;
case TRUNC_MOD_EXPR:
if (arg2 == 0)
return false;
res = wi::mod_trunc (arg1, arg2, sign, overflow);
break;
case FLOOR_MOD_EXPR:
if (arg2 == 0)
return false;
res = wi::mod_floor (arg1, arg2, sign, overflow);
break;
case CEIL_MOD_EXPR:
if (arg2 == 0)
return false;
res = wi::mod_ceil (arg1, arg2, sign, overflow);
break;
case ROUND_MOD_EXPR:
if (arg2 == 0)
return false;
res = wi::mod_round (arg1, arg2, sign, overflow);
break;
case MIN_EXPR:
res = wi::min (arg1, arg2, sign);
break;
case MAX_EXPR:
res = wi::max (arg1, arg2, sign);
break;
default:
return false;
}
return true;
}
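/* Editorial illustration (an addition, not original source): a minimal
   sketch of calling the wide_int worker directly, assuming ARG1 and
   ARG2 are INTEGER_CSTs of the same precision.

     wide_int a = wi::to_wide (arg1), b = wi::to_wide (arg2), res;
     wi::overflow_type ovf;
     if (wide_int_binop (res, PLUS_EXPR, a, b, TYPE_SIGN (type), &ovf))
       ...  res is a + b; ovf records whether the addition overflowed  */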
/* Combine two poly_ints ARG1 and ARG2 under operation CODE to
produce a new constant in RES. Return FALSE if we don't know how
to evaluate CODE at compile-time. */
static bool
poly_int_binop (poly_wide_int &res, enum tree_code code,
const_tree arg1, const_tree arg2,
signop sign, wi::overflow_type *overflow)
{
gcc_assert (NUM_POLY_INT_COEFFS != 1);
gcc_assert (poly_int_tree_p (arg1) && poly_int_tree_p (arg2));
switch (code)
{
case PLUS_EXPR:
res = wi::add (wi::to_poly_wide (arg1),
wi::to_poly_wide (arg2), sign, overflow);
break;
case MINUS_EXPR:
res = wi::sub (wi::to_poly_wide (arg1),
wi::to_poly_wide (arg2), sign, overflow);
break;
case MULT_EXPR:
if (TREE_CODE (arg2) == INTEGER_CST)
res = wi::mul (wi::to_poly_wide (arg1),
wi::to_wide (arg2), sign, overflow);
else if (TREE_CODE (arg1) == INTEGER_CST)
res = wi::mul (wi::to_poly_wide (arg2),
wi::to_wide (arg1), sign, overflow);
else
return false;
break;
case LSHIFT_EXPR:
if (TREE_CODE (arg2) == INTEGER_CST)
res = wi::to_poly_wide (arg1) << wi::to_wide (arg2);
else
return false;
break;
case BIT_IOR_EXPR:
if (TREE_CODE (arg2) != INTEGER_CST
|| !can_ior_p (wi::to_poly_wide (arg1), wi::to_wide (arg2),
&res))
return false;
break;
default:
return false;
}
return true;
}
/* Combine two integer constants ARG1 and ARG2 under operation CODE to
produce a new constant. Return NULL_TREE if we don't know how to
evaluate CODE at compile-time. */
tree
int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2,
int overflowable)
{
poly_wide_int poly_res;
tree type = TREE_TYPE (arg1);
signop sign = TYPE_SIGN (type);
wi::overflow_type overflow = wi::OVF_NONE;
if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg2) == INTEGER_CST)
{
wide_int warg1 = wi::to_wide (arg1), res;
wide_int warg2 = wi::to_wide (arg2, TYPE_PRECISION (type));
if (!wide_int_binop (res, code, warg1, warg2, sign, &overflow))
return NULL_TREE;
poly_res = res;
}
else if (!poly_int_tree_p (arg1)
|| !poly_int_tree_p (arg2)
|| !poly_int_binop (poly_res, code, arg1, arg2, sign, &overflow))
return NULL_TREE;
return force_fit_type (type, poly_res, overflowable,
(((sign == SIGNED || overflowable == -1)
&& overflow)
| TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2)));
}
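/* Editorial illustration (an addition, not original source): folding
   6 * 7 at compile time.

     tree a = build_int_cst (integer_type_node, 6);
     tree b = build_int_cst (integer_type_node, 7);
     int_const_binop (MULT_EXPR, a, b, 1);    yields INTEGER_CST 42

   The final argument is the OVERFLOWABLE value forwarded to
   force_fit_type.  */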
/* Return true if binary operation OP distributes over addition in operand
OPNO, with the other operand being held constant. OPNO counts from 1. */
static bool
distributes_over_addition_p (tree_code op, int opno)
{
switch (op)
{
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
return true;
case LSHIFT_EXPR:
return opno == 1;
default:
return false;
}
}
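/* Editorial note (an explanatory addition, not original source): shifts
   distribute over addition only in the shifted operand, i.e.
   (a + b) << c == (a << c) + (b << c) modulo wrapping, whereas
   a << (b + c) is not (a << b) + (a << c); hence the opno == 1 test
   above.  */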
/* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
constant. We assume ARG1 and ARG2 have the same data type, or at least
are the same kind of constant and the same machine mode. Return zero if
combining the constants is not allowed in the current operating mode. */
static tree
const_binop (enum tree_code code, tree arg1, tree arg2)
{
/* Sanity check for the recursive cases. */
if (!arg1 || !arg2)
return NULL_TREE;
STRIP_NOPS (arg1);
STRIP_NOPS (arg2);
if (poly_int_tree_p (arg1) && poly_int_tree_p (arg2))
{
if (code == POINTER_PLUS_EXPR)
return int_const_binop (PLUS_EXPR,
arg1, fold_convert (TREE_TYPE (arg1), arg2));
return int_const_binop (code, arg1, arg2);
}
if (TREE_CODE (arg1) == REAL_CST && TREE_CODE (arg2) == REAL_CST)
{
machine_mode mode;
REAL_VALUE_TYPE d1;
REAL_VALUE_TYPE d2;
REAL_VALUE_TYPE value;
REAL_VALUE_TYPE result;
bool inexact;
tree t, type;
/* The following codes are handled by real_arithmetic. */
switch (code)
{
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
case RDIV_EXPR:
case MIN_EXPR:
case MAX_EXPR:
break;
default:
return NULL_TREE;
}
d1 = TREE_REAL_CST (arg1);
d2 = TREE_REAL_CST (arg2);
type = TREE_TYPE (arg1);
mode = TYPE_MODE (type);
/* Don't perform operation if we honor signaling NaNs and
either operand is a signaling NaN. */
if (HONOR_SNANS (mode)
&& (REAL_VALUE_ISSIGNALING_NAN (d1)
|| REAL_VALUE_ISSIGNALING_NAN (d2)))
return NULL_TREE;
/* Don't perform operation if it would raise a division
by zero exception. */
if (code == RDIV_EXPR
&& real_equal (&d2, &dconst0)
&& (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
return NULL_TREE;
/* If either operand is a NaN, just return it. Otherwise, set up
for floating-point trap; we return an overflow. */
if (REAL_VALUE_ISNAN (d1))
{
/* Make the resulting NaN value a qNaN when flag_signaling_nans
is off. */
d1.signalling = 0;
t = build_real (type, d1);
return t;
}
else if (REAL_VALUE_ISNAN (d2))
{
/* Make the resulting NaN value a qNaN when flag_signaling_nans
is off. */
d2.signalling = 0;
t = build_real (type, d2);
return t;
}
inexact = real_arithmetic (&value, code, &d1, &d2);
real_convert (&result, mode, &value);
/* Don't constant fold this floating point operation if
both operands are not NaN but the result is NaN, and
flag_trapping_math is set. Such operations should raise an
invalid operation exception. */
if (flag_trapping_math
&& MODE_HAS_NANS (mode)
&& REAL_VALUE_ISNAN (result)
&& !REAL_VALUE_ISNAN (d1)
&& !REAL_VALUE_ISNAN (d2))
return NULL_TREE;
/* Don't constant fold this floating point operation if
the result has overflowed and flag_trapping_math is set. */
if (flag_trapping_math
&& MODE_HAS_INFINITIES (mode)
&& REAL_VALUE_ISINF (result)
&& !REAL_VALUE_ISINF (d1)
&& !REAL_VALUE_ISINF (d2))
return NULL_TREE;
/* Don't constant fold this floating point operation if the
result may depend upon the run-time rounding mode and
flag_rounding_math is set, or if GCC's software emulation
is unable to accurately represent the result. */
if ((flag_rounding_math
|| (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
&& (inexact || !real_identical (&result, &value)))
return NULL_TREE;
t = build_real (type, result);
TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
return t;
}
if (TREE_CODE (arg1) == FIXED_CST)
{
FIXED_VALUE_TYPE f1;
FIXED_VALUE_TYPE f2;
FIXED_VALUE_TYPE result;
tree t, type;
int sat_p;
bool overflow_p;
/* The following codes are handled by fixed_arithmetic. */
switch (code)
{
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
case TRUNC_DIV_EXPR:
if (TREE_CODE (arg2) != FIXED_CST)
return NULL_TREE;
f2 = TREE_FIXED_CST (arg2);
break;
case LSHIFT_EXPR:
case RSHIFT_EXPR:
{
if (TREE_CODE (arg2) != INTEGER_CST)
return NULL_TREE;
wi::tree_to_wide_ref w2 = wi::to_wide (arg2);
f2.data.high = w2.elt (1);
f2.data.low = w2.ulow ();
f2.mode = SImode;
}
break;
default:
return NULL_TREE;
}
f1 = TREE_FIXED_CST (arg1);
type = TREE_TYPE (arg1);
sat_p = TYPE_SATURATING (type);
overflow_p = fixed_arithmetic (&result, code, &f1, &f2, sat_p);
t = build_fixed (type, result);
/* Propagate overflow flags. */
if (overflow_p | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
TREE_OVERFLOW (t) = 1;
return t;
}
if (TREE_CODE (arg1) == COMPLEX_CST && TREE_CODE (arg2) == COMPLEX_CST)
{
tree type = TREE_TYPE (arg1);
tree r1 = TREE_REALPART (arg1);
tree i1 = TREE_IMAGPART (arg1);
tree r2 = TREE_REALPART (arg2);
tree i2 = TREE_IMAGPART (arg2);
tree real, imag;
switch (code)
{
case PLUS_EXPR:
case MINUS_EXPR:
real = const_binop (code, r1, r2);
imag = const_binop (code, i1, i2);
break;
case MULT_EXPR:
if (COMPLEX_FLOAT_TYPE_P (type))
return do_mpc_arg2 (arg1, arg2, type,
/* do_nonfinite= */ folding_initializer,
mpc_mul);
real = const_binop (MINUS_EXPR,
const_binop (MULT_EXPR, r1, r2),
const_binop (MULT_EXPR, i1, i2));
imag = const_binop (PLUS_EXPR,
const_binop (MULT_EXPR, r1, i2),
const_binop (MULT_EXPR, i1, r2));
break;
case RDIV_EXPR:
if (COMPLEX_FLOAT_TYPE_P (type))
return do_mpc_arg2 (arg1, arg2, type,
/* do_nonfinite= */ folding_initializer,
mpc_div);
/* Fallthru. */
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
if (flag_complex_method == 0)
{
/* Keep this algorithm in sync with
tree-complex.cc:expand_complex_div_straight().
Expand complex division to scalars, straightforward algorithm.
a / b = ((ar*br + ai*bi)/t) + i((ai*br - ar*bi)/t)
t = br*br + bi*bi
*/
tree magsquared
= const_binop (PLUS_EXPR,
const_binop (MULT_EXPR, r2, r2),
const_binop (MULT_EXPR, i2, i2));
tree t1
= const_binop (PLUS_EXPR,
const_binop (MULT_EXPR, r1, r2),
const_binop (MULT_EXPR, i1, i2));
tree t2
= const_binop (MINUS_EXPR,
const_binop (MULT_EXPR, i1, r2),
const_binop (MULT_EXPR, r1, i2));
real = const_binop (code, t1, magsquared);
imag = const_binop (code, t2, magsquared);
}
else
{
/* Keep this algorithm in sync with
tree-complex.cc:expand_complex_div_wide().
Expand complex division to scalars, modified algorithm to minimize
overflow with wide input ranges. */
tree compare = fold_build2 (LT_EXPR, boolean_type_node,
fold_abs_const (r2, TREE_TYPE (type)),
fold_abs_const (i2, TREE_TYPE (type)));
if (integer_nonzerop (compare))
{
/* In the TRUE branch, we compute
ratio = br/bi;
div = (br * ratio) + bi;
tr = (ar * ratio) + ai;
ti = (ai * ratio) - ar;
tr = tr / div;
ti = ti / div; */
tree ratio = const_binop (code, r2, i2);
tree div = const_binop (PLUS_EXPR, i2,
const_binop (MULT_EXPR, r2, ratio));
real = const_binop (MULT_EXPR, r1, ratio);
real = const_binop (PLUS_EXPR, real, i1);
real = const_binop (code, real, div);
imag = const_binop (MULT_EXPR, i1, ratio);
imag = const_binop (MINUS_EXPR, imag, r1);
imag = const_binop (code, imag, div);
}
else
{
/* In the FALSE branch, we compute
ratio = bi/br;
div = (bi * ratio) + br;
tr = (ai * ratio) + ar;
ti = ai - (ar * ratio);
tr = tr / div;
ti = ti / div; */
tree ratio = const_binop (code, i2, r2);
tree div = const_binop (PLUS_EXPR, r2,
const_binop (MULT_EXPR, i2, ratio));
real = const_binop (MULT_EXPR, i1, ratio);
real = const_binop (PLUS_EXPR, real, r1);
real = const_binop (code, real, div);
imag = const_binop (MULT_EXPR, r1, ratio);
imag = const_binop (MINUS_EXPR, i1, imag);
imag = const_binop (code, imag, div);
}
}
break;
default:
return NULL_TREE;
}
if (real && imag)
return build_complex (type, real, imag);
}
if (TREE_CODE (arg1) == VECTOR_CST
&& TREE_CODE (arg2) == VECTOR_CST
&& known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg1)),
TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg2))))
{
tree type = TREE_TYPE (arg1);
bool step_ok_p;
if (VECTOR_CST_STEPPED_P (arg1)
&& VECTOR_CST_STEPPED_P (arg2))
/* We can operate directly on the encoding if:
a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
implies
(a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
Addition and subtraction are the supported operators
for which this is true. */
step_ok_p = (code == PLUS_EXPR || code == MINUS_EXPR);
else if (VECTOR_CST_STEPPED_P (arg1))
/* We can operate directly on stepped encodings if:
a3 - a2 == a2 - a1
implies:
(a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
which is true if (x -> x op c) distributes over addition. */
step_ok_p = distributes_over_addition_p (code, 1);
else
/* Similarly in reverse. */
step_ok_p = distributes_over_addition_p (code, 2);
tree_vector_builder elts;
if (!elts.new_binary_operation (type, arg1, arg2, step_ok_p))
return NULL_TREE;
unsigned int count = elts.encoded_nelts ();
for (unsigned int i = 0; i < count; ++i)
{
tree elem1 = VECTOR_CST_ELT (arg1, i);
tree elem2 = VECTOR_CST_ELT (arg2, i);
tree elt = const_binop (code, elem1, elem2);
/* It is possible that const_binop cannot handle the given
code and returns NULL_TREE. */
if (elt == NULL_TREE)
return NULL_TREE;
elts.quick_push (elt);
}
return elts.build ();
}
/* Shifts allow a scalar offset for a vector. */
if (TREE_CODE (arg1) == VECTOR_CST
&& TREE_CODE (arg2) == INTEGER_CST)
{
tree type = TREE_TYPE (arg1);
bool step_ok_p = distributes_over_addition_p (code, 1);
tree_vector_builder elts;
if (!elts.new_unary_operation (type, arg1, step_ok_p))
return NULL_TREE;
unsigned int count = elts.encoded_nelts ();
for (unsigned int i = 0; i < count; ++i)
{
tree elem1 = VECTOR_CST_ELT (arg1, i);
tree elt = const_binop (code, elem1, arg2);
/* It is possible that const_binop cannot handle the given
code and returns NULL_TREE. */
if (elt == NULL_TREE)
return NULL_TREE;
elts.quick_push (elt);
}
return elts.build ();
}
return NULL_TREE;
}
/* Overload that adds a TYPE parameter to be able to dispatch
to fold_relational_const. */
tree
const_binop (enum tree_code code, tree type, tree arg1, tree arg2)
{
if (TREE_CODE_CLASS (code) == tcc_comparison)
return fold_relational_const (code, type, arg1, arg2);
/* ??? Until we make the const_binop worker take the type of the
result as an argument, put those cases that need it here. */
switch (code)
{
case VEC_SERIES_EXPR:
if (CONSTANT_CLASS_P (arg1)
&& CONSTANT_CLASS_P (arg2))
return build_vec_series (type, arg1, arg2);
return NULL_TREE;
case COMPLEX_EXPR:
if ((TREE_CODE (arg1) == REAL_CST
&& TREE_CODE (arg2) == REAL_CST)
|| (TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (arg2) == INTEGER_CST))
return build_complex (type, arg1, arg2);
return NULL_TREE;
case POINTER_DIFF_EXPR:
if (poly_int_tree_p (arg1) && poly_int_tree_p (arg2))
{
poly_offset_int res = (wi::to_poly_offset (arg1)
- wi::to_poly_offset (arg2));
return force_fit_type (type, res, 1,
TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
}
return NULL_TREE;
case VEC_PACK_TRUNC_EXPR:
case VEC_PACK_FIX_TRUNC_EXPR:
case VEC_PACK_FLOAT_EXPR:
{
unsigned HOST_WIDE_INT out_nelts, in_nelts, i;
if (TREE_CODE (arg1) != VECTOR_CST
|| TREE_CODE (arg2) != VECTOR_CST)
return NULL_TREE;
if (!VECTOR_CST_NELTS (arg1).is_constant (&in_nelts))
return NULL_TREE;
out_nelts = in_nelts * 2;
gcc_assert (known_eq (in_nelts, VECTOR_CST_NELTS (arg2))
&& known_eq (out_nelts, TYPE_VECTOR_SUBPARTS (type)));
tree_vector_builder elts (type, out_nelts, 1);
for (i = 0; i < out_nelts; i++)
{
tree elt = (i < in_nelts
? VECTOR_CST_ELT (arg1, i)
: VECTOR_CST_ELT (arg2, i - in_nelts));
elt = fold_convert_const (code == VEC_PACK_TRUNC_EXPR
? NOP_EXPR
: code == VEC_PACK_FLOAT_EXPR
? FLOAT_EXPR : FIX_TRUNC_EXPR,
TREE_TYPE (type), elt);
if (elt == NULL_TREE || !CONSTANT_CLASS_P (elt))
return NULL_TREE;
elts.quick_push (elt);
}
return elts.build ();
}
case VEC_WIDEN_MULT_LO_EXPR:
case VEC_WIDEN_MULT_HI_EXPR:
case VEC_WIDEN_MULT_EVEN_EXPR:
case VEC_WIDEN_MULT_ODD_EXPR:
{
unsigned HOST_WIDE_INT out_nelts, in_nelts, out, ofs, scale;
if (TREE_CODE (arg1) != VECTOR_CST || TREE_CODE (arg2) != VECTOR_CST)
return NULL_TREE;
if (!VECTOR_CST_NELTS (arg1).is_constant (&in_nelts))
return NULL_TREE;
out_nelts = in_nelts / 2;
gcc_assert (known_eq (in_nelts, VECTOR_CST_NELTS (arg2))
&& known_eq (out_nelts, TYPE_VECTOR_SUBPARTS (type)));
if (code == VEC_WIDEN_MULT_LO_EXPR)
scale = 0, ofs = BYTES_BIG_ENDIAN ? out_nelts : 0;
else if (code == VEC_WIDEN_MULT_HI_EXPR)
scale = 0, ofs = BYTES_BIG_ENDIAN ? 0 : out_nelts;
else if (code == VEC_WIDEN_MULT_EVEN_EXPR)
scale = 1, ofs = 0;
else /* if (code == VEC_WIDEN_MULT_ODD_EXPR) */
scale = 1, ofs = 1;
tree_vector_builder elts (type, out_nelts, 1);
for (out = 0; out < out_nelts; out++)
{
unsigned int in = (out << scale) + ofs;
tree t1 = fold_convert_const (NOP_EXPR, TREE_TYPE (type),
VECTOR_CST_ELT (arg1, in));
tree t2 = fold_convert_const (NOP_EXPR, TREE_TYPE (type),
VECTOR_CST_ELT (arg2, in));
if (t1 == NULL_TREE || t2 == NULL_TREE)
return NULL_TREE;
tree elt = const_binop (MULT_EXPR, t1, t2);
if (elt == NULL_TREE || !CONSTANT_CLASS_P (elt))
return NULL_TREE;
elts.quick_push (elt);
}
return elts.build ();
}
default:;
}
if (TREE_CODE_CLASS (code) != tcc_binary)
return NULL_TREE;
/* Make sure TYPE and ARG1 have the same saturating flag. */
gcc_checking_assert (TYPE_SATURATING (type)
== TYPE_SATURATING (TREE_TYPE (arg1)));
return const_binop (code, arg1, arg2);
}
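/* Editorial illustration (an addition, not original source): the typed
   overload dispatches comparisons to fold_relational_const, e.g.

     const_binop (LT_EXPR, boolean_type_node, size_int (2), size_int (5))

   yields boolean_true_node.  */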
/* Compute CODE ARG0 with resulting type TYPE with ARG0 being constant.
Return zero if computing the constant is not possible. */
tree
const_unop (enum tree_code code, tree type, tree arg0)
{
/* Don't perform the operation, other than NEGATE and ABS, if
flag_signaling_nans is on and the operand is a signaling NaN. */
if (TREE_CODE (arg0) == REAL_CST
&& HONOR_SNANS (arg0)
&& REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0))
&& code != NEGATE_EXPR
&& code != ABS_EXPR
&& code != ABSU_EXPR)
return NULL_TREE;
switch (code)
{
CASE_CONVERT:
case FLOAT_EXPR:
case FIX_TRUNC_EXPR:
case FIXED_CONVERT_EXPR:
return fold_convert_const (code, type, arg0);
case ADDR_SPACE_CONVERT_EXPR:
/* If the source address is 0, and the source address space
cannot have a valid object at 0, fold to dest type null. */
if (integer_zerop (arg0)
&& !(targetm.addr_space.zero_address_valid
(TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (arg0))))))
return fold_convert_const (code, type, arg0);
break;
case VIEW_CONVERT_EXPR:
return fold_view_convert_expr (type, arg0);
case NEGATE_EXPR:
{
/* Can't call fold_negate_const directly here as that doesn't
handle all cases and we might not be able to negate some
constants. */
tree tem = fold_negate_expr (UNKNOWN_LOCATION, arg0);
if (tem && CONSTANT_CLASS_P (tem))
return tem;
break;
}
case ABS_EXPR:
case ABSU_EXPR:
if (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST)
return fold_abs_const (arg0, type);
break;
case CONJ_EXPR:
if (TREE_CODE (arg0) == COMPLEX_CST)
{
tree ipart = fold_negate_const (TREE_IMAGPART (arg0),
TREE_TYPE (type));
return build_complex (type, TREE_REALPART (arg0), ipart);
}
break;
case BIT_NOT_EXPR:
if (TREE_CODE (arg0) == INTEGER_CST)
return fold_not_const (arg0, type);
else if (POLY_INT_CST_P (arg0))
return wide_int_to_tree (type, -poly_int_cst_value (arg0) - 1);
/* Perform BIT_NOT_EXPR on each element individually. */
else if (TREE_CODE (arg0) == VECTOR_CST)
{
tree elem;
/* This can cope with stepped encodings because ~x == -1 - x. */
tree_vector_builder elements;
elements.new_unary_operation (type, arg0, true);
unsigned int i, count = elements.encoded_nelts ();
for (i = 0; i < count; ++i)
{
elem = VECTOR_CST_ELT (arg0, i);
elem = const_unop (BIT_NOT_EXPR, TREE_TYPE (type), elem);
if (elem == NULL_TREE)
break;
elements.quick_push (elem);
}
if (i == count)
return elements.build ();
}
break;
case TRUTH_NOT_EXPR:
if (TREE_CODE (arg0) == INTEGER_CST)
return constant_boolean_node (integer_zerop (arg0), type);
break;
case REALPART_EXPR:
if (TREE_CODE (arg0) == COMPLEX_CST)
return fold_convert (type, TREE_REALPART (arg0));
break;
case IMAGPART_EXPR:
if (TREE_CODE (arg0) == COMPLEX_CST)
return fold_convert (type, TREE_IMAGPART (arg0));
break;
case VEC_UNPACK_LO_EXPR:
case VEC_UNPACK_HI_EXPR:
case VEC_UNPACK_FLOAT_LO_EXPR:
case VEC_UNPACK_FLOAT_HI_EXPR:
case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
{
unsigned HOST_WIDE_INT out_nelts, in_nelts, i;
enum tree_code subcode;
if (TREE_CODE (arg0) != VECTOR_CST)
return NULL_TREE;
if (!VECTOR_CST_NELTS (arg0).is_constant (&in_nelts))
return NULL_TREE;
out_nelts = in_nelts / 2;
gcc_assert (known_eq (out_nelts, TYPE_VECTOR_SUBPARTS (type)));
unsigned int offset = 0;
if ((!BYTES_BIG_ENDIAN) ^ (code == VEC_UNPACK_LO_EXPR
|| code == VEC_UNPACK_FLOAT_LO_EXPR
|| code == VEC_UNPACK_FIX_TRUNC_LO_EXPR))
offset = out_nelts;
if (code == VEC_UNPACK_LO_EXPR || code == VEC_UNPACK_HI_EXPR)
subcode = NOP_EXPR;
else if (code == VEC_UNPACK_FLOAT_LO_EXPR
|| code == VEC_UNPACK_FLOAT_HI_EXPR)
subcode = FLOAT_EXPR;
else
subcode = FIX_TRUNC_EXPR;
tree_vector_builder elts (type, out_nelts, 1);
for (i = 0; i < out_nelts; i++)
{
tree elt = fold_convert_const (subcode, TREE_TYPE (type),
VECTOR_CST_ELT (arg0, i + offset));
if (elt == NULL_TREE || !CONSTANT_CLASS_P (elt))
return NULL_TREE;
elts.quick_push (elt);
}
return elts.build ();
}
case VEC_DUPLICATE_EXPR:
if (CONSTANT_CLASS_P (arg0))
return build_vector_from_val (type, arg0);
return NULL_TREE;
default:
break;
}
return NULL_TREE;
}
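/* Editorial illustration (an addition, not original source):

     const_unop (BIT_NOT_EXPR, integer_type_node,
                 build_int_cst (integer_type_node, 0))

   yields INTEGER_CST -1, since fold_not_const computes the bitwise
   complement.  */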
/* Create a sizetype INT_CST node with NUMBER sign extended. KIND
indicates which particular sizetype to create. */
tree
size_int_kind (poly_int64 number, enum size_type_kind kind)
{
return build_int_cst (sizetype_tab[(int) kind], number);
}
/* Combine operands OP1 and OP2 with arithmetic operation CODE. CODE
is a tree code. The type of the result is taken from the operands.
Both must be equivalent integer types, ala int_binop_types_match_p.
If the operands are constant, so is the result. */
tree
size_binop_loc (location_t loc, enum tree_code code, tree arg0, tree arg1)
{
tree type = TREE_TYPE (arg0);
if (arg0 == error_mark_node || arg1 == error_mark_node)
return error_mark_node;
gcc_assert (int_binop_types_match_p (code, TREE_TYPE (arg0),
TREE_TYPE (arg1)));
/* Handle the special case of two poly_int constants faster. */
if (poly_int_tree_p (arg0) && poly_int_tree_p (arg1))
{
/* And some specific cases even faster than that. */
if (code == PLUS_EXPR)
{
if (integer_zerop (arg0)
&& !TREE_OVERFLOW (tree_strip_any_location_wrapper (arg0)))
return arg1;
if (integer_zerop (arg1)
&& !TREE_OVERFLOW (tree_strip_any_location_wrapper (arg1)))
return arg0;
}
else if (code == MINUS_EXPR)
{
if (integer_zerop (arg1)
&& !TREE_OVERFLOW (tree_strip_any_location_wrapper (arg1)))
return arg0;
}
else if (code == MULT_EXPR)
{
if (integer_onep (arg0)
&& !TREE_OVERFLOW (tree_strip_any_location_wrapper (arg0)))
return arg1;
}
/* Handle general case of two integer constants. For sizetype
constant calculations we always want to know about overflow,
even in the unsigned case. */
tree res = int_const_binop (code, arg0, arg1, -1);
if (res != NULL_TREE)
return res;
}
return fold_build2_loc (loc, code, type, arg0, arg1);
}
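/* Editorial illustration (an addition, not original source): combining
   two sizetype constants via the location-less size_binop wrapper macro.

     size_binop (PLUS_EXPR, size_int (4), size_int (8));
       yields sizetype INTEGER_CST 12  */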
/* Given two values, either both of sizetype or both of bitsizetype,
compute the difference between the two values. Return the result
in the signed type corresponding to the type of the operands. */
tree
size_diffop_loc (location_t loc, tree arg0, tree arg1)
{
tree type = TREE_TYPE (arg0);
tree ctype;
gcc_assert (int_binop_types_match_p (MINUS_EXPR, TREE_TYPE (arg0),
TREE_TYPE (arg1)));
/* If the type is already signed, just do the simple thing. */
if (!TYPE_UNSIGNED (type))
return size_binop_loc (loc, MINUS_EXPR, arg0, arg1);
if (type == sizetype)
ctype = ssizetype;
else if (type == bitsizetype)
ctype = sbitsizetype;
else
ctype = signed_type_for (type);
/* If either operand is not a constant, do the conversions to the signed
type and subtract. The hardware will do the right thing with any
overflow in the subtraction. */
if (TREE_CODE (arg0) != INTEGER_CST || TREE_CODE (arg1) != INTEGER_CST)
return size_binop_loc (loc, MINUS_EXPR,
fold_convert_loc (loc, ctype, arg0),
fold_convert_loc (loc, ctype, arg1));
/* If ARG0 is larger than ARG1, subtract and return the result in CTYPE.
Otherwise, subtract the other way, convert to CTYPE (we know that can't
overflow) and negate (which can't either). Special-case a result
of zero while we're here. */
if (tree_int_cst_equal (arg0, arg1))
return build_int_cst (ctype, 0);
else if (tree_int_cst_lt (arg1, arg0))
return fold_convert_loc (loc, ctype,
size_binop_loc (loc, MINUS_EXPR, arg0, arg1));
else
return size_binop_loc (loc, MINUS_EXPR, build_int_cst (ctype, 0),
fold_convert_loc (loc, ctype,
size_binop_loc (loc,
MINUS_EXPR,
arg1, arg0)));
}
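/* Editorial illustration (an addition, not original source): subtracting
   unsigned sizetype constants yields a signed ssizetype result.

     size_diffop_loc (loc, size_int (2), size_int (5));
       yields ssizetype INTEGER_CST -3  */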
/* A subroutine of fold_convert_const handling conversions of an
INTEGER_CST to another integer type. */
static tree
fold_convert_const_int_from_int (tree type, const_tree arg1)
{
/* Given an integer constant, make new constant with new type,
appropriately sign-extended or truncated. Use widest_int
so that any extension is done according to ARG1's type. */
return force_fit_type (type, wi::to_widest (arg1),
!POINTER_TYPE_P (TREE_TYPE (arg1)),
TREE_OVERFLOW (arg1));
}
/* A subroutine of fold_convert_const handling conversions of a REAL_CST
to an integer type. */
static tree
fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg1)
{
bool overflow = false;
tree t;
/* The following code implements the floating point to integer
conversion rules required by the Java Language Specification,
that IEEE NaNs are mapped to zero and values that overflow
the target precision saturate, i.e. values greater than
INT_MAX are mapped to INT_MAX, and values less than INT_MIN
are mapped to INT_MIN. These semantics are allowed by the
C and C++ standards that simply state that the behavior of
FP-to-integer conversion is unspecified upon overflow. */
wide_int val;
REAL_VALUE_TYPE r;
REAL_VALUE_TYPE x = TREE_REAL_CST (arg1);
switch (code)
{
case FIX_TRUNC_EXPR:
real_trunc (&r, VOIDmode, &x);
break;
default:
gcc_unreachable ();
}
/* If R is NaN, return zero and show we have an overflow. */
if (REAL_VALUE_ISNAN (r))
{
overflow = true;
val = wi::zero (TYPE_PRECISION (type));
}
/* See if R is less than the lower bound or greater than the
upper bound. */
if (! overflow)
{
tree lt = TYPE_MIN_VALUE (type);
REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt);
if (real_less (&r, &l))
{
overflow = true;
val = wi::to_wide (lt);
}
}
if (! overflow)
{
tree ut = TYPE_MAX_VALUE (type);
if (ut)
{
REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut);
if (real_less (&u, &r))
{
overflow = true;
val = wi::to_wide (ut);
}
}
}
if (! overflow)
val = real_to_integer (&r, &overflow, TYPE_PRECISION (type));
t = force_fit_type (type, val, -1, overflow | TREE_OVERFLOW (arg1));
return t;
}
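/* Editorial illustration (an addition, not original source): the
   saturating semantics described above, converting to a 32-bit signed
   type.

     (int) 3.9    =>  3          (truncation toward zero)
     (int) 1e19   =>  INT_MAX    (overflow saturates, TREE_OVERFLOW set)
     (int) -1e19  =>  INT_MIN    (likewise)
     (int) NaN    =>  0          (TREE_OVERFLOW set)  */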
/* A subroutine of fold_convert_const handling conversions of a
FIXED_CST to an integer type. */
static tree
fold_convert_const_int_from_fixed (tree type, const_tree arg1)
{
tree t;
double_int temp, temp_trunc;
scalar_mode mode;
/* Right shift FIXED_CST to temp by fbit. */
temp = TREE_FIXED_CST (arg1).data;
mode = TREE_FIXED_CST (arg1).mode;
if (GET_MODE_FBIT (mode) < HOST_BITS_PER_DOUBLE_INT)
{
temp = temp.rshift (GET_MODE_FBIT (mode),
HOST_BITS_PER_DOUBLE_INT,
SIGNED_FIXED_POINT_MODE_P (mode));
/* Left shift temp to temp_trunc by fbit. */
temp_trunc = temp.lshift (GET_MODE_FBIT (mode),
HOST_BITS_PER_DOUBLE_INT,
SIGNED_FIXED_POINT_MODE_P (mode));
}
else
{
temp = double_int_zero;
temp_trunc = double_int_zero;
}
/* If FIXED_CST is negative, we need to round the value toward 0.
Do this by adding 1 to TEMP when the discarded fractional bits
are non-zero. */
if (SIGNED_FIXED_POINT_MODE_P (mode)
&& temp_trunc.is_negative ()
&& TREE_FIXED_CST (arg1).data != temp_trunc)
temp += double_int_one;
/* Given a fixed-point constant, make new constant with new type,
appropriately sign-extended or truncated. */
t = force_fit_type (type, temp, -1,
(temp.is_negative ()
&& (TYPE_UNSIGNED (type)
< TYPE_UNSIGNED (TREE_TYPE (arg1))))
| TREE_OVERFLOW (arg1));
return t;
}
/* A subroutine of fold_convert_const handling conversions of a REAL_CST
to another floating point type. */
static tree
fold_convert_const_real_from_real (tree type, const_tree arg1)
{
REAL_VALUE_TYPE value;
tree t;
/* Don't perform the operation if flag_signaling_nans is on
and the operand is a signaling NaN. */
if (HONOR_SNANS (arg1)
&& REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1)))
return NULL_TREE;
/* With flag_rounding_math we should respect the current rounding mode
unless the conversion is exact. */
if (HONOR_SIGN_DEPENDENT_ROUNDING (arg1)
&& !exact_real_truncate (TYPE_MODE (type), &TREE_REAL_CST (arg1)))
return NULL_TREE;
real_convert (&value, TYPE_MODE (type), &TREE_REAL_CST (arg1));
t = build_real (type, value);
/* If converting an infinity or NAN to a representation that doesn't
have one, set the overflow bit so that we can produce some kind of
error message at the appropriate point if necessary. It's not the
most user-friendly message, but it's better than nothing. */
if (REAL_VALUE_ISINF (TREE_REAL_CST (arg1))
&& !MODE_HAS_INFINITIES (TYPE_MODE (type)))
TREE_OVERFLOW (t) = 1;
else if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1))
&& !MODE_HAS_NANS (TYPE_MODE (type)))
TREE_OVERFLOW (t) = 1;
/* Regular overflow: the conversion produced an infinity in a mode
that can't represent infinities. */
else if (!MODE_HAS_INFINITIES (TYPE_MODE (type))
&& REAL_VALUE_ISINF (value)
&& !REAL_VALUE_ISINF (TREE_REAL_CST (arg1)))
TREE_OVERFLOW (t) = 1;
else
TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
return t;
}
/* A subroutine of fold_convert_const handling conversions of a FIXED_CST
to a floating point type. */
static tree
fold_convert_const_real_from_fixed (tree type, const_tree arg1)
{
REAL_VALUE_TYPE value;
tree t;
real_convert_from_fixed (&value, SCALAR_FLOAT_TYPE_MODE (type),
&TREE_FIXED_CST (arg1));
t = build_real (type, value);
TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
return t;
}
/* A subroutine of fold_convert_const handling conversions of a FIXED_CST
to another fixed-point type. */
static tree
fold_convert_const_fixed_from_fixed (tree type, const_tree arg1)
{
FIXED_VALUE_TYPE value;
tree t;
bool overflow_p;
overflow_p = fixed_convert (&value, SCALAR_TYPE_MODE (type),
&TREE_FIXED_CST (arg1), TYPE_SATURATING (type));
t = build_fixed (type, value);
/* Propagate overflow flags. */
if (overflow_p | TREE_OVERFLOW (arg1))
TREE_OVERFLOW (t) = 1;
return t;
}
/* A subroutine of fold_convert_const handling conversions of an
INTEGER_CST to a fixed-point type. */
static tree
fold_convert_const_fixed_from_int (tree type, const_tree arg1)
{
FIXED_VALUE_TYPE value;
tree t;
bool overflow_p;
double_int di;
gcc_assert (TREE_INT_CST_NUNITS (arg1) <= 2);
di.low = TREE_INT_CST_ELT (arg1, 0);
if (TREE_INT_CST_NUNITS (arg1) == 1)
di.high = (HOST_WIDE_INT) di.low < 0 ? HOST_WIDE_INT_M1 : 0;
else
di.high = TREE_INT_CST_ELT (arg1, 1);
overflow_p = fixed_convert_from_int (&value, SCALAR_TYPE_MODE (type), di,
TYPE_UNSIGNED (TREE_TYPE (arg1)),
TYPE_SATURATING (type));
t = build_fixed (type, value);
/* Propagate overflow flags. */
if (overflow_p | TREE_OVERFLOW (arg1))
TREE_OVERFLOW (t) = 1;
return t;
}
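/* Note on the double_int assembly above (illustrative, assuming a
64-bit HOST_WIDE_INT): a one-element INTEGER_CST is sign-extended
into DI.HIGH, so a constant of -1 yields DI.LOW == DI.HIGH == -1,
while a two-element constant supplies the high half explicitly. */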
/* A subroutine of fold_convert_const handling conversions of a REAL_CST
to a fixed-point type. */
static tree
fold_convert_const_fixed_from_real (tree type, const_tree arg1)
{
FIXED_VALUE_TYPE value;
tree t;
bool overflow_p;
overflow_p = fixed_convert_from_real (&value, SCALAR_TYPE_MODE (type),
&TREE_REAL_CST (arg1),
TYPE_SATURATING (type));
t = build_fixed (type, value);
/* Propagate overflow flags. */
if (overflow_p | TREE_OVERFLOW (arg1))
TREE_OVERFLOW (t) = 1;
return t;
}
/* Attempt to fold type conversion operation CODE of expression ARG1 to
type TYPE. If no simplification can be done return NULL_TREE. */
static tree
fold_convert_const (enum tree_code code, tree type, tree arg1)
{
tree arg_type = TREE_TYPE (arg1);
if (arg_type == type)
return arg1;
/* We can't widen types, since the runtime value could overflow the
original type before being extended to the new type. */
if (POLY_INT_CST_P (arg1)
&& (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))
&& TYPE_PRECISION (type) <= TYPE_PRECISION (arg_type))
return build_poly_int_cst (type,
poly_wide_int::from (poly_int_cst_value (arg1),
TYPE_PRECISION (type),
TYPE_SIGN (arg_type)));
if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type)
|| TREE_CODE (type) == OFFSET_TYPE)
{
if (TREE_CODE (arg1) == INTEGER_CST)
return fold_convert_const_int_from_int (type, arg1);
else if (TREE_CODE (arg1) == REAL_CST)
return fold_convert_const_int_from_real (code, type, arg1);
else if (TREE_CODE (arg1) == FIXED_CST)
return fold_convert_const_int_from_fixed (type, arg1);
}
else if (TREE_CODE (type) == REAL_TYPE)
{
if (TREE_CODE (arg1) == INTEGER_CST)
{
tree res = build_real_from_int_cst (type, arg1);
/* Avoid the folding if flag_rounding_math is on and the
conversion is not exact. */
if (HONOR_SIGN_DEPENDENT_ROUNDING (type))
{
bool fail = false;
wide_int w = real_to_integer (&TREE_REAL_CST (res), &fail,
TYPE_PRECISION (TREE_TYPE (arg1)));
if (fail || wi::ne_p (w, wi::to_wide (arg1)))
return NULL_TREE;
}
return res;
}
else if (TREE_CODE (arg1) == REAL_CST)
return fold_convert_const_real_from_real (type, arg1);
else if (TREE_CODE (arg1) == FIXED_CST)
return fold_convert_const_real_from_fixed (type, arg1);
}
else if (TREE_CODE (type) == FIXED_POINT_TYPE)
{
if (TREE_CODE (arg1) == FIXED_CST)
return fold_convert_const_fixed_from_fixed (type, arg1);
else if (TREE_CODE (arg1) == INTEGER_CST)
return fold_convert_const_fixed_from_int (type, arg1);
else if (TREE_CODE (arg1) == REAL_CST)
return fold_convert_const_fixed_from_real (type, arg1);
}
else if (TREE_CODE (type) == VECTOR_TYPE)
{
if (TREE_CODE (arg1) == VECTOR_CST
&& known_eq (TYPE_VECTOR_SUBPARTS (type), VECTOR_CST_NELTS (arg1)))
{
tree elttype = TREE_TYPE (type);
tree arg1_elttype = TREE_TYPE (TREE_TYPE (arg1));
/* We can't handle steps directly when extending, since the
values need to wrap at the original precision first. */
bool step_ok_p
= (INTEGRAL_TYPE_P (elttype)
&& INTEGRAL_TYPE_P (arg1_elttype)
&& TYPE_PRECISION (elttype) <= TYPE_PRECISION (arg1_elttype));
tree_vector_builder v;
if (!v.new_unary_operation (type, arg1, step_ok_p))
return NULL_TREE;
unsigned int len = v.encoded_nelts ();
for (unsigned int i = 0; i < len; ++i)
{
tree elt = VECTOR_CST_ELT (arg1, i);
tree cvt = fold_convert_const (code, elttype, elt);
if (cvt == NULL_TREE)
return NULL_TREE;
v.quick_push (cvt);
}
return v.build ();
}
}
return NULL_TREE;
}
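/* Illustrative note: the POLY_INT_CST guard above permits only
narrowing, e.g. folding a 64-bit polynomial constant such as
16 + 8*X to a 32-bit type, where the truncation commutes with the
runtime evaluation; widening is refused because the runtime value
could first wrap in the original precision. */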
/* Construct a vector of zero elements of vector type TYPE. */
static tree
build_zero_vector (tree type)
{
tree t;
t = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
return build_vector_from_val (type, t);
}
/* Returns true if ARG is convertible to TYPE using a NOP_EXPR. */
bool
fold_convertible_p (const_tree type, const_tree arg)
{
const_tree orig = TREE_TYPE (arg);
if (type == orig)
return true;
if (TREE_CODE (arg) == ERROR_MARK
|| TREE_CODE (type) == ERROR_MARK
|| TREE_CODE (orig) == ERROR_MARK)
return false;
if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
return true;
switch (TREE_CODE (type))
{
case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
case POINTER_TYPE: case REFERENCE_TYPE:
case OFFSET_TYPE:
return (INTEGRAL_TYPE_P (orig)
|| (POINTER_TYPE_P (orig)
&& TYPE_PRECISION (type) <= TYPE_PRECISION (orig))
|| TREE_CODE (orig) == OFFSET_TYPE);
case REAL_TYPE:
case FIXED_POINT_TYPE:
case VOID_TYPE:
return TREE_CODE (type) == TREE_CODE (orig);
case VECTOR_TYPE:
return (VECTOR_TYPE_P (orig)
&& known_eq (TYPE_VECTOR_SUBPARTS (type),
TYPE_VECTOR_SUBPARTS (orig))
&& tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
default:
return false;
}
}
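/* Informal examples of the predicate above: an ENUMERAL_TYPE argument
is NOP-convertible to an INTEGER_TYPE; a POINTER_TYPE argument is
convertible only to an integral type of no greater precision; and
two VECTOR_TYPEs must agree in both element count and total size. */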
/* Convert expression ARG to type TYPE. Used by the middle-end for
simple conversions in preference to calling the front-end's convert. */
tree
fold_convert_loc (location_t loc, tree type, tree arg)
{
tree orig = TREE_TYPE (arg);
tree tem;
if (type == orig)
return arg;
if (TREE_CODE (arg) == ERROR_MARK
|| TREE_CODE (type) == ERROR_MARK
|| TREE_CODE (orig) == ERROR_MARK)
return error_mark_node;
switch (TREE_CODE (type))
{
case POINTER_TYPE:
case REFERENCE_TYPE:
/* Handle conversions between pointers to different address spaces. */
if (POINTER_TYPE_P (orig)
&& (TYPE_ADDR_SPACE (TREE_TYPE (type))
!= TYPE_ADDR_SPACE (TREE_TYPE (orig))))
return fold_build1_loc (loc, ADDR_SPACE_CONVERT_EXPR, type, arg);
/* fall through */
case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
case OFFSET_TYPE:
if (TREE_CODE (arg) == INTEGER_CST)
{
tem = fold_convert_const (NOP_EXPR, type, arg);
if (tem != NULL_TREE)
return tem;
}
if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
|| TREE_CODE (orig) == OFFSET_TYPE)
return fold_build1_loc (loc, NOP_EXPR, type, arg);
if (TREE_CODE (orig) == COMPLEX_TYPE)
return fold_convert_loc (loc, type,
fold_build1_loc (loc, REALPART_EXPR,
TREE_TYPE (orig), arg));
gcc_assert (TREE_CODE (orig) == VECTOR_TYPE
&& tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
return fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, arg);
case REAL_TYPE:
if (TREE_CODE (arg) == INTEGER_CST)
{
tem = fold_convert_const (FLOAT_EXPR, type, arg);
if (tem != NULL_TREE)
return tem;
}
else if (TREE_CODE (arg) == REAL_CST)
{
tem = fold_convert_const (NOP_EXPR, type, arg);
if (tem != NULL_TREE)
return tem;
}
else if (TREE_CODE (arg) == FIXED_CST)
{
tem = fold_convert_const (FIXED_CONVERT_EXPR, type, arg);
if (tem != NULL_TREE)
return tem;
}
switch (TREE_CODE (orig))
{
case INTEGER_TYPE:
case BOOLEAN_TYPE: case ENUMERAL_TYPE:
case POINTER_TYPE: case REFERENCE_TYPE:
return fold_build1_loc (loc, FLOAT_EXPR, type, arg);
case REAL_TYPE:
return fold_build1_loc (loc, NOP_EXPR, type, arg);
case FIXED_POINT_TYPE:
return fold_build1_loc (loc, FIXED_CONVERT_EXPR, type, arg);
case COMPLEX_TYPE:
tem = fold_build1_loc (loc, REALPART_EXPR, TREE_TYPE (orig), arg);
return fold_convert_loc (loc, type, tem);
default:
gcc_unreachable ();
}
case FIXED_POINT_TYPE:
if (TREE_CODE (arg) == FIXED_CST || TREE_CODE (arg) == INTEGER_CST
|| TREE_CODE (arg) == REAL_CST)
{
tem = fold_convert_const (FIXED_CONVERT_EXPR, type, arg);
if (tem != NULL_TREE)
goto fold_convert_exit;
}
switch (TREE_CODE (orig))
{
case FIXED_POINT_TYPE:
case INTEGER_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
case REAL_TYPE:
return fold_build1_loc (loc, FIXED_CONVERT_EXPR, type, arg);
case COMPLEX_TYPE:
tem = fold_build1_loc (loc, REALPART_EXPR, TREE_TYPE (orig), arg);
return fold_convert_loc (loc, type, tem);
default:
gcc_unreachable ();
}
case COMPLEX_TYPE:
switch (TREE_CODE (orig))
{
case INTEGER_TYPE:
case BOOLEAN_TYPE: case ENUMERAL_TYPE:
case POINTER_TYPE: case REFERENCE_TYPE:
case REAL_TYPE:
case FIXED_POINT_TYPE:
return fold_build2_loc (loc, COMPLEX_EXPR, type,
fold_convert_loc (loc, TREE_TYPE (type), arg),
fold_convert_loc (loc, TREE_TYPE (type),
integer_zero_node));
case COMPLEX_TYPE:
{
tree rpart, ipart;
if (TREE_CODE (arg) == COMPLEX_EXPR)
{
rpart = fold_convert_loc (loc, TREE_TYPE (type),
TREE_OPERAND (arg, 0));
ipart = fold_convert_loc (loc, TREE_TYPE (type),
TREE_OPERAND (arg, 1));
return fold_build2_loc (loc, COMPLEX_EXPR, type, rpart, ipart);
}
arg = save_expr (arg);
rpart = fold_build1_loc (loc, REALPART_EXPR, TREE_TYPE (orig), arg);
ipart = fold_build1_loc (loc, IMAGPART_EXPR, TREE_TYPE (orig), arg);
rpart = fold_convert_loc (loc, TREE_TYPE (type), rpart);
ipart = fold_convert_loc (loc, TREE_TYPE (type), ipart);
return fold_build2_loc (loc, COMPLEX_EXPR, type, rpart, ipart);
}
default:
gcc_unreachable ();
}
case VECTOR_TYPE:
if (integer_zerop (arg))
return build_zero_vector (type);
gcc_assert (tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
gcc_assert (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
|| TREE_CODE (orig) == VECTOR_TYPE);
return fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, arg);
case VOID_TYPE:
tem = fold_ignored_result (arg);
return fold_build1_loc (loc, NOP_EXPR, type, tem);
default:
if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
return fold_build1_loc (loc, NOP_EXPR, type, arg);
gcc_unreachable ();
}
fold_convert_exit:
protected_set_expr_location_unshare (tem, loc);
return tem;
}
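/* Informal example of the dispatch above: converting a COMPLEX_TYPE
value to a REAL_TYPE goes through REALPART_EXPR, matching C's rule
that a complex-to-real conversion discards the imaginary part, while
the reverse direction pairs the converted value with a zero
imaginary part via COMPLEX_EXPR. */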
/* Return false if X can be assumed not to be an lvalue, true
otherwise. */
static bool
maybe_lvalue_p (const_tree x)
{
/* We only need to wrap lvalue tree codes. */
switch (TREE_CODE (x))
{
case VAR_DECL:
case PARM_DECL:
case RESULT_DECL:
case LABEL_DECL:
case FUNCTION_DECL:
case SSA_NAME:
case COMPONENT_REF:
case MEM_REF:
case INDIRECT_REF:
case ARRAY_REF:
case ARRAY_RANGE_REF:
case BIT_FIELD_REF:
case OBJ_TYPE_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
case SAVE_EXPR:
case TRY_CATCH_EXPR:
case WITH_CLEANUP_EXPR:
case COMPOUND_EXPR:
case MODIFY_EXPR:
case TARGET_EXPR:
case COND_EXPR:
case BIND_EXPR:
case VIEW_CONVERT_EXPR:
break;
default:
/* Assume the worst for front-end tree codes. */
if ((int)TREE_CODE (x) >= NUM_TREE_CODES)
break;
return false;
}
return true;
}
/* Return an expr equal to X but certainly not valid as an lvalue. */
tree
non_lvalue_loc (location_t loc, tree x)
{
/* While we are in GIMPLE, NON_LVALUE_EXPR doesn't mean anything to
us. */
if (in_gimple_form)
return x;
if (! maybe_lvalue_p (x))
return x;
return build1_loc (loc, NON_LVALUE_EXPR, TREE_TYPE (x), x);
}
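/* Classic use (informal): when a fold turns "cond ? x : x" into plain
"x", the result must not become assignable, since the conditional
expression is not an lvalue in C; non_lvalue_loc therefore wraps X
in NON_LVALUE_EXPR. Constants are already non-lvalues per
maybe_lvalue_p and are returned unchanged. */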
/* Given a tree comparison code, return the code that is the logical inverse.
It is generally not safe to do this for floating-point comparisons, except
for EQ_EXPR, NE_EXPR, ORDERED_EXPR and UNORDERED_EXPR, so we return
ERROR_MARK in this case. */
enum tree_code
invert_tree_comparison (enum tree_code code, bool honor_nans)
{
if (honor_nans && flag_trapping_math && code != EQ_EXPR && code != NE_EXPR
&& code != ORDERED_EXPR && code != UNORDERED_EXPR)
return ERROR_MARK;
switch (code)
{
case EQ_EXPR:
return NE_EXPR;
case NE_EXPR:
return EQ_EXPR;
case GT_EXPR:
return honor_nans ? UNLE_EXPR : LE_EXPR;
case GE_EXPR:
return honor_nans ? UNLT_EXPR : LT_EXPR;
case LT_EXPR:
return honor_nans ? UNGE_EXPR : GE_EXPR;
case LE_EXPR:
return honor_nans ? UNGT_EXPR : GT_EXPR;
case LTGT_EXPR:
return UNEQ_EXPR;
case UNEQ_EXPR:
return LTGT_EXPR;
case UNGT_EXPR:
return LE_EXPR;
case UNGE_EXPR:
return LT_EXPR;
case UNLT_EXPR:
return GE_EXPR;
case UNLE_EXPR:
return GT_EXPR;
case ORDERED_EXPR:
return UNORDERED_EXPR;
case UNORDERED_EXPR:
return ORDERED_EXPR;
default:
gcc_unreachable ();
}
}
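/* Examples (illustrative): without NaNs the inverse of a < b is
a >= b, so invert_tree_comparison (LT_EXPR, false) == GE_EXPR.
When NaNs are honored, the inverse must also be true for unordered
operands, hence UNGE_EXPR; and under -ftrapping-math that rewrite
would lose the invalid-operand exception LT raises on NaNs, so
ERROR_MARK is returned instead. */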
/* Similar, but return the comparison that results if the operands are
swapped. This is safe for floating-point. */
enum tree_code
swap_tree_comparison (enum tree_code code)
{
switch (code)
{
case EQ_EXPR:
case NE_EXPR:
case ORDERED_EXPR:
case UNORDERED_EXPR:
case LTGT_EXPR:
case UNEQ_EXPR:
return code;
case GT_EXPR:
return LT_EXPR;
case GE_EXPR:
return LE_EXPR;
case LT_EXPR:
return GT_EXPR;
case LE_EXPR:
return GE_EXPR;
case UNGT_EXPR:
return UNLT_EXPR;
case UNGE_EXPR:
return UNLE_EXPR;
case UNLT_EXPR:
return UNGT_EXPR;
case UNLE_EXPR:
return UNGE_EXPR;
default:
gcc_unreachable ();
}
}
/* Convert a comparison tree code from an enum tree_code representation
into a compcode bit-based encoding. This function is the inverse of
compcode_to_comparison. */
static enum comparison_code
comparison_to_compcode (enum tree_code code)
{
switch (code)
{
case LT_EXPR:
return COMPCODE_LT;
case EQ_EXPR:
return COMPCODE_EQ;
case LE_EXPR:
return COMPCODE_LE;
case GT_EXPR:
return COMPCODE_GT;
case NE_EXPR:
return COMPCODE_NE;
case GE_EXPR:
return COMPCODE_GE;
case ORDERED_EXPR:
return COMPCODE_ORD;
case UNORDERED_EXPR:
return COMPCODE_UNORD;
case UNLT_EXPR:
return COMPCODE_UNLT;
case UNEQ_EXPR:
return COMPCODE_UNEQ;
case UNLE_EXPR:
return COMPCODE_UNLE;
case UNGT_EXPR:
return COMPCODE_UNGT;
case LTGT_EXPR:
return COMPCODE_LTGT;
case UNGE_EXPR:
return COMPCODE_UNGE;
default:
gcc_unreachable ();
}
}
/* Convert a compcode bit-based encoding of a comparison operator back
to GCC's enum tree_code representation. This function is the
inverse of comparison_to_compcode. */
static enum tree_code
compcode_to_comparison (enum comparison_code code)
{
switch (code)
{
case COMPCODE_LT:
return LT_EXPR;
case COMPCODE_EQ:
return EQ_EXPR;
case COMPCODE_LE:
return LE_EXPR;
case COMPCODE_GT:
return GT_EXPR;
case COMPCODE_NE:
return NE_EXPR;
case COMPCODE_GE:
return GE_EXPR;
case COMPCODE_ORD:
return ORDERED_EXPR;
case COMPCODE_UNORD:
return UNORDERED_EXPR;
case COMPCODE_UNLT:
return UNLT_EXPR;
case COMPCODE_UNEQ:
return UNEQ_EXPR;
case COMPCODE_UNLE:
return UNLE_EXPR;
case COMPCODE_UNGT:
return UNGT_EXPR;
case COMPCODE_LTGT:
return LTGT_EXPR;
case COMPCODE_UNGE:
return UNGE_EXPR;
default:
gcc_unreachable ();
}
}
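/* The point of the bit-based encoding (illustrative summary): LT, EQ,
GT and UNORDERED each own one bit, and a comparison's compcode is
the OR of the outcomes for which it is true, e.g.
COMPCODE_LE == COMPCODE_LT | COMPCODE_EQ. ANDing or ORing two
comparisons on the same operands thus reduces to bitwise AND/OR of
their compcodes, which is what combine_comparisons below relies on. */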
/* Return true if COND1 tests the opposite condition of COND2. */
bool
inverse_conditions_p (const_tree cond1, const_tree cond2)
{
return (COMPARISON_CLASS_P (cond1)
&& COMPARISON_CLASS_P (cond2)
&& (invert_tree_comparison
(TREE_CODE (cond1),
HONOR_NANS (TREE_OPERAND (cond1, 0))) == TREE_CODE (cond2))
&& operand_equal_p (TREE_OPERAND (cond1, 0),
TREE_OPERAND (cond2, 0), 0)
&& operand_equal_p (TREE_OPERAND (cond1, 1),
TREE_OPERAND (cond2, 1), 0));
}
/* Return a tree for the comparison that results from combining, with
the AND or OR operation given by CODE, the two comparisons LCODE and
RCODE applied to the identical operands LL_ARG and LR_ARG. Take into
account the possibility of trapping if the mode has NaNs, and return
NULL_TREE if this makes the transformation invalid. */
tree
combine_comparisons (location_t loc,
enum tree_code code, enum tree_code lcode,
enum tree_code rcode, tree truth_type,
tree ll_arg, tree lr_arg)
{
bool honor_nans = HONOR_NANS (ll_arg);
enum comparison_code lcompcode = comparison_to_compcode (lcode);
enum comparison_code rcompcode = comparison_to_compcode (rcode);
int compcode;
switch (code)
{
case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR:
compcode = lcompcode & rcompcode;
break;
case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR:
compcode = lcompcode | rcompcode;
break;
default:
return NULL_TREE;
}
if (!honor_nans)
{
/* Eliminate unordered comparisons, as well as LTGT and ORD
which are not used unless the mode has NaNs. */
compcode &= ~COMPCODE_UNORD;
if (compcode == COMPCODE_LTGT)
compcode = COMPCODE_NE;
else if (compcode == COMPCODE_ORD)
compcode = COMPCODE_TRUE;
}
else if (flag_trapping_math)
{
/* Check that the original operation and the optimized ones will trap
under the same condition. */
bool ltrap = (lcompcode & COMPCODE_UNORD) == 0
&& (lcompcode != COMPCODE_EQ)
&& (lcompcode != COMPCODE_ORD);
bool rtrap = (rcompcode & COMPCODE_UNORD) == 0
&& (rcompcode != COMPCODE_EQ)
&& (rcompcode != COMPCODE_ORD);
bool trap = (compcode & COMPCODE_UNORD) == 0
&& (compcode != COMPCODE_EQ)
&& (compcode != COMPCODE_ORD);
/* In a short-circuited boolean expression the LHS might be
such that the RHS, if evaluated, will never trap. For
example, in ORD (x, y) && (x < y), we evaluate the RHS only
if neither x nor y is NaN. (This is a mixed blessing: for
example, the expression above will never trap, hence
optimizing it to x < y would be invalid). */
if ((code == TRUTH_ORIF_EXPR && (lcompcode & COMPCODE_UNORD))
|| (code == TRUTH_ANDIF_EXPR && !(lcompcode & COMPCODE_UNORD)))
rtrap = false;
/* If the comparison was short-circuited, and only the RHS
trapped, we may now generate a spurious trap. */
if (rtrap && !ltrap
&& (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
return NULL_TREE;
/* If we changed the conditions that cause a trap, we lose. */
if ((ltrap || rtrap) != trap)
return NULL_TREE;
}
if (compcode == COMPCODE_TRUE)
return constant_boolean_node (true, truth_type);
else if (compcode == COMPCODE_FALSE)
return constant_boolean_node (false, truth_type);
else
{
enum tree_code tcode;
tcode = compcode_to_comparison ((enum comparison_code) compcode);
return fold_build2_loc (loc, tcode, truth_type, ll_arg, lr_arg);
}
}
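/* Worked example (illustrative): combining (a < b) || (a == b) ORs
the compcodes LT | EQ into LE and yields a <= b. Conversely, for
TRUTH_ANDIF_EXPR under -ftrapping-math, ORD (a, b) && (a < b) must
not become plain a < b: the short-circuited original never traps
(the RHS runs only when neither operand is a NaN), while the folded
comparison would trap on a NaN, so the trap check above returns
NULL_TREE. */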
/* Return true if two operands (typically of the same tree node)
are necessarily equal. FLAGS modifies behavior as follows:
If OEP_ONLY_CONST is set, only return true for constants.
This function tests whether the operands are indistinguishable;
it does not test whether they are equal using C's == operation.
The distinction is important for IEEE floating point, because
(1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
(2) two NaNs may be indistinguishable, but NaN!=NaN.
If OEP_ONLY_CONST is unset, a VAR_DECL is considered equal to itself
even though it may hold multiple values during a function.
This is because a GCC tree node guarantees that nothing else is
executed between the evaluation of its "operands" (which may often
be evaluated in arbitrary order). Hence if the operands themselves
don't side-effect, the VAR_DECLs, PARM_DECLs etc... must hold the
same value in each operand/subexpression. Hence leaving OEP_ONLY_CONST
unset means assuming isochronic (or instantaneous) tree equivalence.
Unless comparing arbitrary expression trees, such as from different
statements, this flag can usually be left unset.
If OEP_PURE_SAME is set, then pure functions with identical arguments
are considered the same. It is used when the caller has other ways
to ensure that global memory is unchanged in between.
If OEP_ADDRESS_OF is set, we are actually comparing addresses of objects,
not values of expressions.
If OEP_LEXICOGRAPHIC is set, then also handle expressions with side-effects
such as MODIFY_EXPR, RETURN_EXPR, as well as STATEMENT_LISTs.
If OEP_BITWISE is set, then require the values to be bitwise identical
rather than simply numerically equal. Do not take advantage of things
like math-related flags or undefined behavior; only return true for
values that are provably bitwise identical in all circumstances.
Unless OEP_MATCH_SIDE_EFFECTS is set, the function returns false on
any operand with side effects. This is unnecessarily conservative in the
case we know that arg0 and arg1 are in disjoint code paths (such as in
?: operator). In addition OEP_MATCH_SIDE_EFFECTS is used when comparing
addresses with TREE_CONSTANT flag set so we know that &var == &var
even if var is volatile. */
bool
operand_compare::operand_equal_p (const_tree arg0, const_tree arg1,
unsigned int flags)
{
bool r;
if (verify_hash_value (arg0, arg1, flags, &r))
return r;
STRIP_ANY_LOCATION_WRAPPER (arg0);
STRIP_ANY_LOCATION_WRAPPER (arg1);
/* If either is ERROR_MARK, they aren't equal. */
if (TREE_CODE (arg0) == ERROR_MARK || TREE_CODE (arg1) == ERROR_MARK
|| TREE_TYPE (arg0) == error_mark_node
|| TREE_TYPE (arg1) == error_mark_node)
return false;
/* Similarly, if either does not have a type (like a template id),
they aren't equal. */
if (!TREE_TYPE (arg0) || !TREE_TYPE (arg1))
return false;
/* Bitwise identity makes no sense if the values have different layouts. */
if ((flags & OEP_BITWISE)
&& !tree_nop_conversion_p (TREE_TYPE (arg0), TREE_TYPE (arg1)))
return false;
/* We cannot consider pointers to different address space equal. */
if (POINTER_TYPE_P (TREE_TYPE (arg0))
&& POINTER_TYPE_P (TREE_TYPE (arg1))
&& (TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (arg0)))
!= TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (arg1)))))
return false;
/* Check equality of integer constants before bailing out due to
precision differences. */
if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
{
/* The address of an INTEGER_CST is not defined; check that we did not
forget to drop the OEP_ADDRESS_OF flag. */
gcc_checking_assert (!(flags & OEP_ADDRESS_OF));
return tree_int_cst_equal (arg0, arg1);
}
if (!(flags & OEP_ADDRESS_OF))
{
/* If the two types don't have the same signedness, then we can't consider
them equal. We must check this before the STRIP_NOPS calls
because they may change the signedness of the arguments. As pointers
strictly don't have a signedness, require either two pointers or
two non-pointers as well. */
if (TYPE_UNSIGNED (TREE_TYPE (arg0)) != TYPE_UNSIGNED (TREE_TYPE (arg1))
|| POINTER_TYPE_P (TREE_TYPE (arg0))
!= POINTER_TYPE_P (TREE_TYPE (arg1)))
return false;
/* If the two types don't have the same precision, then it is not safe
to strip NOPs. */
if (element_precision (TREE_TYPE (arg0))
!= element_precision (TREE_TYPE (arg1)))
return false;
STRIP_NOPS (arg0);
STRIP_NOPS (arg1);
}
#if 0
/* FIXME: the Fortran FE currently produces ADDR_EXPR of NOP_EXPR. Enable
the sanity check once the issue is solved. */
else
/* Addresses of conversions and SSA_NAMEs (and many other things)
are not defined. Check that we did not forget to drop the
OEP_ADDRESS_OF/OEP_CONSTANT_ADDRESS_OF flags. */
gcc_checking_assert (!CONVERT_EXPR_P (arg0) && !CONVERT_EXPR_P (arg1)
&& TREE_CODE (arg0) != SSA_NAME);
#endif
/* In case both args are comparisons but with different comparison
code, try to swap the comparison operands of one arg to produce
a match and compare that variant. */
if (TREE_CODE (arg0) != TREE_CODE (arg1)
&& COMPARISON_CLASS_P (arg0)
&& COMPARISON_CLASS_P (arg1))
{
enum tree_code swap_code = swap_tree_comparison (TREE_CODE (arg1));
if (TREE_CODE (arg0) == swap_code)
return operand_equal_p (TREE_OPERAND (arg0, 0),
TREE_OPERAND (arg1, 1), flags)
&& operand_equal_p (TREE_OPERAND (arg0, 1),
TREE_OPERAND (arg1, 0), flags);
}
if (TREE_CODE (arg0) != TREE_CODE (arg1))
{
/* NOP_EXPR and CONVERT_EXPR are considered equal. */
if (CONVERT_EXPR_P (arg0) && CONVERT_EXPR_P (arg1))
;
else if (flags & OEP_ADDRESS_OF)
{
/* If we are interested in comparing addresses ignore
MEM_REF wrappings of the base that can appear just for
TBAA reasons. */
if (TREE_CODE (arg0) == MEM_REF
&& DECL_P (arg1)
&& TREE_CODE (TREE_OPERAND (arg0, 0)) == ADDR_EXPR
&& TREE_OPERAND (TREE_OPERAND (arg0, 0), 0) == arg1
&& integer_zerop (TREE_OPERAND (arg0, 1)))
return true;
else if (TREE_CODE (arg1) == MEM_REF
&& DECL_P (arg0)
&& TREE_CODE (TREE_OPERAND (arg1, 0)) == ADDR_EXPR
&& TREE_OPERAND (TREE_OPERAND (arg1, 0), 0) == arg0
&& integer_zerop (TREE_OPERAND (arg1, 1)))
return true;
return false;
}
else
return false;
}
/* When not checking addresses, this is needed for conversions and for
COMPONENT_REF. Might as well play it safe and always test this. */
if (TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK
|| TREE_CODE (TREE_TYPE (arg1)) == ERROR_MARK
|| (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1))
&& !(flags & OEP_ADDRESS_OF)))
return false;
/* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
We don't care about side effects in that case because the SAVE_EXPR
takes care of that for us. In all other cases, two expressions are
equal if they have no side effects. If we have two identical
expressions with side effects that should be treated the same due
to the only side effects being identical SAVE_EXPR's, that will
be detected in the recursive calls below.
If we are taking an invariant address of two identical objects
they are necessarily equal as well. */
if (arg0 == arg1 && ! (flags & OEP_ONLY_CONST)
&& (TREE_CODE (arg0) == SAVE_EXPR
|| (flags & OEP_MATCH_SIDE_EFFECTS)
|| (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
return true;
/* Next handle constant cases, those for which we can return true even
if OEP_ONLY_CONST is set. */
if (TREE_CONSTANT (arg0)