/* Tree inlining.
Copyright (C) 2001-2019 Free Software Foundation, Inc.
Contributed by Alexandre Oliva <aoliva@redhat.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "tree-pretty-print.h"
#include "diagnostic-core.h"
#include "gimple-predict.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "cfganal.h"
#include "tree-iterator.h"
#include "intl.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "except.h"
#include "debug.h"
#include "params.h"
#include "value-prof.h"
#include "cfgloop.h"
#include "builtins.h"
#include "stringpool.h"
#include "attribs.h"
#include "sreal.h"
#include "tree-cfgcleanup.h"
/* I'm not really happy about this, but we need to handle both GIMPLE
   and non-GIMPLE trees. */
/* Inlining, Cloning, Versioning, Parallelization
Inlining: a function body is duplicated, but the PARM_DECLs are
remapped into VAR_DECLs, and non-void RETURN_EXPRs become
MODIFY_EXPRs that store to a dedicated returned-value variable.
The duplicated eh_region info of the copy will later be appended
to the info for the caller; the eh_region info in copied throwing
statements and RESX statements are adjusted accordingly.
Cloning: (only in C++) We have one body for a con/de/structor, and
multiple function decls, each with a unique parameter list.
Duplicate the body, using the given splay tree; some parameters
will become constants (like 0 or 1).
Versioning: a function body is duplicated, but the result is a new
   function rather than being inserted into the blocks of an existing
   function as with inlining. Some parameters will become constants.
Parallelization: a region of a function is duplicated resulting in
a new function. Variables may be replaced with complex expressions
to enable shared variable semantics.
All of these will simultaneously look up any callgraph edges. If
we're going to inline the duplicated function body, and the given
function has some cloned callgraph nodes (one for each place this
function will be inlined) those callgraph edges will be duplicated.
If we're cloning the body, those callgraph edges will be
updated to point into the new body. (Note that the original
callgraph node and edge list will not be altered.)
See the CALL_EXPR handling case in copy_tree_body_r (). */
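/* An illustrative sketch (not actual compiler output): inlining

     int callee (int x) { return x + 1; }
     int caller (void) { return callee (2); }

   conceptually remaps the PARM_DECL x into a fresh local and turns the
   callee's RETURN_EXPR into an assignment to a returned-value variable,
   roughly

     int caller (void) { int x.1 = 2; int retval.2;
                         retval.2 = x.1 + 1; return retval.2; }

   The temporaries x.1 and retval.2 are hypothetical names; the actual
   copy is performed statement by statement by the machinery below. */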
/* To Do:
o In order to make inlining-on-trees work, we pessimized
function-local static constants. In particular, they are now
always output, even when not addressed. Fix this by treating
function-local static constants just like global static
constants; the back-end already knows not to output them if they
are not needed.
o Provide heuristics to clamp inlining of recursive template
calls? */
/* Weights that estimate_num_insns uses to estimate the size of the
produced code. */
eni_weights eni_size_weights;
/* Weights that estimate_num_insns uses to estimate the time necessary
to execute the produced code. */
eni_weights eni_time_weights;
/* Prototypes. */
static tree declare_return_variable (copy_body_data *, tree, tree,
basic_block);
static void remap_block (tree *, copy_body_data *);
static void copy_bind_expr (tree *, int *, copy_body_data *);
static void declare_inline_vars (tree, tree);
static void remap_save_expr (tree *, hash_map<tree, tree> *, int *);
static void prepend_lexical_block (tree current_block, tree new_block);
static tree copy_decl_to_var (tree, copy_body_data *);
static tree copy_result_decl_to_var (tree, copy_body_data *);
static tree copy_decl_maybe_to_var (tree, copy_body_data *);
static gimple_seq remap_gimple_stmt (gimple *, copy_body_data *);
static void insert_init_stmt (copy_body_data *, basic_block, gimple *);
/* Insert a tree->tree mapping for ID. Although the name suggests that
   the trees should be variables, this map is used for more than that. */
void
insert_decl_map (copy_body_data *id, tree key, tree value)
{
id->decl_map->put (key, value);
/* Always insert an identity map as well. If we see this same new
node again, we won't want to duplicate it a second time. */
if (key != value)
id->decl_map->put (value, value);
}
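/* A hedged usage sketch: after

     insert_decl_map (id, old_parm, new_var);

   both id->decl_map->get (old_parm) and id->decl_map->get (new_var)
   yield new_var, so re-walking an already-remapped tree maps new_var
   to itself instead of producing yet another copy. */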
/* Insert a tree->tree mapping for ID. This is only used for
variables. */
static void
insert_debug_decl_map (copy_body_data *id, tree key, tree value)
{
if (!gimple_in_ssa_p (id->src_cfun))
return;
if (!opt_for_fn (id->dst_fn, flag_var_tracking_assignments))
return;
if (!target_for_debug_bind (key))
return;
gcc_assert (TREE_CODE (key) == PARM_DECL);
gcc_assert (VAR_P (value));
if (!id->debug_map)
id->debug_map = new hash_map<tree, tree>;
id->debug_map->put (key, value);
}
/* If nonzero, we're remapping the contents of inlined debug
statements. If negative, an error has occurred, such as a
reference to a variable that isn't available in the inlined
context. */
static int processing_debug_stmt = 0;
/* Construct new SSA name for old NAME. ID is the inline context. */
static tree
remap_ssa_name (tree name, copy_body_data *id)
{
tree new_tree, var;
tree *n;
gcc_assert (TREE_CODE (name) == SSA_NAME);
n = id->decl_map->get (name);
if (n)
return unshare_expr (*n);
if (processing_debug_stmt)
{
if (SSA_NAME_IS_DEFAULT_DEF (name)
&& TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL
&& id->entry_bb == NULL
&& single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)))
{
tree vexpr = make_node (DEBUG_EXPR_DECL);
gimple *def_temp;
gimple_stmt_iterator gsi;
tree val = SSA_NAME_VAR (name);
n = id->decl_map->get (val);
if (n != NULL)
val = *n;
if (TREE_CODE (val) != PARM_DECL
&& !(VAR_P (val) && DECL_ABSTRACT_ORIGIN (val)))
{
processing_debug_stmt = -1;
return name;
}
n = id->decl_map->get (val);
if (n && TREE_CODE (*n) == DEBUG_EXPR_DECL)
return *n;
def_temp = gimple_build_debug_source_bind (vexpr, val, NULL);
DECL_ARTIFICIAL (vexpr) = 1;
TREE_TYPE (vexpr) = TREE_TYPE (name);
SET_DECL_MODE (vexpr, DECL_MODE (SSA_NAME_VAR (name)));
gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
insert_decl_map (id, val, vexpr);
return vexpr;
}
processing_debug_stmt = -1;
return name;
}
/* Remap anonymous SSA names or SSA names of anonymous decls. */
var = SSA_NAME_VAR (name);
if (!var
|| (!SSA_NAME_IS_DEFAULT_DEF (name)
&& VAR_P (var)
&& !VAR_DECL_IS_VIRTUAL_OPERAND (var)
&& DECL_ARTIFICIAL (var)
&& DECL_IGNORED_P (var)
&& !DECL_NAME (var)))
{
struct ptr_info_def *pi;
new_tree = make_ssa_name (remap_type (TREE_TYPE (name), id));
if (!var && SSA_NAME_IDENTIFIER (name))
SET_SSA_NAME_VAR_OR_IDENTIFIER (new_tree, SSA_NAME_IDENTIFIER (name));
insert_decl_map (id, name, new_tree);
SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_tree)
= SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name);
/* At least IPA points-to info can be directly transferred. */
if (id->src_cfun->gimple_df
&& id->src_cfun->gimple_df->ipa_pta
&& POINTER_TYPE_P (TREE_TYPE (name))
&& (pi = SSA_NAME_PTR_INFO (name))
&& !pi->pt.anything)
{
struct ptr_info_def *new_pi = get_ptr_info (new_tree);
new_pi->pt = pi->pt;
}
/* So can range-info. */
if (!POINTER_TYPE_P (TREE_TYPE (name))
&& SSA_NAME_RANGE_INFO (name))
duplicate_ssa_name_range_info (new_tree, SSA_NAME_RANGE_TYPE (name),
SSA_NAME_RANGE_INFO (name));
return new_tree;
}
/* Do not set DEF_STMT yet, as the statement is not copied yet. We do
   that in copy_bb. */
new_tree = remap_decl (var, id);
/* We might've substituted a constant or another SSA_NAME for the
   variable. Replace the SSA name representing the RESULT_DECL by the
   variable during inlining: this saves us from having to introduce a
   PHI node in case the return value is only partly initialized. */
if ((VAR_P (new_tree) || TREE_CODE (new_tree) == PARM_DECL)
&& (!SSA_NAME_VAR (name)
|| TREE_CODE (SSA_NAME_VAR (name)) != RESULT_DECL
|| !id->transform_return_to_modify))
{
struct ptr_info_def *pi;
new_tree = make_ssa_name (new_tree);
insert_decl_map (id, name, new_tree);
SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_tree)
= SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name);
/* At least IPA points-to info can be directly transferred. */
if (id->src_cfun->gimple_df
&& id->src_cfun->gimple_df->ipa_pta
&& POINTER_TYPE_P (TREE_TYPE (name))
&& (pi = SSA_NAME_PTR_INFO (name))
&& !pi->pt.anything)
{
struct ptr_info_def *new_pi = get_ptr_info (new_tree);
new_pi->pt = pi->pt;
}
/* So can range-info. */
if (!POINTER_TYPE_P (TREE_TYPE (name))
&& SSA_NAME_RANGE_INFO (name))
duplicate_ssa_name_range_info (new_tree, SSA_NAME_RANGE_TYPE (name),
SSA_NAME_RANGE_INFO (name));
if (SSA_NAME_IS_DEFAULT_DEF (name))
{
/* By inlining a function having an uninitialized variable, we might
   extend its lifetime (the variable might get reused). This can cause
   an ICE if we end up extending the lifetime of an SSA name across an
   abnormal edge, and it also increases register pressure. We simply
   initialize all uninitialized vars to 0, except when we are inlining
   into the very first BB. We could avoid this for all BBs that are
   not inside strongly connected regions of the CFG, but that is
   expensive to test. */
if (id->entry_bb
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)
&& (!SSA_NAME_VAR (name)
|| TREE_CODE (SSA_NAME_VAR (name)) != PARM_DECL)
&& (id->entry_bb != EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun),
0)->dest
|| EDGE_COUNT (id->entry_bb->preds) != 1))
{
gimple_stmt_iterator gsi = gsi_last_bb (id->entry_bb);
gimple *init_stmt;
tree zero = build_zero_cst (TREE_TYPE (new_tree));
init_stmt = gimple_build_assign (new_tree, zero);
gsi_insert_after (&gsi, init_stmt, GSI_NEW_STMT);
SSA_NAME_IS_DEFAULT_DEF (new_tree) = 0;
}
else
{
SSA_NAME_DEF_STMT (new_tree) = gimple_build_nop ();
set_ssa_default_def (cfun, SSA_NAME_VAR (new_tree), new_tree);
}
}
}
else
insert_decl_map (id, name, new_tree);
return new_tree;
}
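/* Hypothetical illustration of the zero-initialization above: if the
   callee read an uninitialized local through a default definition i_3,
   and we are not inlining into the very first BB, the copy gets

     i.7_9 = 0;

   appended to ID->ENTRY_BB (the names are made up), so the copied name
   has a real definition and cannot have its lifetime stretched across
   an abnormal edge as a default def. */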
/* Remap DECL during the copying of the BLOCK tree for the function. */
tree
remap_decl (tree decl, copy_body_data *id)
{
tree *n;
/* We only remap local variables in the current function. */
/* See if we have remapped this declaration. */
n = id->decl_map->get (decl);
if (!n && processing_debug_stmt)
{
processing_debug_stmt = -1;
return decl;
}
/* When remapping a type within copy_gimple_seq_and_replace_locals, all
necessary DECLs have already been remapped and we do not want to duplicate
a decl coming from outside of the sequence we are copying. */
if (!n
&& id->prevent_decl_creation_for_types
&& id->remapping_type_depth > 0
&& (VAR_P (decl) || TREE_CODE (decl) == PARM_DECL))
return decl;
/* If we didn't already have an equivalent for this declaration, create one
now. */
if (!n)
{
/* Make a copy of the variable or label. */
tree t = id->copy_decl (decl, id);
/* Remember it, so that if we encounter this local entity again
we can reuse this copy. Do this early because remap_type may
need this decl for TYPE_STUB_DECL. */
insert_decl_map (id, decl, t);
if (!DECL_P (t))
return t;
/* Remap types, if necessary. */
TREE_TYPE (t) = remap_type (TREE_TYPE (t), id);
if (TREE_CODE (t) == TYPE_DECL)
{
DECL_ORIGINAL_TYPE (t) = remap_type (DECL_ORIGINAL_TYPE (t), id);
/* Preserve the invariant that DECL_ORIGINAL_TYPE != TREE_TYPE,
which is enforced in gen_typedef_die when DECL_ABSTRACT_ORIGIN
is not set on the TYPE_DECL, for example in LTO mode. */
if (DECL_ORIGINAL_TYPE (t) == TREE_TYPE (t))
{
tree x = build_variant_type_copy (TREE_TYPE (t));
TYPE_STUB_DECL (x) = TYPE_STUB_DECL (TREE_TYPE (t));
TYPE_NAME (x) = TYPE_NAME (TREE_TYPE (t));
DECL_ORIGINAL_TYPE (t) = x;
}
}
/* Remap sizes as necessary. */
walk_tree (&DECL_SIZE (t), copy_tree_body_r, id, NULL);
walk_tree (&DECL_SIZE_UNIT (t), copy_tree_body_r, id, NULL);
/* If fields, do likewise for offset and qualifier. */
if (TREE_CODE (t) == FIELD_DECL)
{
walk_tree (&DECL_FIELD_OFFSET (t), copy_tree_body_r, id, NULL);
if (TREE_CODE (DECL_CONTEXT (t)) == QUAL_UNION_TYPE)
walk_tree (&DECL_QUALIFIER (t), copy_tree_body_r, id, NULL);
}
return t;
}
if (id->do_not_unshare)
return *n;
else
return unshare_expr (*n);
}
static tree
remap_type_1 (tree type, copy_body_data *id)
{
tree new_tree, t;
/* We do need a copy. Build and register it now. If this is a pointer or
reference type, remap the designated type and make a new pointer or
reference type. */
if (TREE_CODE (type) == POINTER_TYPE)
{
new_tree = build_pointer_type_for_mode (remap_type (TREE_TYPE (type), id),
TYPE_MODE (type),
TYPE_REF_CAN_ALIAS_ALL (type));
if (TYPE_ATTRIBUTES (type) || TYPE_QUALS (type))
new_tree = build_type_attribute_qual_variant (new_tree,
TYPE_ATTRIBUTES (type),
TYPE_QUALS (type));
insert_decl_map (id, type, new_tree);
return new_tree;
}
else if (TREE_CODE (type) == REFERENCE_TYPE)
{
new_tree = build_reference_type_for_mode (remap_type (TREE_TYPE (type), id),
TYPE_MODE (type),
TYPE_REF_CAN_ALIAS_ALL (type));
if (TYPE_ATTRIBUTES (type) || TYPE_QUALS (type))
new_tree = build_type_attribute_qual_variant (new_tree,
TYPE_ATTRIBUTES (type),
TYPE_QUALS (type));
insert_decl_map (id, type, new_tree);
return new_tree;
}
else
new_tree = copy_node (type);
insert_decl_map (id, type, new_tree);
/* This is a new type, not a copy of an old type. Need to reassociate
variants. We can handle everything except the main variant lazily. */
t = TYPE_MAIN_VARIANT (type);
if (type != t)
{
t = remap_type (t, id);
TYPE_MAIN_VARIANT (new_tree) = t;
TYPE_NEXT_VARIANT (new_tree) = TYPE_NEXT_VARIANT (t);
TYPE_NEXT_VARIANT (t) = new_tree;
}
else
{
TYPE_MAIN_VARIANT (new_tree) = new_tree;
TYPE_NEXT_VARIANT (new_tree) = NULL;
}
if (TYPE_STUB_DECL (type))
TYPE_STUB_DECL (new_tree) = remap_decl (TYPE_STUB_DECL (type), id);
/* Lazily create pointer and reference types. */
TYPE_POINTER_TO (new_tree) = NULL;
TYPE_REFERENCE_TO (new_tree) = NULL;
/* Copy all types that may contain references to local variables; be sure to
preserve sharing between the type and its main variant when possible. */
switch (TREE_CODE (new_tree))
{
case INTEGER_TYPE:
case REAL_TYPE:
case FIXED_POINT_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
if (TYPE_MAIN_VARIANT (new_tree) != new_tree)
{
gcc_checking_assert (TYPE_MIN_VALUE (type) == TYPE_MIN_VALUE (TYPE_MAIN_VARIANT (type)));
gcc_checking_assert (TYPE_MAX_VALUE (type) == TYPE_MAX_VALUE (TYPE_MAIN_VARIANT (type)));
TYPE_MIN_VALUE (new_tree) = TYPE_MIN_VALUE (TYPE_MAIN_VARIANT (new_tree));
TYPE_MAX_VALUE (new_tree) = TYPE_MAX_VALUE (TYPE_MAIN_VARIANT (new_tree));
}
else
{
t = TYPE_MIN_VALUE (new_tree);
if (t && TREE_CODE (t) != INTEGER_CST)
walk_tree (&TYPE_MIN_VALUE (new_tree), copy_tree_body_r, id, NULL);
t = TYPE_MAX_VALUE (new_tree);
if (t && TREE_CODE (t) != INTEGER_CST)
walk_tree (&TYPE_MAX_VALUE (new_tree), copy_tree_body_r, id, NULL);
}
return new_tree;
case FUNCTION_TYPE:
if (TYPE_MAIN_VARIANT (new_tree) != new_tree
&& TREE_TYPE (type) == TREE_TYPE (TYPE_MAIN_VARIANT (type)))
TREE_TYPE (new_tree) = TREE_TYPE (TYPE_MAIN_VARIANT (new_tree));
else
TREE_TYPE (new_tree) = remap_type (TREE_TYPE (new_tree), id);
if (TYPE_MAIN_VARIANT (new_tree) != new_tree
&& TYPE_ARG_TYPES (type) == TYPE_ARG_TYPES (TYPE_MAIN_VARIANT (type)))
TYPE_ARG_TYPES (new_tree) = TYPE_ARG_TYPES (TYPE_MAIN_VARIANT (new_tree));
else
walk_tree (&TYPE_ARG_TYPES (new_tree), copy_tree_body_r, id, NULL);
return new_tree;
case ARRAY_TYPE:
if (TYPE_MAIN_VARIANT (new_tree) != new_tree
&& TREE_TYPE (type) == TREE_TYPE (TYPE_MAIN_VARIANT (type)))
TREE_TYPE (new_tree) = TREE_TYPE (TYPE_MAIN_VARIANT (new_tree));
else
TREE_TYPE (new_tree) = remap_type (TREE_TYPE (new_tree), id);
if (TYPE_MAIN_VARIANT (new_tree) != new_tree)
{
gcc_checking_assert (TYPE_DOMAIN (type)
== TYPE_DOMAIN (TYPE_MAIN_VARIANT (type)));
TYPE_DOMAIN (new_tree) = TYPE_DOMAIN (TYPE_MAIN_VARIANT (new_tree));
}
else
{
TYPE_DOMAIN (new_tree) = remap_type (TYPE_DOMAIN (new_tree), id);
/* For array bounds where we have decided not to copy over the bounds
   variable that isn't used in the OpenMP/OpenACC region, change them
   to an uninitialized VAR_DECL temporary. */
if (id->adjust_array_error_bounds
&& TYPE_DOMAIN (new_tree)
&& TYPE_MAX_VALUE (TYPE_DOMAIN (new_tree)) == error_mark_node
&& TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != error_mark_node)
{
tree v = create_tmp_var (TREE_TYPE (TYPE_DOMAIN (new_tree)));
DECL_ATTRIBUTES (v)
= tree_cons (get_identifier ("omp dummy var"), NULL_TREE,
DECL_ATTRIBUTES (v));
TYPE_MAX_VALUE (TYPE_DOMAIN (new_tree)) = v;
}
}
break;
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
if (TYPE_MAIN_VARIANT (type) != type
&& TYPE_FIELDS (type) == TYPE_FIELDS (TYPE_MAIN_VARIANT (type)))
TYPE_FIELDS (new_tree) = TYPE_FIELDS (TYPE_MAIN_VARIANT (new_tree));
else
{
tree f, nf = NULL;
for (f = TYPE_FIELDS (new_tree); f ; f = DECL_CHAIN (f))
{
t = remap_decl (f, id);
DECL_CONTEXT (t) = new_tree;
DECL_CHAIN (t) = nf;
nf = t;
}
TYPE_FIELDS (new_tree) = nreverse (nf);
}
break;
case OFFSET_TYPE:
default:
/* Shouldn't have been thought variable sized. */
gcc_unreachable ();
}
/* All variants of a type share the same size, so use the already
   remapped data. */
if (TYPE_MAIN_VARIANT (new_tree) != new_tree)
{
tree s = TYPE_SIZE (type);
tree mvs = TYPE_SIZE (TYPE_MAIN_VARIANT (type));
tree su = TYPE_SIZE_UNIT (type);
tree mvsu = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
gcc_checking_assert ((TREE_CODE (s) == PLACEHOLDER_EXPR
&& (TREE_CODE (mvs) == PLACEHOLDER_EXPR))
|| s == mvs);
gcc_checking_assert ((TREE_CODE (su) == PLACEHOLDER_EXPR
&& (TREE_CODE (mvsu) == PLACEHOLDER_EXPR))
|| su == mvsu);
TYPE_SIZE (new_tree) = TYPE_SIZE (TYPE_MAIN_VARIANT (new_tree));
TYPE_SIZE_UNIT (new_tree) = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (new_tree));
}
else
{
walk_tree (&TYPE_SIZE (new_tree), copy_tree_body_r, id, NULL);
walk_tree (&TYPE_SIZE_UNIT (new_tree), copy_tree_body_r, id, NULL);
}
return new_tree;
}
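/* Why remapping types matters at all, on a sketch of a VLA:

     void f (int n) { int a[n]; use (a); }

   The ARRAY_TYPE of a has a TYPE_DOMAIN and TYPE_SIZE that refer to a
   local holding n. When f's body is duplicated those references must
   be rewritten in terms of the copies, which is what the walk_tree
   calls on TYPE_MIN_VALUE / TYPE_MAX_VALUE / TYPE_SIZE above do. */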
/* Helper function for remap_type_2, called through walk_tree. */
static tree
remap_type_3 (tree *tp, int *walk_subtrees, void *data)
{
copy_body_data *id = (copy_body_data *) data;
if (TYPE_P (*tp))
*walk_subtrees = 0;
else if (DECL_P (*tp) && remap_decl (*tp, id) != *tp)
return *tp;
return NULL_TREE;
}
/* Return true if TYPE needs to be remapped because remap_decl on any
needed embedded decl returns something other than that decl. */
static bool
remap_type_2 (tree type, copy_body_data *id)
{
tree t;
#define RETURN_TRUE_IF_VAR(T) \
do \
{ \
tree _t = (T); \
if (_t) \
{ \
if (DECL_P (_t) && remap_decl (_t, id) != _t) \
return true; \
if (!TYPE_SIZES_GIMPLIFIED (type) \
&& walk_tree (&_t, remap_type_3, id, NULL)) \
return true; \
} \
} \
while (0)
switch (TREE_CODE (type))
{
case POINTER_TYPE:
case REFERENCE_TYPE:
case FUNCTION_TYPE:
case METHOD_TYPE:
return remap_type_2 (TREE_TYPE (type), id);
case INTEGER_TYPE:
case REAL_TYPE:
case FIXED_POINT_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
RETURN_TRUE_IF_VAR (TYPE_MIN_VALUE (type));
RETURN_TRUE_IF_VAR (TYPE_MAX_VALUE (type));
return false;
case ARRAY_TYPE:
if (remap_type_2 (TREE_TYPE (type), id)
|| (TYPE_DOMAIN (type) && remap_type_2 (TYPE_DOMAIN (type), id)))
return true;
break;
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
if (TREE_CODE (t) == FIELD_DECL)
{
RETURN_TRUE_IF_VAR (DECL_FIELD_OFFSET (t));
RETURN_TRUE_IF_VAR (DECL_SIZE (t));
RETURN_TRUE_IF_VAR (DECL_SIZE_UNIT (t));
if (TREE_CODE (type) == QUAL_UNION_TYPE)
RETURN_TRUE_IF_VAR (DECL_QUALIFIER (t));
}
break;
default:
return false;
}
RETURN_TRUE_IF_VAR (TYPE_SIZE (type));
RETURN_TRUE_IF_VAR (TYPE_SIZE_UNIT (type));
return false;
#undef RETURN_TRUE_IF_VAR
}
tree
remap_type (tree type, copy_body_data *id)
{
tree *node;
tree tmp;
if (type == NULL)
return type;
/* See if we have remapped this type. */
node = id->decl_map->get (type);
if (node)
return *node;
/* The type only needs remapping if it's variably modified. */
if (! variably_modified_type_p (type, id->src_fn)
/* Don't remap if the copy_decl method doesn't always return a new
   decl and, for all embedded decls, returns the passed-in decl. */
|| (id->dont_remap_vla_if_no_change && !remap_type_2 (type, id)))
{
insert_decl_map (id, type, type);
return type;
}
id->remapping_type_depth++;
tmp = remap_type_1 (type, id);
id->remapping_type_depth--;
return tmp;
}
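/* Sketch of the fast path: a type such as int[10] is not variably
   modified, so it is mapped to itself and the identity is cached in
   ID->DECL_MAP; only types whose size or bounds mention a decl of
   ID->SRC_FN, like the int[n] example above, reach remap_type_1. */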
/* Decide if DECL can be put into BLOCK_NONLOCAL_VARs. */
static bool
can_be_nonlocal (tree decl, copy_body_data *id)
{
/* We cannot duplicate function decls. */
if (TREE_CODE (decl) == FUNCTION_DECL)
return true;
/* Local static vars must be non-local or we get multiple declaration
problems. */
if (VAR_P (decl) && !auto_var_in_fn_p (decl, id->src_fn))
return true;
return false;
}
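/* For example, in

     int f (void) { static int counter; return counter++; }

   counter is a VAR_P that is not auto_var_in_fn_p in f, so it is
   considered non-local: every inlined copy of f keeps referring to the
   single original counter rather than duplicating it. */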
static tree
remap_decls (tree decls, vec<tree, va_gc> **nonlocalized_list,
copy_body_data *id)
{
tree old_var;
tree new_decls = NULL_TREE;
/* Remap its variables. */
for (old_var = decls; old_var; old_var = DECL_CHAIN (old_var))
{
tree new_var;
if (can_be_nonlocal (old_var, id))
{
/* We need to add this variable to the local decls as otherwise
nothing else will do so. */
if (VAR_P (old_var) && ! DECL_EXTERNAL (old_var) && cfun)
add_local_decl (cfun, old_var);
if ((!optimize || debug_info_level > DINFO_LEVEL_TERSE)
&& !DECL_IGNORED_P (old_var)
&& nonlocalized_list)
vec_safe_push (*nonlocalized_list, old_var);
continue;
}
/* Remap the variable. */
new_var = remap_decl (old_var, id);
/* If we didn't remap this variable, we can't mess with its
TREE_CHAIN. If we remapped this variable to the return slot, it's
already declared somewhere else, so don't declare it here. */
if (new_var == id->retvar)
;
else if (!new_var)
{
if ((!optimize || debug_info_level > DINFO_LEVEL_TERSE)
&& !DECL_IGNORED_P (old_var)
&& nonlocalized_list)
vec_safe_push (*nonlocalized_list, old_var);
}
else
{
gcc_assert (DECL_P (new_var));
DECL_CHAIN (new_var) = new_decls;
new_decls = new_var;
/* Also copy value-expressions. */
if (VAR_P (new_var) && DECL_HAS_VALUE_EXPR_P (new_var))
{
tree tem = DECL_VALUE_EXPR (new_var);
bool old_regimplify = id->regimplify;
id->remapping_type_depth++;
walk_tree (&tem, copy_tree_body_r, id, NULL);
id->remapping_type_depth--;
id->regimplify = old_regimplify;
SET_DECL_VALUE_EXPR (new_var, tem);
}
}
}
return nreverse (new_decls);
}
/* Copy the BLOCK to contain remapped versions of the variables therein,
   and hook the new block into the block-tree. */
static void
remap_block (tree *block, copy_body_data *id)
{
tree old_block;
tree new_block;
/* Make the new block. */
old_block = *block;
new_block = make_node (BLOCK);
TREE_USED (new_block) = TREE_USED (old_block);
BLOCK_ABSTRACT_ORIGIN (new_block) = BLOCK_ORIGIN (old_block);
BLOCK_SOURCE_LOCATION (new_block) = BLOCK_SOURCE_LOCATION (old_block);
BLOCK_NONLOCALIZED_VARS (new_block)
= vec_safe_copy (BLOCK_NONLOCALIZED_VARS (old_block));
*block = new_block;
/* Remap its variables. */
BLOCK_VARS (new_block) = remap_decls (BLOCK_VARS (old_block),
&BLOCK_NONLOCALIZED_VARS (new_block),
id);
if (id->transform_lang_insert_block)
id->transform_lang_insert_block (new_block);
/* Remember the remapped block. */
insert_decl_map (id, old_block, new_block);
}
/* Copy the whole block tree and root it in id->block. */
static tree
remap_blocks (tree block, copy_body_data *id)
{
tree t;
tree new_tree = block;
if (!block)
return NULL;
remap_block (&new_tree, id);
gcc_assert (new_tree != block);
for (t = BLOCK_SUBBLOCKS (block); t ; t = BLOCK_CHAIN (t))
prepend_lexical_block (new_tree, remap_blocks (t, id));
/* Blocks are in arbitrary order, but make things slightly prettier by
   not swapping their order when producing the copy. */
BLOCK_SUBBLOCKS (new_tree) = blocks_nreverse (BLOCK_SUBBLOCKS (new_tree));
return new_tree;
}
/* Remap the block tree rooted at BLOCK to nothing. */
static void
remap_blocks_to_null (tree block, copy_body_data *id)
{
tree t;
insert_decl_map (id, block, NULL_TREE);
for (t = BLOCK_SUBBLOCKS (block); t ; t = BLOCK_CHAIN (t))
remap_blocks_to_null (t, id);
}
/* Remap the location info pointed to by LOCUS. */
static location_t
remap_location (location_t locus, copy_body_data *id)
{
if (LOCATION_BLOCK (locus))
{
tree *n = id->decl_map->get (LOCATION_BLOCK (locus));
gcc_assert (n);
if (*n)
return set_block (locus, *n);
}
locus = LOCATION_LOCUS (locus);
if (locus != UNKNOWN_LOCATION && id->block)
return set_block (locus, id->block);
return locus;
}
static void
copy_statement_list (tree *tp)
{
tree_stmt_iterator oi, ni;
tree new_tree;
new_tree = alloc_stmt_list ();
ni = tsi_start (new_tree);
oi = tsi_start (*tp);
TREE_TYPE (new_tree) = TREE_TYPE (*tp);
*tp = new_tree;
for (; !tsi_end_p (oi); tsi_next (&oi))
{
tree stmt = tsi_stmt (oi);
if (TREE_CODE (stmt) == STATEMENT_LIST)
/* This copy is not redundant; tsi_link_after will smash this
STATEMENT_LIST into the end of the one we're building, and we
don't want to do that with the original. */
copy_statement_list (&stmt);
tsi_link_after (&ni, stmt, TSI_CONTINUE_LINKING);
}
}
static void
copy_bind_expr (tree *tp, int *walk_subtrees, copy_body_data *id)
{
tree block = BIND_EXPR_BLOCK (*tp);
/* Copy (and replace) the statement. */
copy_tree_r (tp, walk_subtrees, NULL);
if (block)
{
remap_block (&block, id);
BIND_EXPR_BLOCK (*tp) = block;
}
if (BIND_EXPR_VARS (*tp))
/* This will remap a lot of the same decls again, but this should be
harmless. */
BIND_EXPR_VARS (*tp) = remap_decls (BIND_EXPR_VARS (*tp), NULL, id);
}
/* Create a new gimple_seq by remapping all the statements in BODY
using the inlining information in ID. */
static gimple_seq
remap_gimple_seq (gimple_seq body, copy_body_data *id)
{
gimple_stmt_iterator si;
gimple_seq new_body = NULL;
for (si = gsi_start (body); !gsi_end_p (si); gsi_next (&si))
{
gimple_seq new_stmts = remap_gimple_stmt (gsi_stmt (si), id);
gimple_seq_add_seq (&new_body, new_stmts);
}
return new_body;
}
/* Copy a GIMPLE_BIND statement STMT, remapping all the symbols in its
block using the mapping information in ID. */
static gimple *
copy_gimple_bind (gbind *stmt, copy_body_data *id)
{
gimple *new_bind;
tree new_block, new_vars;
gimple_seq body, new_body;
/* Copy the statement. Note that we purposely don't use copy_stmt
here because we need to remap statements as we copy. */
body = gimple_bind_body (stmt);
new_body = remap_gimple_seq (body, id);
new_block = gimple_bind_block (stmt);
if (new_block)
remap_block (&new_block, id);
/* This will remap a lot of the same decls again, but this should be
harmless. */
new_vars = gimple_bind_vars (stmt);
if (new_vars)
new_vars = remap_decls (new_vars, NULL, id);
new_bind = gimple_build_bind (new_vars, new_body, new_block);
return new_bind;
}
/* Return true if DECL is a parameter or an SSA_NAME for a parameter. */
static bool
is_parm (tree decl)
{
if (TREE_CODE (decl) == SSA_NAME)
{
decl = SSA_NAME_VAR (decl);
if (!decl)
return false;
}
return (TREE_CODE (decl) == PARM_DECL);
}
/* Remap the dependence CLIQUE from the source to the destination function
as specified in ID. */
static unsigned short
remap_dependence_clique (copy_body_data *id, unsigned short clique)
{
if (clique == 0 || processing_debug_stmt)
return 0;
if (!id->dependence_map)
id->dependence_map = new hash_map<dependence_hash, unsigned short>;
bool existed;
unsigned short &newc = id->dependence_map->get_or_insert (clique, &existed);
if (!existed)
{
/* Clique 1 is reserved for local ones set by PTA. */
if (cfun->last_clique == 0)
cfun->last_clique = 1;
newc = ++cfun->last_clique;
}
return newc;
}
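/* A hedged example: suppose the source function tagged some MEM_REFs
   with clique 2 (e.g. derived from restrict qualifiers) and the
   caller's cfun->last_clique is currently 3. The first occurrence of
   clique 2 is translated to the fresh clique 4, and every later
   occurrence reuses that cached mapping, so the disambiguation stays
   internal to the copied body. */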
/* Remap the GIMPLE operand pointed to by *TP. DATA is really a
'struct walk_stmt_info *'. DATA->INFO is a 'copy_body_data *'.
WALK_SUBTREES is used to indicate to walk_gimple_op whether to keep
recursing into the child nodes of *TP. */
static tree
remap_gimple_op_r (tree *tp, int *walk_subtrees, void *data)
{
struct walk_stmt_info *wi_p = (struct walk_stmt_info *) data;
copy_body_data *id = (copy_body_data *) wi_p->info;
tree fn = id->src_fn;
/* For recursive invocations this is no longer the LHS itself. */
bool is_lhs = wi_p->is_lhs;
wi_p->is_lhs = false;
if (TREE_CODE (*tp) == SSA_NAME)
{
*tp = remap_ssa_name (*tp, id);
*walk_subtrees = 0;
if (is_lhs)
SSA_NAME_DEF_STMT (*tp) = wi_p->stmt;
return NULL;
}
else if (auto_var_in_fn_p (*tp, fn))
{
/* Local variables and labels need to be replaced by equivalent
variables. We don't want to copy static variables; there's
only one of those, no matter how many times we inline the
containing function. Similarly for globals from an outer
function. */
tree new_decl;
/* Remap the declaration. */
new_decl = remap_decl (*tp, id);
gcc_assert (new_decl);
/* Replace this variable with the copy. */
STRIP_TYPE_NOPS (new_decl);
/* ??? The C++ frontend uses void * pointer zero to initialize
any other type. This confuses the middle-end type verification.
As cloned bodies do not go through gimplification again the fixup
there doesn't trigger. */
if (TREE_CODE (new_decl) == INTEGER_CST
&& !useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (new_decl)))
new_decl = fold_convert (TREE_TYPE (*tp), new_decl);
*tp = new_decl;
*walk_subtrees = 0;
}
else if (TREE_CODE (*tp) == STATEMENT_LIST)
gcc_unreachable ();
else if (TREE_CODE (*tp) == SAVE_EXPR)
gcc_unreachable ();
else if (TREE_CODE (*tp) == LABEL_DECL
&& (!DECL_CONTEXT (*tp)
|| decl_function_context (*tp) == id->src_fn))
/* These may need to be remapped for EH handling. */
*tp = remap_decl (*tp, id);
else if (TREE_CODE (*tp) == FIELD_DECL)
{
/* If the enclosing record type is variably_modified_type_p, the field
has already been remapped. Otherwise, it need not be. */
tree *n = id->decl_map->get (*tp);
if (n)
*tp = *n;
*walk_subtrees = 0;
}
else if (TYPE_P (*tp))
/* Types may need remapping as well. */
*tp = remap_type (*tp, id);
else if (CONSTANT_CLASS_P (*tp))
{
/* If this is a constant, we have to copy the node iff the type
will be remapped. copy_tree_r will not copy a constant. */
tree new_type = remap_type (TREE_TYPE (*tp), id);
if (new_type == TREE_TYPE (*tp))
*walk_subtrees = 0;
else if (TREE_CODE (*tp) == INTEGER_CST)
*tp = wide_int_to_tree (new_type, wi::to_wide (*tp));
else
{
*tp = copy_node (*tp);
TREE_TYPE (*tp) = new_type;
}
}
else
{
/* Otherwise, just copy the node. Note that copy_tree_r already
knows not to copy VAR_DECLs, etc., so this is safe. */
if (TREE_CODE (*tp) == MEM_REF && !id->do_not_fold)
{
/* We need to re-canonicalize MEM_REFs from inline substitutions
that can happen when a pointer argument is an ADDR_EXPR.
Recurse here manually to allow that. */
tree ptr = TREE_OPERAND (*tp, 0);
tree type = remap_type (TREE_TYPE (*tp), id);
tree old = *tp;
walk_tree (&ptr, remap_gimple_op_r, data, NULL);
*tp = fold_build2 (MEM_REF, type, ptr, TREE_OPERAND (*tp, 1));
TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old);
TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old);
if (MR_DEPENDENCE_CLIQUE (old) != 0)
{
MR_DEPENDENCE_CLIQUE (*tp)
= remap_dependence_clique (id, MR_DEPENDENCE_CLIQUE (old));
MR_DEPENDENCE_BASE (*tp) = MR_DEPENDENCE_BASE (old);
}
/* We cannot propagate the TREE_THIS_NOTRAP flag if we have
remapped a parameter as the property might be valid only
for the parameter itself. */
if (TREE_THIS_NOTRAP (old)
&& (!is_parm (TREE_OPERAND (old, 0))
|| (!id->transform_parameter && is_parm (ptr))))
TREE_THIS_NOTRAP (*tp) = 1;
REF_REVERSE_STORAGE_ORDER (*tp) = REF_REVERSE_STORAGE_ORDER (old);
*walk_subtrees = 0;
return NULL;
}
/* Here is the "usual case". Copy this tree node, and then
tweak some special cases. */
copy_tree_r (tp, walk_subtrees, NULL);
if (TREE_CODE (*tp) != OMP_CLAUSE)
TREE_TYPE (*tp) = remap_type (TREE_TYPE (*tp), id);
if (TREE_CODE (*tp) == TARGET_EXPR && TREE_OPERAND (*tp, 3))
{
/* The copied TARGET_EXPR has never been expanded, even if the
original node was expanded already. */
TREE_OPERAND (*tp, 1) = TREE_OPERAND (*tp, 3);
TREE_OPERAND (*tp, 3) = NULL_TREE;
}
else if (TREE_CODE (*tp) == ADDR_EXPR)
{
/* Variable substitution need not be simple; consider in particular
   the MEM_REF substitution above. Make sure that TREE_CONSTANT and
   friends are up to date. */
int invariant = is_gimple_min_invariant (*tp);
walk_tree (&TREE_OPERAND (*tp, 0), remap_gimple_op_r, data, NULL);
recompute_tree_invariant_for_addr_expr (*tp);
/* If this used to be invariant, but is not any longer,
then regimplification is probably needed. */
if (invariant && !is_gimple_min_invariant (*tp))
id->regimplify = true;
*walk_subtrees = 0;
}
}
/* Update the TREE_BLOCK for the cloned expr. */
if (EXPR_P (*tp))
{
tree new_block = id->remapping_type_depth == 0 ? id->block : NULL;
tree old_block = TREE_BLOCK (*tp);
if (old_block)
{
tree *n;
n = id->decl_map->get (TREE_BLOCK (*tp));
if (n)
new_block = *n;
}
TREE_SET_BLOCK (*tp, new_block);
}
/* Keep iterating. */
return NULL_TREE;
}
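/* Illustration of the MEM_REF re-canonicalization (names hypothetical):
   if the caller passed &a for the pointer parameter p, a copied
   MEM[(int *)p_1] has its pointer operand remapped to &a, and the
   fold_build2 call above can fold the whole reference back to plain a,
   instead of leaving a MEM_REF of an ADDR_EXPR in the IL. */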
/* Called from copy_tree_body via walk_tree. DATA is really a
   `copy_body_data *'. */
tree
copy_tree_body_r (tree *tp, int *walk_subtrees, void *data)
{
copy_body_data *id = (copy_body_data *) data;
tree fn = id->src_fn;
tree new_block;
/* Begin by recognizing trees that we'll completely rewrite for the
inlining context. Our output for these trees is completely
different from our input (e.g. RETURN_EXPR is deleted, and morphs
into an edge). Further down, we'll handle trees that get
duplicated and/or tweaked. */
/* When requested, RETURN_EXPRs should be transformed to just the
contained MODIFY_EXPR. The branch semantics of the return will
be handled elsewhere by manipulating the CFG rather than a statement. */
if (TREE_CODE (*tp) == RETURN_EXPR && id->transform_return_to_modify)
{
tree assignment = TREE_OPERAND (*tp, 0);
/* If we're returning something, just turn that into an
assignment into the equivalent of the original RESULT_DECL.
If the "assignment" is just the result decl, the result
decl has already been set (e.g. a recent "foo (&result_decl,
...)"); just toss the entire RETURN_EXPR. */
if (assignment && TREE_CODE (assignment) == MODIFY_EXPR)
{
/* Replace the RETURN_EXPR with (a copy of) the
MODIFY_EXPR hanging underneath. */
*tp = copy_node (assignment);
}
else /* Else the RETURN_EXPR returns no value. */
{
*tp = NULL;
return (tree) (void *)1;
}
}
else if (TREE_CODE (*tp) == SSA_NAME)
{
*tp = remap_ssa_name (*tp, id);
*walk_subtrees = 0;
return NULL;
}
/* Local variables and labels need to be replaced by equivalent
variables. We don't want to copy static variables; there's only
one of those, no matter how many times we inline the containing
function. Similarly for globals from an outer function. */
else if (auto_var_in_fn_p (*tp, fn))
{
tree new_decl;
/* Remap the declaration. */
new_decl = remap_decl (*tp, id);
gcc_assert (new_decl);
/* Replace this variable with the copy. */
STRIP_TYPE_NOPS (new_decl);
*tp = new_decl;
*walk_subtrees = 0;
}
else if (TREE_CODE (*tp) == STATEMENT_LIST)
copy_statement_list (tp);
else if (TREE_CODE (*tp) == SAVE_EXPR
|| TREE_CODE (*tp) == TARGET_EXPR)
remap_save_expr (tp, id->decl_map, walk_subtrees);
else if (TREE_CODE (*tp) == LABEL_DECL
&& (! DECL_CONTEXT (*tp)
|| decl_function_context (*tp) == id->src_fn))
/* These may need to be remapped for EH handling. */
*tp = remap_decl (*tp, id);
else if (TREE_CODE (*tp) == BIND_EXPR)
copy_bind_expr (tp, walk_subtrees, id);
/* Types may need remapping as well. */
else if (TYPE_P (*tp))
*tp = remap_type (*tp, id);
/* If this is a constant, we have to copy the node iff the type will be
remapped. copy_tree_r will not copy a constant. */
else if (CONSTANT_CLASS_P (*tp))
{
tree new_type = remap_type (TREE_TYPE (*tp), id);
if (new_type == TREE_TYPE (*tp))
*walk_subtrees = 0;
else if (TREE_CODE (*tp) == INTEGER_CST)
*tp = wide_int_to_tree (new_type, wi::to_wide (*tp));
else
{
*tp = copy_node (*tp);
TREE_TYPE (*tp) = new_type;
}
}
/* Otherwise, just copy the node. Note that copy_tree_r already
knows not to copy VAR_DECLs, etc., so this is safe. */
else
{
/* Here we handle trees that are not completely rewritten.
First we detect some inlining-induced bogosities for
discarding. */
if (TREE_CODE (*tp) == MODIFY_EXPR
&& TREE_OPERAND (*tp, 0) == TREE_OPERAND (*tp, 1)
&& (auto_var_in_fn_p (TREE_OPERAND (*tp, 0), fn)))
{
/* Some assignments VAR = VAR; don't generate any rtl code
and thus don't count as variable modification. Avoid
keeping bogosities like 0 = 0. */
tree decl = TREE_OPERAND (*tp, 0), value;
tree *n;
n = id->decl_map->get (decl);
if (n)
{
value = *n;
STRIP_TYPE_NOPS (value);
if (TREE_CONSTANT (value) || TREE_READONLY (value))
{
*tp = build_empty_stmt (EXPR_LOCATION (*tp));
return copy_tree_body_r (tp, walk_subtrees, data);
}
}
}
else if (TREE_CODE (*tp) == INDIRECT_REF)
{
/* Get rid of *& from inline substitutions that can happen when a
pointer argument is an ADDR_EXPR. */
tree decl = TREE_OPERAND (*tp, 0);
tree *n = id->decl_map->get (decl);
if (n)
{
/* If we happen to get an ADDR_EXPR in n->value, strip
it manually here as we'll eventually get ADDR_EXPRs
which lie about the types pointed to. In this case
build_fold_indirect_ref wouldn't strip the INDIRECT_REF,
but we absolutely rely on that. As fold_indirect_ref
does other useful transformations, try that first, though. */
tree type = TREE_TYPE (*tp);
tree ptr = id->do_not_unshare ? *n : unshare_expr (*n);
tree old = *tp;
*tp = id->do_not_fold ? NULL : gimple_fold_indirect_ref (ptr);
if (! *tp)
{
type = remap_type (type, id);
if (TREE_CODE (ptr) == ADDR_EXPR && !id->do_not_fold)
{
*tp
= fold_indirect_ref_1 (EXPR_LOCATION (ptr), type, ptr);
/* ??? We should either assert here or build
a VIEW_CONVERT_EXPR instead of blindly leaking
incompatible types to our IL. */
if (! *tp)
*tp = TREE_OPERAND (ptr, 0);
}
else
{
*tp = build1 (INDIRECT_REF, type, ptr);
TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old);
TREE_READONLY (*tp) = TREE_READONLY (old);
/* We cannot propagate the TREE_THIS_NOTRAP flag if we
have remapped a parameter as the property might be
valid only for the parameter itself. */
if (TREE_THIS_NOTRAP (old)
&& (!is_parm (TREE_OPERAND (old, 0))
|| (!id->transform_parameter && is_parm (ptr))))
TREE_THIS_NOTRAP (*tp) = 1;
}
}
*walk_subtrees = 0;
return NULL;
}
}
else if (TREE_CODE (*tp) == MEM_REF && !id->do_not_fold)
{
/* We need to re-canonicalize MEM_REFs from inline substitutions
that can happen when a pointer argument is an ADDR_EXPR.
Recurse here manually to allow that. */
tree ptr = TREE_OPERAND (*tp, 0);
tree type = remap_type (TREE_TYPE (*tp), id);
tree old = *tp;
walk_tree (&ptr, copy_tree_body_r, data, NULL);
*tp = fold_build2 (MEM_REF, type, ptr, TREE_OPERAND (*tp, 1));
TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old);
TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old);
if (MR_DEPENDENCE_CLIQUE (old) != 0)
{
MR_DEPENDENCE_CLIQUE (*tp)
= remap_dependence_clique (id, MR_DEPENDENCE_CLIQUE (old));
MR_DEPENDENCE_BASE (*tp) = MR_DEPENDENCE_BASE (old);
}
/* We cannot propagate the TREE_THIS_NOTRAP flag if we have
remapped a parameter as the property might be valid only
for the parameter itself. */
if (TREE_THIS_NOTRAP (old)
&& (!is_parm (TREE_OPERAND (old, 0))
|| (!id->transform_parameter && is_parm (ptr))))
TREE_THIS_NOTRAP (*tp) = 1;
REF_REVERSE_STORAGE_ORDER (*tp) = REF_REVERSE_STORAGE_ORDER (old);
*walk_subtrees = 0;
return NULL;
}
/* Here is the "usual case". Copy this tree node, and then
tweak some special cases. */
copy_tree_r (tp, walk_subtrees, NULL);
/* If EXPR has a block defined, map it to the newly constructed block.
   When inlining we want EXPRs without a block to appear in the block
   of the function call if we are not remapping a type. */
if (EXPR_P (*tp))
{
new_block = id->remapping_type_depth == 0 ? id->block : NULL;
if (TREE_BLOCK (*tp))
{
tree *n;
n = id->decl_map->get (TREE_BLOCK (*tp));
if (n)
new_block = *n;
}
TREE_SET_BLOCK (*tp, new_block);
}
if (TREE_CODE (*tp) != OMP_CLAUSE)
TREE_TYPE (*tp) = remap_type (TREE_TYPE (*tp), id);
/* The copied TARGET_EXPR has never been expanded, even if the
original node was expanded already. */
if (TREE_CODE (*tp) == TARGET_EXPR && TREE_OPERAND (*tp, 3))
{
TREE_OPERAND (*tp, 1) = TREE_OPERAND (*tp, 3);
TREE_OPERAND (*tp, 3) = NULL_TREE;
}
/* Variable substitution need not be simple; consider in particular
   the INDIRECT_REF substitution above. Make sure that TREE_CONSTANT
   and friends are up to date. */
else if (TREE_CODE (*tp) == ADDR_EXPR)
{
int invariant = is_gimple_min_invariant (*tp);
walk_tree (&TREE_OPERAND (*tp, 0), copy_tree_body_r, id, NULL);
/* Handle the case where we substituted an INDIRECT_REF
into the operand of the ADDR_EXPR. */
if (TREE_CODE (TREE_OPERAND (*tp, 0)) == INDIRECT_REF
&& !id->do_not_fold)
{
tree t = TREE_OPERAND (TREE_OPERAND (*tp, 0), 0);
if (TREE_TYPE (t) != TREE_TYPE (*tp))
t = fold_convert (remap_type (TREE_TYPE (*tp), id), t);
*tp = t;
}
else
recompute_tree_invariant_for_addr_expr (*tp);
/* If this used to be invariant, but is not any longer,
then regimplification is probably needed. */
if (invariant && !is_gimple_min_invariant (*tp))
id->regimplify = true;
*walk_subtrees = 0;
}
}
/* Keep iterating. */
return NULL_TREE;
}
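/* The GENERIC-level analogue (hypothetical): when the argument &x is
   substituted for the parameter p, a copied *p would otherwise survive
   as *&x; the INDIRECT_REF handling above folds it back to plain x. */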
/* Helper for remap_gimple_stmt. Given an EH region number for the
source function, map that to the duplicate EH region number in
the destination function. */
static int
remap_eh_region_nr (int old_nr, copy_body_data *id)
{
eh_region old_r, new_r;
old_r = get_eh_region_from_number_fn (id->src_cfun, old_nr);
new_r = static_cast<eh_region> (*id->eh_map->get (old_r));
return new_r->index;
}
/* Similar, but operate on INTEGER_CSTs. */
static tree
remap_eh_region_tree_nr (tree old_t_nr, copy_body_data *id)
{
int old_nr, new_nr;
old_nr = tree_to_shwi (old_t_nr);
new_nr = remap_eh_region_nr (old_nr, id);
return build_int_cst (integer_type_node, new_nr);
}
/* Helper for copy_bb. Remap statement STMT using the inlining
information in ID. Return the new statement copy. */
static gimple_seq
remap_gimple_stmt (gimple *stmt, copy_body_data *id)
{
gimple *copy = NULL;
struct walk_stmt_info wi;
bool skip_first = false;
gimple_seq stmts = NULL;
if (is_gimple_debug (stmt)
&& (gimple_debug_nonbind_marker_p (stmt)
? !DECL_STRUCT_FUNCTION (id->dst_fn)->debug_nonbind_markers
: !opt_for_fn (id->dst_fn, flag_var_tracking_assignments)))
return NULL;
/* Begin by recognizing trees that we'll completely rewrite for the
inlining context. Our output for these trees is completely
different from our input (e.g. RETURN_EXPR is deleted and morphs
into an edge). Further down, we'll handle trees that get
duplicated and/or tweaked. */
/* When requested, GIMPLE_RETURN should be transformed to just the
contained GIMPLE_ASSIGN. The branch semantics of the return will
be handled elsewhere by manipulating the CFG rather than the
statement. */
if (gimple_code (stmt) == GIMPLE_RETURN && id->transform_return_to_modify)
{
tree retval = gimple_return_retval (as_a <greturn *> (stmt));
/* If we're returning something, just turn that into an
assignment to the equivalent of the original RESULT_DECL.
If RETVAL is just the result decl, the result decl has
already been set (e.g. a recent "foo (&result_decl, ...)");
just toss the entire GIMPLE_RETURN. */
if (retval
&& (TREE_CODE (retval) != RESULT_DECL
&& (TREE_CODE (retval) != SSA_NAME
|| ! SSA_NAME_VAR (retval)
|| TREE_CODE (SSA_NAME_VAR (retval)) != RESULT_DECL)))
{
copy = gimple_build_assign (id->do_not_unshare
? id->retvar : unshare_expr (id->retvar),
retval);
/* id->retvar is already substituted. Skip it on later remapping. */
skip_first = true;
}
else
return NULL;
}
else if (gimple_has_substatements (stmt))
{
gimple_seq s1, s2;
/* When cloning bodies from the C++ front end, we will be handed bodies
in High GIMPLE form. Handle here all the High GIMPLE statements that
have embedded statements. */
switch (gimple_code (stmt))
{
case GIMPLE_BIND:
copy = copy_gimple_bind (as_a <gbind *> (stmt), id);
break;
case GIMPLE_CATCH:
{
gcatch *catch_stmt = as_a <gcatch *> (stmt);
s1 = remap_gimple_seq (gimple_catch_handler (catch_stmt), id);
copy = gimple_build_catch (gimple_catch_types (catch_stmt), s1);
}
break;
case GIMPLE_EH_FILTER:
s1 = remap_gimple_seq (gimple_eh_filter_failure (stmt), id);
copy = gimple_build_eh_filter (gimple_eh_filter_types (stmt), s1);
break;
case GIMPLE_TRY:
s1 = remap_gimple_seq (gimple_try_eval (stmt), id);
s2 = remap_gimple_seq (gimple_try_cleanup (stmt), id);
copy = gimple_build_try (s1, s2, gimple_try_kind (stmt));
break;
case GIMPLE_WITH_CLEANUP_EXPR:
s1 = remap_gimple_seq (gimple_wce_cleanup (stmt), id);
copy = gimple_build_wce (s1);
break;
case GIMPLE_OMP_PARALLEL:
{
gomp_parallel *omp_par_stmt = as_a <gomp_parallel *> (stmt);
s1 = remap_gimple_seq (gimple_omp_body (omp_par_stmt), id);
copy = gimple_build_omp_parallel
(s1,
gimple_omp_parallel_clauses (omp_par_stmt),
gimple_omp_parallel_child_fn (omp_par_stmt),
gimple_omp_parallel_data_arg (omp_par_stmt));
}
break;
case GIMPLE_OMP_TASK:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
copy = gimple_build_omp_task
(s1,
gimple_omp_task_clauses (stmt),
gimple_omp_task_child_fn (stmt),
gimple_omp_task_data_arg (stmt),
gimple_omp_task_copy_fn (stmt),
gimple_omp_task_arg_size (stmt),
gimple_omp_task_arg_align (stmt));
break;
case GIMPLE_OMP_FOR:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
s2 = remap_gimple_seq (gimple_omp_for_pre_body (stmt), id);
copy = gimple_build_omp_for (s1, gimple_omp_for_kind (stmt),
gimple_omp_for_clauses (stmt),
gimple_omp_for_collapse (stmt), s2);
{
size_t i;
for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
{
gimple_omp_for_set_index (copy, i,
gimple_omp_for_index (stmt, i));
gimple_omp_for_set_initial (copy, i,
gimple_omp_for_initial (stmt, i));
gimple_omp_for_set_final (copy, i,
gimple_omp_for_final (stmt, i));
gimple_omp_for_set_incr (copy, i,
gimple_omp_for_incr (stmt, i));
gimple_omp_for_set_cond (copy, i,
gimple_omp_for_cond (stmt, i));
}
}
break;
case GIMPLE_OMP_MASTER:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
copy = gimple_build_omp_master (s1);
break;
case GIMPLE_OMP_TASKGROUP:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
copy = gimple_build_omp_taskgroup
(s1, gimple_omp_taskgroup_clauses (stmt));
break;
case GIMPLE_OMP_ORDERED:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
copy = gimple_build_omp_ordered
(s1,
gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt)));
break;
case GIMPLE_OMP_SECTION:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
copy = gimple_build_omp_section (s1);
break;
case GIMPLE_OMP_SECTIONS:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
copy = gimple_build_omp_sections
(s1, gimple_omp_sections_clauses (stmt));
break;
case GIMPLE_OMP_SINGLE:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
copy = gimple_build_omp_single
(s1, gimple_omp_single_clauses (stmt));
break;
case GIMPLE_OMP_TARGET:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
copy = gimple_build_omp_target
(s1, gimple_omp_target_kind (stmt),
gimple_omp_target_clauses (stmt));
break;
case GIMPLE_OMP_TEAMS:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
copy = gimple_build_omp_teams
(s1, gimple_omp_teams_clauses (stmt));
break;
case GIMPLE_OMP_CRITICAL:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
copy = gimple_build_omp_critical (s1,
gimple_omp_critical_name
(as_a <gomp_critical *> (stmt)),
gimple_omp_critical_clauses
(as_a <gomp_critical *> (stmt)));
break;
case GIMPLE_TRANSACTION:
{
gtransaction *old_trans_stmt = as_a <gtransaction *> (stmt);
gtransaction *new_trans_stmt;
s1 = remap_gimple_seq (gimple_transaction_body (old_trans_stmt),
id);
copy = new_trans_stmt = gimple_build_transaction (s1);
gimple_transaction_set_subcode (new_trans_stmt,
gimple_transaction_subcode (old_trans_stmt));
gimple_transaction_set_label_norm (new_trans_stmt,
gimple_transaction_label_norm (old_trans_stmt));
gimple_transaction_set_label_uninst (new_trans_stmt,
gimple_transaction_label_uninst (old_trans_stmt));
gimple_transaction_set_label_over (new_trans_stmt,
gimple_transaction_label_over (old_trans_stmt));
}
break;
default:
gcc_unreachable ();
}
}
else
{
if (gimple_assign_copy_p (stmt)
&& gimple_assign_lhs (stmt) == gimple_assign_rhs1 (stmt)
&& auto_var_in_fn_p (gimple_assign_lhs (stmt), id->src_fn))
{
/* Here we handle statements that are not completely rewritten.
First we detect some inlining-induced bogosities for
discarding. */
/* Some assignments VAR = VAR; don't generate any rtl code
and thus don't count as variable modification. Avoid
keeping bogosities like 0 = 0. */
tree decl = gimple_assign_lhs (stmt), value;
tree *n;
n = id->decl_map->get (decl);
if (n)
{
value = *n;
STRIP_TYPE_NOPS (value);
if (TREE_CONSTANT (value) || TREE_READONLY (value))
return NULL;
}
}
/* For *ptr_N ={v} {CLOBBER}, if ptr_N is an SSA_NAME defined
in a block that we aren't copying during tree_function_versioning,
just drop the clobber stmt. */
if (id->blocks_to_copy && gimple_clobber_p (stmt))
{
tree lhs = gimple_assign_lhs (stmt);
if (TREE_CODE (lhs) == MEM_REF
&& TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0));
if (gimple_bb (def_stmt)
&& !bitmap_bit_p (id->blocks_to_copy,
gimple_bb (def_stmt)->index))
return NULL;
}
}
if (gimple_debug_bind_p (stmt))
{
gdebug *copy
= gimple_build_debug_bind (gimple_debug_bind_get_var (stmt),
gimple_debug_bind_get_value (stmt),
stmt);
if (id->reset_location)
gimple_set_location (copy, input_location);
id->debug_stmts.safe_push (copy);
gimple_seq_add_stmt (&stmts, copy);
return stmts;
}
if (gimple_debug_source_bind_p (stmt))
{
gdebug *copy = gimple_build_debug_source_bind
(gimple_debug_source_bind_get_var (stmt),
gimple_debug_source_bind_get_value (stmt),
stmt);
if (id->reset_location)
gimple_set_location (copy, input_location);
id->debug_stmts.safe_push (copy);
gimple_seq_add_stmt (&stmts, copy);
return stmts;
}
if (gimple_debug_nonbind_marker_p (stmt))
{
/* If the inlined function has too many debug markers,
don't copy them. */
if (id->src_cfun->debug_marker_count
> PARAM_VALUE (PARAM_MAX_DEBUG_MARKER_COUNT))
return stmts;
gdebug *copy = as_a <gdebug *> (gimple_copy (stmt));
if (id->reset_location)
gimple_set_location (copy, input_location);
id->debug_stmts.safe_push (copy);
gimple_seq_add_stmt (&stmts, copy);
return stmts;
}
/* Create a new deep copy of the statement. */
copy = gimple_copy (stmt);
/* Clear flags that need revisiting. */
if (gcall *call_stmt = dyn_cast <gcall *> (copy))
{
if (gimple_call_tail_p (call_stmt))
gimple_call_set_tail (call_stmt, false);
if (gimple_call_from_thunk_p (call_stmt))
gimple_call_set_from_thunk (call_stmt, false);
if (gimple_call_internal_p (call_stmt))
switch (gimple_call_internal_fn (call_stmt))
{
case IFN_GOMP_SIMD_LANE:
case IFN_GOMP_SIMD_VF:
case IFN_GOMP_SIMD_LAST_LANE:
case IFN_GOMP_SIMD_ORDERED_START:
case IFN_GOMP_SIMD_ORDERED_END:
DECL_STRUCT_FUNCTION (id->dst_fn)->has_simduid_loops = true;
break;
default:
break;
}
}
/* Remap the region numbers for __builtin_eh_{pointer,filter},
RESX and EH_DISPATCH. */
if (id->eh_map)
switch (gimple_code (copy))
{
case GIMPLE_CALL:
{
tree r, fndecl = gimple_call_fndecl (copy);
if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_EH_COPY_VALUES:
r = gimple_call_arg (copy, 1);
r = remap_eh_region_tree_nr (r, id);
gimple_call_set_arg (copy, 1, r);
/* FALLTHRU */
case BUILT_IN_EH_POINTER:
case BUILT_IN_EH_FILTER:
r = gimple_call_arg (copy, 0);
r = remap_eh_region_tree_nr (r, id);
gimple_call_set_arg (copy, 0, r);
break;
default:
break;
}
/* Reset alias info if we didn't apply measures to
keep it valid over inlining by setting DECL_PT_UID. */
if (!id->src_cfun->gimple_df
|| !id->src_cfun->gimple_df->ipa_pta)
gimple_call_reset_alias_info (as_a <gcall *> (copy));
}
break;
case GIMPLE_RESX:
{
gresx *resx_stmt = as_a <gresx *> (copy);
int r = gimple_resx_region (resx_stmt);
r = remap_eh_region_nr (r, id);
gimple_resx_set_region (resx_stmt, r);
}
break;
case GIMPLE_EH_DISPATCH:
{
geh_dispatch *eh_dispatch = as_a <geh_dispatch *> (copy);
int r = gimple_eh_dispatch_region (eh_dispatch);
r = remap_eh_region_nr (r, id);
gimple_eh_dispatch_set_region (eh_dispatch, r);
}
break;
default:
break;
}
}
/* If STMT has a block defined, map it to the newly constructed block. */
if (tree block = gimple_block (copy))
{
tree *n;
n = id->decl_map->get (block);
gcc_assert (n);
gimple_set_block (copy, *n);
}
if (id->reset_location)
gimple_set_location (copy, input_location);
/* Debug statements ought to be rebuilt and not copied. */
gcc_checking_assert (!is_gimple_debug (copy));
/* Remap all the operands in COPY. */
memset (&wi, 0, sizeof (wi));
wi.info = id;
if (skip_first)
walk_tree (gimple_op_ptr (copy, 1), remap_gimple_op_r, &wi, NULL);
else
walk_gimple_op (copy, remap_gimple_op_r, &wi);
/* Clear the copied virtual operands. We are not remapping them here
but are going to recreate them from scratch. */
if (gimple_has_mem_ops (copy))
{
gimple_set_vdef (copy, NULL_TREE);
gimple_set_vuse (copy, NULL_TREE);
}
gimple_seq_add_stmt (&stmts, copy);
return stmts;
}
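/* A sketch of the return transformation (names hypothetical): with
   ID->TRANSFORM_RETURN_TO_MODIFY set and ID->RETVAR being retval.1,

     return x_3;    is rewritten to    retval.1 = x_3;

   while a plain "return;" or a return of the result decl itself yields
   no statement at all; the branch back to the caller is reconstructed
   later by CFG manipulation rather than kept as a statement. */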
/* Copy a basic block, scaling the profile accordingly. Edges will be
   taken care of later. */
static basic_block
copy_bb (copy_body_data *id, basic_block bb,
profile_count num, profile_count den)
{
gimple_stmt_iterator gsi, copy_gsi, seq_gsi;
basic_block copy_basic_block;
tree decl;
basic_block prev;
profile_count::adjust_for_ipa_scaling (&num, &den);
/* Search for previous copied basic block. */
prev = bb->prev_bb;
while (!prev->aux)
prev = prev->prev_bb;
/* create_basic_block() will append every new block to
basic_block_info automatically. */
copy_basic_block = create_basic_block (NULL, (basic_block) prev->aux);
copy_basic_block->count = bb->count.apply_scale (num, den);
copy_gsi = gsi_start_bb (copy_basic_block);
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple_seq stmts;
gimple *stmt = gsi_stmt (gsi);
gimple *orig_stmt = stmt;
gimple_stmt_iterator stmts_gsi;
bool stmt_added = false;
id->regimplify = false;
stmts = remap_gimple_stmt (stmt, id);
if (gimple_seq_empty_p (stmts))
continue;
seq_gsi = copy_gsi;
for (stmts_gsi = gsi_start (stmts);
!gsi_end_p (stmts_gsi); )
{
stmt = gsi_stmt (stmts_gsi);
/* Advance iterator now before stmt is moved to seq_gsi. */
gsi_next (&stmts_gsi);
if (gimple_nop_p (stmt))
continue;
gimple_duplicate_stmt_histograms (cfun, stmt, id->src_cfun,
orig_stmt);
/* With return slot optimization we can end up with
non-gimple (foo *)&this->m, fix that here. */
if (is_gimple_assign (stmt)
&& CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt))
&& !is_gimple_val (gimple_assign_rhs1 (stmt)))
{
tree new_rhs;
new_rhs = force_gimple_operand_gsi (&seq_gsi,
gimple_assign_rhs1 (stmt),
true, NULL, false,
GSI_CONTINUE_LINKING);
gimple_assign_set_rhs1 (stmt, new_rhs);
id->regimplify = false;
}
gsi_insert_after (&seq_gsi, stmt, GSI_NEW_STMT);
if (id->regimplify)
gimple_regimplify_operands (stmt, &seq_gsi);
stmt_added = true;
}
if (!stmt_added)
continue;
/* If copy_basic_block was empty at the start of this iteration,
   call gsi_start_bb again to get at the newly added statements. */
if (gsi_end_p (copy_gsi))
copy_gsi = gsi_start_bb (copy_basic_block);
else
gsi_next (&copy_gsi);
/* Process the new statement. The call to gimple_regimplify_operands
   possibly turned the statement into multiple statements; we
   need to process all of them. */
do
{
tree fn;
gcall *call_stmt;
stmt = gsi_stmt (copy_gsi);
call_stmt = dyn_cast <gcall *> (stmt);
if (call_stmt
&& gimple_call_va_arg_pack_p (call_stmt)
&& id->call_stmt
&& ! gimple_call_va_arg_pack_p (id->call_stmt))
{
/* __builtin_va_arg_pack () should be replaced by
all arguments corresponding to ... in the caller. */
tree p;
gcall *new_call;
vec<tree> argarray;
size_t nargs_caller = gimple_call_num_args (id->call_stmt);
size_t nargs = nargs_caller;
for (p = DECL_ARGUMENTS (id->src_fn); p; p = DECL_CHAIN (p))
nargs--;
/* Create the new array of arguments. */
size_t nargs_callee = gimple_call_num_args (call_stmt);
size_t n = nargs + nargs_callee;
argarray.create (n);
argarray.safe_grow_cleared (n);
/* Copy all the arguments before '...' */
if (nargs_callee)
memcpy (argarray.address (),
gimple_call_arg_ptr (call_stmt, 0),
nargs_callee * sizeof (tree));
/* Append the arguments passed in '...' */
if (nargs)
memcpy (argarray.address () + nargs_callee,
gimple_call_arg_ptr (id->call_stmt, 0)
+ (nargs_caller - nargs), nargs * sizeof (tree));
new_call = gimple_build_call_vec (gimple_call_fn (call_stmt),
argarray);
argarray.release ();
/* Copy all GIMPLE_CALL flags, location and block, except
GF_CALL_VA_ARG_PACK. */
gimple_call_copy_flags (new_call, call_stmt);
gimple_call_set_va_arg_pack (new_call, false);
/* location includes block. */
gimple_set_location (new_call, gimple_location (stmt));
gimple_call_set_lhs (new_call, gimple_call_lhs (call_stmt));
gsi_replace (&copy_gsi, new_call, false);
stmt = new_call;
}
else if (call_stmt
&& id->call_stmt
&& (decl = gimple_call_fndecl (stmt))
&& fndecl_built_in_p (decl, BUILT_IN_VA_ARG_PACK_LEN))
{
/* __builtin_va_arg_pack_len () should be replaced by
the number of anonymous arguments. */
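/* A small sketch (function name made up): with

  int f (int x, ...)
  {
    return __builtin_va_arg_pack_len ();
  }

inlining 'f (1, a, b)' replaces the builtin with 2, the number of
arguments passed in '...'.  If the caller itself passes
__builtin_va_arg_pack () on, its own anonymous-argument count is
added via the PLUS_EXPR built below.  */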
size_t nargs = gimple_call_num_args (id->call_stmt);
tree count, p;
gimple *new_stmt;
for (p = DECL_ARGUMENTS (id->src_fn); p; p = DECL_CHAIN (p))
nargs--;
if (!gimple_call_lhs (stmt))
{
/* Drop unused calls. */
gsi_remove (&copy_gsi, false);
continue;
}
else if (!gimple_call_va_arg_pack_p (id->call_stmt))
{
count = build_int_cst (integer_type_node, nargs);
new_stmt = gimple_build_assign (gimple_call_lhs (stmt), count);
gsi_replace (&copy_gsi, new_stmt, false);
stmt = new_stmt;
}
else if (nargs != 0)
{
tree newlhs = create_tmp_reg_or_ssa_name (integer_type_node);
count = build_int_cst (integer_type_node, nargs);
new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
PLUS_EXPR, newlhs, count);
gimple_call_set_lhs (stmt, newlhs);
gsi_insert_after (&copy_gsi, new_stmt, GSI_NEW_STMT);
}
}
else if (call_stmt
&& id->call_stmt
&& gimple_call_internal_p (stmt)
&& gimple_call_internal_fn (stmt) == IFN_TSAN_FUNC_EXIT)
{
/* Drop TSAN_FUNC_EXIT () internal calls during inlining. */
gsi_remove (&copy_gsi, false);
continue;
}
/* Statements produced by inlining can be unfolded, especially
when we have constant propagated some operands.  We can't fold
them right now for two reasons:
1) folding requires SSA_NAME_DEF_STMTs to be correct
2) we can't change function calls to builtins.
So we just mark the statement for later folding.  We mark
all new statements, instead of just the statements that changed
by some nontrivial substitution, so that even statements made
foldable indirectly are updated.  If this turns out to be
expensive, copy_body can be told to watch for nontrivial
changes.  */
if (id->statements_to_fold)
id->statements_to_fold->add (stmt);
/* We're duplicating a CALL_EXPR. Find any corresponding
callgraph edges and update or duplicate them. */
if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
{
struct cgraph_edge *edge;
switch (id->transform_call_graph_edges)
{
case CB_CGE_DUPLICATE:
edge = id->src_node->get_edge (orig_stmt);
if (edge)
{
struct cgraph_edge *old_edge = edge;
profile_count old_cnt = edge->count;
edge = edge->clone (id->dst_node, call_stmt,
gimple_uid (stmt),
num, den,
true);
/* Speculative calls consist of two edges - direct and
indirect. Duplicate the whole thing and distribute
frequencies accordingly. */
if (edge->speculative)
{
struct cgraph_edge *direct, *indirect;
struct ipa_ref *ref;
gcc_assert (!edge->indirect_unknown_callee);
old_edge->speculative_call_info (direct, indirect, ref);
profile_count indir_cnt = indirect->count;
indirect = indirect->clone (id->dst_node, call_stmt,
gimple_uid (stmt),
num, den,
true);
profile_probability prob
= indir_cnt.probability_in (old_cnt + indir_cnt);
indirect->count
= copy_basic_block->count.apply_probability (prob);
edge->count = copy_basic_block->count - indirect->count;
id->dst_node->clone_reference (ref, stmt);
}
else
edge->count = copy_basic_block->count;
}
break;
case CB_CGE_MOVE_CLONES:
id->dst_node->set_call_stmt_including_clones (orig_stmt,
call_stmt);
edge = id->dst_node->get_edge (stmt);
break;
case CB_CGE_MOVE:
edge = id->dst_node->get_edge (orig_stmt);
if (edge)
edge->set_call_stmt (call_stmt);
break;
default:
gcc_unreachable ();
}
/* Constant propagation on arguments done during inlining
may create new direct calls.  Produce an edge for such a call.  */
if ((!edge
|| (edge->indirect_inlining_edge
&& id->transform_call_graph_edges == CB_CGE_MOVE_CLONES))
&& id->dst_node->definition
&& (fn = gimple_call_fndecl (stmt)) != NULL)
{
struct cgraph_node *dest = cgraph_node::get_create (fn);
/* We have a missing edge in the callgraph.  This can happen
when previous inlining turned an indirect call into a
direct call by constant propagating arguments, or when we are
producing a dead clone (for further cloning).  In all
other cases we hit a bug (incorrect node sharing is the
most common reason for missing edges).  */
gcc_assert (!dest->definition
|| dest->address_taken
|| !id->src_node->definition
|| !id->dst_node->definition);
if (id->transform_call_graph_edges == CB_CGE_MOVE_CLONES)
id->dst_node->create_edge_including_clones
(dest, orig_stmt, call_stmt, bb->count,
CIF_ORIGINALLY_INDIRECT_CALL);
else
id->dst_node->create_edge (dest, call_stmt,
bb->count)->inline_failed
= CIF_ORIGINALLY_INDIRECT_CALL;
if (dump_file)
{
fprintf (dump_file, "Created new direct edge to %s\n",
dest->name ());
}
}
notice_special_calls (as_a <gcall *> (stmt));
}
maybe_duplicate_eh_stmt_fn (cfun, stmt, id->src_cfun, orig_stmt,
id->eh_map, id->eh_lp_nr);
gsi_next (&copy_gsi);
}
while (!gsi_end_p (copy_gsi));
copy_gsi = gsi_last_bb (copy_basic_block);
}
return copy_basic_block;
}
/* Inserting a Single Entry Multiple Exit region in SSA form into code in
SSA form is quite easy, since the dominator relationship for the old basic
blocks does not change.
There is, however, an exception: inlining might change the dominator
relation across EH edges from basic blocks within the inlined function
to landing pads in the function we inline into.
The function fills in the PHI arguments of such PHI nodes if they refer
to gimple regs.  Otherwise, the PHI_RESULT of such PHI nodes is marked
for renaming.  For non-gimple regs, renaming is safe: the
EH edges are abnormal and SSA_NAME_OCCURS_IN_ABNORMAL_PHI must be
set, which means that there will be no overlapping live ranges
for the underlying symbol.
This might change in the future if we allow redirecting of EH edges,
and we might then want to change the way the CFG is built pre-inlining
to include all the possible edges.  */
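/* A sketch of the fix-up (block and SSA names illustrative): if a
landing pad LP outside the copied region has the PHI

  x_3 = PHI <x_1 (RET_BB), ...>

and a copied block gains a new EH edge E into LP, the PHI argument
for E is copied from the argument already present on the edge from
RET_BB, so the PHI stays complete without a full SSA update.  */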
static void
update_ssa_across_abnormal_edges (basic_block bb, basic_block ret_bb,
bool can_throw, bool nonlocal_goto)
{
edge e;
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
if (!e->dest->aux
|| ((basic_block)e->dest->aux)->index == ENTRY_BLOCK)
{
gphi *phi;
gphi_iterator si;
if (!nonlocal_goto)
gcc_assert (e->flags & EDGE_EH);
if (!can_throw)
gcc_assert (!(e->flags & EDGE_EH));
for (si = gsi_start_phis (e->dest); !gsi_end_p (si); gsi_next (&si))
{
edge re;
phi = si.phi ();
/* For abnormal goto/call edges the receiver can be the
ENTRY_BLOCK. Do not assert this cannot happen. */
gcc_assert ((e->flags & EDGE_EH)
|| SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (phi)));
re = find_edge (ret_bb, e->dest);
gcc_checking_assert (re);
gcc_assert ((re->flags & (EDGE_EH | EDGE_ABNORMAL))
== (e->flags & (EDGE_EH | EDGE_ABNORMAL)));
SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e),
USE_FROM_PTR (PHI_ARG_DEF_PTR_FROM_EDGE (phi, re)));
}
}
}
/* Insert clobbers for automatic variables of the inlined function
ID->src_fn at the start of basic block BB.  */
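/* For illustration (variable made up): if the inlined body declared
a local 'char buf[64]', the landing pad gains

  buf = {CLOBBER};

marking BUF as dead on the EH path so later passes may reuse its
stack slot.  */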
static void
add_clobbers_to_eh_landing_pad (basic_block bb, copy_body_data *id)
{
tree var;
unsigned int i;
FOR_EACH_VEC_SAFE_ELT (id->src_cfun->local_decls, i, var)
if (VAR_P (var)
&& !DECL_HARD_REGISTER (var)
&& !TREE_THIS_VOLATILE (var)
&& !DECL_HAS_VALUE_EXPR_P (var)
&& !is_gimple_reg (var)
&& auto_var_in_fn_p (var, id->src_fn)
&& !lookup_attribute ("omp simd array", DECL_ATTRIBUTES (var)))
{
tree *t = id->decl_map->get (var);
if (!t)
continue;
tree new_var = *t;
if (VAR_P (new_var)
&& !DECL_HARD_REGISTER (new_var)
&& !TREE_THIS_VOLATILE (new_var)
&& !DECL_HAS_VALUE_EXPR_P (new_var)
&& !is_gimple_reg (new_var)
&& auto_var_in_fn_p (new_var, id->dst_fn))
{
gimple_stmt_iterator gsi = gsi_after_labels (bb);
tree clobber = build_clobber (TREE_TYPE (new_var));
gimple *clobber_stmt = gimple_build_assign (new_var, clobber);
gsi_insert_before (&gsi, clobber_stmt, GSI_NEW_STMT);
}
}
}
/* Copy edges from BB into its copy constructed earlier, scale profile
accordingly.  Assume the aux pointers point to the copies of each BB.
Return true if any debug stmts are left after a statement that must
end the basic block.  */
static bool
copy_edges_for_bb (basic_block bb, profile_count num, profile_count den,
basic_block ret_bb, basic_block abnormal_goto_dest,
copy_body_data *id)
{
basic_block new_bb = (basic_block) bb->aux;
edge_iterator ei;
edge old_edge;
gimple_stmt_iterator si;
bool need_debug_cleanup = false;
/* Use the indices from the original blocks to create edges for the
new ones. */
FOR_EACH_EDGE (old_edge, ei, bb->succs)
if (!(old_edge->flags & EDGE_EH))
{
edge new_edge;
int flags = old_edge->flags;
location_t locus = old_edge->goto_locus;
/* Return edges do get a FALLTHRU flag when they get inlined. */
if (old_edge->dest->index == EXIT_BLOCK
&& !(flags & (EDGE_TRUE_VALUE|EDGE_FALSE_VALUE|EDGE_FAKE))
&& old_edge->dest->aux != EXIT_BLOCK_PTR_FOR_FN (cfun))
flags |= EDGE_FALLTHRU;
new_edge
= make_edge (new_bb, (basic_block) old_edge->dest->aux, flags);
new_edge->probability = old_edge->probability;
if (!id->reset_location)
new_edge->goto_locus = remap_location (locus, id);
}
if (bb->index == ENTRY_BLOCK || bb->index == EXIT_BLOCK)
return false;
/* When doing function splitting, we must decrease the count of the return
block which was previously reachable by blocks we did not copy.  */
if (single_succ_p (bb) && single_succ_edge (bb)->dest->index == EXIT_BLOCK)
FOR_EACH_EDGE (old_edge, ei, bb->preds)
if (old_edge->src->index != ENTRY_BLOCK
&& !old_edge->src->aux)
new_bb->count -= old_edge->count ().apply_scale (num, den);
for (si = gsi_start_bb (new_bb); !gsi_end_p (si);)
{
gimple *copy_stmt;
bool can_throw, nonlocal_goto;
copy_stmt = gsi_stmt (si);
if (!is_gimple_debug (copy_stmt))
update_stmt (copy_stmt);
/* Do this before the possible split_block. */
gsi_next (&si);
/* If this tree could throw an exception, there are two
cases where we need to add abnormal edge(s): the
tree wasn't in a region and there is a "current
region" in the caller; or the original tree had
EH edges. In both cases split the block after the tree,
and add abnormal edge(s) as needed; we need both
those from the callee and the caller.
We check whether the copy can throw, because constant
propagation can change an INDIRECT_REF which throws
into a COMPONENT_REF which doesn't. If the copy
can throw, the original could also throw. */
can_throw = stmt_can_throw_internal (cfun, copy_stmt);
nonlocal_goto
= (stmt_can_make_abnormal_goto (copy_stmt)
&& !computed_goto_p (copy_stmt));
if (can_throw || nonlocal_goto)
{
if (!gsi_end_p (si))
{
while (!gsi_end_p (si) && is_gimple_debug (gsi_stmt (si)))
gsi_next (&si);
if (gsi_end_p (si))
need_debug_cleanup = true;
}
if (!gsi_end_p (si))
/* Note that bb's predecessor edges aren't necessarily
right at this point; split_block doesn't care. */
{
edge e = split_block (new_bb, copy_stmt);
new_bb = e->dest;
new_bb->aux = e->src->aux;
si = gsi_start_bb (new_bb);
}
}
bool update_probs = false;
if (gimple_code (copy_stmt) == GIMPLE_EH_DISPATCH)
{
make_eh_dispatch_edges (as_a <geh_dispatch *> (copy_stmt));
update_probs = true;
}
else if (can_throw)
{
make_eh_edges (copy_stmt);
update_probs = true;
}
/* EH edges may not match old edges. Copy as much as possible. */
if (update_probs)
{
edge e;
edge_iterator ei;
basic_block copy_stmt_bb = gimple_bb (copy_stmt);
FOR_EACH_EDGE (old_edge, ei, bb->succs)
if ((old_edge->flags & EDGE_EH)
&& (e = find_edge (copy_stmt_bb,
(basic_block) old_edge->dest->aux))
&& (e->flags & EDGE_EH))
e->probability = old_edge->probability;
FOR_EACH_EDGE (e, ei, copy_stmt_bb->succs)
if (e->flags & EDGE_EH)
{
if (!e->probability.initialized_p ())
e->probability = profile_probability::never ();
if (e->dest->index < id->add_clobbers_to_eh_landing_pads)
{
add_clobbers_to_eh_landing_pad (e->dest, id);
id->add_clobbers_to_eh_landing_pads = 0;
}
}
}
/* If the call we inline cannot make an abnormal goto, do not add
additional abnormal edges; only retain those already present
in the original function body.  */
if (abnormal_goto_dest == NULL)
nonlocal_goto = false;
if (nonlocal_goto)
{
basic_block copy_stmt_bb = gimple_bb (copy_stmt);
if (get_abnormal_succ_dispatcher (copy_stmt_bb))
nonlocal_goto = false;
/* ABNORMAL_DISPATCHER (1) is for longjmp/setjmp or nonlocal gotos
in OpenMP regions which aren't allowed to be left abnormally.
So, there is no need to add an abnormal edge in that case.  */
else if (is_gimple_call (copy_stmt)
&& gimple_call_internal_p (copy_stmt)
&& (gimple_call_internal_fn (copy_stmt)
== IFN_ABNORMAL_DISPATCHER)
&& gimple_call_arg (copy_stmt, 0) == boolean_true_node)
nonlocal_goto = false;
else
make_single_succ_edge (copy_stmt_bb, abnormal_goto_dest,
EDGE_ABNORMAL);
}
if ((can_throw || nonlocal_goto)
&& gimple_in_ssa_p (cfun))
update_ssa_across_abnormal_edges (gimple_bb (copy_stmt), ret_bb,
can_throw, nonlocal_goto);
}
return need_debug_cleanup;
}
/* Copy the PHIs.  All blocks and edges have been copied; some blocks
may have been split and new outgoing EH edges inserted.
BB points to the block of the original function and the AUX pointers
link the original and newly copied blocks.  */
static void
copy_phis_for_bb (basic_block bb, copy_body_data *id)
{
basic_block const new_bb = (basic_block) bb->aux;
edge_iterator ei;
gphi *phi;
gphi_iterator si;
edge new_edge;
bool inserted = false;
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
{
tree res, new_res;
gphi *new_phi;
phi = si.phi ();
res = PHI_RESULT (phi);
new_res = res;
if (!virtual_operand_p (res))
{
walk_tree (&new_res, copy_tree_body_r, id, NULL);
if (EDGE_COUNT (new_bb->preds) == 0)
{
/* Technically we'd want an SSA_DEFAULT_DEF here...  */
SSA_NAME_DEF_STMT (new_res) = gimple_build_nop ();
}
else
{
new_phi = create_phi_node (new_res, new_bb);
FOR_EACH_EDGE (new_edge, ei, new_bb->preds)
{
edge old_edge = find_edge ((basic_block) new_edge->src->aux,
bb);
tree arg;
tree new_arg;
edge_iterator ei2;
location_t locus;
/* When doing partial cloning, we allow PHIs on the entry
block as long as all the arguments are the same.
Find any input edge to see the argument to copy.  */
if (!old_edge)
FOR_EACH_EDGE (old_edge, ei2, bb->preds)
if (!old_edge->src->aux)
break;
arg = PHI_ARG_DEF_FROM_EDGE (phi, old_edge);
new_arg = arg;
walk_tree (&new_arg, copy_tree_body_r, id, NULL);
gcc_assert (new_arg);
/* With return slot optimization we can end up with
non-gimple (foo *)&this->m, fix that here. */
if (TREE_CODE (new_arg) != SSA_NAME
&& TREE_CODE (new_arg) != FUNCTION_DECL
&& !is_gimple_val (new_arg))
{
gimple_seq stmts = NULL;
new_arg = force_gimple_operand (new_arg, &stmts, true,
NULL);
gsi_insert_seq_on_edge (new_edge, stmts);
inserted = true;
}
locus = gimple_phi_arg_location_from_edge (phi, old_edge);
if (id->reset_location)
locus = input_location;
else
locus = remap_location (locus, id);
add_phi_arg (new_phi, new_arg, new_edge, locus);
}
}
}
}
/* Commit the delayed edge insertions. */
if (inserted)
FOR_EACH_EDGE (new_edge, ei, new_bb->preds)
gsi_commit_one_edge_insert (new_edge, NULL);
}
/* Wrapper for remap_decl so it can be used as a callback. */
static tree
remap_decl_1 (tree decl, void *data)
{
return remap_decl (decl, (copy_body_data *) data);
}
/* Build the struct function and associated data structures for the new
clone NEW_FNDECL to be built.  CALLEE_FNDECL is the original.  This
function changes cfun to the function of NEW_FNDECL (and
current_function_decl too).  */
static void
initialize_cfun (tree new_fndecl, tree callee_fndecl, profile_count count)
{
struct function *src_cfun = DECL_STRUCT_FUNCTION (callee_fndecl);
if (!DECL_ARGUMENTS (new_fndecl))
DECL_ARGUMENTS (new_fndecl) = DECL_ARGUMENTS (callee_fndecl);
if (!DECL_RESULT (new_fndecl))
DECL_RESULT (new_fndecl) = DECL_RESULT (callee_fndecl);
/* Register specific tree functions. */
gimple_register_cfg_hooks ();
/* Get clean struct function. */
push_struct_function (new_fndecl);
/* We will rebuild these, so just sanity check that they are empty. */
gcc_assert (VALUE_HISTOGRAMS (cfun) == NULL);
gcc_assert (cfun->local_decls == NULL);
gcc_assert (cfun->cfg == NULL);
gcc_assert (cfun->decl == new_fndecl);
/* Copy items we preserve during cloning. */
cfun->static_chain_decl = src_cfun->static_chain_decl;
cfun->nonlocal_goto_save_area = src_cfun->nonlocal_goto_save_area;
cfun->function_end_locus = src_cfun->function_end_locus;
cfun->curr_properties = src_cfun->curr_properties;
cfun->last_verified = src_cfun->last_verified;
cfun->va_list_gpr_size = src_cfun->va_list_gpr_size;
cfun->va_list_fpr_size = src_cfun->va_list_fpr_size;
cfun->has_nonlocal_label = src_cfun->has_nonlocal_label;
cfun->stdarg = src_cfun->stdarg;
cfun->after_inlining = src_cfun->after_inlining;
cfun->can_throw_non_call_exceptions
= src_cfun->can_throw_non_call_exceptions;
cfun->can_delete_dead_exceptions = src_cfun->can_delete_dead_exceptions;
cfun->returns_struct = src_cfun->returns_struct;
cfun->returns_pcc_struct = src_cfun->returns_pcc_struct;
init_empty_tree_cfg ();
profile_status_for_fn (cfun) = profile_status_for_fn (src_cfun);
profile_count num = count;
profile_count den = ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count;
profile_count::adjust_for_ipa_scaling (&num, &den);
ENTRY_BLOCK_PTR_FOR_FN (cfun)->count =
ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count.apply_scale (count,
ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count);
EXIT_BLOCK_PTR_FOR_FN (cfun)->count =
EXIT_BLOCK_PTR_FOR_FN (src_cfun)->count.apply_scale (count,
ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count);
if (src_cfun->eh)
init_eh_for_function ();
if (src_cfun->gimple_df)
{
init_tree_ssa (cfun);
cfun->gimple_df->in_ssa_p = src_cfun->gimple_df->in_ssa_p;
if (cfun->gimple_df->in_ssa_p)
init_ssa_operands (cfun);
}
}
/* Helper function for copy_cfg_body.  Move debug stmts from the end
of NEW_BB to the beginning of successor basic blocks when needed.  If a
successor has multiple predecessors, reset the debug stmts' values;
otherwise keep them.  */
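/* An illustrative sketch (names made up): if NEW_BB ends in

  x_1 = foo ();   [may throw internally]
  # DEBUG x => x_1

the debug bind may not stay after the statement that must end the
block.  It is moved (for the last edge) or copied to the start of
each successor; when a successor has other predecessors, the bind is
reset to '# DEBUG x => NULL' there, since the value would not be
correct on the other incoming paths.  */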
static void
maybe_move_debug_stmts_to_successors (copy_body_data *id, basic_block new_bb)
{
edge e;
edge_iterator ei;
gimple_stmt_iterator si = gsi_last_nondebug_bb (new_bb);
if (gsi_end_p (si)
|| gsi_one_before_end_p (si)
|| !(stmt_can_throw_internal (cfun, gsi_stmt (si))
|| stmt_can_make_abnormal_goto (gsi_stmt (si))))
return;
FOR_EACH_EDGE (e, ei, new_bb->succs)
{
gimple_stmt_iterator ssi = gsi_last_bb (new_bb);
gimple_stmt_iterator dsi = gsi_after_labels (e->dest);
while (is_gimple_debug (gsi_stmt (ssi)))
{
gimple *stmt = gsi_stmt (ssi);
gdebug *new_stmt;
tree var;
tree value;
/* For the last edge move the debug stmts instead of copying
them. */
if (ei_one_before_end_p (ei))
{
si = ssi;
gsi_prev (&ssi);
if (!single_pred_p (e->dest) && gimple_debug_bind_p (stmt))
{
gimple_debug_bind_reset_value (stmt);
gimple_set_location (stmt, UNKNOWN_LOCATION);
}
gsi_remove (&si, false);
gsi_insert_before (&dsi, stmt, GSI_SAME_STMT);
continue;
}
if (gimple_debug_bind_p (stmt))
{
var = gimple_debug_bind_get_var (stmt);
if (single_pred_p (e->dest))
{
value = gimple_debug_bind_get_value (stmt);
value = unshare_expr (value);
new_stmt = gimple_build_debug_bind (var, value, stmt);
}
else
new_stmt = gimple_build_debug_bind (var, NULL_TREE, NULL);
}
else if (gimple_debug_source_bind_p (stmt))
{
var = gimple_debug_source_bind_get_var (stmt);
value = gimple_debug_source_bind_get_value (stmt);
new_stmt = gimple_build_debug_source_bind (var, value, stmt);
}
else if (gimple_debug_nonbind_marker_p (stmt))
new_stmt = as_a <gdebug *> (gimple_copy (stmt));
else
gcc_unreachable ();
gsi_insert_before (&dsi, new_stmt, GSI_SAME_STMT);
id->debug_stmts.safe_push (new_stmt);
gsi_prev (&ssi);
}
}
}
/* Make a copy of the sub-loops of SRC_PARENT and place them
as children of DEST_PARENT.  */
static void
copy_loops (copy_body_data *id,
struct loop *dest_parent, struct loop *src_parent)
{
struct loop *src_loop = src_parent->inner;
while (src_loop)
{
if (!id->blocks_to_copy
|| bitmap_bit_p (id->blocks_to_copy, src_loop->header->index))
{
struct loop *dest_loop = alloc_loop ();
/* Assign the new loop its header and latch and associate
those with the new loop. */
dest_loop->header = (basic_block)src_loop->header->aux;
dest_loop->header->loop_father = dest_loop;
if (src_loop->latch != NULL)
{
dest_loop->latch = (basic_block)src_loop->latch->aux;
dest_loop->latch->loop_father = dest_loop;
}
/* Copy loop meta-data. */
copy_loop_info (src_loop, dest_loop);
if (dest_loop->unroll)
cfun->has_unroll = true;
if (dest_loop->force_vectorize)
cfun->has_force_vectorize_loops = true;
if (id->src_cfun->last_clique != 0)
dest_loop->owned_clique
= remap_dependence_clique (id,
src_loop->owned_clique
? src_loop->owned_clique : 1);
/* Finally place it into the loop array and the loop tree. */
place_new_loop (cfun, dest_loop);
flow_loop_tree_node_add (dest_parent, dest_loop);
if (src_loop->simduid)
{
dest_loop->simduid = remap_decl (src_loop->simduid, id);
cfun->has_simduid_loops = true;
}
/* Recurse. */
copy_loops (id, dest_loop, src_loop);
}
src_loop = src_loop->next;
}
}
/* Call redirect_call_stmt_to_callee on all calls in BB. */
void
redirect_all_calls (copy_body_data * id, basic_block bb)
{
gimple_stmt_iterator si;
gimple *last = last_stmt (bb);
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
{
gimple *stmt = gsi_stmt (si);
if (is_gimple_call (stmt))
{
struct cgraph_edge *edge = id->dst_node->get_edge (stmt);
if (edge)
{
edge->redirect_call_stmt_to_callee ();
if (stmt == last && id->call_stmt && maybe_clean_eh_stmt (stmt))
gimple_purge_dead_eh_edges (bb);
}
}
}
}
/* Make a copy of the body of FN so that it can be inserted inline in
another function.  Walks FN via the CFG; returns the new fndecl.  */
static tree
copy_cfg_body (copy_body_data * id,
basic_block entry_block_map, basic_block exit_block_map,
basic_block new_entry)
{
tree callee_fndecl = id->src_fn;
/* Original cfun for the callee, doesn't change. */
struct function *src_cfun = DECL_STRUCT_FUNCTION (callee_fndecl);
struct function *cfun_to_copy;
basic_block bb;
tree new_fndecl = NULL;
bool need_debug_cleanup = false;
int last;
profile_count den = ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count;
profile_count num = entry_block_map->count;
cfun_to_copy = id->src_cfun = DECL_STRUCT_FUNCTION (callee_fndecl);
/* Register specific tree functions. */
gimple_register_cfg_hooks ();
/* If we are inlining just a region of the function, make sure to connect
the new entry to ENTRY_BLOCK_PTR_FOR_FN (cfun).  Since the new entry can
be part of a loop, we must compute the frequency and probability of
ENTRY_BLOCK_PTR_FOR_FN (cfun) based on the frequencies and
probabilities of the edges incoming from the nonduplicated region.  */
if (new_entry)
{
edge e;
edge_iterator ei;
den = profile_count::zero ();
FOR_EACH_EDGE (e, ei, new_entry->preds)
if (!e->src->aux)
den += e->count ();
ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = den;
}
profile_count::adjust_for_ipa_scaling (&num, &den);
/* Must have a CFG at this point.  */
gcc_assert (ENTRY_BLOCK_PTR_FOR_FN
(DECL_STRUCT_FUNCTION (callee_fndecl)));
ENTRY_BLOCK_PTR_FOR_FN (cfun_to_copy)->aux = entry_block_map;
EXIT_BLOCK_PTR_FOR_FN (cfun_to_copy)->aux = exit_block_map;
entry_block_map->aux = ENTRY_BLOCK_PTR_FOR_FN (cfun_to_copy);
exit_block_map->aux = EXIT_BLOCK_PTR_FOR_FN (cfun_to_copy);
/* Duplicate any exception-handling regions. */
if (cfun->eh)
id->eh_map = duplicate_eh_regions (cfun_to_copy, NULL, id->eh_lp_nr,
remap_decl_1, id);
/* Use aux pointers to map the original blocks to their copies.  */
FOR_EACH_BB_FN (bb, cfun_to_copy)
if (!id->blocks_to_copy || bitmap_bit_p (id->blocks_to_copy, bb->index))
{
basic_block new_bb = copy_bb (id, bb, num, den);
bb->aux = new_bb;
new_bb->aux = bb;
new_bb->loop_father = entry_block_map->loop_father;
}
last = last_basic_block_for_fn (cfun);
/* Now that we've duplicated the blocks, duplicate their edges. */
basic_block abnormal_goto_dest = NULL;
if (id->call_stmt
&& stmt_can_make_abnormal_goto (id->call_stmt))
{
gimple_stmt_iterator gsi = gsi_for_stmt (id->call_stmt);
bb = gimple_bb (id->call_stmt);
gsi_next (&gsi);
if (gsi_end_p (gsi))
abnormal_goto_dest = get_abnormal_succ_dispatcher (bb);
}
FOR_ALL_BB_FN (bb, cfun_to_copy)
if (!id->blocks_to_copy
|| (bb->index > 0 && bitmap_bit_p (id->blocks_to_copy, bb->index)))
need_debug_cleanup |= copy_edges_for_bb (bb, num, den, exit_block_map,
abnormal_goto_dest, id);
if (new_entry)
{
edge e = make_edge (entry_block_map, (basic_block)new_entry->aux,
EDGE_FALLTHRU);
e->probability = profile_probability::always ();
}
/* Duplicate the loop tree, if available and wanted. */
if (loops_for_fn (src_cfun) != NULL
&& current_loops != NULL)
{
copy_loops (id, entry_block_map->loop_father,
get_loop (src_cfun, 0));
/* Defer to cfgcleanup to update loop-father fields of basic-blocks. */
loops_state_set (LOOPS_NEED_FIXUP);
}
/* If the loop tree in the source function needed fixup, mark the
destination loop tree for fixup, too. */
if (loops_for_fn (src_cfun)->state & LOOPS_NEED_FIXUP)
loops_state_set (LOOPS_NEED_FIXUP);
if (gimple_in_ssa_p (cfun))
FOR_ALL_BB_FN (bb, cfun_to_copy)
if (!id->blocks_to_copy
|| (bb->index > 0 && bitmap_bit_p (id->blocks_to_copy, bb->index)))
copy_phis_for_bb (bb, id);
FOR_ALL_BB_FN (bb, cfun_to_copy)
if (bb->aux)
{
if (need_debug_cleanup
&& bb->index != ENTRY_BLOCK
&& bb->index != EXIT_BLOCK)
maybe_move_debug_stmts_to_successors (id, (basic_block) bb->aux);
/* Update call edge destinations. This cannot be done before loop
info is updated, because we may split basic blocks. */
if (id->transform_call_graph_edges == CB_CGE_DUPLICATE
&& bb->index != ENTRY_BLOCK
&& bb->index != EXIT_BLOCK)
redirect_all_calls (id, (basic_block)bb->aux);
((basic_block)bb->aux)->aux = NULL;
bb->aux = NULL;
}
/* Zero out AUX fields of newly created blocks during EH edge
insertion.  */
for (; last < last_basic_block_for_fn (cfun); last++)
{
if (need_debug_cleanup)
maybe_move_debug_stmts_to_successors (id,
BASIC_BLOCK_FOR_FN (cfun, last));
BASIC_BLOCK_FOR_FN (cfun, last)->aux = NULL;
/* Update call edge destinations. This cannot be done before loop
info is updated, because we may split basic blocks. */
if (id->transform_call_graph_edges == CB_CGE_DUPLICATE)
redirect_all_calls (id, BASIC_BLOCK_FOR_FN (cfun, last));
}
entry_block_map->aux = NULL;
exit_block_map->aux = NULL;
if (id->eh_map)
{
delete id->eh_map;
id->eh_map = NULL;
}
if (id->dependence_map)
{
delete id->dependence_map;
id->dependence_map = NULL;
}
return new_fndecl;
}
/* Copy the debug STMT using ID. We deal with these statements in a
special way: if any variable in their VALUE expression wasn't
remapped yet, we won't remap it, because that would get decl uids
out of sync, causing codegen differences between -g and -g0. If
this arises, we drop the VALUE expression altogether. */
static void
copy_debug_stmt (gdebug *stmt, copy_body_data *id)
{
tree t, *n;
struct walk_stmt_info wi;
if (tree block = gimple_block (stmt))
{
n = id->decl_map->get (block);
gimple_set_block (stmt, n ? *n : id->block);
}
if (gimple_debug_nonbind_marker_p (stmt))
return;
/* Remap all the operands in STMT.  */
memset (&wi, 0, sizeof (wi));
wi.info = id;
processing_debug_stmt = 1;
if (gimple_debug_source_bind_p (stmt))
t = gimple_debug_source_bind_get_var (stmt);
else if (gimple_debug_bind_p (stmt))
t = gimple_debug_bind_get_var (stmt);
else
gcc_unreachable ();
if (TREE_CODE (t) == PARM_DECL && id->debug_map
&& (n = id->debug_map->get (t)))
{
gcc_assert (VAR_P (*n));
t = *n;
}
else if (VAR_P (t) && !is_global_var (t) && !id->decl_map->get (t))
/* T is a non-localized variable. */;
else
walk_tree (&t, remap_gimple_op_r, &wi, NULL);
if (gimple_debug_bind_p (stmt))
{
gimple_debug_bind_set_var (stmt, t);
if (gimple_debug_bind_has_value_p (stmt))
walk_tree (gimple_debug_bind_get_value_ptr (stmt),
remap_gimple_op_r, &wi, NULL);
/* Punt if any decl couldn't be remapped. */
if (processing_debug_stmt < 0)
gimple_debug_bind_reset_value (stmt);
}
else if (gimple_debug_source_bind_p (stmt))
{
gimple_debug_source_bind_set_var (stmt, t);
/* When inlining, if the source bind refers to one of the optimized-away
parameters, change the source bind into a normal debug bind
referring to the corresponding DEBUG_EXPR_DECL that should have
been bound before the call stmt.  */
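/* A sketch (names made up): if parameter P of the inlined function
was optimized away but DEBUG_EXPR_DECL D#1 was recorded for it in
the caller, then

  # DEBUG source p => p

becomes

  # DEBUG p => D#1

so the debugger can still show a value for P.  */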
t = gimple_debug_source_bind_get_value (stmt);
if (t != NULL_TREE
&& TREE_CODE (t) == PARM_DECL
&& id->call_stmt)
{
vec<tree, va_gc> **debug_args = decl_debug_args_lookup (id->src_fn);
unsigned int i;
if (debug_args != NULL)
{
for (i = 0; i < vec_safe_length (*debug_args); i += 2)
if ((**debug_args)[i] == DECL_ORIGIN (t)
&& TREE_CODE ((**debug_args)[i + 1]) == DEBUG_EXPR_DECL)
{
t = (**debug_args)[i + 1];
stmt->subcode = GIMPLE_DEBUG_BIND;
gimple_debug_bind_set_value (stmt, t);
break;
}
}
}
if (gimple_debug_source_bind_p (stmt))
walk_tree (gimple_debug_source_bind_get_value_ptr (stmt),
remap_gimple_op_r, &wi, NULL);
}
processing_debug_stmt = 0;
update_stmt (stmt);
}
/* Process deferred debug stmts. In order to give values better odds
of being successfully remapped, we delay the processing of debug
stmts until all other stmts that might require remapping are
processed. */
static void
copy_debug_stmts (copy_body_data *id)
{
size_t i;
gdebug *stmt;
if (!id->debug_stmts.exists ())
return;
FOR_EACH_VEC_ELT (id->debug_stmts, i, stmt)
copy_debug_stmt (stmt, id);
id->debug_stmts.release ();
}
/* Make a copy of the body of SRC_FN so that it can be inserted inline in
another function. */
static tree
copy_tree_body (copy_body_data *id)
{
tree fndecl = id->src_fn;
tree body = DECL_SAVED_TREE (fndecl