| /* Interprocedural analyses. |
| Copyright (C) 2005-2018 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "alloc-pool.h" |
| #include "tree-pass.h" |
| #include "ssa.h" |
| #include "tree-streamer.h" |
| #include "cgraph.h" |
| #include "diagnostic.h" |
| #include "fold-const.h" |
| #include "gimple-fold.h" |
| #include "tree-eh.h" |
| #include "calls.h" |
| #include "stor-layout.h" |
| #include "print-tree.h" |
| #include "gimplify.h" |
| #include "gimple-iterator.h" |
| #include "gimplify-me.h" |
| #include "gimple-walk.h" |
| #include "symbol-summary.h" |
| #include "ipa-prop.h" |
| #include "tree-cfg.h" |
| #include "tree-dfa.h" |
| #include "tree-inline.h" |
| #include "ipa-fnsummary.h" |
| #include "gimple-pretty-print.h" |
| #include "params.h" |
| #include "ipa-utils.h" |
| #include "dbgcnt.h" |
| #include "domwalk.h" |
| #include "builtins.h" |
| |
| /* Function summary where the parameter infos are actually stored. */ |
| ipa_node_params_t *ipa_node_params_sum = NULL; |
| /* Vector of IPA-CP transformation data for each clone. */ |
| vec<ipcp_transformation_summary, va_gc> *ipcp_transformations; |
| /* Edge summary for IPA-CP edge information. */ |
| ipa_edge_args_sum_t *ipa_edge_args_sum; |
| |
| /* Traits for a hash table for reusing already existing ipa_bits. */ |
| |
| struct ipa_bit_ggc_hash_traits : public ggc_cache_remove <ipa_bits *> |
| { |
| typedef ipa_bits *value_type; |
| typedef ipa_bits *compare_type; |
| static hashval_t |
| hash (const ipa_bits *p) |
| { |
| hashval_t t = (hashval_t) p->value.to_shwi (); |
| return iterative_hash_host_wide_int (p->mask.to_shwi (), t); |
| } |
| static bool |
| equal (const ipa_bits *a, const ipa_bits *b) |
| { |
| return a->value == b->value && a->mask == b->mask; |
| } |
| static void |
| mark_empty (ipa_bits *&p) |
| { |
| p = NULL; |
| } |
| static bool |
| is_empty (const ipa_bits *p) |
| { |
| return p == NULL; |
| } |
| static bool |
| is_deleted (const ipa_bits *p) |
| { |
| return p == reinterpret_cast<const ipa_bits *> (1); |
| } |
| static void |
| mark_deleted (ipa_bits *&p) |
| { |
| p = reinterpret_cast<ipa_bits *> (1); |
| } |
| }; |
| |
| /* Hash table to avoid repeated allocations of equal ipa_bits. */ |
| static GTY ((cache)) hash_table<ipa_bit_ggc_hash_traits> *ipa_bits_hash_table; |
| |
| /* Traits for a hash table for reusing value_ranges used for IPA. Note that |
| the equiv bitmap is not hashed and is expected to be NULL. */ |
| |
| struct ipa_vr_ggc_hash_traits : public ggc_cache_remove <value_range *> |
| { |
| typedef value_range *value_type; |
| typedef value_range *compare_type; |
| static hashval_t |
| hash (const value_range *p) |
| { |
| gcc_checking_assert (!p->equiv); |
| inchash::hash hstate (p->type); |
| hstate.add_ptr (p->min); |
| hstate.add_ptr (p->max); |
| return hstate.end (); |
| } |
| static bool |
| equal (const value_range *a, const value_range *b) |
| { |
| return a->type == b->type && a->min == b->min && a->max == b->max; |
| } |
| static void |
| mark_empty (value_range *&p) |
| { |
| p = NULL; |
| } |
| static bool |
| is_empty (const value_range *p) |
| { |
| return p == NULL; |
| } |
| static bool |
| is_deleted (const value_range *p) |
| { |
| return p == reinterpret_cast<const value_range *> (1); |
| } |
| static void |
| mark_deleted (value_range *&p) |
| { |
| p = reinterpret_cast<value_range *> (1); |
| } |
| }; |
| |
| /* Hash table to avoid repeated allocations of equal value_ranges. */ |
| static GTY ((cache)) hash_table<ipa_vr_ggc_hash_traits> *ipa_vr_hash_table; |
| |
| /* Holders of ipa cgraph hooks: */ |
| static struct cgraph_node_hook_list *function_insertion_hook_holder; |
| |
| /* Description of a reference to an IPA constant. */ |
| struct ipa_cst_ref_desc |
| { |
| /* Edge that corresponds to the statement which took the reference. */ |
| struct cgraph_edge *cs; |
| /* Linked list of duplicates created when call graph edges are cloned. */ |
| struct ipa_cst_ref_desc *next_duplicate; |
| /* Number of references in IPA structures, IPA_UNDESCRIBED_USE if the value |
| is out of control. */ |
| int refcount; |
| }; |
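| |
| /* For instance (an illustrative sketch only; buf, n and my_cmp are made-up |
| names), when the address of a function is passed as a callback: |
| |
| qsort (buf, n, sizeof *buf, my_cmp); |
| |
| the constant jump function describing the last argument gets a reference |
| description, so that the IPA reference to my_cmp taken by the statement can |
| be tracked and its duplicates chained when the call graph edge is cloned. */ |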
| |
| /* Allocation pool for reference descriptions. */ |
| |
| static object_allocator<ipa_cst_ref_desc> ipa_refdesc_pool |
| ("IPA-PROP ref descriptions"); |
| |
| /* Return true if DECL_FUNCTION_SPECIFIC_OPTIMIZATION of the decl associated |
| with NODE should prevent us from analyzing it for the purposes of IPA-CP. */ |
| |
| static bool |
| ipa_func_spec_opts_forbid_analysis_p (struct cgraph_node *node) |
| { |
| tree fs_opts = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (node->decl); |
| |
| if (!fs_opts) |
| return false; |
| return !opt_for_fn (node->decl, optimize) || !opt_for_fn (node->decl, flag_ipa_cp); |
| } |
| |
| /* Return index of the formal whose tree is PTREE among parameter |
| DESCRIPTORS, or -1 if it is not found. */ |
| |
| static int |
| ipa_get_param_decl_index_1 (vec<ipa_param_descriptor, va_gc> *descriptors, |
| tree ptree) |
| { |
| int i, count; |
| |
| count = vec_safe_length (descriptors); |
| for (i = 0; i < count; i++) |
| if ((*descriptors)[i].decl_or_type == ptree) |
| return i; |
| |
| return -1; |
| } |
| |
| /* Return index of the formal whose tree is PTREE in function which corresponds |
| to INFO. */ |
| |
| int |
| ipa_get_param_decl_index (struct ipa_node_params *info, tree ptree) |
| { |
| return ipa_get_param_decl_index_1 (info->descriptors, ptree); |
| } |
| |
| /* Populate the decl_or_type fields of parameter DESCRIPTORS that correspond |
| to NODE. */ |
| |
| static void |
| ipa_populate_param_decls (struct cgraph_node *node, |
| vec<ipa_param_descriptor, va_gc> &descriptors) |
| { |
| tree fndecl; |
| tree fnargs; |
| tree parm; |
| int param_num; |
| |
| fndecl = node->decl; |
| gcc_assert (gimple_has_body_p (fndecl)); |
| fnargs = DECL_ARGUMENTS (fndecl); |
| param_num = 0; |
| for (parm = fnargs; parm; parm = DECL_CHAIN (parm)) |
| { |
| descriptors[param_num].decl_or_type = parm; |
| descriptors[param_num].move_cost = estimate_move_cost (TREE_TYPE (parm), |
| true); |
| param_num++; |
| } |
| } |
| |
| /* Return how many formal parameters FNDECL has. */ |
| |
| int |
| count_formal_params (tree fndecl) |
| { |
| tree parm; |
| int count = 0; |
| gcc_assert (gimple_has_body_p (fndecl)); |
| |
| for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm)) |
| count++; |
| |
| return count; |
| } |
| |
| /* Dump the I-th formal parameter of the function corresponding to INFO to |
| FILE. Note there is no setter function as the descriptor array is built just |
| once using ipa_initialize_node_params. */ |
| |
| void |
| ipa_dump_param (FILE *file, struct ipa_node_params *info, int i) |
| { |
| fprintf (file, "param #%i", i); |
| if ((*info->descriptors)[i].decl_or_type) |
| { |
| fprintf (file, " "); |
| print_generic_expr (file, (*info->descriptors)[i].decl_or_type); |
| } |
| } |
| |
| /* If necessary, allocate vector of parameter descriptors in info of NODE. |
| Return true if they were allocated, false if not. */ |
| |
| static bool |
| ipa_alloc_node_params (struct cgraph_node *node, int param_count) |
| { |
| struct ipa_node_params *info = IPA_NODE_REF (node); |
| |
| if (!info->descriptors && param_count) |
| { |
| vec_safe_grow_cleared (info->descriptors, param_count); |
| return true; |
| } |
| else |
| return false; |
| } |
| |
| /* Initialize the ipa_node_params structure associated with NODE by counting |
| the function parameters, creating the descriptors and populating their |
| param_decls. */ |
| |
| void |
| ipa_initialize_node_params (struct cgraph_node *node) |
| { |
| struct ipa_node_params *info = IPA_NODE_REF (node); |
| |
| if (!info->descriptors |
| && ipa_alloc_node_params (node, count_formal_params (node->decl))) |
| ipa_populate_param_decls (node, *info->descriptors); |
| } |
| |
| /* Print the jump functions associated with call graph edge CS to file F. */ |
| |
| static void |
| ipa_print_node_jump_functions_for_edge (FILE *f, struct cgraph_edge *cs) |
| { |
| int i, count; |
| |
| count = ipa_get_cs_argument_count (IPA_EDGE_REF (cs)); |
| for (i = 0; i < count; i++) |
| { |
| struct ipa_jump_func *jump_func; |
| enum jump_func_type type; |
| |
| jump_func = ipa_get_ith_jump_func (IPA_EDGE_REF (cs), i); |
| type = jump_func->type; |
| |
| fprintf (f, " param %d: ", i); |
| if (type == IPA_JF_UNKNOWN) |
| fprintf (f, "UNKNOWN\n"); |
| else if (type == IPA_JF_CONST) |
| { |
| tree val = jump_func->value.constant.value; |
| fprintf (f, "CONST: "); |
| print_generic_expr (f, val); |
| if (TREE_CODE (val) == ADDR_EXPR |
| && TREE_CODE (TREE_OPERAND (val, 0)) == CONST_DECL) |
| { |
| fprintf (f, " -> "); |
| print_generic_expr (f, DECL_INITIAL (TREE_OPERAND (val, 0))); |
| } |
| fprintf (f, "\n"); |
| } |
| else if (type == IPA_JF_PASS_THROUGH) |
| { |
| fprintf (f, "PASS THROUGH: "); |
| fprintf (f, "%d, op %s", |
| jump_func->value.pass_through.formal_id, |
| get_tree_code_name(jump_func->value.pass_through.operation)); |
| if (jump_func->value.pass_through.operation != NOP_EXPR) |
| { |
| fprintf (f, " "); |
| print_generic_expr (f, jump_func->value.pass_through.operand); |
| } |
| if (jump_func->value.pass_through.agg_preserved) |
| fprintf (f, ", agg_preserved"); |
| fprintf (f, "\n"); |
| } |
| else if (type == IPA_JF_ANCESTOR) |
| { |
| fprintf (f, "ANCESTOR: "); |
| fprintf (f, "%d, offset " HOST_WIDE_INT_PRINT_DEC, |
| jump_func->value.ancestor.formal_id, |
| jump_func->value.ancestor.offset); |
| if (jump_func->value.ancestor.agg_preserved) |
| fprintf (f, ", agg_preserved"); |
| fprintf (f, "\n"); |
| } |
| |
| if (jump_func->agg.items) |
| { |
| struct ipa_agg_jf_item *item; |
| int j; |
| |
| fprintf (f, " Aggregate passed by %s:\n", |
| jump_func->agg.by_ref ? "reference" : "value"); |
| FOR_EACH_VEC_SAFE_ELT (jump_func->agg.items, j, item) |
| { |
| fprintf (f, " offset: " HOST_WIDE_INT_PRINT_DEC ", ", |
| item->offset); |
| if (TYPE_P (item->value)) |
| fprintf (f, "clobber of " HOST_WIDE_INT_PRINT_DEC " bits", |
| tree_to_uhwi (TYPE_SIZE (item->value))); |
| else |
| { |
| fprintf (f, "cst: "); |
| print_generic_expr (f, item->value); |
| } |
| fprintf (f, "\n"); |
| } |
| } |
| |
| struct ipa_polymorphic_call_context *ctx |
| = ipa_get_ith_polymorhic_call_context (IPA_EDGE_REF (cs), i); |
| if (ctx && !ctx->useless_p ()) |
| { |
| fprintf (f, " Context: "); |
| ctx->dump (f); |
| } |
| |
| if (jump_func->bits) |
| { |
| fprintf (f, " value: "); |
| print_hex (jump_func->bits->value, f); |
| fprintf (f, ", mask: "); |
| print_hex (jump_func->bits->mask, f); |
| fprintf (f, "\n"); |
| } |
| else |
| fprintf (f, " Unknown bits\n"); |
| |
| if (jump_func->m_vr) |
| { |
| fprintf (f, " VR "); |
| fprintf (f, "%s[", |
| (jump_func->m_vr->type == VR_ANTI_RANGE) ? "~" : ""); |
| print_decs (wi::to_wide (jump_func->m_vr->min), f); |
| fprintf (f, ", "); |
| print_decs (wi::to_wide (jump_func->m_vr->max), f); |
| fprintf (f, "]\n"); |
| } |
| else |
| fprintf (f, " Unknown VR\n"); |
| } |
| } |
| |
| |
| /* Print the jump functions of all arguments on all call graph edges going from |
| NODE to file F. */ |
| |
| void |
| ipa_print_node_jump_functions (FILE *f, struct cgraph_node *node) |
| { |
| struct cgraph_edge *cs; |
| |
| fprintf (f, " Jump functions of caller %s:\n", node->dump_name ()); |
| for (cs = node->callees; cs; cs = cs->next_callee) |
| { |
| if (!ipa_edge_args_info_available_for_edge_p (cs)) |
| continue; |
| |
| fprintf (f, " callsite %s -> %s : \n", |
| node->dump_name (), |
| cs->callee->dump_name ()); |
| ipa_print_node_jump_functions_for_edge (f, cs); |
| } |
| |
| for (cs = node->indirect_calls; cs; cs = cs->next_callee) |
| { |
| struct cgraph_indirect_call_info *ii; |
| if (!ipa_edge_args_info_available_for_edge_p (cs)) |
| continue; |
| |
| ii = cs->indirect_info; |
| if (ii->agg_contents) |
| fprintf (f, " indirect %s callsite, calling param %i, " |
| "offset " HOST_WIDE_INT_PRINT_DEC ", %s", |
| ii->member_ptr ? "member ptr" : "aggregate", |
| ii->param_index, ii->offset, |
| ii->by_ref ? "by reference" : "by value"); |
| else |
| fprintf (f, " indirect %s callsite, calling param %i, " |
| "offset " HOST_WIDE_INT_PRINT_DEC, |
| ii->polymorphic ? "polymorphic" : "simple", ii->param_index, |
| ii->offset); |
| |
| if (cs->call_stmt) |
| { |
| fprintf (f, ", for stmt "); |
| print_gimple_stmt (f, cs->call_stmt, 0, TDF_SLIM); |
| } |
| else |
| fprintf (f, "\n"); |
| if (ii->polymorphic) |
| ii->context.dump (f); |
| ipa_print_node_jump_functions_for_edge (f, cs); |
| } |
| } |
| |
| /* Print ipa_jump_func data structures of all nodes in the call graph to F. */ |
| |
| void |
| ipa_print_all_jump_functions (FILE *f) |
| { |
| struct cgraph_node *node; |
| |
| fprintf (f, "\nJump functions:\n"); |
| FOR_EACH_FUNCTION (node) |
| { |
| ipa_print_node_jump_functions (f, node); |
| } |
| } |
| |
| /* Set JFUNC to be an unknown jump function that conveys no information. */ |
| |
| static void |
| ipa_set_jf_unknown (struct ipa_jump_func *jfunc) |
| { |
| jfunc->type = IPA_JF_UNKNOWN; |
| jfunc->bits = NULL; |
| jfunc->m_vr = NULL; |
| } |
| |
| /* Set DST to be a copy of another jump function SRC (to be used by the jump |
| function combination code). The two functions will share their rdesc. */ |
| |
| static void |
| ipa_set_jf_cst_copy (struct ipa_jump_func *dst, |
| struct ipa_jump_func *src) |
| |
| { |
| gcc_checking_assert (src->type == IPA_JF_CONST); |
| dst->type = IPA_JF_CONST; |
| dst->value.constant = src->value.constant; |
| } |
| |
| /* Set JFUNC to be a constant jump function. */ |
| |
| static void |
| ipa_set_jf_constant (struct ipa_jump_func *jfunc, tree constant, |
| struct cgraph_edge *cs) |
| { |
| jfunc->type = IPA_JF_CONST; |
| jfunc->value.constant.value = unshare_expr_without_location (constant); |
| |
| if (TREE_CODE (constant) == ADDR_EXPR |
| && TREE_CODE (TREE_OPERAND (constant, 0)) == FUNCTION_DECL) |
| { |
| struct ipa_cst_ref_desc *rdesc; |
| |
| rdesc = ipa_refdesc_pool.allocate (); |
| rdesc->cs = cs; |
| rdesc->next_duplicate = NULL; |
| rdesc->refcount = 1; |
| jfunc->value.constant.rdesc = rdesc; |
| } |
| else |
| jfunc->value.constant.rdesc = NULL; |
| } |
| |
| /* Set JFUNC to be a simple pass-through jump function. */ |
| static void |
| ipa_set_jf_simple_pass_through (struct ipa_jump_func *jfunc, int formal_id, |
| bool agg_preserved) |
| { |
| jfunc->type = IPA_JF_PASS_THROUGH; |
| jfunc->value.pass_through.operand = NULL_TREE; |
| jfunc->value.pass_through.formal_id = formal_id; |
| jfunc->value.pass_through.operation = NOP_EXPR; |
| jfunc->value.pass_through.agg_preserved = agg_preserved; |
| } |
| |
| /* Set JFUNC to be a unary pass through jump function. */ |
| |
| static void |
| ipa_set_jf_unary_pass_through (struct ipa_jump_func *jfunc, int formal_id, |
| enum tree_code operation) |
| { |
| jfunc->type = IPA_JF_PASS_THROUGH; |
| jfunc->value.pass_through.operand = NULL_TREE; |
| jfunc->value.pass_through.formal_id = formal_id; |
| jfunc->value.pass_through.operation = operation; |
| jfunc->value.pass_through.agg_preserved = false; |
| } |
| |
| /* Set JFUNC to be an arithmetic pass through jump function. */ |
| |
| static void |
| ipa_set_jf_arith_pass_through (struct ipa_jump_func *jfunc, int formal_id, |
| tree operand, enum tree_code operation) |
| { |
| jfunc->type = IPA_JF_PASS_THROUGH; |
| jfunc->value.pass_through.operand = unshare_expr_without_location (operand); |
| jfunc->value.pass_through.formal_id = formal_id; |
| jfunc->value.pass_through.operation = operation; |
| jfunc->value.pass_through.agg_preserved = false; |
| } |
| |
| /* Set JFUNC to be an ancestor jump function. */ |
| |
| static void |
| ipa_set_ancestor_jf (struct ipa_jump_func *jfunc, HOST_WIDE_INT offset, |
| int formal_id, bool agg_preserved) |
| { |
| jfunc->type = IPA_JF_ANCESTOR; |
| jfunc->value.ancestor.formal_id = formal_id; |
| jfunc->value.ancestor.offset = offset; |
| jfunc->value.ancestor.agg_preserved = agg_preserved; |
| } |
| |
| /* Get IPA BB information about the given BB. FBI is the context of analysis |
| of this function body. */ |
| |
| static struct ipa_bb_info * |
| ipa_get_bb_info (struct ipa_func_body_info *fbi, basic_block bb) |
| { |
| gcc_checking_assert (fbi); |
| return &fbi->bb_infos[bb->index]; |
| } |
| |
| /* Structure to be passed in between detect_type_change and |
| check_stmt_for_type_change. */ |
| |
| struct prop_type_change_info |
| { |
| /* Offset into the object where there is the virtual method pointer we are |
| looking for. */ |
| HOST_WIDE_INT offset; |
| /* The declaration or SSA_NAME pointer of the base that we are checking for |
| type change. */ |
| tree object; |
| /* Set to true if dynamic type change has been detected. */ |
| bool type_maybe_changed; |
| }; |
| |
| /* Return true if STMT can modify a virtual method table pointer. |
| |
| This function makes special assumptions about both constructors and |
| destructors which are all the functions that are allowed to alter the VMT |
| pointers. It assumes that destructors begin with assignment into all VMT |
| pointers and that constructors essentially look in the following way: |
| |
| 1) The very first thing they do is that they call constructors of ancestor |
| sub-objects that have them. |
| |
| 2) Then the VMT pointers of this object and all its ancestors are set to |
| new values corresponding to the type of the constructor. |
| |
| 3) Only afterwards do other things run, such as constructors of member |
| sub-objects and the code written by the user. Only this part may call |
| virtual functions, directly or indirectly. |
| |
| There is no way to call a constructor of an ancestor sub-object in any |
| other way. |
| |
| This means that we do not have to care whether constructors get the correct |
| type information because they will always change it (in fact, if we define |
| the type to be given by the VMT pointer, it is undefined). |
| |
| The most important fact to derive from the above is that if, for some |
| statement in the section 3, we try to detect whether the dynamic type has |
| changed, we can safely ignore all calls as we examine the function body |
| backwards until we reach statements in section 2 because these calls cannot |
| be ancestor constructors or destructors (if the input is not bogus) and so |
| do not change the dynamic type (this holds true only for automatically |
| allocated objects but at the moment we devirtualize only these). We then |
| must detect that statements in section 2 change the dynamic type and can try |
| to derive the new type. That is enough and we can stop, we will never see |
| the calls into constructors of sub-objects in this code. Therefore we can |
| safely ignore all call statements that we traverse. |
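| |
| For example (an illustrative C++ sketch, not code from this file; the class |
| and member names are made up), the assumption is that a constructor expands |
| in this order: |
| |
| struct A { virtual void f (); }; |
| struct B : A { int x; virtual void f (); }; |
| |
| B::B () : A () // 1) construct ancestor sub-objects first |
| { |
| // 2) the compiler then stores B's VMT pointer into *this |
| x = 0; // 3) only afterwards member ctors and user code run |
| } |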
| */ |
| |
| static bool |
| stmt_may_be_vtbl_ptr_store (gimple *stmt) |
| { |
| if (is_gimple_call (stmt)) |
| return false; |
| if (gimple_clobber_p (stmt)) |
| return false; |
| else if (is_gimple_assign (stmt)) |
| { |
| tree lhs = gimple_assign_lhs (stmt); |
| |
| if (!AGGREGATE_TYPE_P (TREE_TYPE (lhs))) |
| { |
| if (flag_strict_aliasing |
| && !POINTER_TYPE_P (TREE_TYPE (lhs))) |
| return false; |
| |
| if (TREE_CODE (lhs) == COMPONENT_REF |
| && !DECL_VIRTUAL_P (TREE_OPERAND (lhs, 1))) |
| return false; |
| /* In the future we might want to use get_ref_base_and_extent to find |
| if there is a field corresponding to the offset and if so, proceed |
| almost like if it was a component ref. */ |
| } |
| } |
| return true; |
| } |
| |
| /* Callback of walk_aliased_vdefs and a helper function for detect_type_change |
| to check whether a particular statement may modify the virtual table |
| pointer. It stores its result into DATA, which points to a |
| prop_type_change_info structure. */ |
| |
| static bool |
| check_stmt_for_type_change (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data) |
| { |
| gimple *stmt = SSA_NAME_DEF_STMT (vdef); |
| struct prop_type_change_info *tci = (struct prop_type_change_info *) data; |
| |
| if (stmt_may_be_vtbl_ptr_store (stmt)) |
| { |
| tci->type_maybe_changed = true; |
| return true; |
| } |
| else |
| return false; |
| } |
| |
| /* See if ARG is a PARM_DECL describing an instance passed by pointer |
| or reference in FUNCTION. Return true if the dynamic type of the instance |
| may change between the beginning of the function and the invocation of |
| CALL, and false if it is known not to. |
| |
| Generally functions are not allowed to change the type of such instances, |
| but they may call destructors. We assume that methods cannot destroy the |
| THIS pointer. Also, as a special case, constructors and destructors may |
| change the type of the THIS pointer. */ |
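| |
| /* For example (an illustrative C++ sketch only, names made up), in |
| |
| void use (C *c) { c->foo (); } |
| |
| the dynamic type of *c is assumed not to change before the call, whereas in |
| |
| C::C () { this->foo (); } |
| |
| it may, because the constructor itself installs a new VMT pointer. */ |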
| |
| static bool |
| param_type_may_change_p (tree function, tree arg, gimple *call) |
| { |
| /* Pure functions cannot change the dynamic type; |
| that requires writing to memory. */ |
| if (flags_from_decl_or_type (function) & (ECF_PURE | ECF_CONST)) |
| return false; |
| /* We need to check whether we are within an inlined constructor |
| or destructor (ideally we would have a way to check that the |
| inlined cdtor is actually working on ARG, but we have no |
| easy way to tie the two together, so punt on all non-pure cdtors). |
| We may also record the types of cdtors and, once we know the type |
| of the instance, match them. |
| |
| Also, code unification optimizations may merge calls from |
| different blocks, making return values unreliable. So |
| do nothing during late optimization. */ |
| if (DECL_STRUCT_FUNCTION (function)->after_inlining) |
| return true; |
| if (TREE_CODE (arg) == SSA_NAME |
| && SSA_NAME_IS_DEFAULT_DEF (arg) |
| && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL) |
| { |
| /* Normal (non-THIS) argument. */ |
| if ((SSA_NAME_VAR (arg) != DECL_ARGUMENTS (function) |
| || TREE_CODE (TREE_TYPE (function)) != METHOD_TYPE) |
| /* THIS pointer of a method - here we want to watch constructors |
| and destructors as those definitely may change the dynamic |
| type. */ |
| || (TREE_CODE (TREE_TYPE (function)) == METHOD_TYPE |
| && !DECL_CXX_CONSTRUCTOR_P (function) |
| && !DECL_CXX_DESTRUCTOR_P (function) |
| && (SSA_NAME_VAR (arg) == DECL_ARGUMENTS (function)))) |
| { |
| /* Walk the inline stack and watch out for ctors/dtors. */ |
| for (tree block = gimple_block (call); block && TREE_CODE (block) == BLOCK; |
| block = BLOCK_SUPERCONTEXT (block)) |
| if (inlined_polymorphic_ctor_dtor_block_p (block, false)) |
| return true; |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| /* Detect whether the dynamic type of ARG of COMP_TYPE has changed (before |
| callsite CALL) by looking for assignments to its virtual table pointer. If |
| it has, return true and fill in the jump function JFUNC with relevant type |
| information or set it to unknown. ARG is the object itself (not a pointer |
| to it, unless dereferenced). BASE is the base of the memory access as |
| returned by get_ref_base_and_extent, as is the offset. |
| |
| This is a helper function for detect_type_change and detect_type_change_ssa |
| that does the heavy work, which is usually unnecessary. */ |
| |
| static bool |
| detect_type_change_from_memory_writes (tree arg, tree base, tree comp_type, |
| gcall *call, struct ipa_jump_func *jfunc, |
| HOST_WIDE_INT offset) |
| { |
| struct prop_type_change_info tci; |
| ao_ref ao; |
| bool entry_reached = false; |
| |
| gcc_checking_assert (DECL_P (arg) |
| || TREE_CODE (arg) == MEM_REF |
| || handled_component_p (arg)); |
| |
| comp_type = TYPE_MAIN_VARIANT (comp_type); |
| |
| /* Const calls cannot call virtual methods through VMT and so type changes do |
| not matter. */ |
| if (!flag_devirtualize || !gimple_vuse (call) |
| /* Be sure expected_type is polymorphic. */ |
| || !comp_type |
| || TREE_CODE (comp_type) != RECORD_TYPE |
| || !TYPE_BINFO (TYPE_MAIN_VARIANT (comp_type)) |
| || !BINFO_VTABLE (TYPE_BINFO (TYPE_MAIN_VARIANT (comp_type)))) |
| return true; |
| |
| ao_ref_init (&ao, arg); |
| ao.base = base; |
| ao.offset = offset; |
| ao.size = POINTER_SIZE; |
| ao.max_size = ao.size; |
| |
| tci.offset = offset; |
| tci.object = get_base_address (arg); |
| tci.type_maybe_changed = false; |
| |
| walk_aliased_vdefs (&ao, gimple_vuse (call), check_stmt_for_type_change, |
| &tci, NULL, &entry_reached); |
| if (!tci.type_maybe_changed) |
| return false; |
| |
| ipa_set_jf_unknown (jfunc); |
| return true; |
| } |
| |
| /* Detect whether the dynamic type of ARG of COMP_TYPE may have changed. |
| If so, return true and fill in the jump function JFUNC with relevant type |
| information or set it to unknown. ARG is the object itself (not a pointer |
| to it, unless dereferenced). BASE is the base of the memory access as |
| returned by get_ref_base_and_extent, as is the offset. */ |
| |
| static bool |
| detect_type_change (tree arg, tree base, tree comp_type, gcall *call, |
| struct ipa_jump_func *jfunc, HOST_WIDE_INT offset) |
| { |
| if (!flag_devirtualize) |
| return false; |
| |
| if (TREE_CODE (base) == MEM_REF |
| && !param_type_may_change_p (current_function_decl, |
| TREE_OPERAND (base, 0), |
| call)) |
| return false; |
| return detect_type_change_from_memory_writes (arg, base, comp_type, |
| call, jfunc, offset); |
| } |
| |
| /* Like detect_type_change but ARG is supposed to be a non-dereferenced pointer |
| SSA name (its dereference will become the base and the offset is assumed to |
| be zero). */ |
| |
| static bool |
| detect_type_change_ssa (tree arg, tree comp_type, |
| gcall *call, struct ipa_jump_func *jfunc) |
| { |
| gcc_checking_assert (TREE_CODE (arg) == SSA_NAME); |
| if (!flag_devirtualize |
| || !POINTER_TYPE_P (TREE_TYPE (arg))) |
| return false; |
| |
| if (!param_type_may_change_p (current_function_decl, arg, call)) |
| return false; |
| |
| arg = build2 (MEM_REF, ptr_type_node, arg, |
| build_int_cst (ptr_type_node, 0)); |
| |
| return detect_type_change_from_memory_writes (arg, arg, comp_type, |
| call, jfunc, 0); |
| } |
| |
| /* Callback of walk_aliased_vdefs. Records that it has been invoked by |
| setting the boolean variable pointed to by DATA. */ |
| |
| static bool |
| mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED, |
| void *data) |
| { |
| bool *b = (bool *) data; |
| *b = true; |
| return true; |
| } |
| |
| /* Return true if we have already walked so many statements in AA that we |
| should really just start giving up. */ |
| |
| static bool |
| aa_overwalked (struct ipa_func_body_info *fbi) |
| { |
| gcc_checking_assert (fbi); |
| return fbi->aa_walked > (unsigned) PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS); |
| } |
| |
| /* Find the nearest valid aa status for parameter specified by INDEX that |
| dominates BB. */ |
| |
| static struct ipa_param_aa_status * |
| find_dominating_aa_status (struct ipa_func_body_info *fbi, basic_block bb, |
| int index) |
| { |
| while (true) |
| { |
| bb = get_immediate_dominator (CDI_DOMINATORS, bb); |
| if (!bb) |
| return NULL; |
| struct ipa_bb_info *bi = ipa_get_bb_info (fbi, bb); |
| if (!bi->param_aa_statuses.is_empty () |
| && bi->param_aa_statuses[index].valid) |
| return &bi->param_aa_statuses[index]; |
| } |
| } |
| |
| /* Get AA status structure for the given BB and parameter with INDEX. Allocate |
| structures and/or initialize the result with a dominating description as |
| necessary. */ |
| |
| static struct ipa_param_aa_status * |
| parm_bb_aa_status_for_bb (struct ipa_func_body_info *fbi, basic_block bb, |
| int index) |
| { |
| gcc_checking_assert (fbi); |
| struct ipa_bb_info *bi = ipa_get_bb_info (fbi, bb); |
| if (bi->param_aa_statuses.is_empty ()) |
| bi->param_aa_statuses.safe_grow_cleared (fbi->param_count); |
| struct ipa_param_aa_status *paa = &bi->param_aa_statuses[index]; |
| if (!paa->valid) |
| { |
| gcc_checking_assert (!paa->parm_modified |
| && !paa->ref_modified |
| && !paa->pt_modified); |
| struct ipa_param_aa_status *dom_paa; |
| dom_paa = find_dominating_aa_status (fbi, bb, index); |
| if (dom_paa) |
| *paa = *dom_paa; |
| else |
| paa->valid = true; |
| } |
| |
| return paa; |
| } |
| |
| /* Return true if a load from a formal parameter PARM_LOAD is known to retrieve |
| a value known not to be modified in this function before reaching the |
| statement STMT. FBI holds information about the function that we have |
| gathered so far but which does not survive the summary building stage. */ |
| |
| static bool |
| parm_preserved_before_stmt_p (struct ipa_func_body_info *fbi, int index, |
| gimple *stmt, tree parm_load) |
| { |
| struct ipa_param_aa_status *paa; |
| bool modified = false; |
| ao_ref refd; |
| |
| tree base = get_base_address (parm_load); |
| gcc_assert (TREE_CODE (base) == PARM_DECL); |
| if (TREE_READONLY (base)) |
| return true; |
| |
| /* FIXME: FBI can be NULL if we are being called from outside |
| ipa_node_analysis or ipcp_transform_function, which currently happens |
| during inlining analysis. It would be great to extend fbi's lifetime and |
| always have it. Currently, we are just not afraid of too much walking in |
| that case. */ |
| if (fbi) |
| { |
| if (aa_overwalked (fbi)) |
| return false; |
| paa = parm_bb_aa_status_for_bb (fbi, gimple_bb (stmt), index); |
| if (paa->parm_modified) |
| return false; |
| } |
| else |
| paa = NULL; |
| |
| gcc_checking_assert (gimple_vuse (stmt) != NULL_TREE); |
| ao_ref_init (&refd, parm_load); |
| int walked = walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, |
| &modified, NULL); |
| if (fbi) |
| fbi->aa_walked += walked; |
| if (paa && modified) |
| paa->parm_modified = true; |
| return !modified; |
| } |
| |
| /* If STMT is an assignment that loads a value from a parameter declaration, |
| return the index of the parameter in ipa_node_params, provided that the |
| parameter has not been modified. Otherwise return -1. */ |
| |
| static int |
| load_from_unmodified_param (struct ipa_func_body_info *fbi, |
| vec<ipa_param_descriptor, va_gc> *descriptors, |
| gimple *stmt) |
| { |
| int index; |
| tree op1; |
| |
| if (!gimple_assign_single_p (stmt)) |
| return -1; |
| |
| op1 = gimple_assign_rhs1 (stmt); |
| if (TREE_CODE (op1) != PARM_DECL) |
| return -1; |
| |
| index = ipa_get_param_decl_index_1 (descriptors, op1); |
| if (index < 0 |
| || !parm_preserved_before_stmt_p (fbi, index, stmt, op1)) |
| return -1; |
| |
| return index; |
| } |
| |
| /* Return true if memory reference REF (which must be a load through parameter |
| with INDEX) loads data that are known to be unmodified in this function |
| before reaching statement STMT. */ |
| |
| static bool |
| parm_ref_data_preserved_p (struct ipa_func_body_info *fbi, |
| int index, gimple *stmt, tree ref) |
| { |
| struct ipa_param_aa_status *paa; |
| bool modified = false; |
| ao_ref refd; |
| |
| /* FIXME: FBI can be NULL if we are being called from outside |
| ipa_node_analysis or ipcp_transform_function, which currently happens |
| during inlining analysis. It would be great to extend fbi's lifetime and |
| always have it. Currently, we are just not afraid of too much walking in |
| that case. */ |
| if (fbi) |
| { |
| if (aa_overwalked (fbi)) |
| return false; |
| paa = parm_bb_aa_status_for_bb (fbi, gimple_bb (stmt), index); |
| if (paa->ref_modified) |
| return false; |
| } |
| else |
| paa = NULL; |
| |
| gcc_checking_assert (gimple_vuse (stmt)); |
| ao_ref_init (&refd, ref); |
| int walked = walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, |
| &modified, NULL); |
| if (fbi) |
| fbi->aa_walked += walked; |
| if (paa && modified) |
| paa->ref_modified = true; |
| return !modified; |
| } |
| |
| /* Return true if the data pointed to by PARM (which is a parameter with INDEX) |
| is known to be unmodified in this function before reaching call statement |
| CALL into which it is passed. FBI describes the function body. */ |
| |
| static bool |
| parm_ref_data_pass_through_p (struct ipa_func_body_info *fbi, int index, |
| gimple *call, tree parm) |
| { |
| bool modified = false; |
| ao_ref refd; |
| |
| /* It's unnecessary to calculate anything about memory contents for a const |
| function because it is not going to use it. But do not cache the result |
| either. Also, no such calculations for non-pointers. */ |
| if (!gimple_vuse (call) |
| || !POINTER_TYPE_P (TREE_TYPE (parm)) |
| || aa_overwalked (fbi)) |
| return false; |
| |
| struct ipa_param_aa_status *paa = parm_bb_aa_status_for_bb (fbi, |
| gimple_bb (call), |
| index); |
| if (paa->pt_modified) |
| return false; |
| |
| ao_ref_init_from_ptr_and_size (&refd, parm, NULL_TREE); |
| int walked = walk_aliased_vdefs (&refd, gimple_vuse (call), mark_modified, |
| &modified, NULL); |
| fbi->aa_walked += walked; |
| if (modified) |
| paa->pt_modified = true; |
| return !modified; |
| } |
| |
| /* Return true if we can prove that OP is a memory reference loading |
| data from an aggregate passed as a parameter. |
| |
| The function works in two modes. If GUARANTEED_UNMODIFIED is NULL, it returns |
| false if it cannot prove that the value has not been modified before the |
| load in STMT. If GUARANTEED_UNMODIFIED is not NULL, it will return true even |
| if it cannot prove the value has not been modified; in that case it will |
| store false to *GUARANTEED_UNMODIFIED, otherwise it will store true there. |
| |
| FBI and DESCRIPTORS describe parameters of the current function (the |
| former can be NULL), STMT is the load statement. If the function returns true, |
| *INDEX_P, *OFFSET_P and *BY_REF is filled with the parameter index, offset |
| within the aggregate and whether it is a load from a value passed by |
| reference respectively. */ |
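| |
| /* For instance (an illustrative sketch only; foo, S and x are made-up |
| names), in |
| |
| int foo (struct S *p) { return p->x; } |
| |
| the load p->x is recognized as a load from the aggregate pointed to by |
| parameter 0, *BY_REF_P is set to true and *OFFSET_P to the bit offset of |
| field x within struct S. */ |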
| |
| bool |
| ipa_load_from_parm_agg (struct ipa_func_body_info *fbi, |
| vec<ipa_param_descriptor, va_gc> *descriptors, |
| gimple *stmt, tree op, int *index_p, |
| HOST_WIDE_INT *offset_p, HOST_WIDE_INT *size_p, |
| bool *by_ref_p, bool *guaranteed_unmodified) |
| { |
| int index; |
| HOST_WIDE_INT size; |
| bool reverse; |
| tree base = get_ref_base_and_extent_hwi (op, offset_p, &size, &reverse); |
| |
| if (!base) |
| return false; |
| |
| if (DECL_P (base)) |
| { |
| int index = ipa_get_param_decl_index_1 (descriptors, base); |
| if (index >= 0 |
| && parm_preserved_before_stmt_p (fbi, index, stmt, op)) |
| { |
| *index_p = index; |
| *by_ref_p = false; |
| if (size_p) |
| *size_p = size; |
| if (guaranteed_unmodified) |
| *guaranteed_unmodified = true; |
| return true; |
| } |
| return false; |
| } |
| |
| if (TREE_CODE (base) != MEM_REF |
| || TREE_CODE (TREE_OPERAND (base, 0)) != SSA_NAME |
| || !integer_zerop (TREE_OPERAND (base, 1))) |
| return false; |
| |
| if (SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (base, 0))) |
| { |
| tree parm = SSA_NAME_VAR (TREE_OPERAND (base, 0)); |
| index = ipa_get_param_decl_index_1 (descriptors, parm); |
| } |
| else |
| { |
| /* This branch catches situations where a pointer parameter is not a |
| gimple register, for example: |
| |
| void hip7(S*) (struct S * p) |
| { |
| void (*<T2e4>) (struct S *) D.1867; |
| struct S * p.1; |
| |
| <bb 2>: |
| p.1_1 = p; |
| D.1867_2 = p.1_1->f; |
| D.1867_2 (); |
| gdp = &p; |
| */ |
| |
| gimple *def = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0)); |
| index = load_from_unmodified_param (fbi, descriptors, def); |
| } |
| |
| if (index >= 0) |
| { |
| bool data_preserved = parm_ref_data_preserved_p (fbi, index, stmt, op); |
| if (!data_preserved && !guaranteed_unmodified) |
| return false; |
| |
| *index_p = index; |
| *by_ref_p = true; |
| if (size_p) |
| *size_p = size; |
| if (guaranteed_unmodified) |
| *guaranteed_unmodified = data_preserved; |
| return true; |
| } |
| return false; |
| } |
| |
| /* Given that an actual argument is an SSA_NAME (given in NAME) and is a result |
| of an assignment statement STMT, try to determine whether we are actually |
| handling any of the following cases and construct an appropriate jump |
| function into JFUNC if so: |
| |
| 1) The passed value is loaded from a formal parameter which is not a gimple |
| register (most probably because it is addressable, the value has to be |
| scalar) and we can guarantee the value has not changed. This case can |
| therefore be described by a simple pass-through jump function. For example: |
| |
| foo (int a) |
| { |
| int a.0; |
| |
| a.0_2 = a; |
| bar (a.0_2); |
| |
| 2) The passed value can be described by a simple arithmetic pass-through |
| jump function. E.g. |
| |
| foo (int a) |
| { |
| int D.2064; |
| |
| D.2064_4 = a.1(D) + 4; |
| bar (D.2064_4); |
| |
| This case can also occur in combination of the previous one, e.g.: |
| |
| foo (int a, int z) |
| { |
| int a.0; |
| int D.2064; |
| |
| a.0_3 = a; |
| D.2064_4 = a.0_3 + 4; |
| foo (D.2064_4); |
| |
| 3) The passed value is an address of an object within another one (which |
| is also passed by reference). Such situations are described by an ancestor |
| jump function and describe situations such as: |
| |
| B::foo() (struct B * const this) |
| { |
| struct A * D.1845; |
| |
| D.1845_2 = &this_1(D)->D.1748; |
| A::bar (D.1845_2); |
| |
| INFO is the structure describing individual parameters across different |
| stages of IPA optimizations. FBI contains the information that is |
| only needed for intraprocedural analysis. */ |
| |
| static void |
| compute_complex_assign_jump_func (struct ipa_func_body_info *fbi, |
| struct ipa_node_params *info, |
| struct ipa_jump_func *jfunc, |
| gcall *call, gimple *stmt, tree name, |
| tree param_type) |
| { |
| HOST_WIDE_INT offset, size; |
| tree op1, tc_ssa, base, ssa; |
| bool reverse; |
| int index; |
| |
| op1 = gimple_assign_rhs1 (stmt); |
| |
| if (TREE_CODE (op1) == SSA_NAME) |
| { |
| if (SSA_NAME_IS_DEFAULT_DEF (op1)) |
| index = ipa_get_param_decl_index (info, SSA_NAME_VAR (op1)); |
| else |
| index = load_from_unmodified_param (fbi, info->descriptors, |
| SSA_NAME_DEF_STMT (op1)); |
| tc_ssa = op1; |
| } |
| else |
| { |
| index = load_from_unmodified_param (fbi, info->descriptors, stmt); |
| tc_ssa = gimple_assign_lhs (stmt); |
| } |
| |
| if (index >= 0) |
| { |
| switch (gimple_assign_rhs_class (stmt)) |
| { |
| case GIMPLE_BINARY_RHS: |
| { |
| tree op2 = gimple_assign_rhs2 (stmt); |
| if (!is_gimple_ip_invariant (op2) |
| || ((TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) |
| != tcc_comparison) |
| && !useless_type_conversion_p (TREE_TYPE (name), |
| TREE_TYPE (op1)))) |
| return; |
| |
| ipa_set_jf_arith_pass_through (jfunc, index, op2, |
| gimple_assign_rhs_code (stmt)); |
| break; |
| } |
| case GIMPLE_SINGLE_RHS: |
| { |
| bool agg_p = parm_ref_data_pass_through_p (fbi, index, call, |
| tc_ssa); |
| ipa_set_jf_simple_pass_through (jfunc, index, agg_p); |
| break; |
| } |
| case GIMPLE_UNARY_RHS: |
| if (is_gimple_assign (stmt) |
| && gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS |
| && ! CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt))) |
| ipa_set_jf_unary_pass_through (jfunc, index, |
| gimple_assign_rhs_code (stmt)); |
| default:; |
| } |
| return; |
| } |
| |
| if (TREE_CODE (op1) != ADDR_EXPR) |
| return; |
| op1 = TREE_OPERAND (op1, 0); |
| if (TREE_CODE (TREE_TYPE (op1)) != RECORD_TYPE) |
| return; |
| base = get_ref_base_and_extent_hwi (op1, &offset, &size, &reverse); |
| offset_int mem_offset; |
| if (!base |
| || TREE_CODE (base) != MEM_REF |
| || !mem_ref_offset (base).is_constant (&mem_offset)) |
| return; |
| offset += mem_offset.to_short_addr () * BITS_PER_UNIT; |
| ssa = TREE_OPERAND (base, 0); |
| if (TREE_CODE (ssa) != SSA_NAME |
| || !SSA_NAME_IS_DEFAULT_DEF (ssa) |
| || offset < 0) |
| return; |
| |
| /* Dynamic types are changed in constructors and destructors. */ |
| index = ipa_get_param_decl_index (info, SSA_NAME_VAR (ssa)); |
| if (index >= 0 && param_type && POINTER_TYPE_P (param_type)) |
| ipa_set_ancestor_jf (jfunc, offset, index, |
| parm_ref_data_pass_through_p (fbi, index, call, ssa)); |
| } |
| |
| /* Extract the base, offset and MEM_REF expression from a statement ASSIGN if |
| it looks like: |
| |
| iftmp.1_3 = &obj_2(D)->D.1762; |
| |
| The base of the MEM_REF must be a default definition SSA NAME of a |
| parameter. Return NULL_TREE if it looks otherwise. In case of success, the |
| whole MEM_REF expression is returned and the offset calculated from any |
| handled components and the MEM_REF itself is stored into *OFFSET. The whole |
| RHS stripped off the ADDR_EXPR is stored into *OBJ_P. */ |
| |
| static tree |
| get_ancestor_addr_info (gimple *assign, tree *obj_p, HOST_WIDE_INT *offset) |
| { |
| HOST_WIDE_INT size; |
| tree expr, parm, obj; |
| bool reverse; |
| |
| if (!gimple_assign_single_p (assign)) |
| return NULL_TREE; |
| expr = gimple_assign_rhs1 (assign); |
| |
| if (TREE_CODE (expr) != ADDR_EXPR) |
| return NULL_TREE; |
| expr = TREE_OPERAND (expr, 0); |
| obj = expr; |
| expr = get_ref_base_and_extent_hwi (expr, offset, &size, &reverse); |
| |
| offset_int mem_offset; |
| if (!expr |
| || TREE_CODE (expr) != MEM_REF |
| || !mem_ref_offset (expr).is_constant (&mem_offset)) |
| return NULL_TREE; |
| parm = TREE_OPERAND (expr, 0); |
| if (TREE_CODE (parm) != SSA_NAME |
| || !SSA_NAME_IS_DEFAULT_DEF (parm) |
| || TREE_CODE (SSA_NAME_VAR (parm)) != PARM_DECL) |
| return NULL_TREE; |
| |
| *offset += mem_offset.to_short_addr () * BITS_PER_UNIT; |
| *obj_p = obj; |
| return expr; |
| } |
| |
| |
| /* Given that an actual argument is an SSA_NAME that is a result of a phi |
| statement PHI, try to find out whether NAME is in fact a |
| multiple-inheritance typecast from a descendant into an ancestor of a formal |
| parameter and thus can be described by an ancestor jump function and if so, |
| write the appropriate function into JFUNC. |
| |
| Essentially we want to match the following pattern: |
| |
| if (obj_2(D) != 0B) |
| goto <bb 3>; |
| else |
| goto <bb 4>; |
| |
| <bb 3>: |
| iftmp.1_3 = &obj_2(D)->D.1762; |
| |
| <bb 4>: |
| # iftmp.1_1 = PHI <iftmp.1_3(3), 0B(2)> |
| D.1879_6 = middleman_1 (iftmp.1_1, i_5(D)); |
| return D.1879_6; */ |
| |
| static void |
| compute_complex_ancestor_jump_func (struct ipa_func_body_info *fbi, |
| struct ipa_node_params *info, |
| struct ipa_jump_func *jfunc, |
| gcall *call, gphi *phi) |
| { |
| HOST_WIDE_INT offset; |
| gimple *assign, *cond; |
| basic_block phi_bb, assign_bb, cond_bb; |
| tree tmp, parm, expr, obj; |
| int index, i; |
| |
| if (gimple_phi_num_args (phi) != 2) |
| return; |
| |
| if (integer_zerop (PHI_ARG_DEF (phi, 1))) |
| tmp = PHI_ARG_DEF (phi, 0); |
| else if (integer_zerop (PHI_ARG_DEF (phi, 0))) |
| tmp = PHI_ARG_DEF (phi, 1); |
| else |
| return; |
| if (TREE_CODE (tmp) != SSA_NAME |
| || SSA_NAME_IS_DEFAULT_DEF (tmp) |
| || !POINTER_TYPE_P (TREE_TYPE (tmp)) |
| || TREE_CODE (TREE_TYPE (TREE_TYPE (tmp))) != RECORD_TYPE) |
| return; |
| |
| assign = SSA_NAME_DEF_STMT (tmp); |
| assign_bb = gimple_bb (assign); |
| if (!single_pred_p (assign_bb)) |
| return; |
| expr = get_ancestor_addr_info (assign, &obj, &offset); |
| if (!expr) |
| return; |
| parm = TREE_OPERAND (expr, 0); |
| index = ipa_get_param_decl_index (info, SSA_NAME_VAR (parm)); |
| if (index < 0) |
| return; |
| |
| cond_bb = single_pred (assign_bb); |
| cond = last_stmt (cond_bb); |
| if (!cond |
| || gimple_code (cond) != GIMPLE_COND |
| || gimple_cond_code (cond) != NE_EXPR |
| || gimple_cond_lhs (cond) != parm |
| || !integer_zerop (gimple_cond_rhs (cond))) |
| return; |
| |
| phi_bb = gimple_bb (phi); |
| for (i = 0; i < 2; i++) |
| { |
| basic_block pred = EDGE_PRED (phi_bb, i)->src; |
| if (pred != assign_bb && pred != cond_bb) |
| return; |
| } |
| |
| ipa_set_ancestor_jf (jfunc, offset, index, |
| parm_ref_data_pass_through_p (fbi, index, call, parm)); |
| } |
| |
| /* Inspect the given TYPE and return true iff it has the same structure (the |
| same number of fields of the same types) as a C++ member pointer. If |
| METHOD_PTR and DELTA are non-NULL, store the trees representing the |
| corresponding fields there. */ |
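| |
| /* For illustration only: under the common (Itanium) C++ ABI a pointer to |
| member function is laid out roughly as |
| |
| struct |
| { |
| void (*__pfn) (); -- function pointer or vtable index |
| ptrdiff_t __delta; -- adjustment applied to the this pointer |
| }; |
| |
| which is the shape this predicate checks for. */ |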
| |
| static bool |
| type_like_member_ptr_p (tree type, tree *method_ptr, tree *delta) |
| { |
| tree fld; |
| |
| if (TREE_CODE (type) != RECORD_TYPE) |
| return false; |
| |
| fld = TYPE_FIELDS (type); |
| if (!fld || !POINTER_TYPE_P (TREE_TYPE (fld)) |
| || TREE_CODE (TREE_TYPE (TREE_TYPE (fld))) != METHOD_TYPE |
| || !tree_fits_uhwi_p (DECL_FIELD_OFFSET (fld))) |
| return false; |
| |
| if (method_ptr) |
| *method_ptr = fld; |
| |
| fld = DECL_CHAIN (fld); |
| if (!fld || !INTEGRAL_TYPE_P (TREE_TYPE (fld)) |
| || !tree_fits_uhwi_p (DECL_FIELD_OFFSET (fld))) |
| return false; |
| if (delta) |
| *delta = fld; |
| |
| if (DECL_CHAIN (fld)) |
| return false; |
| |
| return true; |
| } |
| |
| /* If RHS is an SSA_NAME and it is defined by a simple copy assign statement, |
| return the rhs of its defining statement. Otherwise return RHS as it |
| is. */ |
| |
| static inline tree |
| get_ssa_def_if_simple_copy (tree rhs) |
| { |
| while (TREE_CODE (rhs) == SSA_NAME && !SSA_NAME_IS_DEFAULT_DEF (rhs)) |
| { |
| gimple *def_stmt = SSA_NAME_DEF_STMT (rhs); |
| |
| if (gimple_assign_single_p (def_stmt)) |
| rhs = gimple_assign_rhs1 (def_stmt); |
| else |
| break; |
| } |
| return rhs; |
| } |
| |
| /* Simple linked list, describing known contents of an aggregate before |
| a call. */ |
| |
| struct ipa_known_agg_contents_list |
| { |
| /* Offset and size of the described part of the aggregate. */ |
| HOST_WIDE_INT offset, size; |
| /* Known constant value or NULL if the contents are known to be unknown. */ |
| tree constant; |
| /* Pointer to the next structure in the list. */ |
| struct ipa_known_agg_contents_list *next; |
| }; |
| |
| /* Find the proper place in the linked list of ipa_known_agg_contents_list |
| structures in which to put a new one with the given LHS_OFFSET and LHS_SIZE, |
| unless there is a partial overlap, in which case return NULL, or such an |
| element is already there, in which case set *ALREADY_THERE to true. */ |
| |
| static struct ipa_known_agg_contents_list ** |
| get_place_in_agg_contents_list (struct ipa_known_agg_contents_list **list, |
| HOST_WIDE_INT lhs_offset, |
| HOST_WIDE_INT lhs_size, |
| bool *already_there) |
| { |
| struct ipa_known_agg_contents_list **p = list; |
| while (*p && (*p)->offset < lhs_offset) |
| { |
| if ((*p)->offset + (*p)->size > lhs_offset) |
| return NULL; |
| p = &(*p)->next; |
| } |
| |
| if (*p && (*p)->offset < lhs_offset + lhs_size) |
| { |
| if ((*p)->offset == lhs_offset && (*p)->size == lhs_size) |
| /* We already know this value is subsequently overwritten with |
| something else. */ |
| *already_there = true; |
| else |
| /* Otherwise this is a partial overlap which we cannot |
| represent. */ |
| return NULL; |
| } |
| return p; |
| } |
| |
| /* Build aggregate jump function from LIST, assuming there are exactly |
| CONST_COUNT constant entries there and that the offset of the passed argument |
| is ARG_OFFSET and store it into JFUNC. */ |
| |
| static void |
| build_agg_jump_func_from_list (struct ipa_known_agg_contents_list *list, |
| int const_count, HOST_WIDE_INT arg_offset, |
| struct ipa_jump_func *jfunc) |
| { |
| vec_alloc (jfunc->agg.items, const_count); |
| while (list) |
| { |
| if (list->constant) |
| { |
| struct ipa_agg_jf_item item; |
| item.offset = list->offset - arg_offset; |
| gcc_assert ((item.offset % BITS_PER_UNIT) == 0); |
| item.value = unshare_expr_without_location (list->constant); |
| jfunc->agg.items->quick_push (item); |
| } |
| list = list->next; |
| } |
| } |
| |
| /* Traverse statements from CALL backwards, scanning whether an aggregate given |
| in ARG is filled in with constant values. ARG can either be an aggregate |
| expression or a pointer to an aggregate. ARG_TYPE is the type of the |
| aggregate. JFUNC is the jump function into which the constants are |
| subsequently stored. */ |
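| |
| /* For example (an illustrative sketch; S, foo and the field names are made |
| up), given |
| |
| struct S s; |
| s.a = 1; |
| s.b = 64; |
| foo (&s); |
| |
| the aggregate jump function of the first argument records that the memory |
| it points to contains the constant 1 at the offset of field a and the |
| constant 64 at the offset of field b. */ |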
| |
| static void |
| determine_locally_known_aggregate_parts (gcall *call, tree arg, |
| tree arg_type, |
| struct ipa_jump_func *jfunc) |
| { |
| struct ipa_known_agg_contents_list *list = NULL; |
| int item_count = 0, const_count = 0; |
| HOST_WIDE_INT arg_offset, arg_size; |
| gimple_stmt_iterator gsi; |
| tree arg_base; |
| bool check_ref, by_ref; |
| ao_ref r; |
| |
| if (PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS) == 0) |
| return; |
| |
| /* The function operates in three stages. First, we prepare check_ref, r, |
| arg_base and arg_offset based on what is actually passed as an actual |
| argument. */ |
| |
| if (POINTER_TYPE_P (arg_type)) |
| { |
| by_ref = true; |
| if (TREE_CODE (arg) == SSA_NAME) |
| { |
| tree type_size; |
| if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (arg_type)))) |
| return; |
| check_ref = true; |
| arg_base = arg; |
| arg_offset = 0; |
| type_size = TYPE_SIZE (TREE_TYPE (arg_type)); |
| arg_size = tree_to_uhwi (type_size); |
| ao_ref_init_from_ptr_and_size (&r, arg_base, NULL_TREE); |
| } |
| else if (TREE_CODE (arg) == ADDR_EXPR) |
| { |
| bool reverse; |
| |
| arg = TREE_OPERAND (arg, 0); |
| arg_base = get_ref_base_and_extent_hwi (arg, &arg_offset, |
| &arg_size, &reverse); |
| if (!arg_base) |
| return; |
| if (DECL_P (arg_base)) |
| { |
| check_ref = false; |
| ao_ref_init (&r, arg_base); |
| } |
| else |
| return; |
| } |
| else |
| return; |
| } |
| else |
| { |
| bool reverse; |
| |
| gcc_checking_assert (AGGREGATE_TYPE_P (TREE_TYPE (arg))); |
| |
| by_ref = false; |
| check_ref = false; |
| arg_base = get_ref_base_and_extent_hwi (arg, &arg_offset, |
| &arg_size, &reverse); |
| if (!arg_base) |
| return; |
| |
| ao_ref_init (&r, arg); |
| } |
| |
| /* The second stage walks back the BB, looks at individual statements and, as |
| long as it is confident of how the statements affect contents of the |
| aggregates, builds a sorted linked list of ipa_known_agg_contents_list |
| structures describing it. */ |
| gsi = gsi_for_stmt (call); |
| gsi_prev (&gsi); |
| for (; !gsi_end_p (gsi); gsi_prev (&gsi)) |
| { |
| struct ipa_known_agg_contents_list *n, **p; |
| gimple *stmt = gsi_stmt (gsi); |
| HOST_WIDE_INT lhs_offset, lhs_size; |
| tree lhs, rhs, lhs_base; |
| bool reverse; |
| |
| if (!stmt_may_clobber_ref_p_1 (stmt, &r)) |
| continue; |
| if (!gimple_assign_single_p (stmt)) |
| break; |
| |
| lhs = gimple_assign_lhs (stmt); |
| rhs = gimple_assign_rhs1 (stmt); |
| if (!is_gimple_reg_type (TREE_TYPE (rhs)) |
| || TREE_CODE (lhs) == BIT_FIELD_REF |
| || contains_bitfld_component_ref_p (lhs)) |
| break; |
| |
| lhs_base = get_ref_base_and_extent_hwi (lhs, &lhs_offset, |
| &lhs_size, &reverse); |
| if (!lhs_base) |
| break; |
| |
| if (check_ref) |
| { |
| if (TREE_CODE (lhs_base) != MEM_REF |
| || TREE_OPERAND (lhs_base, 0) != arg_base |
| || !integer_zerop (TREE_OPERAND (lhs_base, 1))) |
| break; |
| } |
| else if (lhs_base != arg_base) |
| { |
| if (DECL_P (lhs_base)) |
| continue; |
| else |
| break; |
| } |
| |
| bool already_there = false; |
| p = get_place_in_agg_contents_list (&list, lhs_offset, lhs_size, |
| &already_there); |
| if (!p) |
| break; |
| if (already_there) |
| continue; |
| |
| rhs = get_ssa_def_if_simple_copy (rhs); |
| n = XALLOCA (struct ipa_known_agg_contents_list); |
| n->size = lhs_size; |
| n->offset = lhs_offset; |
| if (is_gimple_ip_invariant (rhs)) |
| { |
| n->constant = rhs; |
| const_count++; |
| } |
| else |
| n->constant = NULL_TREE; |
| n->next = *p; |
| *p = n; |
| |
| item_count++; |
| if (const_count == PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS) |
| || item_count == 2 * PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS)) |
| break; |
| } |
| |
| /* The third stage just goes over the list and creates an appropriate vector |
| of ipa_agg_jf_item structures out of it, of course only if there are |
| any known constants to begin with. */ |
| |
| if (const_count) |
| { |
| jfunc->agg.by_ref = by_ref; |
| build_agg_jump_func_from_list (list, const_count, arg_offset, jfunc); |
| } |
| } |
| |
| /* Return the Ith param type of callee associated with call graph |
| edge E. */ |
| |
| tree |
| ipa_get_callee_param_type (struct cgraph_edge *e, int i) |
| { |
| int n; |
| tree type = (e->callee |
| ? TREE_TYPE (e->callee->decl) |
| : gimple_call_fntype (e->call_stmt)); |
| tree t = TYPE_ARG_TYPES (type); |
| |
| for (n = 0; n < i; n++) |
| { |
| if (!t) |
| break; |
| t = TREE_CHAIN (t); |
| } |
| if (t) |
| return TREE_VALUE (t); |
| if (!e->callee) |
| return NULL; |
| t = DECL_ARGUMENTS (e->callee->decl); |
| for (n = 0; n < i; n++) |
| { |
| if (!t) |
| return NULL; |
| t = TREE_CHAIN (t); |
| } |
| if (t) |
| return TREE_TYPE (t); |
| return NULL; |
| } |
| |
| /* Return ipa_bits with VALUE and MASK values, which can be either a newly |
| allocated structure or a previously existing one shared with other jump |
| functions and/or transformation summaries. */ |
| |
| ipa_bits * |
| ipa_get_ipa_bits_for_value (const widest_int &value, const widest_int &mask) |
| { |
| ipa_bits tmp; |
| tmp.value = value; |
| tmp.mask = mask; |
| |
| ipa_bits **slot = ipa_bits_hash_table->find_slot (&tmp, INSERT); |
| if (*slot) |
| return *slot; |
| |
| ipa_bits *res = ggc_alloc<ipa_bits> (); |
| res->value = value; |
| res->mask = mask; |
| *slot = res; |
| |
| return res; |
| } |
| |
| /* Assign to JF a pointer to ipa_bits structure with VALUE and MASK. Use hash |
| table in order to avoid creating multiple same ipa_bits structures. */ |
| |
| static void |
| ipa_set_jfunc_bits (ipa_jump_func *jf, const widest_int &value, |
| const widest_int &mask) |
| { |
| jf->bits = ipa_get_ipa_bits_for_value (value, mask); |
| } |
| |
| /* Return a pointer to a value_range just like *TMP, but either find it in |
| ipa_vr_hash_table or allocate it in GC memory. TMP->equiv must be NULL. */ |
| |
| static value_range * |
| ipa_get_value_range (value_range *tmp) |
| { |
| value_range **slot = ipa_vr_hash_table->find_slot (tmp, INSERT); |
| if (*slot) |
| return *slot; |
| |
| value_range *vr = ggc_alloc<value_range> (); |
| *vr = *tmp; |
| *slot = vr; |
| |
| return vr; |
| } |
| |
| /* Return a pointer to a value range consisting of TYPE, MIN, MAX and an empty |
| equiv set. Use hash table in order to avoid creating multiple same copies of |
| value_ranges. */ |
| |
| static value_range * |
| ipa_get_value_range (enum value_range_type type, tree min, tree max) |
| { |
| value_range tmp; |
| tmp.type = type; |
| tmp.min = min; |
| tmp.max = max; |
| tmp.equiv = NULL; |
| return ipa_get_value_range (&tmp); |
| } |
| |
| /* Assign to JF a pointer to a value_range structure with TYPE, MIN and MAX and |
| a NULL equiv bitmap. Use hash table in order to avoid creating multiple |
| same value_range structures. */ |
| |
| static void |
| ipa_set_jfunc_vr (ipa_jump_func *jf, enum value_range_type type, |
| tree min, tree max) |
| { |
| jf->m_vr = ipa_get_value_range (type, min, max); |
| } |
| |
| /* Assign to JF a pointer to a value_range just like *TMP but either fetch a |
| copy from ipa_vr_hash_table or allocate a new one in GC memory. */ |
| |
| static void |
| ipa_set_jfunc_vr (ipa_jump_func *jf, value_range *tmp) |
| { |
| jf->m_vr = ipa_get_value_range (tmp); |
| } |
| |
| /* Compute jump functions for all arguments of callsite CS and insert the |
| information in the jump_functions array in the ipa_edge_args corresponding |
| to this callsite. */ |
| |
| static void |
| ipa_compute_jump_functions_for_edge (struct ipa_func_body_info *fbi, |
| struct cgraph_edge *cs) |
| { |
| struct ipa_node_params *info = IPA_NODE_REF (cs->caller); |
| struct ipa_edge_args *args = IPA_EDGE_REF (cs); |
| gcall *call = cs->call_stmt; |
| int n, arg_num = gimple_call_num_args (call); |
| bool useful_context = false; |
| |
| if (arg_num == 0 || args->jump_functions) |
| return; |
| vec_safe_grow_cleared (args->jump_functions, arg_num); |
| if (flag_devirtualize) |
| vec_safe_grow_cleared (args->polymorphic_call_contexts, arg_num); |
| |
| if (gimple_call_internal_p (call)) |
| return; |
| if (ipa_func_spec_opts_forbid_analysis_p (cs->caller)) |
| return; |
| |
| for (n = 0; n < arg_num; n++) |
| { |
| struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, n); |
| tree arg = gimple_call_arg (call, n); |
| tree param_type = ipa_get_callee_param_type (cs, n); |
| if (flag_devirtualize && POINTER_TYPE_P (TREE_TYPE (arg))) |
| { |
| tree instance; |
| struct ipa_polymorphic_call_context context (cs->caller->decl, |
| arg, cs->call_stmt, |
| &instance); |
| context.get_dynamic_type (instance, arg, NULL, cs->call_stmt); |
| *ipa_get_ith_polymorhic_call_context (args, n) = context; |
| if (!context.useless_p ()) |
| useful_context = true; |
| } |
| |
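| /* Record a value range for the argument: a pointer gets an anti-range around |
| zero when its address is known to be nonnull, an integral SSA name gets its |
| recorded value range converted to the type of the callee parameter. */ |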
| if (POINTER_TYPE_P (TREE_TYPE (arg))) |
| { |
| bool addr_nonzero = false; |
| bool strict_overflow = false; |
| |
| if (TREE_CODE (arg) == SSA_NAME |
| && param_type |
| && get_ptr_nonnull (arg)) |
| addr_nonzero = true; |
| else if (tree_single_nonzero_warnv_p (arg, &strict_overflow)) |
| addr_nonzero = true; |
| |
| if (addr_nonzero) |
| { |
| tree z = build_int_cst (TREE_TYPE (arg), 0); |
| ipa_set_jfunc_vr (jfunc, VR_ANTI_RANGE, z, z); |
| } |
| else |
| gcc_assert (!jfunc->m_vr); |
| } |
| else |
| { |
| wide_int min, max; |
| value_range_type type; |
| if (TREE_CODE (arg) == SSA_NAME |
| && param_type |
| && (type = get_range_info (arg, &min, &max)) |
| && (type == VR_RANGE || type == VR_ANTI_RANGE)) |
| { |
| value_range tmpvr, resvr; |
| |
| tmpvr.type = type; |
| tmpvr.min = wide_int_to_tree (TREE_TYPE (arg), min); |
| tmpvr.max = wide_int_to_tree (TREE_TYPE (arg), max); |
| tmpvr.equiv = NULL; |
| memset (&resvr, 0, sizeof (resvr)); |
| extract_range_from_unary_expr (&resvr, NOP_EXPR, param_type, |
| &tmpvr, TREE_TYPE (arg)); |
| if (resvr.type == VR_RANGE || resvr.type == VR_ANTI_RANGE) |
| ipa_set_jfunc_vr (jfunc, &resvr); |
| else |
| gcc_assert (!jfunc->m_vr); |
| } |
| else |
| gcc_assert (!jfunc->m_vr); |
| } |
| |
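| /* Record known bits: the nonzero-bits mask recorded for an integral SSA name, |
| the exact value for an integer constant, and a value/mask pair derived from |
| the known alignment for a pointer. */ |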
| if (INTEGRAL_TYPE_P (TREE_TYPE (arg)) |
| && (TREE_CODE (arg) == SSA_NAME || TREE_CODE (arg) == INTEGER_CST)) |
| { |
| if (TREE_CODE (arg) == SSA_NAME) |
| ipa_set_jfunc_bits (jfunc, 0, |
| widest_int::from (get_nonzero_bits (arg), |
| TYPE_SIGN (TREE_TYPE (arg)))); |
| else |
| ipa_set_jfunc_bits (jfunc, wi::to_widest (arg), 0); |
| } |
| else if (POINTER_TYPE_P (TREE_TYPE (arg))) |
| { |
| unsigned HOST_WIDE_INT bitpos; |
| unsigned align; |
| |
| get_pointer_alignment_1 (arg, &align, &bitpos); |
| widest_int mask = wi::bit_and_not |
| (wi::mask<widest_int> (TYPE_PRECISION (TREE_TYPE (arg)), false), |
| align / BITS_PER_UNIT - 1); |
| widest_int value = bitpos / BITS_PER_UNIT; |
| ipa_set_jfunc_bits (jfunc, value, mask); |
| } |
| else |
| gcc_assert (!jfunc->bits); |
| |
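| /* Now build the jump function itself: interprocedural invariants and read-only |
| global variables become constants, aggregates passed by value may become |
| simple pass-throughs, and SSA names are analyzed further for pass-through, |
| arithmetic and ancestor functions. */ |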
| if (is_gimple_ip_invariant (arg) |
| || (VAR_P (arg) |
| && is_global_var (arg) |
| && TREE_READONLY (arg))) |
| ipa_set_jf_constant (jfunc, arg, cs); |
| else if (!is_gimple_reg_type (TREE_TYPE (arg)) |
| && TREE_CODE (arg) == PARM_DECL) |
| { |
| int index = ipa_get_param_decl_index (info, arg); |
| |
| gcc_assert (index >= 0); |
| /* Aggregate passed by value, check for pass-through; otherwise we will |
| attempt to fill in the aggregate contents later in this loop. */ |
| if (parm_preserved_before_stmt_p (fbi, index, call, arg)) |
| { |
| ipa_set_jf_simple_pass_through (jfunc, index, false); |
| continue; |
| } |
| } |
| else if (TREE_CODE (arg) == SSA_NAME) |
| { |
| if (SSA_NAME_IS_DEFAULT_DEF (arg)) |
| { |
| int index = ipa_get_param_decl_index (info, SSA_NAME_VAR (arg)); |
| if (index >= 0) |
| { |
| bool agg_p; |
| agg_p = parm_ref_data_pass_through_p (fbi, index, call, arg); |
| ipa_set_jf_simple_pass_through (jfunc, index, agg_p); |
| } |
| } |
| else |
| { |
| gimple *stmt = SSA_NAME_DEF_STMT (arg); |
| if (is_gimple_assign (stmt)) |
| compute_complex_assign_jump_func (fbi, info, jfunc, |
| call, stmt, arg, param_type); |
| else if (gimple_code (stmt) == GIMPLE_PHI) |
| compute_complex_ancestor_jump_func (fbi, info, jfunc, |
| call, |
| as_a <gphi *> (stmt)); |
| } |
| } |
| |
| /* If ARG is a pointer, we cannot use its type to determine the type of the |
| aggregate passed (because type conversions are ignored in gimple). Usually we |
| can safely get the type from the function declaration, but in case of K&R |
| prototypes or variadic functions we can try our luck with the type of the |
| pointer passed. |
| TODO: Since we look for actual initialization of the memory object, we might |
| do better to work out the type based on the memory stores we find. */ |
| if (!param_type) |
| param_type = TREE_TYPE (arg); |
| |
| if ((jfunc->type != IPA_JF_PASS_THROUGH |
| || !ipa_get_jf_pass_through_agg_preserved (jfunc)) |
| && (jfunc->type != IPA_JF_ANCESTOR |
| || !ipa_get_jf_ancestor_agg_preserved (jfunc)) |
| && (AGGREGATE_TYPE_P (TREE_TYPE (arg)) |
| || POINTER_TYPE_P (param_type))) |
| determine_locally_known_aggregate_parts (call, arg, param_type, jfunc); |
| } |
| if (!useful_context) |
| vec_free (args->polymorphic_call_contexts); |
| } |
| |
| /* Compute jump functions for all edges - both direct and indirect - outgoing |
| from BB. */ |
| |
| static void |
| ipa_compute_jump_functions_for_bb (struct ipa_func_body_info *fbi, basic_block bb) |
| { |
| struct ipa_bb_info *bi = ipa_get_bb_info (fbi, bb); |
| int i; |
| struct cgraph_edge *cs; |
| |
| FOR_EACH_VEC_ELT_REVERSE (bi->cg_edges, i, cs) |
| { |
| struct cgraph_node *callee = cs->callee; |
| |
| if (callee) |
| { |
| callee->ultimate_alias_target (); |
| /* We do not need to bother analyzing calls to unknown functions |
| unless they may become known during lto/whopr. */ |
| if (!callee->definition && !flag_lto) |
| continue; |
| } |
| ipa_compute_jump_functions_for_edge (fbi, cs); |
| } |
| } |
| |
| /* If STMT looks like a statement loading a value from a member pointer formal |
| parameter, return that parameter and store the offset of the field to |
| *OFFSET_P, if it is non-NULL. Otherwise return NULL (but *OFFSET_P still |
| might be clobbered). If USE_DELTA, then we look for a use of the delta |
| field rather than the pfn. */ |
| |
| static tree |
| ipa_get_stmt_member_ptr_load_param (gimple *stmt, bool use_delta, |
| HOST_WIDE_INT *offset_p) |
| { |
| tree rhs, rec, ref_field, ref_offset, fld, ptr_field, delta_field; |
| |
| if (!gimple_assign_single_p (stmt)) |
| return NULL_TREE; |
| |
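| /* The right-hand side is either a COMPONENT_REF selecting a field of the |
| member pointer (f.__pfn) or a MEM_REF at a constant offset from it |
| (MEM[(struct *)&f + 4B]); strip the field reference first, if present. */ |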
| rhs = gimple_assign_rhs1 (stmt); |
| if (TREE_CODE (rhs) == COMPONENT_REF) |
| { |
| ref_field = TREE_OPERAND (rhs, 1); |
| rhs = TREE_OPERAND (rhs, 0); |
| } |
| else |
| ref_field = NULL_TREE; |
| if (TREE_CODE (rhs) != MEM_REF) |
| return NULL_TREE; |
| rec = TREE_OPERAND (rhs, 0); |
| if (TREE_CODE (rec) != ADDR_EXPR) |
| return NULL_TREE; |
| rec = TREE_OPERAND (rec, 0); |
| if (TREE_CODE (rec) != PARM_DECL |
| || !type_like_member_ptr_p (TREE_TYPE (rec), &ptr_field, &delta_field)) |
| return NULL_TREE; |
| ref_offset = TREE_OPERAND (rhs, 1); |
| |
| if (use_delta) |
| fld = delta_field; |
| else |
| fld = ptr_field; |
| if (offset_p) |
| *offset_p = int_bit_position (fld); |
| |
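| /* With a COMPONENT_REF, the MEM_REF offset must be zero and the referenced |
| field must be the one we are looking for; with a bare MEM_REF, the offset |
| itself must match the byte position of the field. */ |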
| if (ref_field) |
| { |
| if (integer_nonzerop (ref_offset)) |
| return NULL_TREE; |
| return ref_field == fld ? rec : NULL_TREE; |
| } |
| else |
| return tree_int_cst_equal (byte_position (fld), ref_offset) ? rec |
| : NULL_TREE; |
| } |
| |
| /* Returns true iff T is an SSA_NAME defined by a statement. */ |
| |
| static bool |
| ipa_is_ssa_with_stmt_def (tree t) |
| { |
| if (TREE_CODE (t) == SSA_NAME |
| && !SSA_NAME_IS_DEFAULT_DEF (t)) |
| return true; |
| else |
| return false; |
| } |
| |
| /* Find the indirect call graph edge corresponding to STMT and mark it as a |
| call to a parameter number PARAM_INDEX. NODE is the caller. Return the |
| indirect call graph edge. */ |
| |
| static struct cgraph_edge * |
| ipa_note_param_call (struct cgraph_node *node, int param_index, |
| gcall *stmt) |
| { |
| struct cgraph_edge *cs; |
| |
| cs = node->get_edge (stmt); |
| cs->indirect_info->param_index = param_index; |
| cs->indirect_info->agg_contents = 0; |
| cs->indirect_info->member_ptr = 0; |
| cs->indirect_info->guaranteed_unmodified = 0; |
| return cs; |
| } |
| |
| /* Analyze the CALL and examine uses of formal parameters of the caller |
| FBI->node (described by FBI->info). Currently it checks |
| whether the call calls a pointer that is a formal parameter and if so, the |
| parameter is marked with the called flag and an indirect call graph edge |
| describing the call is created. This is very simple for ordinary pointers |
| represented in SSA but not-so-nice when it comes to member pointers. The |
| ugly part of this function does nothing more than trying to match the |
| pattern of such a call. An example of such a pattern is the gimple dump |
| below, the call is on the last line: |
| |
| <bb 2>: |
| f$__delta_5 = f.__delta; |
| f$__pfn_24 = f.__pfn; |
| |
| or |
| <bb 2>: |
| f$__delta_5 = MEM[(struct *)&f]; |
| f$__pfn_24 = MEM[(struct *)&f + 4B]; |
| |
| and a few lines below: |
| |
| <bb 5> |
| D.2496_3 = (int) f$__pfn_24; |
| D.2497_4 = D.2496_3 & 1; |
| if (D.2497_4 != 0) |
| goto <bb 3>; |
| else |
| goto <bb 4>; |
| |
| <bb 6>: |
| D.2500_7 = (unsigned int) f$__delta_5; |
| D.2501_8 = &S + D.2500_7; |
| D.2502_9 = (int (*__vtbl_ptr_type) (void) * *) D.2501_8; |
| D.2503_10 = *D.2502_9; |
| D.2504_12 = f$__pfn_24 + -1; |
| D.2505_13 = (unsigned int) D.2504_12; |
| D.2506_14 = D.2503_10 + D.2505_13; |
| D.2507_15 = *D.2506_14; |
| iftmp.11_16 = (String:: *) D.2507_15; |
| |
| <bb 7>: |
| # iftmp.11_1 = PHI <iftmp.11_16(3), f$__pfn_24(2)> |
| D.2500_19 = (unsigned int) f$__delta_5; |
| D.2508_20 = &S + D.2500_19; |
| D.2493_21 = iftmp.11_1 (D.2508_20, 4); |
| |
| Such patterns are results of simple calls to a member pointer: |
| |
| int doprinting (int (MyString::* f)(int) const) |
| { |
| MyString S ("somestring"); |
| |
| return (S.*f)(4); |
| } |
| |
| The function also looks for called pointers loaded from aggregates |
| passed by value or reference. */ |
| |
| static void |
| ipa_analyze_indirect_call_uses (struct ipa_func_body_info *fbi, gcall *call, |
| tree target) |
| { |
| struct ipa_node_params *info = fbi->info; |
| HOST_WIDE_INT offset; |
| bool by_ref; |
| |
| if (SSA_NAME_IS_DEFAULT_DEF (target)) |
| { |
| tree var = SSA_NAME_VAR (target); |
| int index = ipa_get_param_decl_index (info, var); |
| if (index >= 0) |
| ipa_note_param_call (fbi->node, index, call); |
| return; |
| } |
| |
| int index; |
| gimple *def = SSA_NAME_DEF_STMT (target); |
| bool guaranteed_unmodified; |
| if (gimple_assign_single_p (def) |
| && ipa_load_from_parm_agg (fbi, info->descriptors, def, |
| gimple_assign_rhs1 (def), &index, &offset, |
| NULL, &by_ref, &guaranteed_unmodified)) |
| { |
| struct cgraph_edge *cs = ipa_note_param_call (fbi->node, index, call); |
| cs->indirect_info->offset = offset; |
| cs->indirect_info->agg_contents = 1; |
| cs->indirect_info->by_ref = by_ref; |
| cs->indirect_info->guaranteed_unmodified = guaranteed_unmodified; |
| return; |
| } |
| |
| /* Now we need to try to match the complex pattern of calling a member |
| pointer. */ |
| if (gimple_code (def) != GIMPLE_PHI |
| || gimple_phi_num_args (def) != 2 |
| || !POINTER_TYPE_P (TREE_TYPE (target)) |
| || TREE_CODE (TREE_TYPE (TREE_TYPE (target))) != METHOD_TYPE) |
| return; |
| |
| /* First, we need to check whether one of these is a load from a member |
| pointer that is a parameter to this function. */ |
| tree n1 = PHI_ARG_DEF (def, 0); |
| tree n2 = PHI_ARG_DEF (def, 1); |
| if (!ipa_is_ssa_with_stmt_def (n1) || !ipa_is_ssa_with_stmt_def (n2)) |
| return; |
| gimple *d1 = SSA_NAME_DEF_STMT (n1); |
| gimple *d2 = SSA_NAME_DEF_STMT (n2); |
| |
| tree rec; |
| basic_block bb, virt_bb; |
| basic_block join = gimple_bb (def); |
| if ((rec = ipa_get_stmt_member_ptr_load_param (d1, false, &offset))) |
| { |
| if (ipa_get_stmt_member_ptr_load_param (d2, false, NULL)) |
| return; |
| |
| bb = EDGE_PRED (join, 0)->src; |
| virt_bb = gimple_bb (d2); |
| } |
| else if ((rec = ipa_get_stmt_member_ptr_load_param (d2, false, &offset))) |
| { |
| bb = EDGE_PRED (join, 1)->src; |
| virt_bb = gimple_bb (d1); |
| } |
| else |
| return; |
| |
| /* Second, we need to check that the basic blocks are laid out in the way |
| corresponding to the pattern. */ |
| |
| if (!single_pred_p (virt_bb) || !single_succ_p (virt_bb) |
| || single_pred (virt_bb) != bb |
| || single_succ (virt_bb) != join) |
| return; |
| |
| /* Third, let's see that the branching is done depending on the least |
| significant bit of the pfn. */ |
| |
| gimple *branch = last_stmt (bb); |
| if (!branch || gimple_code (branch) != GIMPLE_COND) |
| return; |
| |
| if ((gimple_cond_code (branch) != NE_EXPR |
| && gimple_cond_code (branch) != EQ_EXPR) |
| || !integer_zerop (gimple_cond_rhs (branch))) |
| return; |
| |
| tree cond = gimple_cond_lhs (branch); |
| if (!ipa_is_ssa_with_stmt_def (cond)) |
| return; |
| |
| def = SSA_NAME_DEF_STMT (cond); |
| if (!is_gimple_assign (def) |
| || gimple_assign_rhs_code (def) != BIT_AND_EXPR |
| || !integer_onep (gimple_assign_rhs2 (def))) |
| return; |
| |
| cond = gimple_assign_rhs1 (def); |
| if (!ipa_is_ssa_with_stmt_def (cond)) |
| return; |
| |
| def = SSA_NAME_DEF_STMT (cond); |
| |
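| /* The pfn may have been converted to an integer type before being tested; |
| look through such a conversion. */ |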
| if (is_gimple_assign (def) |
| && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def))) |
| { |
| cond = gimple_assign_rhs1 (def); |
| if (!ipa_is_ssa_with_stmt_def (cond)) |
| return; |
| def = SSA_NAME_DEF_STMT (cond); |
| } |
| |
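| /* Check that the tested value is loaded from the same member pointer |
| parameter, from the field that carries the virtual bit on this target. */ |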
| tree rec2; |
| rec2 = ipa_get_stmt_member_ptr_load_param (def, |
| (TARGET_PTRMEMFUNC_VBIT_LOCATION |
| == ptrmemfunc_vbit_in_delta), |
| NULL); |
| if (rec != rec2) |
| return; |
| |
| index = ipa_get_param_decl_index (info, rec); |
| if (index >= 0 |
| && parm_preserved_before_stmt_p (fbi, index, call, rec)) |
| { |
| struct cgraph_edge *cs = ipa_note_param_call (fbi->node, index, call); |
| cs->indirect_info->offset = offset; |
| cs->indirect_info->agg_contents = 1; |
| cs->indirect_info->member_ptr = 1; |
| cs->indirect_info->guaranteed_unmodified = 1; |
| } |
| |
| return; |
| } |
| |
| /* Analyze a CALL to an OBJ_TYPE_REF which is passed in TARGET and if the |
| object referenced in the expression is a formal parameter of the caller |
| FBI->node (described by FBI->info), create a call note for the |
| statement. */ |
| |
| static void |
| ipa_analyze_virtual_call_uses (struct ipa_func_body_info *fbi, |
| gcall *call, tree target) |
| { |
| tree obj = OBJ_TYPE_REF_OBJECT (target); |
| int index; |
| HOST_WIDE_INT anc_offset; |
| |
| if (!flag_devirtualize) |
| return; |
| |
| if (TREE_CODE (obj) != SSA_NAME) |
| return; |
| |
| struct ipa_node_params *info = fbi->info; |
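| /* OBJ is either a formal parameter itself (its default definition) or an |
| address derived from an ancestor of one; in both cases give up if the |
| dynamic type may have changed before the call. */ |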
| if (SSA_NAME_IS_DEFAULT_DEF (obj)) |
| { |
| struct ipa_jump_func jfunc; |
| if (TREE_CODE (SSA_NAME_VAR (obj)) != PARM_DECL) |
| return; |
| |
| anc_offset = 0; |
| index = ipa_get_param_decl_index (info, SSA_NAME_VAR (obj)); |
| gcc_assert (index >= 0); |
| if (detect_type_change_ssa (obj, obj_type_ref_class (target), |
| call, &jfunc)) |
| return; |
| } |
| else |
| { |
| struct ipa_jump_func jfunc; |
| gimple *stmt = SSA_NAME_DEF_STMT (obj); |
| tree expr; |
| |
| expr = get_ancestor_addr_info (stmt, &obj, &anc_offset); |
| if (!expr) |
| return; |
| index = ipa_get_param_decl_index (info, |
| SSA_NAME_VAR (TREE_OPERAND (expr, 0))); |
| gcc_assert (index >= 0); |
| if (detect_type_change (obj, expr, obj_type_ref_class (target), |
| call, &jfunc, anc_offset)) |
| return; |
| } |
| |
| struct cgraph_edge *cs = ipa_note_param_call (fbi->node, index, call); |
| struct cgraph_indirect_call_info *ii = cs->indirect_info; |
| ii->offset = anc_offset; |
| ii->otr_token = tree_to_uhwi (OBJ_TYPE_REF_TOKEN (target)); |
| ii->otr_type = obj_type_ref_class (target); |
| ii->polymorphic = 1; |
| } |
| |
| /* Analyze whether and how the call statement CALL utilizes formal parameters |
| of the caller FBI->node (described by FBI->info). */ |
| |
| static void |
| ipa_analyze_call_uses (struct ipa_func_body_info *fbi, gcall *call) |
| { |
| tree target = gimple_call_fn (call); |
| |
| if (!target |
| || (TREE_CODE (target) != SSA_NAME |
| && !virtual_method_call_p (target))) |
| return; |
| |
| struct cgraph_edge *cs = fbi->node->get_edge (call); |
| /* If we previously turned the call into a direct call, there is |
| no need to analyze. */ |
| if (cs && !cs->indirect_unknown_callee) |
| return; |
| |
| if (cs->indirect_info->polymorphic && flag_devirtualize) |
| { |
| tree instance; |
| tree target = gimple_call_fn (call); |
| ipa_polymorphic_call_context context (current_function_decl, |
| target, call, &instance); |
| |
| gcc_checking_assert (cs->indirect_info->otr_type |
| == obj_type_ref_class (target)); |
| gcc_checking_assert (cs->indirect_info->otr_token |
| == tree_to_shwi (OBJ_TYPE_REF_TOKEN (target))); |
| |
| cs->indirect_info->vptr_changed |
| = !context.get_dynamic_type (instance, |
| OBJ_TYPE_REF_OBJECT (target), |
| obj_type_ref_class (target), call); |
| cs->indirect_info->context = context; |
| } |
| |
| if (TREE_CODE (target) == SSA_NAME) |
| ipa_analyze_indirect_call_uses (fbi, call, target); |
| else if (virtual_method_call_p (target)) |
| ipa_analyze_virtual_call_uses (fbi, call, target); |
| } |
| |
| |
| /* Analyze the statement STMT with respect to formal parameters (described in |
| FBI->info) of the caller given by FBI->node. Currently it only checks whether |
| formal parameters are called. */ |
| |
| static void |
| ipa_analyze_stmt_uses (struct ipa_func_body_info *fbi, gimple *stmt) |
| { |
| if (is_gimple_call (stmt)) |
| ipa_analyze_call_uses (fbi, as_a <gcall *> (stmt)); |
| } |
| |
| /* Callback of walk_stmt_load_store_addr_ops for the visit_load. |
| If OP is a parameter declaration, mark it as used in the info structure |
| passed in DATA. */ |
| |
| static bool |
| visit_ref_for_mod_analysis (gimple *, tree op, tree, void *data) |
| { |
| struct ipa_node_params *info = (struct ipa_node_params *) data; |
| |
| op = get_base_address (op); |
| if (op |
| && TREE_CODE (op) == PARM_DECL) |
| { |
| int index = ipa_get_param_decl_index (info, op); |
| gcc_assert (index >= 0); |
| ipa_set_param_used (info, index, true); |
| } |
| |
| return false; |
| } |
| |
| /* Scan the statements in BB and inspect the uses of formal parameters. Store |
| the findings in various structures of the associated ipa_node_params |
| structure, such as parameter flags, notes etc. FBI holds various data about |
| the function being analyzed. */ |
| |
| static void |
| ipa_analyze_params_uses_in_bb (struct ipa_func_body_info *fbi, basic_block bb) |
| { |
| gimple_stmt_iterator gsi; |
| for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| |
| if (is_gimple_debug (stmt)) |
| continue; |
| |
| ipa_analyze_stmt_uses (fbi, stmt); |
| walk_stmt_load_store_addr_ops (stmt, fbi->info, |
| visit_ref_for_mod_analysis, |
| visit_ref_for_mod_analysis, |
| visit_ref_for_mod_analysis); |
| } |
| for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| walk_stmt_load_store_addr_ops (gsi_stmt (gsi), fbi->info, |
| visit_ref_for_mod_analysis, |
| visit_ref_for_mod_analysis, |
| visit_ref_for_mod_analysis); |
| } |
| |
| /* Calculate controlled uses of parameters of NODE. */ |
| |
| static void |
| ipa_analyze_controlled_uses (struct cgraph_node *node) |
| { |
| struct ipa_node_params *info = IPA_NODE_REF (node); |
| |
| for (int i = 0; i < ipa_get_param_count (info); i++) |
| { |
| tree parm = ipa_get_param (info, i); |
| int controlled_uses = 0; |
| |
| /* For SSA regs, see if the parameter is used; for non-SSA parameters we |
| compute the flag during modification analysis. */ |
| if (is_gimple_reg (parm)) |
| { |
| tree ddef = ssa_default_def (DECL_STRUCT_FUNCTION (node->decl), |
| parm); |
| if (ddef && !has_zero_uses (ddef)) |
| { |
| imm_use_iterator imm_iter; |
| use_operand_p use_p; |
| |
| ipa_set_param_used (info, i, true); |
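| /* Count only uses that occur in call statements; any other non-debug use |
| means we cannot describe all uses of the parameter. */ |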
| FOR_EACH_IMM_USE_FAST (use_p, imm_iter, ddef) |
| if (!is_gimple_call (USE_STMT (use_p))) |
| { |
| if (!is_gimple_debug (USE_STMT (use_p))) |
| { |
| controlled_uses = IPA_UNDESCRIBED_USE; |
| break; |
| } |
| } |
| else |
| controlled_uses++; |
| } |
| else |
| controlled_uses = 0; |
| } |
| else |
| controlled_uses = IPA_UNDESCRIBED_USE; |
| ipa_set_controlled_uses (info, i, controlled_uses); |
| } |
| } |
| |
| /* Release the data stored in BI. */ |
| |
| static void |
| free_ipa_bb_info (struct ipa_bb_info *bi) |
| { |
| bi->cg_edges.release (); |
| bi->param_aa_statuses.release (); |
| } |
| |
| /* Dominator walker driving the analysis. */ |
| |
| class analysis_dom_walker : public dom_walker |
| { |
| public: |
| analysis_dom_walker (struct ipa_func_body_info *fbi) |
| : dom_walker (CDI_DOMINATORS), m_fbi (fbi) {} |
| |
| virtual edge before_dom_children (basic_block); |
| |
| private: |
| struct ipa_func_body_info *m_fbi; |
| }; |
| |
| edge |
| analysis_dom_walker::before_dom_children (basic_block bb) |
| { |
| ipa_analyze_params_uses_in_bb (m_fbi, bb); |
| ipa_compute_jump_functions_for_bb (m_fbi, bb); |
| return NULL; |
| } |
| |
| /* Release body info FBI. */ |
| |
| void |
| ipa_release_body_info (struct ipa_func_body_info *fbi) |
| { |
| int i; |
| struct ipa_bb_info *bi; |
| |
| FOR_EACH_VEC_ELT (fbi->bb_infos, i, bi) |
| free_ipa_bb_info (bi); |
| fbi->bb_infos.release (); |
| } |
| |
| /* Initialize the array describing properties of formal parameters |
| of NODE, analyze their uses and compute jump functions associated |
| with actual arguments of calls from within NODE. */ |
| |
| void |
| ipa_analyze_node (struct cgraph_node *node) |
| { |
| struct ipa_func_body_info fbi; |
| struct ipa_node_params *info; |
| |
| ipa_check_create_node_params (); |
| ipa_check_create_edge_args (); |
| info = IPA_NODE_REF (node); |
| |
| if (info->analysis_done) |
| return; |
| info->analysis_done = 1; |
| |
| if (ipa_func_spec_opts_forbid_analysis_p (node)) |
| { |
| for (int i = 0; i < ipa_get_param_count (info); i++) |
| { |
| ipa_set_param_used (info, i, true); |
| ipa_set_controlled_uses (info, i, IPA_UNDESCRIBED_USE); |
| } |
| return; |
| } |
| |
| struct function *func = DECL_STRUCT_FUNCTION (node->decl); |
| push_cfun (func); |
| calculate_dominance_info (CDI_DOMINATORS); |
| ipa_initialize_node_params (node); |
| ipa_analyze_controlled_uses (node); |
| |
| fbi.node = node; |
| fbi.info = IPA_NODE_REF (node); |
| fbi.bb_infos = vNULL; |
| fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
| fbi.param_count = ipa_get_param_count (info); |
| fbi.aa_walked = 0; |
| |
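| /* Distribute the outgoing call graph edges, both direct and indirect, into |
| the per-basic-block info of the blocks containing their call statements so |
| that the dominator walk below can process them. */ |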
| for (struct cgraph_edge *cs = node->callees; cs; cs = cs->next_callee) |
| { |
| ipa_bb_info *bi = ipa_get_bb_info (&fbi, gimple_bb (cs->call_stmt)); |
| bi->cg_edges.safe_push (cs); |
| } |
| |
| for (struct cgraph_edge *cs = node->indirect_calls; cs; cs = cs->next_callee) |
| { |
| ipa_bb_info *bi = ipa_get_bb_info (&fbi, gimple_bb (cs->call_stmt)); |
| bi->cg_edges.safe_push (cs); |
| } |
| |
| analysis_dom_walker (&fbi).walk (ENTRY_BLOCK_PTR_FOR_FN (cfun)); |
| |
| ipa_release_body_info (&fbi); |
| free_dominance_info (CDI_DOMINATORS); |
| pop_cfun (); |
| } |
| |
| /* Update the jump functions associated with call graph edge E when the call |
| graph edge CS is being inlined, assuming that E->caller is already (possibly |
| indirectly) inlined into CS->callee and that E has not been inlined. */ |
| |
| static void |
| update_jump_functions_after_inlining (struct cgraph_edge *cs, |
| struct cgraph_edge *e) |
| { |
| struct ipa_edge_args *top = IPA_EDGE_REF (cs); |
| struct ipa_edge_args *args = IPA_EDGE_REF (e); |
| int count = ipa_get_cs_argument_count (args); |
| int i; |
| |
| for (i = 0; i < count; i++) |
| { |
| struct ipa_jump_func *dst = ipa_get_ith_jump_func (args, i); |
| struct ipa_polymorphic_call_context *dst_ctx |
| = ipa_get_ith_polymorhic_call_context (args, i); |
| |
| if (dst->type == IPA_JF_ANCESTOR) |
| { |
| struct ipa_jump_func *src; |
| int dst_fid = dst->value.ancestor.formal_id; |
| struct ipa_polymorphic_call_context *src_ctx |
| = ipa_get_ith_polymorhic_call_context (top, dst_fid); |
| |
| /* A variable number of arguments can cause havoc if we try to access an |
| argument that does not exist in the inlined edge, so make sure we do |
| not. */ |
| if (dst_fid >= ipa_get_cs_argument_count (top)) |
| { |
| ipa_set_jf_unknown (dst); |
| continue; |
| } |
| |
| src = ipa_get_ith_jump_func (top, dst_fid); |
| |
| if (src_ctx && !src_ctx->useless_p ()) |
| { |
| struct ipa_polymorphic_call_context ctx = *src_ctx; |
| |
| /* TODO: Make type preserved safe WRT contexts. */ |
| if (!ipa_get_jf_ancestor_type_preserved (dst)) |
| ctx.possible_dynamic_type_change (e->in_polymorphic_cdtor); |
| ctx.offset_by (dst->value.ancestor.offset); |
| if (!ctx.useless_p ()) |
| { |
| if (!dst_ctx) |
| { |
| vec_safe_grow_cleared (args->polymorphic_call_contexts, |
| count); |
| dst_ctx = ipa_get_ith_polymorhic_call_context (args, i); |
| } |
| |
| dst_ctx->combine_with (ctx); |
| } |
| } |
| |
| if (src->agg.items |
| && (dst->value.ancestor.agg_preserved || !src->agg.by_ref)) |
| { |
| struct ipa_agg_jf_item *item; |
| int j; |
| |
| /* Currently we do not produce clobber aggregate jump functions, |
| replace with merging when we do. */ |
| gcc_assert (!dst->agg.items); |
| |
| dst->agg.items = vec_safe_copy (src->agg.items); |
| dst->agg.by_ref = src->agg.by_ref; |
| FOR_EACH_VEC_SAFE_ELT (dst->agg.items, j, item) |
| item->offset -= dst->value.ancestor.offset; |
| } |
| |
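| /* Compose the ancestor function with the description of the corresponding |
| argument of the inlined edge: a pass-through renumbers the referenced formal |
| parameter, a nested ancestor adds its offset, and anything else makes the |
| result unknown. */ |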
| if (src->type == IPA_JF_PASS_THROUGH |
| && src->value.pass_through.operation == NOP_EXPR) |
| { |
| dst->value.ancestor.formal_id = src->value.pass_through.formal_id; |
| dst->value.ancestor.agg_preserved &= |
| src->value.pass_through.agg_preserved; |
| } |
| else if (src->type == IPA_JF_PASS_THROUGH |
| && TREE_CODE_CLASS (src->value.pass_through.operation) == tcc_unary) |
| { |
| dst->value.ancestor.formal_id = src->value.pass_through.formal_id; |
| dst->value.ancestor.agg_preserved = false; |
| } |
| else if (src->type == IPA_JF_ANCESTOR) |
| { |
| dst->value.ancestor.formal_id = src->value.ancestor.formal_id; |
| dst->value.ancestor.offset += src->value.ancestor.offset; |
| dst->value.ancestor.agg_preserved &= |
| src->value.ancestor.agg_preserved; |
| } |
| else |
| ipa_set_jf_unknown (dst); |
| } |
| else if (dst->type == IPA_JF_PASS_THROUGH) |
| { |
| struct ipa_jump_func *src; |
| /* We must check the range because of calls with a variable number of |
| arguments, and we cannot combine jump functions with operations. */ |
| if (dst->value.pass_through.operation == NOP_EXPR |
| && (dst->value.pass_through.formal_id |
| < ipa_get_cs_argument_count (top))) |
| { |
| int dst_fid = dst->value.pass_through.formal_id; |
| src = ipa_get_ith_jump_func (top, dst_fid); |
| bool dst_agg_p = ipa_get_jf_pass_through_agg_preserved (dst); |
| struct ipa_polymorphic_call_context *src_ctx |
| = ipa_get_ith_polymorhic_call_context (top, dst_fid); |
| |
| if (src_ctx && !src_ctx->useless_p ()) |
| { |
| struct ipa_polymorphic_call_context ctx = *src_ctx; |
| |
| /* TODO: Make type preserved safe WRT contexts. */ |
| if (!ipa_get_jf_pass_through_type_preserved (dst)) |
| ctx.possible_dynamic_type_change (e->in_polymorphic_cdtor); |
| if (!ctx.useless_p ()) |
| { |
| if (!dst_ctx) |
| { |
| vec_safe_grow_cleared (args->polymorphic_call_contexts, |
| count); |
| dst_ctx = ipa_get_ith_polymorhic_call_context (args, i); |
| } |
| dst_ctx->combine_with (ctx); |
| } |
| } |
| switch (src->type) |
| { |
| case IPA_JF_UNKNOWN: |
| ipa_set_jf_unknown (dst); |
| break; |
| case IPA_JF_CONST: |
| ipa_set_jf_cst_copy (dst, src); |
| break; |
| |
| case IPA_JF_PASS_THROUGH: |
| { |
| int formal_id = ipa_get_jf_pass_through_formal_id (src); |
| enum tree_code operation; |
| operation = ipa_get_jf_pass_through_operation (src); |
| |
| if (operation == NOP_EXPR) |
| { |
| bool agg_p; |
| agg_p = dst_agg_p |
| && ipa_get_jf_pass_through_agg_preserved (src); |
| ipa_set_jf_simple_pass_through (dst, formal_id, agg_p); |
| } |
| else if (TREE_CODE_CLASS (operation) == tcc_unary) |
| ipa_set_jf_unary_pass_through (dst, formal_id, operation); |
| else |
| { |
| tree operand = ipa_get_jf_pass_through_operand (src); |
| ipa_set_jf_arith_pass_through (dst, formal_id, operand, |
| operation); |
| } |
| break; |
| } |
| case IPA_JF_ANCESTOR: |
| { |
| bool agg_p; |
| agg_p = dst_agg_p |
| && ipa_get_jf_ancestor_agg_preserved (src); |
| ipa_set_ancestor_jf (dst, |
| ipa_get_jf_ancestor_offset (src), |
| ipa_get_jf_ancestor_formal_id (src), |
| agg_p); |
| break; |
| } |
| default: |
| gcc_unreachable (); |
| } |
| |
| if (src->agg.items |
| && (dst_agg_p || !src->agg.by_ref)) |
| { |
| /* Currently we do not produce clobber aggregate jump |
| functions, replace with merging when we do. */ |
| gcc_assert (!dst->agg.items); |
| |
| dst->agg.by_ref = src->agg.by_ref; |
| dst->agg.items = vec_safe_copy (src->agg.items); |
| } |
| } |
| else |
| ipa_set_jf_unknown (dst); |
| } |
| } |
| } |
| |
| /* If TARGET is an addr_expr of a function declaration, make it the |
| (speculative, if SPECULATIVE is true) destination of an indirect edge IE |
| and return the edge. Otherwise, return NULL. */ |
| |
| struct cgraph_edge * |
| ipa_make_edge_direct_to_target (struct cgraph_edge *ie, tree target, |
| bool speculative) |
| { |
| struct cgraph_node *callee; |
| struct ipa_call_summary *es = ipa_call_summaries->get (ie); |
| bool unreachable = false; |
| |
| if (TREE_CODE (target) == ADDR_EXPR) |
| target = TREE_OPERAND (target, 0); |
| if (TREE_CODE (target) != FUNCTION_DECL) |
| { |
| target = canonicalize_constructor_val (target, NULL); |
| if (!target || TREE_CODE (target) != FUNCTION_DECL) |
| { |
| /* Member pointer call that goes through a VMT lookup. */ |
| if (ie->indirect_info->member_ptr |
| /* Or if the target is not an invariant expression and we do not |
| know whether it will evaluate to a function at runtime. |
| This can happen when folding through &VAR, where &VAR |
| is IP invariant, but VAR itself is not. |
| |
| TODO: Revisit this when GCC 5 is branched. It seems that |
| member_ptr check is not needed and that we may try to fold |
| the expression and see if VAR is readonly. */ |
| || !is_gimple_ip_invariant (target)) |
| { |
| if (dump_enabled_p ()) |
| { |
| location_t loc = gimple_location_safe (ie->call_stmt); |
| dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loc, |
| "discovered direct call non-invariant %s\n", |
| ie->caller->dump_name ()); |
| } |
| return NULL; |
| } |
| |
| |
| if (dump_enabled_p ()) |
| { |
| location_t loc = gimple_location_safe (ie->call_stmt); |
| dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loc, |
| "discovered direct call to non-function in %s, " |
| "making it __builtin_unreachable\n", |
| ie->caller->dump_name ()); |
| } |
| |
| target = builtin_decl_implicit (BUILT_IN_UNREACHABLE); |
| callee = cgraph_node::get_create (target); |
| unreachable = true; |
| } |
| else |
| callee = cgraph_node::get (target); |
| } |
| else |
| callee = cgraph_node::get (target); |
| |
| /* Because may-edges are not explicitly represented and the vtable may be |
| external, we may create the first reference to the object in the unit. */ |
| if (!callee || callee->global.inlined_to) |
| { |
| |
| /* We had better make sure we can refer to it. |
| In the case of static functions we are out of luck, since we have already |
| removed their bodies. In the case of public functions we may or may |
| not introduce the reference. */ |
| if (!canonicalize_constructor_val (target, NULL) |
| || !TREE_PUBLIC (target)) |
| { |
| if (dump_file) |
| fprintf (dump_file, "ipa-prop: Discovered call to a known target " |
| "(%s -> %s) but can not refer to it. Giving up.\n", |
| ie->caller->dump_name (), |
| ie->callee->dump_name ()); |
| return NULL; |
| } |
| callee = cgraph_node::get_create (target); |
| } |
| |
| /* If the edge is already speculative, check whether the new target agrees |
| with the existing speculation; either way there is nothing more to do. */ |
| if (speculative && ie->speculative) |
| { |
| struct cgraph_edge *e2; |
| struct ipa_ref *ref; |
| ie->speculative_call_info (e2, ie, ref); |
| if (e2->callee->ultimate_alias_target () |
| != callee->ultimate_alias_target ()) |
| { |
| if (dump_file) |
| fprintf (dump_file, "ipa-prop: Discovered call to a speculative " |
| "target (%s -> %s) but the call is already " |
| "speculated to %s. Giving up.\n", |
| ie->caller->dump_name (), callee->dump_name (), |
| e2->callee->dump_name ()); |
| } |
| else |
| { |
| if (dump_file) |
| fprintf (dump_file, "ipa-prop: Discovered call to a speculative target " |
| "(%s -> %s) this agree with previous speculation.\n", |
| ie->caller->dump_name (), callee->dump_name ()); |
| } |
| return NULL; |
| } |
| |
| if (!dbg_cnt (devirt)) |
| return NULL; |
| |
| ipa_check_create_node_params (); |
| |
| /* We cannot make edges to inline clones. It is a bug if someone removed |
| the cgraph node too early. */ |
| gcc_assert (!callee->global.inlined_to); |
| |
| if (dump_file && !unreachable) |
| { |
| fprintf (dump_file, "ipa-prop: Discovered %s call to a %s target " |
| "(%s -> %s), for stmt ", |
| ie->indirect_info->polymorphic ? "a virtual" : "an indirect", |
| speculative ? "speculative" : "known", |
| ie->caller->dump_name (), |
| callee->dump_name ()); |
| if (ie->call_stmt) |
| print_gimple_stmt (dump_file, ie->call_stmt, 2, TDF_SLIM); |
| else |
| fprintf (dump_file, "with uid %i\n", ie->lto_stmt_uid); |
| } |
| if (dump_enabled_p ()) |
| { |
| location_t loc = gimple_location_safe (ie->call_stmt); |
| |
| dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loc, |
| "converting indirect call in %s to direct call to %s\n", |
| ie->caller->name (), callee->name ()); |
| } |
| if (!speculative) |
| { |
| struct cgraph_edge *orig = ie; |
| ie = ie->make_direct (callee); |
| /* If we resolved a speculative edge, the cost is already up to date |
| for the direct call (adjusted by inline_edge_duplication_hook). */ |
| if (ie == orig) |
| { |
| es = ipa_call_summaries->get (ie); |
| es->call_stmt_size -= (eni_size_weights.indirect_call_cost |
| - eni_size_weights.call_cost); |
| es->call_stmt_time -= (eni_time_weights.indirect_call_cost |
| - eni_time_weights.call_cost); |
| } |
| } |
| else |
| { |
| if (!callee->can_be_discarded_p ()) |
| { |
| cgraph_node *alias; |
| alias = dyn_cast<cgraph_node *> (callee->noninterposable_alias ()); |
| if (alias) |
| callee = alias; |
| } |
| /* make_speculative will update ie's cost to direct call cost. */ |
| ie = ie->make_speculative |
| (callee, ie->count.apply_scale (8, 10)); |
| } |
| |
| return ie; |
| } |
| |
| /* Attempt to locate an interprocedural constant at a given REQ_OFFSET in |
| CONSTRUCTOR and return it. Return NULL if the search fails for some |
| reason. */ |
| |
| static tree |
| find_constructor_constant_at_offset (tree constructor, HOST_WIDE_INT req_offset) |
| { |
| tree type = TREE_TYPE (constructor); |
| if (TREE_CODE (type) != ARRAY_TYPE |
| && TREE_CODE (type) != RECORD_TYPE) |
| return NULL; |
| |
| unsigned ix; |
| tree index, val; |
| FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (constructor), ix, index, val) |
| { |
| HOST_WIDE_INT elt_offset; |
| if (TREE_CODE (type) == ARRAY_TYPE) |
| { |
| offset_int off; |
| tree unit_size = TYPE_SIZE_UNIT (TREE_TYPE (type)); |
| gcc_assert (TREE_CODE (unit_size) == INTEGER_CST); |
| |
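| /* Compute the bit offset of the element: use the explicit index when present |
| (adjusting for a non-zero lower bound of the array domain), otherwise fall |
| back to the element's ordinal position in the constructor. */ |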
| if (index) |
| { |
| if (TREE_CODE (index) == RANGE_EXPR) |
| off = wi::to_offset (TREE_OPERAND (index, 0)); |
| else |
| off = wi::to_offset (index); |
| if (TYPE_DOMAIN (type) && TYPE_MIN_VALUE (TYPE_DOMAIN (type))) |
| { |
| tree low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (type)); |
| gcc_assert (TREE_CODE (unit_size) == INTEGER_CST); |
| off = wi::sext (off - wi::to_offset (low_bound), |
| TYPE_PRECISION (TREE_TYPE (index))); |
| } |
| off *= wi::to_offset (unit_size); |
| /* ??? Handle more than just the first index of a |
| RANGE_EXPR. */ |
| } |
| else |
| off = wi::to_offset (unit_size) * ix; |
| |
| off = wi::lshift (off, LOG2_BITS_PER_UNIT); |
| if (!wi::fits_shwi_p (off) || wi::neg_p (off)) |
| continue; |
| elt_offset = off.to_shwi (); |
| } |
| else if (TREE_CODE (type) == RECORD_TYPE) |
| { |
| gcc_checking_assert (index && TREE_CODE (index) == FIELD_DECL); |
| if (DECL_BIT_FIELD (index)) |
| continue; |
| elt_offset = int_bit_position (index); |
| } |
| else |
| gcc_unreachable (); |
| |
| if (elt_offset > req_offset) |
| return NULL; |
| |
| if (TREE_CODE (val) == CONSTRUCTOR) |
| return find_constructor_constant_at_offset (val, |
| req_offset - elt_offset); |
| |
| if (elt_offset == req_offset |
| && is_gimple_reg_type (TREE_TYPE (val)) |
| && is_gimple_ip_invariant (val)) |
| return val; |
| } |
| return NULL; |
| } |
| |
| /* Check whether SCALAR could be used to look up an aggregate interprocedural |
| invariant from a static constructor and if so, return it. Otherwise return |
| NULL. */ |
| |
| static tree |
| ipa_find_agg_cst_from_init (tree scalar, HOST_WIDE_INT offset, bool by_ref) |
| { |
| if (by_ref) |
| { |
| if (TREE_CODE (scalar) != ADDR_EXPR) |
| return NULL; |
| scalar = TREE_OPERAND (scalar, 0); |
| } |
| |
| if (!VAR_P (scalar) |
| || !is_global_var (scalar) |
| || !TREE_READONLY (scalar) |
| || !DECL_INITIAL (scalar) |
| || TREE_CODE (DECL_INITIAL (scalar)) != CONSTRUCTOR) |
| return NULL; |
| |
| return find_constructor_constant_at_offset (DECL_INITIAL (scalar), offset); |
| } |
| |
| /* Retrieve value from aggregate jump function AGG or static initializer of |
| SCALAR (which can be NULL) for the given OFFSET or return NULL if there is |
| none. BY_REF specifies whether the value has to be passed by reference or |
| by value. If FROM_GLOBAL_CONSTANT is non-NULL, then the boolean it points |
| to is set to true if the value comes from an initializer of a constant. */ |
| |
| tree |
| ipa_find_agg_cst_for_param (struct ipa_agg_jump_function *agg, tree scalar, |
| HOST_WIDE_INT offset, bool by_ref, |
| bool *from_global_constant) |
| { |
| struct ipa_agg_jf_item *item; |
| int i; |
| |
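| /* A value found in the static initializer of a read-only global takes |
| precedence; only afterwards consult the aggregate jump function, which must |
| agree on whether the aggregate is passed by reference. */ |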
| if (scalar) |
| { |
| tree res = ipa_find_agg_cst_from_init (scalar, offset, by_ref); |
| if (res) |
| { |
| if (from_global_constant) |
| *from_global_constant = true; |
| return res; |
| } |
| } |
| |
| if (!agg |
| || by_ref != agg->by_ref) |
| return NULL; |
| |
| FOR_EACH_VEC_SAFE_ELT (agg->items, i, item) |
| if (item->offset == offset) |
| { |
| /* Currently we do not have clobber values, return NULL for them once |
| we do. */ |
| gcc_checking_assert (is_gimple_ip_invariant (item->value)); |
| if (from_global_constant) |
| *from_global_constant = false; |
| return item->value; |
| } |
| return NULL; |
| } |
| |
| /* Remove a reference to SYMBOL from the list of references of a node given by |
| reference description RDESC. Return true if the reference has been |
| successfully found and removed. */ |
| |
| static bool |
| remove_described_reference (symtab_node *symbol, struct ipa_cst_ref_desc *rdesc) |
| { |
| struct ipa_ref *to_del; |
| struct cgraph_edge *origin; |
| |
| origin = rdesc->cs; |
| if (!origin) |
| return false; |
| to_del = origin->caller->find_reference (symbol, origin->call_stmt, |
| origin->lto_stmt_uid); |
| if (!to_del) |
| return false; |
| |
| to_del->remove_reference (); |
| if (dump_file) |
| fprintf (dump_file, "ipa-prop: Removed a reference from %s to %s.\n", |
| origin->caller->dump_name (), xstrdup_for_dump (symbol->name ())); |
| return true; |
| } |
| |
| /* If JFUNC has a reference description with refcount different from |
| IPA_UNDESCRIBED_USE, return the reference description, otherwise return |
| NULL. JFUNC must be a constant jump function. */ |
| |
| static struct ipa_cst_ref_desc * |
| jfunc_rdesc_usable (struct ipa_jump_func *jfunc) |
| { |
| struct ipa_cst_ref_desc *rdesc = ipa_get_jf_constant_rdesc (jfunc); |
| if (rdesc && rdesc->refcount != IPA_UNDESCRIBED_USE) |
| return rdesc; |
| else |
| return NULL; |
| } |
| |
| /* If the value of constant jump function JFUNC is an address of a function |
| declaration, return the associated call graph node. Otherwise return |
| NULL. */ |
| |
| static cgraph_node * |
| cgraph_node_for_jfunc (struct ipa_jump_func *jfunc) |
| { |
| gcc_checking_assert (jfunc->type == IPA_JF_CONST); |
| tree cst = ipa_get_jf_constant (jfunc); |
| if (TREE_CODE (cst) != ADDR_EXPR |
| || TREE_CODE (TREE_OPERAND (cst, 0)) != FUNCTION_DECL) |
| return NULL; |
| |
| return cgraph_node::get (TREE_OPERAND (cst, 0)); |
| } |
| |
| |
| /* If JFUNC is a constant jump function with a usable rdesc, decrement its |
| refcount and if it hits zero, remove reference to SYMBOL from the caller of |
| the edge specified in the rdesc. Return false if either the symbol or the |
| reference could not be found, otherwise return true. */ |
| |
| static bool |
|