| /* Inlining decision heuristics. |
| Copyright (C) 2003-2015 Free Software Foundation, Inc. |
| Contributed by Jan Hubicka |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| /* Analysis used by the inliner and other passes limiting code size growth. |
| |
| We estimate for each function |
| - function body size |
| - average function execution time |
| - inlining size benefit (that is how much of function body size |
| and its call sequence is expected to disappear by inlining) |
| - inlining time benefit |
| - function frame size |
| For each call |
| - call statement size and time |
| |
| The inline_summary data structures store the above information locally |
| (i.e. parameters of the function itself) and globally (i.e. parameters |
| of the function created by applying all the inline decisions already |
| present in the callgraph). |
| |
| We provide accessors to the inline_summary data structure and |
| basic logic for updating the parameters when inlining is performed. |
| |
| The summaries are context sensitive. Context means |
| 1) partial assignment of known constant values of operands |
| 2) whether the function is inlined into the call or not. |
| It is easy to add more variants. To represent function size and time |
| that depend on context (i.e. are known to be optimized away when the |
| context is known either by inlining or from IP-CP and cloning), |
| we use predicates. Predicates are logical formulas in |
| conjunctive-disjunctive form consisting of clauses. Clauses are bitmaps |
| specifying what conditions must be true. Conditions are simple tests |
| of the form described above. |
| |
| In order to make a predicate (possibly) true, all of its clauses must |
| be (possibly) true. To make a clause (possibly) true, one of the |
| conditions it mentions must be (possibly) true. There are fixed bounds |
| on the number of clauses and conditions, and all the manipulation |
| functions are conservative in the positive direction. I.e. we may lose |
| precision by considering a predicate possibly true even when it is not. |
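| |
| As a minimal illustrative sketch (not actual code in this file; SUMMARY, |
| SIZE, SIZE_COST and TIME_COST are hypothetical), a size/time entry |
| guarded by "operand 0 is not a compile time constant AND the function |
| was not inlined" could be built from the helpers below roughly as: |
| |
| struct predicate p1 = not_inlined_predicate (); |
| struct predicate p2 = add_condition (summary, 0, size, NULL, |
| IS_NOT_CONSTANT, NULL_TREE); |
| struct predicate p = and_predicates (summary->conds, &p1, &p2); |
| account_size_time (summary, size_cost, time_cost, &p); |
| |
| where size_cost and time_cost are expressed in INLINE_SIZE_SCALE and |
| INLINE_TIME_SCALE units respectively. |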
| |
| estimate_edge_size and estimate_edge_growth can be used to query |
| function size/time in the given context. inline_merge_summary merges |
| properties of caller and callee after inlining. |
| |
| Finally pass_inline_parameters is exported. This is used to drive |
| computation of function parameters used by the early inliner. The IPA |
| inliner performs analysis via its analyze_function method. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "tm.h" |
| #include "hash-set.h" |
| #include "machmode.h" |
| #include "vec.h" |
| #include "double-int.h" |
| #include "input.h" |
| #include "alias.h" |
| #include "symtab.h" |
| #include "wide-int.h" |
| #include "inchash.h" |
| #include "real.h" |
| #include "tree.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "stringpool.h" |
| #include "print-tree.h" |
| #include "tree-inline.h" |
| #include "langhooks.h" |
| #include "flags.h" |
| #include "diagnostic.h" |
| #include "gimple-pretty-print.h" |
| #include "params.h" |
| #include "tree-pass.h" |
| #include "coverage.h" |
| #include "predict.h" |
| #include "hard-reg-set.h" |
| #include "input.h" |
| #include "function.h" |
| #include "dominance.h" |
| #include "cfg.h" |
| #include "cfganal.h" |
| #include "basic-block.h" |
| #include "tree-ssa-alias.h" |
| #include "internal-fn.h" |
| #include "gimple-expr.h" |
| #include "is-a.h" |
| #include "gimple.h" |
| #include "gimple-iterator.h" |
| #include "gimple-ssa.h" |
| #include "tree-cfg.h" |
| #include "tree-phinodes.h" |
| #include "ssa-iterators.h" |
| #include "tree-ssanames.h" |
| #include "tree-ssa-loop-niter.h" |
| #include "tree-ssa-loop.h" |
| #include "hash-map.h" |
| #include "plugin-api.h" |
| #include "ipa-ref.h" |
| #include "cgraph.h" |
| #include "alloc-pool.h" |
| #include "symbol-summary.h" |
| #include "ipa-prop.h" |
| #include "lto-streamer.h" |
| #include "data-streamer.h" |
| #include "tree-streamer.h" |
| #include "ipa-inline.h" |
| #include "cfgloop.h" |
| #include "tree-scalar-evolution.h" |
| #include "ipa-utils.h" |
| #include "cilk.h" |
| #include "cfgexpand.h" |
| |
| /* The estimated runtime of a function can easily run into huge numbers with |
| many nested loops. Be sure we can compute time * INLINE_SIZE_SCALE * 2 in |
| an integer. For anything larger we use gcov_type. */ |
| #define MAX_TIME 500000 |
| |
| /* Number of bits in an integer, but we really want it to be stable across |
| different hosts. */ |
| #define NUM_CONDITIONS 32 |
| |
| enum predicate_conditions |
| { |
| predicate_false_condition = 0, |
| predicate_not_inlined_condition = 1, |
| predicate_first_dynamic_condition = 2 |
| }; |
| |
| /* Special condition code we use to represent a test that an operand is |
| a compile time constant. */ |
| #define IS_NOT_CONSTANT ERROR_MARK |
| /* Special condition code we use to represent a test that an operand is |
| not changed across an invocation of the function. When an operand |
| IS_NOT_CONSTANT it is always CHANGED; however, e.g. loop invariants |
| can be NOT_CHANGED for a given percentage of executions even when they |
| are not compile time constants. */ |
| #define CHANGED IDENTIFIER_NODE |
| |
| /* Holders of ipa cgraph hooks: */ |
| static struct cgraph_2edge_hook_list *edge_duplication_hook_holder; |
| static struct cgraph_edge_hook_list *edge_removal_hook_holder; |
| static void inline_edge_removal_hook (struct cgraph_edge *, void *); |
| static void inline_edge_duplication_hook (struct cgraph_edge *, |
| struct cgraph_edge *, void *); |
| |
| /* VECtor holding inline summaries. |
| In GGC memory because conditions might point to constant trees. */ |
| function_summary <inline_summary *> *inline_summaries; |
| vec<inline_edge_summary_t> inline_edge_summary_vec; |
| |
| /* Cached node/edge growths. */ |
| vec<edge_growth_cache_entry> edge_growth_cache; |
| |
| /* Edge predicates go here. */ |
| static alloc_pool edge_predicate_pool; |
| |
| /* Return true predicate (tautology). |
| We represent it by empty list of clauses. */ |
| |
| static inline struct predicate |
| true_predicate (void) |
| { |
| struct predicate p; |
| p.clause[0] = 0; |
| return p; |
| } |
| |
| |
| /* Return predicate testing single condition number COND. */ |
| |
| static inline struct predicate |
| single_cond_predicate (int cond) |
| { |
| struct predicate p; |
| p.clause[0] = 1 << cond; |
| p.clause[1] = 0; |
| return p; |
| } |
| |
| |
| /* Return false predicate. The first clause requires the false condition. */ |
| |
| static inline struct predicate |
| false_predicate (void) |
| { |
| return single_cond_predicate (predicate_false_condition); |
| } |
| |
| |
| /* Return true if P is (true). */ |
| |
| static inline bool |
| true_predicate_p (struct predicate *p) |
| { |
| return !p->clause[0]; |
| } |
| |
| |
| /* Return true if P is (false). */ |
| |
| static inline bool |
| false_predicate_p (struct predicate *p) |
| { |
| if (p->clause[0] == (1 << predicate_false_condition)) |
| { |
| gcc_checking_assert (!p->clause[1] |
| && p->clause[0] == 1 << predicate_false_condition); |
| return true; |
| } |
| return false; |
| } |
| |
| |
| /* Return predicate that is set true when function is not inlined. */ |
| |
| static inline struct predicate |
| not_inlined_predicate (void) |
| { |
| return single_cond_predicate (predicate_not_inlined_condition); |
| } |
| |
| /* Simple description of whether a memory load or a condition refers to a load |
| from an aggregate and if so, how and where from in the aggregate. |
| Individual fields have the same meaning as the fields with the same name |
| in struct condition. */ |
| |
| struct agg_position_info |
| { |
| HOST_WIDE_INT offset; |
| bool agg_contents; |
| bool by_ref; |
| }; |
| |
| /* Add a condition to the condition list of SUMMARY. OPERAND_NUM, SIZE, CODE |
| and VAL correspond to fields of the condition structure. AGGPOS describes |
| whether the used operand is loaded from an aggregate and where in the |
| aggregate it is. It can be NULL, which means this is not a load from an |
| aggregate. */ |
| |
| static struct predicate |
| add_condition (struct inline_summary *summary, int operand_num, |
| HOST_WIDE_INT size, struct agg_position_info *aggpos, |
| enum tree_code code, tree val) |
| { |
| int i; |
| struct condition *c; |
| struct condition new_cond; |
| HOST_WIDE_INT offset; |
| bool agg_contents, by_ref; |
| |
| if (aggpos) |
| { |
| offset = aggpos->offset; |
| agg_contents = aggpos->agg_contents; |
| by_ref = aggpos->by_ref; |
| } |
| else |
| { |
| offset = 0; |
| agg_contents = false; |
| by_ref = false; |
| } |
| |
| gcc_checking_assert (operand_num >= 0); |
| for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++) |
| { |
| if (c->operand_num == operand_num |
| && c->size == size |
| && c->code == code |
| && c->val == val |
| && c->agg_contents == agg_contents |
| && (!agg_contents || (c->offset == offset && c->by_ref == by_ref))) |
| return single_cond_predicate (i + predicate_first_dynamic_condition); |
| } |
| /* Too many conditions. Give up and return constant true. */ |
| if (i == NUM_CONDITIONS - predicate_first_dynamic_condition) |
| return true_predicate (); |
| |
| new_cond.operand_num = operand_num; |
| new_cond.code = code; |
| new_cond.val = val; |
| new_cond.agg_contents = agg_contents; |
| new_cond.by_ref = by_ref; |
| new_cond.offset = offset; |
| new_cond.size = size; |
| vec_safe_push (summary->conds, new_cond); |
| return single_cond_predicate (i + predicate_first_dynamic_condition); |
| } |
| |
| |
| /* Add clause CLAUSE into the predicate P. */ |
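| /* For instance (illustrative only): adding the clause (c1) to the |
| predicate (c1 || c2) && (c3) yields (c1) && (c3), because the existing |
| clause (c1 || c2) is implied by the new clause and is pruned as |
| redundant. */ |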
| |
| static inline void |
| add_clause (conditions conditions, struct predicate *p, clause_t clause) |
| { |
| int i; |
| int i2; |
| int insert_here = -1; |
| int c1, c2; |
| |
| /* True clause. */ |
| if (!clause) |
| return; |
| |
| /* False clause makes the whole predicate false. Kill the other variants. */ |
| if (clause == (1 << predicate_false_condition)) |
| { |
| p->clause[0] = (1 << predicate_false_condition); |
| p->clause[1] = 0; |
| return; |
| } |
| if (false_predicate_p (p)) |
| return; |
| |
| /* No one should be silly enough to add false into nontrivial clauses. */ |
| gcc_checking_assert (!(clause & (1 << predicate_false_condition))); |
| |
| /* Look where to insert the clause. At the same time prune out |
| clauses of P that are implied by the new clause and thus |
| redundant. */ |
| for (i = 0, i2 = 0; i <= MAX_CLAUSES; i++) |
| { |
| p->clause[i2] = p->clause[i]; |
| |
| if (!p->clause[i]) |
| break; |
| |
| /* If p->clause[i] implies clause, there is nothing to add. */ |
| if ((p->clause[i] & clause) == p->clause[i]) |
| { |
| /* We had nothing to add, so none of the clauses should have become |
| redundant. */ |
| gcc_checking_assert (i == i2); |
| return; |
| } |
| |
| if (p->clause[i] < clause && insert_here < 0) |
| insert_here = i2; |
| |
| /* If clause implies p->clause[i], then p->clause[i] becomes redundant. |
| Otherwise the p->clause[i] has to stay. */ |
| if ((p->clause[i] & clause) != clause) |
| i2++; |
| } |
| |
| /* Look for clauses that are obviously true. I.e. |
| op0 == 5 || op0 != 5. */ |
| for (c1 = predicate_first_dynamic_condition; c1 < NUM_CONDITIONS; c1++) |
| { |
| condition *cc1; |
| if (!(clause & (1 << c1))) |
| continue; |
| cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition]; |
| /* We have no way to represent !CHANGED and !IS_NOT_CONSTANT |
| and thus there is no point in looking for them. */ |
| if (cc1->code == CHANGED || cc1->code == IS_NOT_CONSTANT) |
| continue; |
| for (c2 = c1 + 1; c2 < NUM_CONDITIONS; c2++) |
| if (clause & (1 << c2)) |
| { |
| condition *cc1 = |
| &(*conditions)[c1 - predicate_first_dynamic_condition]; |
| condition *cc2 = |
| &(*conditions)[c2 - predicate_first_dynamic_condition]; |
| if (cc1->operand_num == cc2->operand_num |
| && cc1->val == cc2->val |
| && cc2->code != IS_NOT_CONSTANT |
| && cc2->code != CHANGED |
| && cc1->code == invert_tree_comparison (cc2->code, |
| HONOR_NANS (cc1->val))) |
| return; |
| } |
| } |
| |
| |
| /* We ran out of variants. Be conservative in the positive direction. */ |
| if (i2 == MAX_CLAUSES) |
| return; |
| /* Keep clauses in decreasing order. This makes equivalence testing easy. */ |
| p->clause[i2 + 1] = 0; |
| if (insert_here >= 0) |
| for (; i2 > insert_here; i2--) |
| p->clause[i2] = p->clause[i2 - 1]; |
| else |
| insert_here = i2; |
| p->clause[insert_here] = clause; |
| } |
| |
| |
| /* Return P & P2. */ |
| |
| static struct predicate |
| and_predicates (conditions conditions, |
| struct predicate *p, struct predicate *p2) |
| { |
| struct predicate out = *p; |
| int i; |
| |
| /* Avoid busy work. */ |
| if (false_predicate_p (p2) || true_predicate_p (p)) |
| return *p2; |
| if (false_predicate_p (p) || true_predicate_p (p2)) |
| return *p; |
| |
| /* See how far predicates match. */ |
| for (i = 0; p->clause[i] && p->clause[i] == p2->clause[i]; i++) |
| { |
| gcc_checking_assert (i < MAX_CLAUSES); |
| } |
| |
| /* Combine the rest of the predicates. */ |
| for (; p2->clause[i]; i++) |
| { |
| gcc_checking_assert (i < MAX_CLAUSES); |
| add_clause (conditions, &out, p2->clause[i]); |
| } |
| return out; |
| } |
| |
| |
| /* Return true if predicates are obviously equal. */ |
| |
| static inline bool |
| predicates_equal_p (struct predicate *p, struct predicate *p2) |
| { |
| int i; |
| for (i = 0; p->clause[i]; i++) |
| { |
| gcc_checking_assert (i < MAX_CLAUSES); |
| gcc_checking_assert (p->clause[i] > p->clause[i + 1]); |
| gcc_checking_assert (!p2->clause[i] |
| || p2->clause[i] > p2->clause[i + 1]); |
| if (p->clause[i] != p2->clause[i]) |
| return false; |
| } |
| return !p2->clause[i]; |
| } |
| |
| |
| /* Return P | P2. */ |
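| /* For example (illustrative only): if P is (c1) && (c2) and P2 is (c3), |
| the result is (c1 || c3) && (c2 || c3): each clause of P is ORed with |
| each clause of P2 and inserted via add_clause. */ |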
| |
| static struct predicate |
| or_predicates (conditions conditions, |
| struct predicate *p, struct predicate *p2) |
| { |
| struct predicate out = true_predicate (); |
| int i, j; |
| |
| /* Avoid busy work. */ |
| if (false_predicate_p (p2) || true_predicate_p (p)) |
| return *p; |
| if (false_predicate_p (p) || true_predicate_p (p2)) |
| return *p2; |
| if (predicates_equal_p (p, p2)) |
| return *p; |
| |
| /* OK, combine the predicates. */ |
| for (i = 0; p->clause[i]; i++) |
| for (j = 0; p2->clause[j]; j++) |
| { |
| gcc_checking_assert (i < MAX_CLAUSES && j < MAX_CLAUSES); |
| add_clause (conditions, &out, p->clause[i] | p2->clause[j]); |
| } |
| return out; |
| } |
| |
| |
| /* Given a partial truth assignment in POSSIBLE_TRUTHS, return false |
| if predicate P is known to be false. */ |
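| /* For instance (illustrative only): if POSSIBLE_TRUTHS only has the bit |
| for condition c1 set, the predicate (c1) && (c2) evaluates to false |
| because its second clause contains no possibly-true condition, while |
| the predicate (c1) evaluates to true. */ |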
| |
| static bool |
| evaluate_predicate (struct predicate *p, clause_t possible_truths) |
| { |
| int i; |
| |
| /* True remains true. */ |
| if (true_predicate_p (p)) |
| return true; |
| |
| gcc_assert (!(possible_truths & (1 << predicate_false_condition))); |
| |
| /* See if we can find a clause we can disprove. */ |
| for (i = 0; p->clause[i]; i++) |
| { |
| gcc_checking_assert (i < MAX_CLAUSES); |
| if (!(p->clause[i] & possible_truths)) |
| return false; |
| } |
| return true; |
| } |
| |
| /* Return the probability in range 0...REG_BR_PROB_BASE that the predicated |
| instruction will be recomputed per invocation of the inlined call. */ |
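| /* For example (illustrative, hypothetical numbers): if the predicate has a |
| single clause consisting of one CHANGED condition whose recorded |
| change_prob is REG_BR_PROB_BASE / 2, the result is REG_BR_PROB_BASE / 2, |
| i.e. the statement is expected to be re-executed about half the time. */ |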
| |
| static int |
| predicate_probability (conditions conds, |
| struct predicate *p, clause_t possible_truths, |
| vec<inline_param_summary> inline_param_summary) |
| { |
| int i; |
| int combined_prob = REG_BR_PROB_BASE; |
| |
| /* True remains true. */ |
| if (true_predicate_p (p)) |
| return REG_BR_PROB_BASE; |
| |
| if (false_predicate_p (p)) |
| return 0; |
| |
| gcc_assert (!(possible_truths & (1 << predicate_false_condition))); |
| |
| /* See if we can find a clause we can disprove. */ |
| for (i = 0; p->clause[i]; i++) |
| { |
| gcc_checking_assert (i < MAX_CLAUSES); |
| if (!(p->clause[i] & possible_truths)) |
| return 0; |
| else |
| { |
| int this_prob = 0; |
| int i2; |
| if (!inline_param_summary.exists ()) |
| return REG_BR_PROB_BASE; |
| for (i2 = 0; i2 < NUM_CONDITIONS; i2++) |
| if ((p->clause[i] & possible_truths) & (1 << i2)) |
| { |
| if (i2 >= predicate_first_dynamic_condition) |
| { |
| condition *c = |
| &(*conds)[i2 - predicate_first_dynamic_condition]; |
| if (c->code == CHANGED |
| && (c->operand_num < |
| (int) inline_param_summary.length ())) |
| { |
| int iprob = |
| inline_param_summary[c->operand_num].change_prob; |
| this_prob = MAX (this_prob, iprob); |
| } |
| else |
| this_prob = REG_BR_PROB_BASE; |
| } |
| else |
| this_prob = REG_BR_PROB_BASE; |
| } |
| combined_prob = MIN (this_prob, combined_prob); |
| if (!combined_prob) |
| return 0; |
| } |
| } |
| return combined_prob; |
| } |
| |
| |
| /* Dump conditional COND. */ |
| |
| static void |
| dump_condition (FILE *f, conditions conditions, int cond) |
| { |
| condition *c; |
| if (cond == predicate_false_condition) |
| fprintf (f, "false"); |
| else if (cond == predicate_not_inlined_condition) |
| fprintf (f, "not inlined"); |
| else |
| { |
| c = &(*conditions)[cond - predicate_first_dynamic_condition]; |
| fprintf (f, "op%i", c->operand_num); |
| if (c->agg_contents) |
| fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]", |
| c->by_ref ? "ref " : "", c->offset); |
| if (c->code == IS_NOT_CONSTANT) |
| { |
| fprintf (f, " not constant"); |
| return; |
| } |
| if (c->code == CHANGED) |
| { |
| fprintf (f, " changed"); |
| return; |
| } |
| fprintf (f, " %s ", op_symbol_code (c->code)); |
| print_generic_expr (f, c->val, 1); |
| } |
| } |
| |
| |
| /* Dump clause CLAUSE. */ |
| |
| static void |
| dump_clause (FILE *f, conditions conds, clause_t clause) |
| { |
| int i; |
| bool found = false; |
| fprintf (f, "("); |
| if (!clause) |
| fprintf (f, "true"); |
| for (i = 0; i < NUM_CONDITIONS; i++) |
| if (clause & (1 << i)) |
| { |
| if (found) |
| fprintf (f, " || "); |
| found = true; |
| dump_condition (f, conds, i); |
| } |
| fprintf (f, ")"); |
| } |
| |
| |
| /* Dump predicate PREDICATE. */ |
| |
| static void |
| dump_predicate (FILE *f, conditions conds, struct predicate *pred) |
| { |
| int i; |
| if (true_predicate_p (pred)) |
| dump_clause (f, conds, 0); |
| else |
| for (i = 0; pred->clause[i]; i++) |
| { |
| if (i) |
| fprintf (f, " && "); |
| dump_clause (f, conds, pred->clause[i]); |
| } |
| fprintf (f, "\n"); |
| } |
| |
| |
| /* Dump inline hints. */ |
| void |
| dump_inline_hints (FILE *f, inline_hints hints) |
| { |
| if (!hints) |
| return; |
| fprintf (f, "inline hints:"); |
| if (hints & INLINE_HINT_indirect_call) |
| { |
| hints &= ~INLINE_HINT_indirect_call; |
| fprintf (f, " indirect_call"); |
| } |
| if (hints & INLINE_HINT_loop_iterations) |
| { |
| hints &= ~INLINE_HINT_loop_iterations; |
| fprintf (f, " loop_iterations"); |
| } |
| if (hints & INLINE_HINT_loop_stride) |
| { |
| hints &= ~INLINE_HINT_loop_stride; |
| fprintf (f, " loop_stride"); |
| } |
| if (hints & INLINE_HINT_same_scc) |
| { |
| hints &= ~INLINE_HINT_same_scc; |
| fprintf (f, " same_scc"); |
| } |
| if (hints & INLINE_HINT_in_scc) |
| { |
| hints &= ~INLINE_HINT_in_scc; |
| fprintf (f, " in_scc"); |
| } |
| if (hints & INLINE_HINT_cross_module) |
| { |
| hints &= ~INLINE_HINT_cross_module; |
| fprintf (f, " cross_module"); |
| } |
| if (hints & INLINE_HINT_declared_inline) |
| { |
| hints &= ~INLINE_HINT_declared_inline; |
| fprintf (f, " declared_inline"); |
| } |
| if (hints & INLINE_HINT_array_index) |
| { |
| hints &= ~INLINE_HINT_array_index; |
| fprintf (f, " array_index"); |
| } |
| if (hints & INLINE_HINT_known_hot) |
| { |
| hints &= ~INLINE_HINT_known_hot; |
| fprintf (f, " known_hot"); |
| } |
| gcc_assert (!hints); |
| } |
| |
| |
| /* Record SIZE and TIME under condition PRED into the inline summary. */ |
| |
| static void |
| account_size_time (struct inline_summary *summary, int size, int time, |
| struct predicate *pred) |
| { |
| size_time_entry *e; |
| bool found = false; |
| int i; |
| |
| if (false_predicate_p (pred)) |
| return; |
| |
| /* We need to create the initial empty unconditional clause, but otherwise |
| we don't need to account empty times and sizes. */ |
| if (!size && !time && summary->entry) |
| return; |
| |
| /* Watch overflow that might result from insane profiles. */ |
| if (time > MAX_TIME * INLINE_TIME_SCALE) |
| time = MAX_TIME * INLINE_TIME_SCALE; |
| gcc_assert (time >= 0); |
| |
| for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++) |
| if (predicates_equal_p (&e->predicate, pred)) |
| { |
| found = true; |
| break; |
| } |
| if (i == 256) |
| { |
| i = 0; |
| found = true; |
| e = &(*summary->entry)[0]; |
| gcc_assert (!e->predicate.clause[0]); |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, |
| "\t\tReached limit on number of entries, " |
| "ignoring the predicate."); |
| } |
| if (dump_file && (dump_flags & TDF_DETAILS) && (time || size)) |
| { |
| fprintf (dump_file, |
| "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate:", |
| ((double) size) / INLINE_SIZE_SCALE, |
| ((double) time) / INLINE_TIME_SCALE, found ? "" : "new "); |
| dump_predicate (dump_file, summary->conds, pred); |
| } |
| if (!found) |
| { |
| struct size_time_entry new_entry; |
| new_entry.size = size; |
| new_entry.time = time; |
| new_entry.predicate = *pred; |
| vec_safe_push (summary->entry, new_entry); |
| } |
| else |
| { |
| e->size += size; |
| e->time += time; |
| if (e->time > MAX_TIME * INLINE_TIME_SCALE) |
| e->time = MAX_TIME * INLINE_TIME_SCALE; |
| } |
| } |
| |
| /* We proved E to be unreachable, redirect it to __builtin_unreachable. */ |
| |
| static struct cgraph_edge * |
| redirect_to_unreachable (struct cgraph_edge *e) |
| { |
| struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL; |
| struct cgraph_node *target = cgraph_node::get_create |
| (builtin_decl_implicit (BUILT_IN_UNREACHABLE)); |
| |
| if (e->speculative) |
| e = e->resolve_speculation (target->decl); |
| else if (!e->callee) |
| e->make_direct (target); |
| else |
| e->redirect_callee (target); |
| struct inline_edge_summary *es = inline_edge_summary (e); |
| e->inline_failed = CIF_UNREACHABLE; |
| e->frequency = 0; |
| e->count = 0; |
| es->call_stmt_size = 0; |
| es->call_stmt_time = 0; |
| if (callee) |
| callee->remove_symbol_and_inline_clones (); |
| return e; |
| } |
| |
| /* Set predicate for edge E. */ |
| |
| static void |
| edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate) |
| { |
| /* If the edge is determined to be never executed, redirect it |
| to BUILTIN_UNREACHABLE to save the inliner from inlining into it. */ |
| if (predicate && false_predicate_p (predicate) |
| /* When handling speculative edges, we need to do the redirection |
| just once. Do it always on the direct edge, so we do not |
| attempt to resolve speculation while duplicating the edge. */ |
| && (!e->speculative || e->callee)) |
| e = redirect_to_unreachable (e); |
| |
| struct inline_edge_summary *es = inline_edge_summary (e); |
| if (predicate && !true_predicate_p (predicate)) |
| { |
| if (!es->predicate) |
| es->predicate = (struct predicate *) pool_alloc (edge_predicate_pool); |
| *es->predicate = *predicate; |
| } |
| else |
| { |
| if (es->predicate) |
| pool_free (edge_predicate_pool, es->predicate); |
| es->predicate = NULL; |
| } |
| } |
| |
| /* Set predicate for hint *P. */ |
| |
| static void |
| set_hint_predicate (struct predicate **p, struct predicate new_predicate) |
| { |
| if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate)) |
| { |
| if (*p) |
| pool_free (edge_predicate_pool, *p); |
| *p = NULL; |
| } |
| else |
| { |
| if (!*p) |
| *p = (struct predicate *) pool_alloc (edge_predicate_pool); |
| **p = new_predicate; |
| } |
| } |
| |
| |
| /* KNOWN_VALS is a partial mapping of parameters of NODE to constant values. |
| KNOWN_AGGS is a vector of aggregate jump functions for each parameter. |
| Return the clause of possible truths. When INLINE_P is true, assume that |
| we are inlining. |
| |
| An ERROR_MARK value in KNOWN_VALS means the operand is a compile time |
| invariant (its value is unknown but it does not change). */ |
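| /* Illustrative example (hypothetical values): if dynamic condition 0 of |
| NODE's summary is "op0 == 4" and KNOWN_VALS[0] is the constant 4, the |
| corresponding bit of the returned clause is set (the condition is known |
| true, assuming the operand sizes match); with KNOWN_VALS[0] equal to 5 |
| the bit stays clear (known false); with no known value the bit is set |
| conservatively (possibly true). */ |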
| |
| static clause_t |
| evaluate_conditions_for_known_args (struct cgraph_node *node, |
| bool inline_p, |
| vec<tree> known_vals, |
| vec<ipa_agg_jump_function_p> |
| known_aggs) |
| { |
| clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition; |
| struct inline_summary *info = inline_summaries->get (node); |
| int i; |
| struct condition *c; |
| |
| for (i = 0; vec_safe_iterate (info->conds, i, &c); i++) |
| { |
| tree val; |
| tree res; |
| |
| /* We allow call stmt to have fewer arguments than the callee function |
| (especially for K&R style programs). So bound check here (we assume |
| known_aggs vector, if non-NULL, has the same length as |
| known_vals). */ |
| gcc_checking_assert (!known_aggs.exists () |
| || (known_vals.length () == known_aggs.length ())); |
| if (c->operand_num >= (int) known_vals.length ()) |
| { |
| clause |= 1 << (i + predicate_first_dynamic_condition); |
| continue; |
| } |
| |
| if (c->agg_contents) |
| { |
| struct ipa_agg_jump_function *agg; |
| |
| if (c->code == CHANGED |
| && !c->by_ref |
| && (known_vals[c->operand_num] == error_mark_node)) |
| continue; |
| |
| if (known_aggs.exists ()) |
| { |
| agg = known_aggs[c->operand_num]; |
| val = ipa_find_agg_cst_for_param (agg, c->offset, c->by_ref); |
| } |
| else |
| val = NULL_TREE; |
| } |
| else |
| { |
| val = known_vals[c->operand_num]; |
| if (val == error_mark_node && c->code != CHANGED) |
| val = NULL_TREE; |
| } |
| |
| if (!val) |
| { |
| clause |= 1 << (i + predicate_first_dynamic_condition); |
| continue; |
| } |
| if (c->code == CHANGED) |
| continue; |
| |
| if (tree_to_shwi (TYPE_SIZE (TREE_TYPE (val))) != c->size) |
| { |
| clause |= 1 << (i + predicate_first_dynamic_condition); |
| continue; |
| } |
| if (c->code == IS_NOT_CONSTANT) |
| continue; |
| |
| val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val); |
| res = val |
| ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val) |
| : NULL; |
| |
| if (res && integer_zerop (res)) |
| continue; |
| |
| clause |= 1 << (i + predicate_first_dynamic_condition); |
| } |
| return clause; |
| } |
| |
| |
| /* Work out what conditions might be true at invocation of E. */ |
| |
| static void |
| evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p, |
| clause_t *clause_ptr, |
| vec<tree> *known_vals_ptr, |
| vec<ipa_polymorphic_call_context> |
| *known_contexts_ptr, |
| vec<ipa_agg_jump_function_p> *known_aggs_ptr) |
| { |
| struct cgraph_node *callee = e->callee->ultimate_alias_target (); |
| struct inline_summary *info = inline_summaries->get (callee); |
| vec<tree> known_vals = vNULL; |
| vec<ipa_agg_jump_function_p> known_aggs = vNULL; |
| |
| if (clause_ptr) |
| *clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition; |
| if (known_vals_ptr) |
| known_vals_ptr->create (0); |
| if (known_contexts_ptr) |
| known_contexts_ptr->create (0); |
| |
| if (ipa_node_params_sum |
| && !e->call_stmt_cannot_inline_p |
| && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr)) |
| { |
| struct ipa_node_params *parms_info; |
| struct ipa_edge_args *args = IPA_EDGE_REF (e); |
| struct inline_edge_summary *es = inline_edge_summary (e); |
| int i, count = ipa_get_cs_argument_count (args); |
| |
| if (e->caller->global.inlined_to) |
| parms_info = IPA_NODE_REF (e->caller->global.inlined_to); |
| else |
| parms_info = IPA_NODE_REF (e->caller); |
| |
| if (count && (info->conds || known_vals_ptr)) |
| known_vals.safe_grow_cleared (count); |
| if (count && (info->conds || known_aggs_ptr)) |
| known_aggs.safe_grow_cleared (count); |
| if (count && known_contexts_ptr) |
| known_contexts_ptr->safe_grow_cleared (count); |
| |
| for (i = 0; i < count; i++) |
| { |
| struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i); |
| tree cst = ipa_value_from_jfunc (parms_info, jf); |
| |
| if (!cst && e->call_stmt |
| && i < (int)gimple_call_num_args (e->call_stmt)) |
| { |
| cst = gimple_call_arg (e->call_stmt, i); |
| if (!is_gimple_min_invariant (cst)) |
| cst = NULL; |
| } |
| if (cst) |
| { |
| gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO); |
| if (known_vals.exists ()) |
| known_vals[i] = cst; |
| } |
| else if (inline_p && !es->param[i].change_prob) |
| known_vals[i] = error_mark_node; |
| |
| if (known_contexts_ptr) |
| (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e, |
| i, jf); |
| /* TODO: When IPA-CP starts propagating and merging aggregate jump |
| functions, use its knowledge of the caller too, just like the |
| scalar case above. */ |
| known_aggs[i] = &jf->agg; |
| } |
| } |
| else if (e->call_stmt && !e->call_stmt_cannot_inline_p |
| && ((clause_ptr && info->conds) || known_vals_ptr)) |
| { |
| int i, count = (int)gimple_call_num_args (e->call_stmt); |
| |
| if (count && (info->conds || known_vals_ptr)) |
| known_vals.safe_grow_cleared (count); |
| for (i = 0; i < count; i++) |
| { |
| tree cst = gimple_call_arg (e->call_stmt, i); |
| if (!is_gimple_min_invariant (cst)) |
| cst = NULL; |
| if (cst) |
| known_vals[i] = cst; |
| } |
| } |
| |
| if (clause_ptr) |
| *clause_ptr = evaluate_conditions_for_known_args (callee, inline_p, |
| known_vals, known_aggs); |
| |
| if (known_vals_ptr) |
| *known_vals_ptr = known_vals; |
| else |
| known_vals.release (); |
| |
| if (known_aggs_ptr) |
| *known_aggs_ptr = known_aggs; |
| else |
| known_aggs.release (); |
| } |
| |
| |
| /* Allocate the inline summary vector or resize it to cover all cgraph nodes. */ |
| |
| static void |
| inline_summary_alloc (void) |
| { |
| if (!edge_removal_hook_holder) |
| edge_removal_hook_holder = |
| symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL); |
| if (!edge_duplication_hook_holder) |
| edge_duplication_hook_holder = |
| symtab->add_edge_duplication_hook (&inline_edge_duplication_hook, NULL); |
| |
| if (!inline_summaries) |
| inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab); |
| |
| if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid) |
| inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1); |
| if (!edge_predicate_pool) |
| edge_predicate_pool = create_alloc_pool ("edge predicates", |
| sizeof (struct predicate), 10); |
| } |
| |
| /* We are called multiple times for a given function; clear |
| data from the previous run so they do not accumulate. */ |
| |
| static void |
| reset_inline_edge_summary (struct cgraph_edge *e) |
| { |
| if (e->uid < (int) inline_edge_summary_vec.length ()) |
| { |
| struct inline_edge_summary *es = inline_edge_summary (e); |
| |
| es->call_stmt_size = es->call_stmt_time = 0; |
| if (es->predicate) |
| pool_free (edge_predicate_pool, es->predicate); |
| es->predicate = NULL; |
| es->param.release (); |
| } |
| } |
| |
| /* We are called multiple times for a given function; clear |
| data from the previous run so they do not accumulate. */ |
| |
| static void |
| reset_inline_summary (struct cgraph_node *node, |
| inline_summary *info) |
| { |
| struct cgraph_edge *e; |
| |
| info->self_size = info->self_time = 0; |
| info->estimated_stack_size = 0; |
| info->estimated_self_stack_size = 0; |
| info->stack_frame_offset = 0; |
| info->size = 0; |
| info->time = 0; |
| info->growth = 0; |
| info->scc_no = 0; |
| if (info->loop_iterations) |
| { |
| pool_free (edge_predicate_pool, info->loop_iterations); |
| info->loop_iterations = NULL; |
| } |
| if (info->loop_stride) |
| { |
| pool_free (edge_predicate_pool, info->loop_stride); |
| info->loop_stride = NULL; |
| } |
| if (info->array_index) |
| { |
| pool_free (edge_predicate_pool, info->array_index); |
| info->array_index = NULL; |
| } |
| vec_free (info->conds); |
| vec_free (info->entry); |
| for (e = node->callees; e; e = e->next_callee) |
| reset_inline_edge_summary (e); |
| for (e = node->indirect_calls; e; e = e->next_callee) |
| reset_inline_edge_summary (e); |
| } |
| |
| /* Hook that is called by cgraph.c when a node is removed. */ |
| |
| void |
| inline_summary_t::remove (cgraph_node *node, inline_summary *info) |
| { |
| reset_inline_summary (node, info); |
| } |
| |
| /* Remap predicate P of the former function to be a predicate of the |
| duplicated function. POSSIBLE_TRUTHS is the clause of possible truths in |
| the duplicated node, INFO is the inline summary of the duplicated node. */ |
| |
| static struct predicate |
| remap_predicate_after_duplication (struct predicate *p, |
| clause_t possible_truths, |
| struct inline_summary *info) |
| { |
| struct predicate new_predicate = true_predicate (); |
| int j; |
| for (j = 0; p->clause[j]; j++) |
| if (!(possible_truths & p->clause[j])) |
| { |
| new_predicate = false_predicate (); |
| break; |
| } |
| else |
| add_clause (info->conds, &new_predicate, |
| possible_truths & p->clause[j]); |
| return new_predicate; |
| } |
| |
| /* Same as remap_predicate_after_duplication but handle hint predicate *P. |
| Additionally care about allocating new memory slot for updated predicate |
| and set it to NULL when it becomes true or false (and thus uninteresting). |
| */ |
| |
| static void |
| remap_hint_predicate_after_duplication (struct predicate **p, |
| clause_t possible_truths, |
| struct inline_summary *info) |
| { |
| struct predicate new_predicate; |
| |
| if (!*p) |
| return; |
| |
| new_predicate = remap_predicate_after_duplication (*p, |
| possible_truths, info); |
| /* We do not want to free the previous predicate; it is used by the node's origin. */ |
| *p = NULL; |
| set_hint_predicate (p, new_predicate); |
| } |
| |
| |
| /* Hook that is called by cgraph.c when a node is duplicated. */ |
| void |
| inline_summary_t::duplicate (cgraph_node *src, |
| cgraph_node *dst, |
| inline_summary *, |
| inline_summary *info) |
| { |
| inline_summary_alloc (); |
| memcpy (info, inline_summaries->get (src), sizeof (inline_summary)); |
| /* TODO: as an optimization, we may avoid copying conditions |
| that are known to be false or true. */ |
| info->conds = vec_safe_copy (info->conds); |
| |
| /* When there are any replacements in the function body, see if we can figure |
| out that something was optimized out. */ |
| if (ipa_node_params_sum && dst->clone.tree_map) |
| { |
| vec<size_time_entry, va_gc> *entry = info->entry; |
| /* Use SRC parm info since it may not be copied yet. */ |
| struct ipa_node_params *parms_info = IPA_NODE_REF (src); |
| vec<tree> known_vals = vNULL; |
| int count = ipa_get_param_count (parms_info); |
| int i, j; |
| clause_t possible_truths; |
| struct predicate true_pred = true_predicate (); |
| size_time_entry *e; |
| int optimized_out_size = 0; |
| bool inlined_to_p = false; |
| struct cgraph_edge *edge, *next; |
| |
| info->entry = 0; |
| known_vals.safe_grow_cleared (count); |
| for (i = 0; i < count; i++) |
| { |
| struct ipa_replace_map *r; |
| |
| for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++) |
| { |
| if (((!r->old_tree && r->parm_num == i) |
| || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i))) |
| && r->replace_p && !r->ref_p) |
| { |
| known_vals[i] = r->new_tree; |
| break; |
| } |
| } |
| } |
| possible_truths = evaluate_conditions_for_known_args (dst, false, |
| known_vals, |
| vNULL); |
| known_vals.release (); |
| |
| account_size_time (info, 0, 0, &true_pred); |
| |
| /* Remap size_time vectors. |
| Simplify the predicate by pruning out alternatives that are known |
| to be false. |
| TODO: as an optimization, we can also eliminate conditions known |
| to be true. */ |
| for (i = 0; vec_safe_iterate (entry, i, &e); i++) |
| { |
| struct predicate new_predicate; |
| new_predicate = remap_predicate_after_duplication (&e->predicate, |
| possible_truths, |
| info); |
| if (false_predicate_p (&new_predicate)) |
| optimized_out_size += e->size; |
| else |
| account_size_time (info, e->size, e->time, &new_predicate); |
| } |
| |
| /* Remap edge predicates with the same simplification as above. |
| Also copy constantness arrays. */ |
| for (edge = dst->callees; edge; edge = next) |
| { |
| struct predicate new_predicate; |
| struct inline_edge_summary *es = inline_edge_summary (edge); |
| next = edge->next_callee; |
| |
| if (!edge->inline_failed) |
| inlined_to_p = true; |
| if (!es->predicate) |
| continue; |
| new_predicate = remap_predicate_after_duplication (es->predicate, |
| possible_truths, |
| info); |
| if (false_predicate_p (&new_predicate) |
| && !false_predicate_p (es->predicate)) |
| optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE; |
| edge_set_predicate (edge, &new_predicate); |
| } |
| |
| /* Remap indirect edge predicates with the same simplification as above. |
| Also copy constantness arrays. */ |
| for (edge = dst->indirect_calls; edge; edge = next) |
| { |
| struct predicate new_predicate; |
| struct inline_edge_summary *es = inline_edge_summary (edge); |
| next = edge->next_callee; |
| |
| gcc_checking_assert (edge->inline_failed); |
| if (!es->predicate) |
| continue; |
| new_predicate = remap_predicate_after_duplication (es->predicate, |
| possible_truths, |
| info); |
| if (false_predicate_p (&new_predicate) |
| && !false_predicate_p (es->predicate)) |
| optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE; |
| edge_set_predicate (edge, &new_predicate); |
| } |
| remap_hint_predicate_after_duplication (&info->loop_iterations, |
| possible_truths, info); |
| remap_hint_predicate_after_duplication (&info->loop_stride, |
| possible_truths, info); |
| remap_hint_predicate_after_duplication (&info->array_index, |
| possible_truths, info); |
| |
| /* If the inliner or some pass after it ever starts producing |
| non-trivial clones, we will get into trouble with the lack of |
| information about updating self sizes, because the size vectors |
| already contain sizes of the callees. */ |
| gcc_assert (!inlined_to_p || !optimized_out_size); |
| } |
| else |
| { |
| info->entry = vec_safe_copy (info->entry); |
| if (info->loop_iterations) |
| { |
| predicate p = *info->loop_iterations; |
| info->loop_iterations = NULL; |
| set_hint_predicate (&info->loop_iterations, p); |
| } |
| if (info->loop_stride) |
| { |
| predicate p = *info->loop_stride; |
| info->loop_stride = NULL; |
| set_hint_predicate (&info->loop_stride, p); |
| } |
| if (info->array_index) |
| { |
| predicate p = *info->array_index; |
| info->array_index = NULL; |
| set_hint_predicate (&info->array_index, p); |
| } |
| } |
| if (!dst->global.inlined_to) |
| inline_update_overall_summary (dst); |
| } |
| |
| |
| /* Hook that is called by cgraph.c when an edge is duplicated. */ |
| |
| static void |
| inline_edge_duplication_hook (struct cgraph_edge *src, |
| struct cgraph_edge *dst, |
| ATTRIBUTE_UNUSED void *data) |
| { |
| struct inline_edge_summary *info; |
| struct inline_edge_summary *srcinfo; |
| inline_summary_alloc (); |
| info = inline_edge_summary (dst); |
| srcinfo = inline_edge_summary (src); |
| memcpy (info, srcinfo, sizeof (struct inline_edge_summary)); |
| info->predicate = NULL; |
| edge_set_predicate (dst, srcinfo->predicate); |
| info->param = srcinfo->param.copy (); |
| if (!dst->indirect_unknown_callee && src->indirect_unknown_callee) |
| { |
| info->call_stmt_size -= (eni_size_weights.indirect_call_cost |
| - eni_size_weights.call_cost); |
| info->call_stmt_time -= (eni_time_weights.indirect_call_cost |
| - eni_time_weights.call_cost); |
| } |
| } |
| |
| |
| /* Keep edge cache consistent across edge removal. */ |
| |
| static void |
| inline_edge_removal_hook (struct cgraph_edge *edge, |
| void *data ATTRIBUTE_UNUSED) |
| { |
| if (edge_growth_cache.exists ()) |
| reset_edge_growth_cache (edge); |
| reset_inline_edge_summary (edge); |
| } |
| |
| |
| /* Initialize growth caches. */ |
| |
| void |
| initialize_growth_caches (void) |
| { |
| if (symtab->edges_max_uid) |
| edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid); |
| } |
| |
| |
| /* Free growth caches. */ |
| |
| void |
| free_growth_caches (void) |
| { |
| edge_growth_cache.release (); |
| } |
| |
| |
| /* Dump edge summaries associated to NODE and recursively to all clones. |
| Indent by INDENT. */ |
| |
| static void |
| dump_inline_edge_summary (FILE *f, int indent, struct cgraph_node *node, |
| struct inline_summary *info) |
| { |
| struct cgraph_edge *edge; |
| for (edge = node->callees; edge; edge = edge->next_callee) |
| { |
| struct inline_edge_summary *es = inline_edge_summary (edge); |
| struct cgraph_node *callee = edge->callee->ultimate_alias_target (); |
| int i; |
| |
| fprintf (f, |
| "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i" |
| " time: %2i callee size:%2i stack:%2i", |
| indent, "", callee->name (), callee->order, |
| !edge->inline_failed |
| ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed), |
| indent, "", es->loop_depth, edge->frequency, |
| es->call_stmt_size, es->call_stmt_time, |
| (int) inline_summaries->get (callee)->size / INLINE_SIZE_SCALE, |
| (int) inline_summaries->get (callee)->estimated_stack_size); |
| |
| if (es->predicate) |
| { |
| fprintf (f, " predicate: "); |
| dump_predicate (f, info->conds, es->predicate); |
| } |
| else |
| fprintf (f, "\n"); |
| if (es->param.exists ()) |
| for (i = 0; i < (int) es->param.length (); i++) |
| { |
| int prob = es->param[i].change_prob; |
| |
| if (!prob) |
| fprintf (f, "%*s op%i is compile time invariant\n", |
| indent + 2, "", i); |
| else if (prob != REG_BR_PROB_BASE) |
| fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i, |
| prob * 100.0 / REG_BR_PROB_BASE); |
| } |
| if (!edge->inline_failed) |
| { |
| fprintf (f, "%*sStack frame offset %i, callee self size %i," |
| " callee size %i\n", |
| indent + 2, "", |
| (int) inline_summaries->get (callee)->stack_frame_offset, |
| (int) inline_summaries->get (callee)->estimated_self_stack_size, |
| (int) inline_summaries->get (callee)->estimated_stack_size); |
| dump_inline_edge_summary (f, indent + 2, callee, info); |
| } |
| } |
| for (edge = node->indirect_calls; edge; edge = edge->next_callee) |
| { |
| struct inline_edge_summary *es = inline_edge_summary (edge); |
| fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i" |
| " time: %2i", |
| indent, "", |
| es->loop_depth, |
| edge->frequency, es->call_stmt_size, es->call_stmt_time); |
| if (es->predicate) |
| { |
| fprintf (f, "predicate: "); |
| dump_predicate (f, info->conds, es->predicate); |
| } |
| else |
| fprintf (f, "\n"); |
| } |
| } |
| |
| |
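| /* Dump the inline summary of NODE to file F. */ |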
| void |
| dump_inline_summary (FILE *f, struct cgraph_node *node) |
| { |
| if (node->definition) |
| { |
| struct inline_summary *s = inline_summaries->get (node); |
| size_time_entry *e; |
| int i; |
| fprintf (f, "Inline summary for %s/%i", node->name (), |
| node->order); |
| if (DECL_DISREGARD_INLINE_LIMITS (node->decl)) |
| fprintf (f, " always_inline"); |
| if (s->inlinable) |
| fprintf (f, " inlinable"); |
| if (s->contains_cilk_spawn) |
| fprintf (f, " contains_cilk_spawn"); |
| fprintf (f, "\n self time: %i\n", s->self_time); |
| fprintf (f, " global time: %i\n", s->time); |
| fprintf (f, " self size: %i\n", s->self_size); |
| fprintf (f, " global size: %i\n", s->size); |
| fprintf (f, " min size: %i\n", s->min_size); |
| fprintf (f, " self stack: %i\n", |
| (int) s->estimated_self_stack_size); |
| fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size); |
| if (s->growth) |
| fprintf (f, " estimated growth:%i\n", (int) s->growth); |
| if (s->scc_no) |
| fprintf (f, " In SCC: %i\n", (int) s->scc_no); |
| for (i = 0; vec_safe_iterate (s->entry, i, &e); i++) |
| { |
| fprintf (f, " size:%f, time:%f, predicate:", |
| (double) e->size / INLINE_SIZE_SCALE, |
| (double) e->time / INLINE_TIME_SCALE); |
| dump_predicate (f, s->conds, &e->predicate); |
| } |
| if (s->loop_iterations) |
| { |
| fprintf (f, " loop iterations:"); |
| dump_predicate (f, s->conds, s->loop_iterations); |
| } |
| if (s->loop_stride) |
| { |
| fprintf (f, " loop stride:"); |
| dump_predicate (f, s->conds, s->loop_stride); |
| } |
| if (s->array_index) |
| { |
| fprintf (f, " array index:"); |
| dump_predicate (f, s->conds, s->array_index); |
| } |
| fprintf (f, " calls:\n"); |
| dump_inline_edge_summary (f, 4, node, s); |
| fprintf (f, "\n"); |
| } |
| } |
| |
| DEBUG_FUNCTION void |
| debug_inline_summary (struct cgraph_node *node) |
| { |
| dump_inline_summary (stderr, node); |
| } |
| |
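| /* Dump inline summaries of all defined functions to file F. */ |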
| void |
| dump_inline_summaries (FILE *f) |
| { |
| struct cgraph_node *node; |
| |
| FOR_EACH_DEFINED_FUNCTION (node) |
| if (!node->global.inlined_to) |
| dump_inline_summary (f, node); |
| } |
| |
| /* Give initial reasons why inlining would fail on EDGE. This gets either |
| nullified or usually overwritten by more precise reasons later. */ |
| |
| void |
| initialize_inline_failed (struct cgraph_edge *e) |
| { |
| struct cgraph_node *callee = e->callee; |
| |
| if (e->indirect_unknown_callee) |
| e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL; |
| else if (!callee->definition) |
| e->inline_failed = CIF_BODY_NOT_AVAILABLE; |
| else if (callee->local.redefined_extern_inline) |
| e->inline_failed = CIF_REDEFINED_EXTERN_INLINE; |
| else if (e->call_stmt_cannot_inline_p) |
| e->inline_failed = CIF_MISMATCHED_ARGUMENTS; |
| else if (cfun && fn_contains_cilk_spawn_p (cfun)) |
| /* We can't inline if the function is spawning a function. */ |
| e->inline_failed = CIF_FUNCTION_NOT_INLINABLE; |
| else |
| e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED; |
| } |
| |
| /* Callback of walk_aliased_vdefs. Records the fact that it has been invoked |
| in the boolean variable pointed to by DATA. */ |
| |
| static bool |
| mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED, |
| void *data) |
| { |
| bool *b = (bool *) data; |
| *b = true; |
| return true; |
| } |
| |
| /* If OP refers to the value of a function parameter, return the corresponding |
| parameter. If SIZE_P is non-NULL, the size of the memory load (or of the |
| SSA_NAME of the PARM_DECL) will be stored to *SIZE_P in that case too. */ |
| |
| static tree |
| unmodified_parm_1 (gimple stmt, tree op, HOST_WIDE_INT *size_p) |
| { |
| /* SSA_NAME referring to parm default def? */ |
| if (TREE_CODE (op) == SSA_NAME |
| && SSA_NAME_IS_DEFAULT_DEF (op) |
| && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL) |
| { |
| if (size_p) |
| *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op))); |
| return SSA_NAME_VAR (op); |
| } |
| /* Non-SSA parm reference? */ |
| if (TREE_CODE (op) == PARM_DECL) |
| { |
| bool modified = false; |
| |
| ao_ref refd; |
| ao_ref_init (&refd, op); |
| walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified, |
| NULL); |
| if (!modified) |
| { |
| if (size_p) |
| *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op))); |
| return op; |
| } |
| } |
| return NULL_TREE; |
| } |
| |
| /* If OP refers to the value of a function parameter, return the corresponding |
| parameter. Also traverse chains of SSA register assignments. If SIZE_P is |
| non-NULL, the size of the memory load (or of the SSA_NAME of the PARM_DECL) |
| will be stored to *SIZE_P in that case too. */ |
| |
| static tree |
| unmodified_parm (gimple stmt, tree op, HOST_WIDE_INT *size_p) |
| { |
| tree res = unmodified_parm_1 (stmt, op, size_p); |
| if (res) |
| return res; |
| |
| if (TREE_CODE (op) == SSA_NAME |
| && !SSA_NAME_IS_DEFAULT_DEF (op) |
| && gimple_assign_single_p (SSA_NAME_DEF_STMT (op))) |
| return unmodified_parm (SSA_NAME_DEF_STMT (op), |
| gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)), |
| size_p); |
| return NULL_TREE; |
| } |
| |
| /* If OP refers to a value of a function parameter or value loaded from an |
| aggregate passed to a parameter (either by value or reference), return TRUE |
| and store the number of the parameter to *INDEX_P, the access size into |
| *SIZE_P, and information whether and how it has been loaded from an |
| aggregate into *AGGPOS. INFO describes the function parameters, STMT is the |
| statement in which OP is used or loaded. */ |
| |
| static bool |
| unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi, |
| gimple stmt, tree op, int *index_p, |
| HOST_WIDE_INT *size_p, |
| struct agg_position_info *aggpos) |
| { |
| tree res = unmodified_parm_1 (stmt, op, size_p); |
| |
| gcc_checking_assert (aggpos); |
| if (res) |
| { |
| *index_p = ipa_get_param_decl_index (fbi->info, res); |
| if (*index_p < 0) |
| return false; |
| aggpos->agg_contents = false; |
| aggpos->by_ref = false; |
| return true; |
| } |
| |
| if (TREE_CODE (op) == SSA_NAME) |
| { |
| if (SSA_NAME_IS_DEFAULT_DEF (op) |
| || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op))) |
| return false; |
| stmt = SSA_NAME_DEF_STMT (op); |
| op = gimple_assign_rhs1 (stmt); |
| if (!REFERENCE_CLASS_P (op)) |
| return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p, size_p, |
| aggpos); |
| } |
| |
| aggpos->agg_contents = true; |
| return ipa_load_from_parm_agg (fbi, fbi->info->descriptors, |
| stmt, op, index_p, &aggpos->offset, |
| size_p, &aggpos->by_ref); |
| } |
| |
| /* See if the statement might disappear after inlining. |
| 0 - means not eliminated |
| 1 - half of such statements go away |
| 2 - it is eliminated for sure. |
| We are not terribly sophisticated; basically we are looking for simple |
| abstraction penalty wrappers. */ |
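| /* For instance (illustrative only), in a simple getter such as |
| int get_x (struct s *p) { return p->x; } |
| both the load from the parameter and the return statement are expected |
| to be optimized away once the body is inlined into a caller. */ |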
| |
| static int |
| eliminated_by_inlining_prob (gimple stmt) |
| { |
| enum gimple_code code = gimple_code (stmt); |
| enum tree_code rhs_code; |
| |
| if (!optimize) |
| return 0; |
| |
| switch (code) |
| { |
| case GIMPLE_RETURN: |
| return 2; |
| case GIMPLE_ASSIGN: |
| if (gimple_num_ops (stmt) != 2) |
| return 0; |
| |
| rhs_code = gimple_assign_rhs_code (stmt); |
| |
| /* Casts of parameters, loads from parameters passed by reference |
| and stores to the return value or parameters are often free after |
| inlining due to SRA and further combining. |
| Assume that half of such statements go away. */ |
| if (CONVERT_EXPR_CODE_P (rhs_code) |
| || rhs_code == VIEW_CONVERT_EXPR |
| || rhs_code == ADDR_EXPR |
| || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS) |
| { |
| tree rhs = gimple_assign_rhs1 (stmt); |
| tree lhs = gimple_assign_lhs (stmt); |
| tree inner_rhs = get_base_address (rhs); |
| tree inner_lhs = get_base_address (lhs); |
| bool rhs_free = false; |
| bool lhs_free = false; |
| |
| if (!inner_rhs) |
| inner_rhs = rhs; |
| if (!inner_lhs) |
| inner_lhs = lhs; |
| |
| /* Reads of parameters are expected to be free. */ |
| if (unmodified_parm (stmt, inner_rhs, NULL)) |
| rhs_free = true; |
| /* Match expressions of form &this->field. Those will most likely |
| combine with something upstream after inlining. */ |
| else if (TREE_CODE (inner_rhs) == ADDR_EXPR) |
| { |
| tree op = get_base_address (TREE_OPERAND (inner_rhs, 0)); |
| if (TREE_CODE (op) == PARM_DECL) |
| rhs_free = true; |
| else if (TREE_CODE (op) == MEM_REF |
| && unmodified_parm (stmt, TREE_OPERAND (op, 0), NULL)) |
| rhs_free = true; |
| } |
| |
| /* When a parameter is not an SSA register because its address is taken |
| and it is just copied into one, the statement will be completely |
| free after inlining (we will copy propagate backward). */ |
| if (rhs_free && is_gimple_reg (lhs)) |
| return 2; |
| |
| /* Reads of parameters passed by reference |
| are expected to be free (i.e. optimized out after inlining). */ |
| if (TREE_CODE (inner_rhs) == MEM_REF |
| && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0), NULL)) |
| rhs_free = true; |
| |
| /* Copying parameter passed by reference into gimple register is |
| probably also going to copy propagate, but we can't be quite |
| sure. */ |
| if (rhs_free && is_gimple_reg (lhs)) |
| lhs_free = true; |
| |
| /* Writes to parameters, parameters passed by value and the return value |
| (either directly or passed via invisible reference) are free. |
| |
| TODO: We ought to handle a testcase like |
| struct a {int a,b;}; |
| struct a |
| returnstruct (void) |
| { |
| struct a a ={1,2}; |
| return a; |
| } |
| |
| This translates into: |
| |
| returnstruct () |
| { |
| int a$b; |
| int a$a; |
| struct a a; |
| struct a D.2739; |
| |
| <bb 2>: |
| D.2739.a = 1; |
| D.2739.b = 2; |
| return D.2739; |
| |
| } |
| For that we would need to copy the ipa-split logic detecting writes |
| to the return value. */ |
| if (TREE_CODE (inner_lhs) == PARM_DECL |
| || TREE_CODE (inner_lhs) == RESULT_DECL |
| || (TREE_CODE (inner_lhs) == MEM_REF |
| && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0), NULL) |
| || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME |
| && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0)) |
| && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND |
| (inner_lhs, |
| 0))) == RESULT_DECL)))) |
| lhs_free = true; |
| if (lhs_free |
| && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs))) |
| rhs_free = true; |
| if (lhs_free && rhs_free) |
| return 1; |
| } |
| return 0; |
| default: |
| return 0; |
| } |
| } |
| |
| |
| /* If BB ends with a conditional that we can turn into predicates, attach |
| the corresponding predicates to the CFG edges. */ |
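| /* For instance (illustrative only), for |
| if (op0 > 6) |
| where op0 is an unmodified parameter, the true edge gets the predicate |
| "op0 > 6" and the false edge gets "op0 <= 6". */ |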
| |
| static void |
| set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi, |
| struct inline_summary *summary, |
| basic_block bb) |
| { |
| gimple last; |
| tree op; |
| HOST_WIDE_INT size; |
| int index; |
| struct agg_position_info aggpos; |
| enum tree_code code, inverted_code; |
| edge e; |
| edge_iterator ei; |
| gimple set_stmt; |
| tree op2; |
| |
| last = last_stmt (bb); |
| if (!last || gimple_code (last) != GIMPLE_COND) |
| return; |
| if (!is_gimple_ip_invariant (gimple_cond_rhs (last))) |
| return; |
| op = gimple_cond_lhs (last); |
| /* TODO: handle conditionals like |
| var = op0 < 4; |
| if (var != 0). */ |
| if (unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos)) |
| { |
| code = gimple_cond_code (last); |
| inverted_code = invert_tree_comparison (code, HONOR_NANS (op)); |
| |
| FOR_EACH_EDGE (e, ei, bb->succs) |
| { |
| enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE |
| ? code : inverted_code); |
| /* invert_tree_comparison will return ERROR_MARK on FP |
| comparisons that are not EQ/NE instead of returning the proper |
| unordered one. Be sure it is not confused with NON_CONSTANT. */ |
| if (this_code != ERROR_MARK) |
| { |
| struct predicate p = add_condition (summary, index, size, &aggpos, |
| this_code, |
| gimple_cond_rhs (last)); |
| e->aux = pool_alloc (edge_predicate_pool); |
| *(struct predicate *) e->aux = p; |
| } |
| } |
| } |
| |
| if (TREE_CODE (op) != SSA_NAME) |
| return; |
| /* Special case |
| if (builtin_constant_p (op)) |
| constant_code |
| else |
| nonconstant_code. |
| Here we can predicate nonconstant_code. We can't |
| really handle constant_code since we have no predicate |
| for this and also the constant code is not known to be |
| optimized away when the inliner doesn't see that the operand is |
| constant. Other optimizers might think otherwise. */ |
| if (gimple_cond_code (last) != NE_EXPR |
| || !integer_zerop (gimple_cond_rhs (last))) |
| return; |
| set_stmt = SSA_NAME_DEF_STMT (op); |
| if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P) |
| || gimple_call_num_args (set_stmt) != 1) |
| return; |
| op2 = gimple_call_arg (set_stmt, 0); |
| if (!unmodified_parm_or_parm_agg_item (fbi, set_stmt, op2, &index, &size, |
| &aggpos)) |
| return; |
| FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE) |
| { |
| struct predicate p = add_condition (summary, index, size, &aggpos, |
| IS_NOT_CONSTANT, NULL_TREE); |
| e->aux = pool_alloc (edge_predicate_pool); |
| *(struct predicate *) e->aux = p; |
| } |
| } |
| |
| |
| /* If BB ends with a switch that we can turn into predicates, attach |
| the corresponding predicates to the CFG edges. */ |
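| /* For example (illustrative only): the edge for "case 3 ... 5:" on an |
| unmodified parameter op0 gets the predicate "op0 >= 3 && op0 <= 5", |
| a single-value case gets an EQ_EXPR condition, and the default edge |
| keeps a true predicate. */ |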
| |
| static void |
| set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi, |
| struct inline_summary *summary, |
| basic_block bb) |
| { |
| gimple lastg; |
| tree op; |
| int index; |
| HOST_WIDE_INT size; |
| struct agg_position_info aggpos; |
| edge e; |
| edge_iterator ei; |
| size_t n; |
| size_t case_idx; |
| |
| lastg = last_stmt (bb); |
| if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH) |
| return; |
| gswitch *last = as_a <gswitch *> (lastg); |
| op = gimple_switch_index (last); |
| if (!unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos)) |
| return; |
| |
| FOR_EACH_EDGE (e, ei, bb->succs) |
| { |
| e->aux = pool_alloc (edge_predicate_pool); |
| *(struct predicate *) e->aux = false_predicate (); |
| } |
| n = gimple_switch_num_labels (last); |
| for (case_idx = 0; case_idx < n; ++case_idx) |
| { |
| tree cl = gimple_switch_label (last, case_idx); |
| tree min, max; |
| struct predicate p; |
| |
| e = find_edge (bb, label_to_block (CASE_LABEL (cl))); |
| min = CASE_LOW (cl); |
| max = CASE_HIGH (cl); |
| |
| /* For the default case we might want to construct a predicate that none |
| of the cases is met, but that is a bit hard to do without having negations |
| of the conditionals handy. */ |
| if (!min && !max) |
| p = true_predicate (); |
| else if (!max) |
| p = add_condition (summary, index, size, &aggpos, EQ_EXPR, min); |
| else |
| { |
| struct predicate p1, p2; |
| p1 = add_condition (summary, index, size, &aggpos, GE_EXPR, min); |
| p2 = add_condition (summary, index, size, &aggpos, LE_EXPR, max); |
| p = and_predicates (summary->conds, &p1, &p2); |
| } |
| *(struct predicate *) e->aux |
| = or_predicates (summary->conds, &p, (struct predicate *) e->aux); |
| } |
| } |
| |
| |
| /* For each BB in NODE attach to its AUX pointer the predicate under |
| which it is executable. */ |
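| /* As a rough sketch of the dataflow below: the predicate of a block is the |
| OR over its incoming edges of (predicate of the predecessor AND predicate |
| attached to the edge). E.g. a block reached only on the true edge of a |
| test "param < 4" ends up with that predicate, while a join block reachable |
| from both branches collapses back to the always-true predicate.  */ |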
| |
| static void |
| compute_bb_predicates (struct ipa_func_body_info *fbi, |
| struct cgraph_node *node, |
| struct inline_summary *summary) |
| { |
| struct function *my_function = DECL_STRUCT_FUNCTION (node->decl); |
| bool done = false; |
| basic_block bb; |
| |
| FOR_EACH_BB_FN (bb, my_function) |
| { |
| set_cond_stmt_execution_predicate (fbi, summary, bb); |
| set_switch_stmt_execution_predicate (fbi, summary, bb); |
| } |
| |
| /* Entry block is always executable. */ |
| ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux |
| = pool_alloc (edge_predicate_pool); |
| *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux |
| = true_predicate (); |
| |
| /* A simple dataflow propagation of predicates forward in the CFG. |
| TODO: work in reverse postorder. */ |
| while (!done) |
| { |
| done = true; |
| FOR_EACH_BB_FN (bb, my_function) |
| { |
| struct predicate p = false_predicate (); |
| edge e; |
| edge_iterator ei; |
| FOR_EACH_EDGE (e, ei, bb->preds) |
| { |
| if (e->src->aux) |
| { |
| struct predicate this_bb_predicate |
| = *(struct predicate *) e->src->aux; |
| if (e->aux) |
| this_bb_predicate |
| = and_predicates (summary->conds, &this_bb_predicate, |
| (struct predicate *) e->aux); |
| p = or_predicates (summary->conds, &p, &this_bb_predicate); |
| if (true_predicate_p (&p)) |
| break; |
| } |
| } |
| if (false_predicate_p (&p)) |
| gcc_assert (!bb->aux); |
| else |
| { |
| if (!bb->aux) |
| { |
| done = false; |
| bb->aux = pool_alloc (edge_predicate_pool); |
| *((struct predicate *) bb->aux) = p; |
| } |
| else if (!predicates_equal_p (&p, (struct predicate *) bb->aux)) |
| { |
| /* This OR operation is needed to ensure monotone data flow |
| in case we hit the limit on the number of clauses and the |
| and/or operations above give approximate answers. */ |
| p = or_predicates (summary->conds, &p, (struct predicate *)bb->aux); |
| if (!predicates_equal_p (&p, (struct predicate *) bb->aux)) |
| { |
| done = false; |
| *((struct predicate *) bb->aux) = p; |
| } |
| } |
| } |
| } |
| } |
| } |
| |
| |
| /* We keep info about constantness of SSA names. */ |
| |
| typedef struct predicate predicate_t; |
| /* Return a predicate specifying when the expression EXPR might have a result |
| that is not a compile time constant. */ |
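| /* A minimal hedged example: for the expression "param + 1", where "param" is |
| an unmodified formal parameter, the result is the condition that the |
| parameter changed from its known value; for "param + x_2" with x_2 an SSA |
| name, the predicate recorded for x_2 in NONCONSTANT_NAMES is ORed in as |
| well.  */ |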
| |
| static struct predicate |
| will_be_nonconstant_expr_predicate (struct ipa_node_params *info, |
| struct inline_summary *summary, |
| tree expr, |
| vec<predicate_t> nonconstant_names) |
| { |
| tree parm; |
| int index; |
| HOST_WIDE_INT size; |
| |
| while (UNARY_CLASS_P (expr)) |
| expr = TREE_OPERAND (expr, 0); |
| |
| parm = unmodified_parm (NULL, expr, &size); |
| if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0) |
| return add_condition (summary, index, size, NULL, CHANGED, NULL_TREE); |
| if (is_gimple_min_invariant (expr)) |
| return false_predicate (); |
| if (TREE_CODE (expr) == SSA_NAME) |
| return nonconstant_names[SSA_NAME_VERSION (expr)]; |
| if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr)) |
| { |
| struct predicate p1 = will_be_nonconstant_expr_predicate |
| (info, summary, TREE_OPERAND (expr, 0), |
| nonconstant_names); |
| struct predicate p2; |
| if (true_predicate_p (&p1)) |
| return p1; |
| p2 = will_be_nonconstant_expr_predicate (info, summary, |
| TREE_OPERAND (expr, 1), |
| nonconstant_names); |
| return or_predicates (summary->conds, &p1, &p2); |
| } |
| else if (TREE_CODE (expr) == COND_EXPR) |
| { |
| struct predicate p1 = will_be_nonconstant_expr_predicate |
| (info, summary, TREE_OPERAND (expr, 0), |
| nonconstant_names); |
| struct predicate p2; |
| if (true_predicate_p (&p1)) |
| return p1; |
| p2 = will_be_nonconstant_expr_predicate (info, summary, |
| TREE_OPERAND (expr, 1), |
| nonconstant_names); |
| if (true_predicate_p (&p2)) |
| return p2; |
| p1 = or_predicates (summary->conds, &p1, &p2); |
| p2 = will_be_nonconstant_expr_predicate (info, summary, |
| TREE_OPERAND (expr, 2), |
| nonconstant_names); |
| return or_predicates (summary->conds, &p1, &p2); |
| } |
| else |
| { |
| debug_tree (expr); |
| gcc_unreachable (); |
| } |
| return false_predicate (); |
| } |
| |
| |
| /* Return a predicate specifying when the result of STMT might not be |
| a compile time constant. */ |
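| /* A minimal hedged example: for "tmp_1 = param_2 + 1", where param_2 is the |
| default definition of a formal parameter, the result is the condition that |
| this parameter changed; the same predicate is also stored for tmp_1 in |
| NONCONSTANT_NAMES so that later statements using tmp_1 inherit it.  */ |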
| |
| static struct predicate |
| will_be_nonconstant_predicate (struct ipa_func_body_info *fbi, |
| struct inline_summary *summary, |
| gimple stmt, |
| vec<predicate_t> nonconstant_names) |
| { |
| struct predicate p = true_predicate (); |
| ssa_op_iter iter; |
| tree use; |
| struct predicate op_non_const; |
| bool is_load; |
| int base_index; |
| HOST_WIDE_INT size; |
| struct agg_position_info aggpos; |
| |
| /* Only these kinds of statements might be optimized away |
| when their arguments become known constants. */ |
| if (gimple_code (stmt) != GIMPLE_ASSIGN |
| && gimple_code (stmt) != GIMPLE_COND |
| && gimple_code (stmt) != GIMPLE_SWITCH |
| && (gimple_code (stmt) != GIMPLE_CALL |
| || !(gimple_call_flags (stmt) & ECF_CONST))) |
| return p; |
| |
| /* Stores will stay anyway. */ |
| if (gimple_store_p (stmt)) |
| return p; |
| |
| is_load = gimple_assign_load_p (stmt); |
| |
| /* Loads can be optimized when the value is known. */ |
| if (is_load) |
| { |
| tree op; |
| gcc_assert (gimple_assign_single_p (stmt)); |
| op = gimple_assign_rhs1 (stmt); |
| if (!unmodified_parm_or_parm_agg_item (fbi, stmt, op, &base_index, &size, |
| &aggpos)) |
| return p; |
| } |
| else |
| base_index = -1; |
| |
| /* See if we understand all operands before we start |
| adding conditionals. */ |
| FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE) |
| { |
| tree parm = unmodified_parm (stmt, use, NULL); |
| /* For arguments we can build a condition. */ |
| if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0) |
| continue; |
| if (TREE_CODE (use) != SSA_NAME) |
| return p; |
| /* If we know when the operand is constant, |
| we can still say something useful. */ |
| if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)])) |
| continue; |
| return p; |
| } |
| |
| if (is_load) |
| op_non_const = |
| add_condition (summary, base_index, size, &aggpos, CHANGED, NULL); |
| else |
| op_non_const = false_predicate (); |
| FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE) |
| { |
| HOST_WIDE_INT size; |
| tree parm = unmodified_parm (stmt, use, &size); |
| int index; |
| |
| if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0) |
| { |
| if (index != base_index) |
| p = add_condition (summary, index, size, NULL, CHANGED, NULL_TREE); |
| else |
| continue; |
| } |
| else |
| p = nonconstant_names[SSA_NAME_VERSION (use)]; |
| op_non_const = or_predicates (summary->conds, &p, &op_non_const); |
| } |
| if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL) |
| && gimple_op (stmt, 0) |
| && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME) |
| nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))] |
| = op_non_const; |
| return op_non_const; |
| } |
| |
| struct record_modified_bb_info |
| { |
| bitmap bb_set; |
| gimple stmt; |
| }; |
| |
| /* Callback of walk_aliased_vdefs. Records basic blocks where the value may be |
| set except for info->stmt. */ |
| |
| static bool |
| record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data) |
| { |
| struct record_modified_bb_info *info = |
| (struct record_modified_bb_info *) data; |
| if (SSA_NAME_DEF_STMT (vdef) == info->stmt) |
| return false; |
| bitmap_set_bit (info->bb_set, |
| SSA_NAME_IS_DEFAULT_DEF (vdef) |
| ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index |
| : gimple_bb (SSA_NAME_DEF_STMT (vdef))->index); |
| return false; |
| } |
| |
| /* Return the probability (based on REG_BR_PROB_BASE) that the I-th parameter |
| of STMT will change since the last invocation of STMT. |
| |
| Value 0 is reserved for compile time invariants. |
| For common parameters it is REG_BR_PROB_BASE. For loop invariants it |
| ought to be REG_BR_PROB_BASE / estimated_iters. */ |
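| /* A hedged numeric example: if the statement defining the argument has |
| frequency 100 and the call has frequency 1000 (i.e. the call sits in a |
| loop running roughly ten times per definition), the result is about |
| GCOV_COMPUTE_SCALE (100, 1000), i.e. roughly REG_BR_PROB_BASE / 10.  */ |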
| |
| static int |
| param_change_prob (gimple stmt, int i) |
| { |
| tree op = gimple_call_arg (stmt, i); |
| basic_block bb = gimple_bb (stmt); |
| tree base; |
| |
| /* Global invariants never change. */ |
| if (is_gimple_min_invariant (op)) |
| return 0; |
| /* We would have to do non-trivial analysis to really work out the |
| probability of the value changing (i.e. when the init statement |
| is in a sibling loop of the call). |
| |
| We make a conservative estimate: when the call is executed N times more often |
| than the statement defining the value, we take the frequency 1/N. */ |
| if (TREE_CODE (op) == SSA_NAME) |
| { |
| int init_freq; |
| |
| if (!bb->frequency) |
| return REG_BR_PROB_BASE; |
| |
| if (SSA_NAME_IS_DEFAULT_DEF (op)) |
| init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency; |
| else |
| init_freq = gimple_bb (SSA_NAME_DEF_STMT (op))->frequency; |
| |
| if (!init_freq) |
| init_freq = 1; |
| if (init_freq < bb->frequency) |
| return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1); |
| else |
| return REG_BR_PROB_BASE; |
| } |
| |
| base = get_base_address (op); |
| if (base) |
| { |
| ao_ref refd; |
| int max; |
| struct record_modified_bb_info info; |
| bitmap_iterator bi; |
| unsigned index; |
| tree init = ctor_for_folding (base); |
| |
| if (init != error_mark_node) |
| return 0; |
| if (!bb->frequency) |
| return REG_BR_PROB_BASE; |
| ao_ref_init (&refd, op); |
| info.stmt = stmt; |
| info.bb_set = BITMAP_ALLOC (NULL); |
| walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info, |
| NULL); |
| if (bitmap_bit_p (info.bb_set, bb->index)) |
| { |
| BITMAP_FREE (info.bb_set); |
| return REG_BR_PROB_BASE; |
| } |
| |
| /* Assume that all memory is initialized at entry. |
| TODO: Can we easily determine if the value is always defined |
| and thus skip the entry block? */ |
| if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency) |
| max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency; |
| else |
| max = 1; |
| |
| EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi) |
| max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency); |
| |
| BITMAP_FREE (info.bb_set); |
| if (max < bb->frequency) |
| return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1); |
| else |
| return REG_BR_PROB_BASE; |
| } |
| return REG_BR_PROB_BASE; |
| } |
| |
| /* Find whether a basic block BB is the final block of a (half) diamond CFG |
| sub-graph and whether the predicate the condition depends on is known. If so, |
| return true and store the predicate in *P. */ |
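| /* A sketch of the shape being matched: a (half) diamond such as |
| |
| if (param_1 != 0) |
| x_2 = ...; |
| else |
| x_3 = ...; |
| # x_4 = PHI <x_2, x_3> |
| |
| where both predecessors of BB trace back to a single conditional whose |
| operands we understand.  */ |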
| |
| static bool |
| phi_result_unknown_predicate (struct ipa_node_params *info, |
| inline_summary *summary, basic_block bb, |
| struct predicate *p, |
| vec<predicate_t> nonconstant_names) |
| { |
| edge e; |
| edge_iterator ei; |
| basic_block first_bb = NULL; |
| gimple stmt; |
| |
| if (single_pred_p (bb)) |
| { |
| *p = false_predicate (); |
| return true; |
| } |
| |
| FOR_EACH_EDGE (e, ei, bb->preds) |
| { |
| if (single_succ_p (e->src)) |
| { |
| if (!single_pred_p (e->src)) |
| return false; |
| if (!first_bb) |
| first_bb = single_pred (e->src); |
| else if (single_pred (e->src) != first_bb) |
| return false; |
| } |
| else |
| { |
| if (!first_bb) |
| first_bb = e->src; |
| else if (e->src != first_bb) |
| return false; |
| } |
| } |
| |
| if (!first_bb) |
| return false; |
| |
| stmt = last_stmt (first_bb); |
| if (!stmt |
| || gimple_code (stmt) != GIMPLE_COND |
| || !is_gimple_ip_invariant (gimple_cond_rhs (stmt))) |
| return false; |
| |
| *p = will_be_nonconstant_expr_predicate (info, summary, |
| gimple_cond_lhs (stmt), |
| nonconstant_names); |
| if (true_predicate_p (p)) |
| return false; |
| else |
| return true; |
| } |
| |
| /* Given a PHI statement in a function described by inline properties SUMMARY |
| and *P being the predicate describing whether the selected PHI argument is |
| known, store a predicate for the result of the PHI statement into |
| NONCONSTANT_NAMES, if possible. */ |
| |
| static void |
| predicate_for_phi_result (struct inline_summary *summary, gphi *phi, |
| struct predicate *p, |
| vec<predicate_t> nonconstant_names) |
| { |
| unsigned i; |
| |
| for (i = 0; i < gimple_phi_num_args (phi); i++) |
| { |
| tree arg = gimple_phi_arg (phi, i)->def; |
| if (!is_gimple_min_invariant (arg)) |
| { |
| gcc_assert (TREE_CODE (arg) == SSA_NAME); |
| *p = or_predicates (summary->conds, p, |
| &nonconstant_names[SSA_NAME_VERSION (arg)]); |
| if (true_predicate_p (p)) |
| return; |
| } |
| } |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "\t\tphi predicate: "); |
| dump_predicate (dump_file, summary->conds, p); |
| } |
| nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p; |
| } |
| |
| /* Return a predicate specifying when the array index in access OP |
| becomes non-constant. */ |
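| /* For instance, for an access like a[i_5][3].f the result is the predicate |
| under which the SSA name i_5 is not a known constant; constant indices |
| such as 3 contribute nothing.  */ |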
| |
| static struct predicate |
| array_index_predicate (inline_summary *info, |
| vec< predicate_t> nonconstant_names, tree op) |
| { |
| struct predicate p = false_predicate (); |
| while (handled_component_p (op)) |
| { |
| if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF) |
| { |
| if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME) |
| p = or_predicates (info->conds, &p, |
| &nonconstant_names[SSA_NAME_VERSION |
| (TREE_OPERAND (op, 1))]); |
| } |
| op = TREE_OPERAND (op, 0); |
| } |
| return p; |
| } |
| |
| /* For a typical usage of __builtin_expect (a < b, 1), we |
| may introduce an extra relation stmt: |
| With the builtin, we have |
| t1 = a < b; |
| t2 = (long int) t1; |
| t3 = __builtin_expect (t2, 1); |
| if (t3 != 0) |
| goto ... |
| Without the builtin, we have |
| if (a < b) |
| goto ... |
| This affects the size/time estimation and may have |
| an impact on early inlining. |
| Here we find this pattern and fix it up later. */ |
| |
| static gimple |
| find_foldable_builtin_expect (basic_block bb) |
| { |
| gimple_stmt_iterator bsi; |
| |
| for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi)) |
| { |
| gimple stmt = gsi_stmt (bsi); |
| if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT) |
| || (is_gimple_call (stmt) |
| && gimple_call_internal_p (stmt) |
| && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT)) |
| { |
| tree var = gimple_call_lhs (stmt); |
| tree arg = gimple_call_arg (stmt, 0); |
| use_operand_p use_p; |
| gimple use_stmt; |
| bool match = false; |
| bool done = false; |
| |
| if (!var || !arg) |
| continue; |
| gcc_assert (TREE_CODE (var) == SSA_NAME); |
| |
| while (TREE_CODE (arg) == SSA_NAME) |
| { |
| gimple stmt_tmp = SSA_NAME_DEF_STMT (arg); |
| if (!is_gimple_assign (stmt_tmp)) |
| break; |
| switch (gimple_assign_rhs_code (stmt_tmp)) |
| { |
| case LT_EXPR: |
| case LE_EXPR: |
| case GT_EXPR: |
| case GE_EXPR: |
| case EQ_EXPR: |
| case NE_EXPR: |
| match = true; |
| done = true; |
| break; |
| CASE_CONVERT: |
| break; |
| default: |
| done = true; |
| break; |
| } |
| if (done) |
| break; |
| arg = gimple_assign_rhs1 (stmt_tmp); |
| } |
| |
| if (match && single_imm_use (var, &use_p, &use_stmt) |
| && gimple_code (use_stmt) == GIMPLE_COND) |
| return use_stmt; |
| } |
| } |
| return NULL; |
| } |
| |
| /* Return true when the basic block contains only clobbers followed by RESX. |
| Such BBs are kept around to make removal of dead stores possible in the |
| presence of EH and will be optimized out by optimize_clobbers later in the |
| game. |
| |
| NEED_EH is used to recurse in case the clobber has non-EH predecessors |
| that can be clobber-only, too. When it is false, the RESX is not necessary |
| at the end of the basic block. */ |
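| /* An illustrative sketch (GIMPLE-like pseudocode) of a block this accepts: |
| |
| <bb N> (reached only over EH edges): |
| var_1 = {CLOBBER}; |
| var_2 = {CLOBBER}; |
| resx 1 |
| |
| Such a block will be removed by optimize_clobbers, so its statements are |
| not charged to the function size or time below.  */ |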
| |
| static bool |
| clobber_only_eh_bb_p (basic_block bb, bool need_eh = true) |
| { |
| gimple_stmt_iterator gsi = gsi_last_bb (bb); |
| edge_iterator ei; |
| edge e; |
| |
| if (need_eh) |
| { |
| if (gsi_end_p (gsi)) |
| return false; |
| if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX) |
| return false; |
| gsi_prev (&gsi); |
| } |
| else if (!single_succ_p (bb)) |
| return false; |
| |
| for (; !gsi_end_p (gsi); gsi_prev (&gsi)) |
| { |
| gimple stmt = gsi_stmt (gsi); |
| if (is_gimple_debug (stmt)) |
| continue; |
| if (gimple_clobber_p (stmt)) |
| continue; |
| if (gimple_code (stmt) == GIMPLE_LABEL) |
| break; |
| return false; |
| } |
| |
| /* See if all predecessors are either throws or clobber-only BBs. */ |
| FOR_EACH_EDGE (e, ei, bb->preds) |
| if (!(e->flags & EDGE_EH) |
| && !clobber_only_eh_bb_p (e->src, false)) |
| return false; |
| |
| return true; |
| } |
| |
| /* Compute function body size parameters for NODE. |
| When EARLY is true, we compute only simple summaries without |
| non-trivial predicates to drive the early inliner. */ |
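| /* A hedged note on the units used below: per-statement times are weighted by |
| the block frequency (relative to CGRAPH_FREQ_BASE) and the total is scaled |
| back and capped at MAX_TIME at the end; the elimination probability from |
| eliminated_by_inlining_prob is in the range <0,2>, i.e. 0%, 50% or 100% of |
| the statement cost is expected to disappear after inlining.  */ |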
| |
| static void |
| estimate_function_body_sizes (struct cgraph_node *node, bool early) |
| { |
| gcov_type time = 0; |
| /* Estimate static overhead for function prologue/epilogue and alignment. */ |
| int size = 2; |
| /* Benefits are scaled by the probability of elimination, which is in the |
| range <0,2>. */ |
| basic_block bb; |
| struct function *my_function = DECL_STRUCT_FUNCTION (node->decl); |
| int freq; |
| struct inline_summary *info = inline_summaries->get (node); |
| struct predicate bb_predicate; |
| struct ipa_func_body_info fbi; |
| vec<predicate_t> nonconstant_names = vNULL; |
| int nblocks, n; |
| int *order; |
| predicate array_index = true_predicate (); |
| gimple fix_builtin_expect_stmt; |
| |
| gcc_assert (my_function && my_function->cfg); |
| gcc_assert (cfun == my_function); |
| |
| memset (&fbi, 0, sizeof (fbi)); |
| info->conds = NULL; |
| info->entry = NULL; |
| |
| /* When optimizing and analyzing for the IPA inliner, initialize the loop |
| optimizer so we can produce proper inline hints. |
| |
| When optimizing and analyzing for the early inliner, initialize node params |
| so we can produce correct BB predicates. */ |
| |
| if (opt_for_fn (node->decl, optimize)) |
| { |
| calculate_dominance_info (CDI_DOMINATORS); |
| if (!early) |
| loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS); |
| else |
| { |
| ipa_check_create_node_params (); |
| ipa_initialize_node_params (node); |
| } |
| |
| if (ipa_node_params_sum) |
| { |
| fbi.node = node; |
| fbi.info = IPA_NODE_REF (node); |
| fbi.bb_infos = vNULL; |
| fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
| fbi.param_count = count_formal_params (node->decl); |
| nonconstant_names.safe_grow_cleared |
| (SSANAMES (my_function)->length ()); |
| } |
| } |
| |
| if (dump_file) |
| fprintf (dump_file, "\nAnalyzing function body size: %s\n", |
| node->name ()); |
| |
| /* When we run into the maximal number of entries, we assign everything to the |
| constant truth case. Be sure to have it in the list. */ |
| bb_predicate = true_predicate (); |
| account_size_time (info, 0, 0, &bb_predicate); |
| |
| bb_predicate = not_inlined_predicate (); |
| account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &bb_predicate); |
| |
| if (fbi.info) |
| compute_bb_predicates (&fbi, node, info); |
| order = XNEWVEC (int, n_basic_blocks_for_fn (cfun)); |
| nblocks = pre_and_rev_post_order_compute (NULL, order, false); |
| for (n = 0; n < nblocks; n++) |
| { |
| bb = BASIC_BLOCK_FOR_FN (cfun, order[n]); |
| freq = compute_call_stmt_bb_frequency (node->decl, bb); |
| if (clobber_only_eh_bb_p (bb)) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "\n Ignoring BB %i;" |
| " it will be optimized away by cleanup_clobbers\n", |
| bb->index); |
| continue; |
| } |
| |
| /* TODO: Obviously predicates can be propagated down across CFG. */ |
| if (fbi.info) |
| { |
| if (bb->aux) |
| bb_predicate = *(struct predicate *) bb->aux; |
| else |
| bb_predicate = false_predicate (); |
| } |
| else |
| bb_predicate = true_predicate (); |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "\n BB %i predicate:", bb->index); |
| dump_predicate (dump_file, info->conds, &bb_predicate); |
| } |
| |
| if (fbi.info && nonconstant_names.exists ()) |
| { |
| struct predicate phi_predicate; |
| bool first_phi = true; |
| |
| for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); |
| gsi_next (&bsi)) |
| { |
| if (first_phi |
| && !phi_result_unknown_predicate (fbi.info, info, bb, |
| &phi_predicate, |
| nonconstant_names)) |
| break; |
| first_phi = false; |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, " "); |
| print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0); |
| } |
| predicate_for_phi_result (info, bsi.phi (), &phi_predicate, |
| nonconstant_names); |
| } |
| } |
| |
| fix_builtin_expect_stmt = find_foldable_builtin_expect (bb); |
| |
| for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); |
| gsi_next (&bsi)) |
| { |
| gimple stmt = gsi_stmt (bsi); |
| int this_size = estimate_num_insns (stmt, &eni_size_weights); |
| int this_time = estimate_num_insns (stmt, &eni_time_weights); |
| int prob; |
| struct predicate will_be_nonconstant; |
| |
| /* This relation stmt should be folded after we remove the |
| builtin_expect call. Adjust the cost here. */ |
| if (stmt == fix_builtin_expect_stmt) |
| { |
| this_size--; |
| this_time--; |
| } |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, " "); |
| print_gimple_stmt (dump_file, stmt, 0, 0); |
| fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n", |
| ((double) freq) / CGRAPH_FREQ_BASE, this_size, |
| this_time); |
| } |
| |
| if (gimple_assign_load_p (stmt) && nonconstant_names.exists ()) |
| { |
| struct predicate this_array_index; |
| this_array_index = |
| array_index_predicate (info, nonconstant_names, |
| gimple_assign_rhs1 (stmt)); |
| if (!false_predicate_p (&this_array_index)) |
| array_index = |
| and_predicates (info->conds, &array_index, |
| &this_array_index); |
| } |
| if (gimple_store_p (stmt) && nonconstant_names.exists ()) |
| { |
| struct predicate this_array_index; |
| this_array_index = |
| array_index_predicate (info, nonconstant_names, |
| gimple_get_lhs (stmt)); |
| if (!false_predicate_p (&this_array_index)) |
| array_index = |
| and_predicates (info->conds, &array_index, |
| &this_array_index); |
| } |
| |
| |
| if (is_gimple_call (stmt) |
| && !gimple_call_internal_p (stmt)) |
| { |
| struct cgraph_edge *edge = node->get_edge (stmt); |
| struct inline_edge_summary *es = inline_edge_summary (edge); |
| |
| /* Special case: results of BUILT_IN_CONSTANT_P will always be |
| resolved as constant. We however don't want to optimize |
| out the cgraph edges. */ |
| if (nonconstant_names.exists () |
| && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P) |
| && gimple_call_lhs (stmt) |
| && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME) |
| { |
| struct predicate false_p = false_predicate (); |
| nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))] |
| = false_p; |
| } |
| if (ipa_node_params_sum) |
| { |
| int count = gimple_call_num_args (stmt); |
| int i; |
| |
| if (count) |
| es->param.safe_grow_cleared (count); |
| for (i = 0; i < count; i++) |
| { |
| int prob = param_change_prob (stmt, i); |
| gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE); |
| es->param[i].change_prob = prob; |
| } |
| } |
| |
| es->call_stmt_size = this_size; |
| es->call_stmt_time = this_time; |
| es->loop_depth = bb_loop_depth (bb); |
| edge_set_predicate (edge, &bb_predicate); |
| } |
| |
| /* TODO: When a conditional jump or switch is known to be constant, but |
| we did not translate it into the predicates, we really can account for |
| just the maximum of the possible paths. */ |
| if (fbi.info) |
| will_be_nonconstant |
| = will_be_nonconstant_predicate (&fbi, info, |
| stmt, nonconstant_names); |
| if (this_time || this_size) |
| { |
| struct predicate p; |
| |
| this_time *= freq; |
| |
| prob = eliminated_by_inlining_prob (stmt); |
| if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, |
| "\t\t50%% will be eliminated by inlining\n"); |
| if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "\t\tWill be eliminated by inlining\n"); |
| |
| if (fbi.info) |
| p = and_predicates (info->conds, &bb_predicate, |
| &will_be_nonconstant); |
| else |
| p = true_predicate (); |
| |
| if (!false_predicate_p (&p) |
| || (is_gimple_call (stmt) |
| && !false_predicate_p (&bb_predicate))) |
| { |
| time += this_time; |
| size += this_size; |
| if (time > MAX_TIME * INLINE_TIME_SCALE) |
| time = MAX_TIME * INLINE_TIME_SCALE; |
| } |
| |
| /* We account everything but the calls. Calls have their own |
| size/time info attached to cgraph edges. This is necessary |
| in order to make the cost disappear after inlining. */ |
| if (!is_gimple_call (stmt)) |
| { |
| if (prob) |
| { |
| struct predicate ip = not_inlined_predicate (); |
| ip = and_predicates (info->conds, &ip, &p); |
| account_size_time (info, this_size * prob, |
| this_time * prob, &ip); |
| } |
| if (prob != 2) |
| account_size_time (info, this_size * (2 - prob), |
| this_time * (2 - prob), &p); |
| } |
| |
| gcc_assert (time >= 0); |
| gcc_assert (size >= 0); |
| } |
| } |
| } |
| set_hint_predicate (&inline_summaries->get (node)->array_index, array_index); |
| time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE; |
| if (time > MAX_TIME) |
| time = MAX_TIME; |
| free (order); |
| |
| if (nonconstant_names.exists () && !early) |
| { |
| struct loop *loop; |
| predicate loop_iterations = true_predicate (); |
| predicate loop_stride = true_predicate (); |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| flow_loops_dump (dump_file, NULL, 0); |
| scev_initialize (); |
| FOR_EACH_LOOP (loop, 0) |
| { |
| vec<edge> exits; |
| edge ex; |
| unsigned int j; |
| struct tree_niter_desc niter_desc; |
| bb_predicate = *(struct predicate *) loop->header->aux; |
| |
| exits = get_loop_exit_edges (loop); |
| FOR_EACH_VEC_ELT (exits, j, ex) |
| if (number_of_iterations_exit (loop, ex, &niter_desc, false) |
| && !is_gimple_min_invariant (niter_desc.niter)) |
| { |
| predicate will_be_nonconstant |
| = will_be_nonconstant_expr_predicate (fbi.info, info, |
| niter_desc.niter, |
| nonconstant_names); |
| if (!true_predicate_p (&will_be_nonconstant)) |
| will_be_nonconstant = and_predicates (info->conds, |
| &bb_predicate, |
| &will_be_nonconstant); |
| if (!true_predicate_p (&will_be_nonconstant) |
| && !false_predicate_p (&will_be_nonconstant)) |
| /* This is slightly imprecise. We may want to represent each |
| loop with an independent predicate. */ |
| loop_iterations = |
| and_predicates (info->conds, &loop_iterations, |
| &will_be_nonconstant); |
| } |
| exits.release (); |
| |
| for (gphi_iterator gsi = gsi_start_phis (loop->header); |
| !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gphi *phi = gsi.phi (); |
| tree use = gimple_phi_result (phi); |
| affine_iv iv; |
| predicate will_be_nonconstant; |
| if (virtual_operand_p (use) |
| || !simple_iv (loop, loop, use, &iv, true) |
| || is_gimple_min_invariant (iv.step)) |
| continue; |
| will_be_nonconstant |
| = will_be_nonconstant_expr_predicate (fbi.info, info, |
| iv.step, |
| nonconstant_names); |
| if (!true_predicate_p (&will_be_nonconstant)) |
| will_be_nonconstant = and_predicates (info->conds, |
| &bb_predicate, |
| &will_be_nonconstant); |
| if (!true_predicate_p (&will_be_nonconstant) |
| && !false_predicate_p (&will_be_nonconstant)) |
| /* This is slightly imprecise. We may want to represent |
| each loop with an independent predicate. */ |
| loop_stride = and_predicates (info->conds, &loop_stride, |
| &will_be_nonconstant); |
| } |
| } |
| set_hint_predicate (&inline_summaries->get (node)->loop_iterations, |
| loop_iterations); |
| set_hint_predicate (&inline_summaries->get (node)->loop_stride, loop_stride); |
| scev_finalize (); |
| } |
| FOR_ALL_BB_FN (bb, my_function) |
| { |
| edge e; |
| edge_iterator ei; |
| |
| if (bb->aux) |
| pool_free (edge_predicate_pool, bb->aux); |
| bb->aux = NULL; |
| FOR_EACH_EDGE (e, ei, bb->succs) |
| { |
| if (e->aux) |
| pool_free (edge_predicate_pool, e->aux); |
| e->aux = NULL; |
| } |
| } |
| inline_summaries->get (node)->self_time = time; |
| inline_summaries->get (node)->self_size = size; |
| nonconstant_names.release (); |
| if (opt_for_fn (node->decl, optimize)) |
| { |
| if (!early) |
| loop_optimizer_finalize (); |
| else if (!ipa_edge_args_vector) |
| ipa_free_all_node_params (); |
| free_dominance_info (CDI_DOMINATORS); |
| } |
| if (dump_file) |
| { |
| fprintf (dump_file, "\n"); |
| dump_inline_summary (dump_file, node); |
| } |
| } |
| |
| |
| /* Compute parameters of functions used by the inliner. |
| EARLY is true when we compute parameters for the early inliner. */ |
| |
| void |
| compute_inline_parameters (struct cgraph_node *node, bool early) |
| { |
| HOST_WIDE_INT self_stack_size; |
| struct cgraph_edge *e; |
| struct inline_summary *info; |
| |
| gcc_assert (!node->global.inlined_to); |
| |
| inline_summary_alloc (); |
| |
| info = inline_summaries->get (node); |
| reset_inline_summary (node, info); |
| |
| /* FIXME: Thunks are inlinable, but tree-inline doesn't know how to do that. |
| Once this happens, we will need to predict call |
| statement size more carefully. */ |
| if (node->thunk.thunk_p) |
| { |
| struct inline_edge_summary *es = inline_edge_summary (node->callees); |
| struct predicate t = true_predicate (); |
| |
| info->inlinable = 0; |
| node->callees->call_stmt_cannot_inline_p = true; |
| node->local.can_change_signature = false; |
| es->call_stmt_time = 1; |
| es->call_stmt_size = 1; |
| account_size_time (info, 0, 0, &t); |
| return; |
| } |
| |
| /* Even is_gimple_min_invariant relies on current_function_decl. */ |
| push_cfun (DECL_STRUCT_FUNCTION (node->decl)); |
| |
| /* Estimate the stack size for the function if we're optimizing. */ |
| self_stack_size = optimize ? estimated_stack_frame_size (node) : 0; |
| info->estimated_self_stack_size = self_stack_size; |
| info->estimated_stack_size = self_stack_size; |
| info->stack_frame_offset = 0; |
| |
| /* Can this function be inlined at all? */ |
| if (!opt_for_fn (node->decl, optimize) |
| && !lookup_attribute ("always_inline", |
| DECL_ATTRIBUTES (node->decl))) |
| info->inlinable = false; |
| else |
| info->inlinable = tree_inlinable_function_p (node->decl); |
| |
| info->contains_cilk_spawn = fn_contains_cilk_spawn_p (cfun); |
| |
| /* Type attributes can use parameter indices to describe them. */ |
| if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl))) |
| node->local.can_change_signature = false; |
| else |
| { |
| /* Otherwise, inlinable functions can always change their signature. */ |
| if (info->inlinable) |
| node->local.can_change_signature = true; |
| else |
| { |
| /* Functions calling __builtin_apply_args or va_start cannot change |
| their signature. */ |
| for (e = node->callees; e; e = e->next_callee) |
| { |
| tree cdecl = e->callee->decl; |
| if (DECL_BUILT_IN (cdecl) |
| && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL |
| && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS |
| || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START)) |
| break; |
| } |
| node->local.can_change_signature = !e; |
| } |
| } |
| estimate_function_body_sizes (node, early); |
| |
| for (e = node->callees; e; e = e->next_callee) |
| if (e->callee->comdat_local_p ()) |
| break; |
| node->calls_comdat_local = (e != NULL); |
| |
| /* Inlining characteristics are maintained by cgraph_mark_inline. */ |
| info->time = info->self_time; |
| info->size = info->self_size; |
| info->stack_frame_offset = 0; |
| info->estimated_stack_size = info->estimated_self_stack_size; |
| #ifdef ENABLE_CHECKING |
| inline_update_overall_summary (node); |
| gcc_assert (info->time == info->self_time && info->size == info->self_size); |
| #endif |
| |
| pop_cfun (); |
| } |
| |
| |
| /* Compute parameters of functions used by the inliner using |
| current_function_decl. */ |
| |
| static unsigned int |
| compute_inline_parameters_for_current (void) |
| { |
| compute_inline_parameters (cgraph_node::get (current_function_decl), true); |
| return 0; |
| } |
| |
| namespace { |
| |
| const pass_data pass_data_inline_parameters = |
| { |
| GIMPLE_PASS, /* type */ |
| "inline_param", /* name */ |
| OPTGROUP_INLINE, /* optinfo_flags */ |
| TV_INLINE_PARAMETERS, /* tv_id */ |
| 0, /* properties_required */ |
| 0, /* properties_provided */ |
| 0, /* properties_destroyed */ |
| 0, /* todo_flags_start */ |
| 0, /* todo_flags_finish */ |
| }; |
| |
| class pass_inline_parameters : public gimple_opt_pass |
| { |
| public: |
| pass_inline_parameters (gcc::context *ctxt) |
| : gimple_opt_pass (pass_data_inline_parameters, ctxt) |
| {} |
| |
| /* opt_pass methods: */ |
| opt_pass * clone () { return new pass_inline_parameters (m_ctxt); } |
| virtual unsigned int execute (function *) |
| { |
| return compute_inline_parameters_for_current (); |
| } |
| |
| }; // class pass_inline_parameters |
| |
| } // anon namespace |
| |
| gimple_opt_pass * |
| make_pass_inline_parameters (gcc::context *ctxt) |
| { |
| return new pass_inline_parameters (ctxt); |
| } |
| |
| |
| /* Estimate the benefit of devirtualizing indirect edge IE, given KNOWN_VALS, |
| KNOWN_CONTEXTS and KNOWN_AGGS. */ |
| |
| static bool |
| estimate_edge_devirt_benefit (struct cgraph_edge *ie, |
| int *size, int *time, |
| vec<tree> known_vals, |
| vec<ipa_polymorphic_call_context> known_contexts, |
| vec<ipa_agg_jump_function_p> known_aggs) |
| { |
| tree target; |
| struct cgraph_node *callee; |
| struct inline_summary *isummary; |
| enum availability avail; |
| bool speculative; |
| |
| if (!known_vals.exists () && !known_contexts.exists ()) |
| return false; |
| if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining)) |
| return false; |
| |
| target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts, |
| known_aggs, &speculative); |
| if (!target || speculative) |
| return false; |
| |
| /* Account for difference in cost between indirect and direct calls. */ |
| *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost); |
| *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost); |
| gcc_checking_assert (*time >= 0); |
| gcc_checking_assert (*size >= 0); |
| |
| callee = cgraph_node::get (target); |
| if (!callee || !callee->definition) |
| return false; |
| callee = callee->function_symbol (&avail); |
| if |