| /* Control flow functions for trees. |
| Copyright (C) 2001-2020 Free Software Foundation, Inc. |
| Contributed by Diego Novillo <dnovillo@redhat.com> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "cfghooks.h" |
| #include "tree-pass.h" |
| #include "ssa.h" |
| #include "cgraph.h" |
| #include "gimple-pretty-print.h" |
| #include "diagnostic-core.h" |
| #include "fold-const.h" |
| #include "trans-mem.h" |
| #include "stor-layout.h" |
| #include "print-tree.h" |
| #include "cfganal.h" |
| #include "gimple-fold.h" |
| #include "tree-eh.h" |
| #include "gimple-iterator.h" |
| #include "gimplify-me.h" |
| #include "gimple-walk.h" |
| #include "tree-cfg.h" |
| #include "tree-ssa-loop-manip.h" |
| #include "tree-ssa-loop-niter.h" |
| #include "tree-into-ssa.h" |
| #include "tree-dfa.h" |
| #include "tree-ssa.h" |
| #include "except.h" |
| #include "cfgloop.h" |
| #include "tree-ssa-propagate.h" |
| #include "value-prof.h" |
| #include "tree-inline.h" |
| #include "tree-ssa-live.h" |
| #include "omp-general.h" |
| #include "omp-expand.h" |
| #include "tree-cfgcleanup.h" |
| #include "gimplify.h" |
| #include "attribs.h" |
| #include "selftest.h" |
| #include "opts.h" |
| #include "asan.h" |
| #include "profile.h" |
| |
| /* This file contains functions for building the Control Flow Graph (CFG) |
| for a function tree. */ |
| |
| /* Local declarations. */ |
| |
| /* Initial capacity for the basic block array. */ |
| static const int initial_cfg_capacity = 20; |
| |
| /* This hash table allows us to efficiently lookup all CASE_LABEL_EXPRs |
| which use a particular edge. The CASE_LABEL_EXPRs are chained together |
| via their CASE_CHAIN field, which we clear after we're done with the |
| hash table to prevent problems with duplication of GIMPLE_SWITCHes. |
| |
| Access to this list of CASE_LABEL_EXPRs allows us to efficiently |
| update the case vector in response to edge redirections. |
| |
| Right now this table is set up and torn down at key points in the |
| compilation process. It would be nice if we could make the table |
| more persistent. The key is getting notification of changes to |
| the CFG (particularly edge removal, creation and redirection). */ |
| |
| static hash_map<edge, tree> *edge_to_cases; |
| |
| /* If we record edge_to_cases, this bitmap will hold indexes |
| of basic blocks that end in a GIMPLE_SWITCH which we touched |
| due to edge manipulations. */ |
| |
| static bitmap touched_switch_bbs; |
| |
| /* CFG statistics. */ |
| struct cfg_stats_d |
| { |
| long num_merged_labels; |
| }; |
| |
| static struct cfg_stats_d cfg_stats; |
| |
| /* Data to pass to replace_block_vars_by_duplicates_1. */ |
| struct replace_decls_d |
| { |
| hash_map<tree, tree> *vars_map; |
| tree to_context; |
| }; |
| |
| /* Hash table to store last discriminator assigned for each locus. */ |
| struct locus_discrim_map |
| { |
| int location_line; |
| int discriminator; |
| }; |
| |
| /* Hashtable helpers. */ |
| |
| struct locus_discrim_hasher : free_ptr_hash <locus_discrim_map> |
| { |
| static inline hashval_t hash (const locus_discrim_map *); |
| static inline bool equal (const locus_discrim_map *, |
| const locus_discrim_map *); |
| }; |
| |
/* Trivial hash function for a location_t: hash on the line number
   alone.  ITEM is a pointer to a hash table entry that maps a source
   line to a discriminator.  */
| |
| inline hashval_t |
| locus_discrim_hasher::hash (const locus_discrim_map *item) |
| { |
| return item->location_line; |
| } |
| |
| /* Equality function for the locus-to-discriminator map. A and B |
| point to the two hash table entries to compare. */ |
| |
| inline bool |
| locus_discrim_hasher::equal (const locus_discrim_map *a, |
| const locus_discrim_map *b) |
| { |
| return a->location_line == b->location_line; |
| } |
| |
| static hash_table<locus_discrim_hasher> *discriminator_per_locus; |
| |
| /* Basic blocks and flowgraphs. */ |
| static void make_blocks (gimple_seq); |
| |
| /* Edges. */ |
| static void make_edges (void); |
| static void assign_discriminators (void); |
| static void make_cond_expr_edges (basic_block); |
| static void make_gimple_switch_edges (gswitch *, basic_block); |
| static bool make_goto_expr_edges (basic_block); |
| static void make_gimple_asm_edges (basic_block); |
| static edge gimple_redirect_edge_and_branch (edge, basic_block); |
| static edge gimple_try_redirect_by_replacing_jump (edge, basic_block); |
| |
| /* Various helpers. */ |
| static inline bool stmt_starts_bb_p (gimple *, gimple *); |
| static int gimple_verify_flow_info (void); |
| static void gimple_make_forwarder_block (edge); |
| static gimple *first_non_label_stmt (basic_block); |
| static bool verify_gimple_transaction (gtransaction *); |
| static bool call_can_make_abnormal_goto (gimple *); |
| |
| /* Flowgraph optimization and cleanup. */ |
| static void gimple_merge_blocks (basic_block, basic_block); |
| static bool gimple_can_merge_blocks_p (basic_block, basic_block); |
| static void remove_bb (basic_block); |
| static edge find_taken_edge_computed_goto (basic_block, tree); |
| static edge find_taken_edge_cond_expr (const gcond *, tree); |
| |
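/* Build the minimal "empty" CFG for function FN: allocate the basic
   block and label-to-block arrays, register the ENTRY and EXIT blocks
   and chain them together.  */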
| void |
| init_empty_tree_cfg_for_function (struct function *fn) |
| { |
| /* Initialize the basic block array. */ |
| init_flow (fn); |
| profile_status_for_fn (fn) = PROFILE_ABSENT; |
| n_basic_blocks_for_fn (fn) = NUM_FIXED_BLOCKS; |
| last_basic_block_for_fn (fn) = NUM_FIXED_BLOCKS; |
| vec_alloc (basic_block_info_for_fn (fn), initial_cfg_capacity); |
| vec_safe_grow_cleared (basic_block_info_for_fn (fn), |
| initial_cfg_capacity); |
| |
| /* Build a mapping of labels to their associated blocks. */ |
| vec_alloc (label_to_block_map_for_fn (fn), initial_cfg_capacity); |
| vec_safe_grow_cleared (label_to_block_map_for_fn (fn), |
| initial_cfg_capacity); |
| |
| SET_BASIC_BLOCK_FOR_FN (fn, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (fn)); |
| SET_BASIC_BLOCK_FOR_FN (fn, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (fn)); |
| |
| ENTRY_BLOCK_PTR_FOR_FN (fn)->next_bb |
| = EXIT_BLOCK_PTR_FOR_FN (fn); |
| EXIT_BLOCK_PTR_FOR_FN (fn)->prev_bb |
| = ENTRY_BLOCK_PTR_FOR_FN (fn); |
| } |
| |
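/* Likewise, but for the current function.  */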
| void |
| init_empty_tree_cfg (void) |
| { |
| init_empty_tree_cfg_for_function (cfun); |
| } |
| |
| /*--------------------------------------------------------------------------- |
| Create basic blocks |
| ---------------------------------------------------------------------------*/ |
| |
| /* Entry point to the CFG builder for trees. SEQ is the sequence of |
| statements to be added to the flowgraph. */ |
| |
| static void |
| build_gimple_cfg (gimple_seq seq) |
| { |
| /* Register specific gimple functions. */ |
| gimple_register_cfg_hooks (); |
| |
| memset ((void *) &cfg_stats, 0, sizeof (cfg_stats)); |
| |
| init_empty_tree_cfg (); |
| |
| make_blocks (seq); |
| |
| /* Make sure there is always at least one block, even if it's empty. */ |
| if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS) |
| create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun)); |
| |
| /* Adjust the size of the array. */ |
| if (basic_block_info_for_fn (cfun)->length () |
| < (size_t) n_basic_blocks_for_fn (cfun)) |
| vec_safe_grow_cleared (basic_block_info_for_fn (cfun), |
| n_basic_blocks_for_fn (cfun)); |
| |
| /* To speed up statement iterator walks, we first purge dead labels. */ |
| cleanup_dead_labels (); |
| |
| /* Group case nodes to reduce the number of edges. |
| We do this after cleaning up dead labels because otherwise we miss |
| a lot of obvious case merging opportunities. */ |
| group_case_labels (); |
| |
| /* Create the edges of the flowgraph. */ |
| discriminator_per_locus = new hash_table<locus_discrim_hasher> (13); |
| make_edges (); |
| assign_discriminators (); |
| cleanup_dead_labels (); |
| delete discriminator_per_locus; |
| discriminator_per_locus = NULL; |
| } |
| |
| /* Look for ANNOTATE calls with loop annotation kind in BB; if found, remove |
| them and propagate the information to LOOP. We assume that the annotations |
| come immediately before the condition in BB, if any. */ |
| |
| static void |
| replace_loop_annotate_in_block (basic_block bb, class loop *loop) |
| { |
| gimple_stmt_iterator gsi = gsi_last_bb (bb); |
| gimple *stmt = gsi_stmt (gsi); |
| |
| if (!(stmt && gimple_code (stmt) == GIMPLE_COND)) |
| return; |
| |
| for (gsi_prev_nondebug (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi)) |
| { |
| stmt = gsi_stmt (gsi); |
| if (gimple_code (stmt) != GIMPLE_CALL) |
| break; |
| if (!gimple_call_internal_p (stmt) |
| || gimple_call_internal_fn (stmt) != IFN_ANNOTATE) |
| break; |
| |
| switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1))) |
| { |
| case annot_expr_ivdep_kind: |
| loop->safelen = INT_MAX; |
| break; |
| case annot_expr_unroll_kind: |
| loop->unroll |
| = (unsigned short) tree_to_shwi (gimple_call_arg (stmt, 2)); |
| cfun->has_unroll = true; |
| break; |
| case annot_expr_no_vector_kind: |
| loop->dont_vectorize = true; |
| break; |
| case annot_expr_vector_kind: |
| loop->force_vectorize = true; |
| cfun->has_force_vectorize_loops = true; |
| break; |
| case annot_expr_parallel_kind: |
| loop->can_be_parallel = true; |
| loop->safelen = INT_MAX; |
| break; |
| default: |
| gcc_unreachable (); |
| } |
| |
| stmt = gimple_build_assign (gimple_call_lhs (stmt), |
| gimple_call_arg (stmt, 0)); |
| gsi_replace (&gsi, stmt, true); |
| } |
| } |
| |
| /* Look for ANNOTATE calls with loop annotation kind; if found, remove |
| them and propagate the information to the loop. We assume that the |
| annotations come immediately before the condition of the loop. */ |
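/* For instance, a "#pragma GCC ivdep" on a loop is gimplified into an
   IFN_ANNOTATE call with annot_expr_ivdep_kind just before the loop
   condition; here that becomes loop->safelen = INT_MAX, and the call
   is replaced by a plain copy of its first argument.  */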
| |
| static void |
| replace_loop_annotate (void) |
| { |
| class loop *loop; |
| basic_block bb; |
| gimple_stmt_iterator gsi; |
| gimple *stmt; |
| |
| FOR_EACH_LOOP (loop, 0) |
| { |
| /* First look into the header. */ |
| replace_loop_annotate_in_block (loop->header, loop); |
| |
| /* Then look into the latch, if any. */ |
| if (loop->latch) |
| replace_loop_annotate_in_block (loop->latch, loop); |
| |
| /* Push the global flag_finite_loops state down to individual loops. */ |
| loop->finite_p = flag_finite_loops; |
| } |
| |
| /* Remove IFN_ANNOTATE. Safeguard for the case loop->latch == NULL. */ |
| FOR_EACH_BB_FN (bb, cfun) |
| { |
| for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi)) |
| { |
| stmt = gsi_stmt (gsi); |
| if (gimple_code (stmt) != GIMPLE_CALL) |
| continue; |
| if (!gimple_call_internal_p (stmt) |
| || gimple_call_internal_fn (stmt) != IFN_ANNOTATE) |
| continue; |
| |
| switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1))) |
| { |
| case annot_expr_ivdep_kind: |
| case annot_expr_unroll_kind: |
| case annot_expr_no_vector_kind: |
| case annot_expr_vector_kind: |
| case annot_expr_parallel_kind: |
| break; |
| default: |
| gcc_unreachable (); |
| } |
| |
| warning_at (gimple_location (stmt), 0, "ignoring loop annotation"); |
| stmt = gimple_build_assign (gimple_call_lhs (stmt), |
| gimple_call_arg (stmt, 0)); |
| gsi_replace (&gsi, stmt, true); |
| } |
| } |
| } |
| |
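/* Execute the CFG construction pass: build the CFG for the current
   function's GIMPLE body, release the body, clean up the fresh CFG,
   and discover loops so that loop annotations can be processed by
   replace_loop_annotate.  */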
| static unsigned int |
| execute_build_cfg (void) |
| { |
| gimple_seq body = gimple_body (current_function_decl); |
| |
| build_gimple_cfg (body); |
| gimple_set_body (current_function_decl, NULL); |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Scope blocks:\n"); |
| dump_scope_blocks (dump_file, dump_flags); |
| } |
| cleanup_tree_cfg (); |
| loop_optimizer_init (AVOID_CFG_MODIFICATIONS); |
| replace_loop_annotate (); |
| return 0; |
| } |
| |
| namespace { |
| |
| const pass_data pass_data_build_cfg = |
| { |
| GIMPLE_PASS, /* type */ |
| "cfg", /* name */ |
| OPTGROUP_NONE, /* optinfo_flags */ |
| TV_TREE_CFG, /* tv_id */ |
| PROP_gimple_leh, /* properties_required */ |
| ( PROP_cfg | PROP_loops ), /* properties_provided */ |
| 0, /* properties_destroyed */ |
| 0, /* todo_flags_start */ |
| 0, /* todo_flags_finish */ |
| }; |
| |
| class pass_build_cfg : public gimple_opt_pass |
| { |
| public: |
| pass_build_cfg (gcc::context *ctxt) |
| : gimple_opt_pass (pass_data_build_cfg, ctxt) |
| {} |
| |
| /* opt_pass methods: */ |
| virtual unsigned int execute (function *) { return execute_build_cfg (); } |
| |
| }; // class pass_build_cfg |
| |
| } // anon namespace |
| |
| gimple_opt_pass * |
| make_pass_build_cfg (gcc::context *ctxt) |
| { |
| return new pass_build_cfg (ctxt); |
| } |
| |
| |
| /* Return true if T is a computed goto. */ |
| |
| bool |
| computed_goto_p (gimple *t) |
| { |
| return (gimple_code (t) == GIMPLE_GOTO |
| && TREE_CODE (gimple_goto_dest (t)) != LABEL_DECL); |
| } |
| |
/* Returns true if the sequence of statements STMTS ends in a call to
   __builtin_unreachable (), possibly preceded only by labels, debug
   stmts and clobbers.  */
| |
| bool |
| gimple_seq_unreachable_p (gimple_seq stmts) |
| { |
| if (stmts == NULL |
| /* Return false if -fsanitize=unreachable, we don't want to |
| optimize away those calls, but rather turn them into |
| __ubsan_handle_builtin_unreachable () or __builtin_trap () |
| later. */ |
| || sanitize_flags_p (SANITIZE_UNREACHABLE)) |
| return false; |
| |
| gimple_stmt_iterator gsi = gsi_last (stmts); |
| |
| if (!gimple_call_builtin_p (gsi_stmt (gsi), BUILT_IN_UNREACHABLE)) |
| return false; |
| |
| for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| if (gimple_code (stmt) != GIMPLE_LABEL |
| && !is_gimple_debug (stmt) |
| && !gimple_clobber_p (stmt)) |
| return false; |
| } |
| return true; |
| } |
| |
| /* Returns true for edge E where e->src ends with a GIMPLE_COND and |
| the other edge points to a bb with just __builtin_unreachable (). |
| I.e. return true for C->M edge in: |
| <bb C>: |
| ... |
| if (something) |
| goto <bb N>; |
| else |
| goto <bb M>; |
| <bb N>: |
| __builtin_unreachable (); |
| <bb M>: */ |
| |
| bool |
| assert_unreachable_fallthru_edge_p (edge e) |
| { |
| basic_block pred_bb = e->src; |
| gimple *last = last_stmt (pred_bb); |
| if (last && gimple_code (last) == GIMPLE_COND) |
| { |
| basic_block other_bb = EDGE_SUCC (pred_bb, 0)->dest; |
| if (other_bb == e->dest) |
| other_bb = EDGE_SUCC (pred_bb, 1)->dest; |
| if (EDGE_COUNT (other_bb->succs) == 0) |
| return gimple_seq_unreachable_p (bb_seq (other_bb)); |
| } |
| return false; |
| } |
| |
| |
| /* Initialize GF_CALL_CTRL_ALTERING flag, which indicates the call |
| could alter control flow except via eh. We initialize the flag at |
| CFG build time and only ever clear it later. */ |
| |
| static void |
| gimple_call_initialize_ctrl_altering (gimple *stmt) |
| { |
| int flags = gimple_call_flags (stmt); |
| |
| /* A call alters control flow if it can make an abnormal goto. */ |
| if (call_can_make_abnormal_goto (stmt) |
| /* A call also alters control flow if it does not return. */ |
| || flags & ECF_NORETURN |
| /* TM ending statements have backedges out of the transaction. |
| Return true so we split the basic block containing them. |
| Note that the TM_BUILTIN test is merely an optimization. */ |
| || ((flags & ECF_TM_BUILTIN) |
| && is_tm_ending_fndecl (gimple_call_fndecl (stmt))) |
      /* A BUILT_IN_RETURN call is the same as a return statement.  */
| || gimple_call_builtin_p (stmt, BUILT_IN_RETURN) |
| /* IFN_UNIQUE should be the last insn, to make checking for it |
| as cheap as possible. */ |
| || (gimple_call_internal_p (stmt) |
| && gimple_call_internal_unique_p (stmt))) |
| gimple_call_set_ctrl_altering (stmt, true); |
| else |
| gimple_call_set_ctrl_altering (stmt, false); |
| } |
| |
| |
| /* Insert SEQ after BB and build a flowgraph. */ |
| |
| static basic_block |
| make_blocks_1 (gimple_seq seq, basic_block bb) |
| { |
| gimple_stmt_iterator i = gsi_start (seq); |
| gimple *stmt = NULL; |
| gimple *prev_stmt = NULL; |
| bool start_new_block = true; |
| bool first_stmt_of_seq = true; |
| |
| while (!gsi_end_p (i)) |
| { |
| /* PREV_STMT should only be set to a debug stmt if the debug |
| stmt is before nondebug stmts. Once stmt reaches a nondebug |
| nonlabel, prev_stmt will be set to it, so that |
| stmt_starts_bb_p will know to start a new block if a label is |
| found. However, if stmt was a label after debug stmts only, |
| keep the label in prev_stmt even if we find further debug |
| stmts, for there may be other labels after them, and they |
| should land in the same block. */ |
| if (!prev_stmt || !stmt || !is_gimple_debug (stmt)) |
| prev_stmt = stmt; |
| stmt = gsi_stmt (i); |
| |
| if (stmt && is_gimple_call (stmt)) |
| gimple_call_initialize_ctrl_altering (stmt); |
| |
| /* If the statement starts a new basic block or if we have determined |
| in a previous pass that we need to create a new block for STMT, do |
| so now. */ |
| if (start_new_block || stmt_starts_bb_p (stmt, prev_stmt)) |
| { |
| if (!first_stmt_of_seq) |
| gsi_split_seq_before (&i, &seq); |
| bb = create_basic_block (seq, bb); |
| start_new_block = false; |
| prev_stmt = NULL; |
| } |
| |
| /* Now add STMT to BB and create the subgraphs for special statement |
| codes. */ |
| gimple_set_bb (stmt, bb); |
| |
| /* If STMT is a basic block terminator, set START_NEW_BLOCK for the |
| next iteration. */ |
| if (stmt_ends_bb_p (stmt)) |
| { |
| /* If the stmt can make abnormal goto use a new temporary |
| for the assignment to the LHS. This makes sure the old value |
| of the LHS is available on the abnormal edge. Otherwise |
| we will end up with overlapping life-ranges for abnormal |
| SSA names. */ |
| if (gimple_has_lhs (stmt) |
| && stmt_can_make_abnormal_goto (stmt) |
| && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt)))) |
| { |
| tree lhs = gimple_get_lhs (stmt); |
| tree tmp = create_tmp_var (TREE_TYPE (lhs)); |
| gimple *s = gimple_build_assign (lhs, tmp); |
| gimple_set_location (s, gimple_location (stmt)); |
| gimple_set_block (s, gimple_block (stmt)); |
| gimple_set_lhs (stmt, tmp); |
| if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE |
| || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE) |
| DECL_GIMPLE_REG_P (tmp) = 1; |
| gsi_insert_after (&i, s, GSI_SAME_STMT); |
| } |
| start_new_block = true; |
| } |
| |
| gsi_next (&i); |
| first_stmt_of_seq = false; |
| } |
| return bb; |
| } |
| |
| /* Build a flowgraph for the sequence of stmts SEQ. */ |
| |
| static void |
| make_blocks (gimple_seq seq) |
| { |
| /* Look for debug markers right before labels, and move the debug |
| stmts after the labels. Accepting labels among debug markers |
| adds no value, just complexity; if we wanted to annotate labels |
| with view numbers (so sequencing among markers would matter) or |
| somesuch, we're probably better off still moving the labels, but |
| adding other debug annotations in their original positions or |
| emitting nonbind or bind markers associated with the labels in |
| the original position of the labels. |
| |
| Moving labels would probably be simpler, but we can't do that: |
| moving labels assigns label ids to them, and doing so because of |
| debug markers makes for -fcompare-debug and possibly even codegen |
| differences. So, we have to move the debug stmts instead. To |
| that end, we scan SEQ backwards, marking the position of the |
| latest (earliest we find) label, and moving debug stmts that are |
| not separated from it by nondebug nonlabel stmts after the |
| label. */ |
| if (MAY_HAVE_DEBUG_MARKER_STMTS) |
| { |
| gimple_stmt_iterator label = gsi_none (); |
| |
| for (gimple_stmt_iterator i = gsi_last (seq); !gsi_end_p (i); gsi_prev (&i)) |
| { |
| gimple *stmt = gsi_stmt (i); |
| |
| /* If this is the first label we encounter (latest in SEQ) |
| before nondebug stmts, record its position. */ |
| if (is_a <glabel *> (stmt)) |
| { |
| if (gsi_end_p (label)) |
| label = i; |
| continue; |
| } |
| |
| /* Without a recorded label position to move debug stmts to, |
| there's nothing to do. */ |
| if (gsi_end_p (label)) |
| continue; |
| |
| /* Move the debug stmt at I after LABEL. */ |
| if (is_gimple_debug (stmt)) |
| { |
| gcc_assert (gimple_debug_nonbind_marker_p (stmt)); |
| /* As STMT is removed, I advances to the stmt after |
| STMT, so the gsi_prev in the for "increment" |
| expression gets us to the stmt we're to visit after |
| STMT. LABEL, however, would advance to the moved |
| stmt if we passed it to gsi_move_after, so pass it a |
| copy instead, so as to keep LABEL pointing to the |
| LABEL. */ |
| gimple_stmt_iterator copy = label; |
| gsi_move_after (&i, ©); |
| continue; |
| } |
| |
| /* There aren't any (more?) debug stmts before label, so |
| there isn't anything else to move after it. */ |
| label = gsi_none (); |
| } |
| } |
| |
| make_blocks_1 (seq, ENTRY_BLOCK_PTR_FOR_FN (cfun)); |
| } |
| |
| /* Create and return a new empty basic block after bb AFTER. */ |
| |
| static basic_block |
| create_bb (void *h, void *e, basic_block after) |
| { |
| basic_block bb; |
| |
| gcc_assert (!e); |
| |
| /* Create and initialize a new basic block. Since alloc_block uses |
| GC allocation that clears memory to allocate a basic block, we do |
| not have to clear the newly allocated basic block here. */ |
| bb = alloc_block (); |
| |
| bb->index = last_basic_block_for_fn (cfun); |
| bb->flags = BB_NEW; |
| set_bb_seq (bb, h ? (gimple_seq) h : NULL); |
| |
| /* Add the new block to the linked list of blocks. */ |
| link_block (bb, after); |
| |
| /* Grow the basic block array if needed. */ |
| if ((size_t) last_basic_block_for_fn (cfun) |
| == basic_block_info_for_fn (cfun)->length ()) |
| { |
| size_t new_size = |
| (last_basic_block_for_fn (cfun) |
| + (last_basic_block_for_fn (cfun) + 3) / 4); |
| vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size); |
| } |
| |
| /* Add the newly created block to the array. */ |
| SET_BASIC_BLOCK_FOR_FN (cfun, last_basic_block_for_fn (cfun), bb); |
| |
| n_basic_blocks_for_fn (cfun)++; |
| last_basic_block_for_fn (cfun)++; |
| |
| return bb; |
| } |
| |
| |
| /*--------------------------------------------------------------------------- |
| Edge creation |
| ---------------------------------------------------------------------------*/ |
| |
/* If basic block BB has an abnormal edge to a basic block
   containing an IFN_ABNORMAL_DISPATCHER internal call, return
   that dispatcher's basic block, otherwise return NULL.  */
| |
| basic_block |
| get_abnormal_succ_dispatcher (basic_block bb) |
| { |
| edge e; |
| edge_iterator ei; |
| |
| FOR_EACH_EDGE (e, ei, bb->succs) |
| if ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) == EDGE_ABNORMAL) |
| { |
| gimple_stmt_iterator gsi |
| = gsi_start_nondebug_after_labels_bb (e->dest); |
| gimple *g = gsi_stmt (gsi); |
| if (g && gimple_call_internal_p (g, IFN_ABNORMAL_DISPATCHER)) |
| return e->dest; |
| } |
| return NULL; |
| } |
| |
/* Helper function for make_edges.  Create a basic block with
   an ABNORMAL_DISPATCHER internal call in it if needed, and
   create abnormal edges from BBS to it and from it to FOR_BB
   if COMPUTED_GOTO is false, otherwise factor the computed gotos.  */
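/* Schematically, the computed-goto factoring rewrites each

       goto *dest_N;

   in the affected blocks into

       gotovar = dest_N;
       goto <factored block>;

   where the single factored block reads

       <factored block>: goto *gotovar;

   with "gotovar" being the temporary created below.  */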
| |
| static void |
| handle_abnormal_edges (basic_block *dispatcher_bbs, |
| basic_block for_bb, int *bb_to_omp_idx, |
| auto_vec<basic_block> *bbs, bool computed_goto) |
| { |
| basic_block *dispatcher = dispatcher_bbs + (computed_goto ? 1 : 0); |
| unsigned int idx = 0; |
| basic_block bb; |
| bool inner = false; |
| |
| if (bb_to_omp_idx) |
| { |
| dispatcher = dispatcher_bbs + 2 * bb_to_omp_idx[for_bb->index]; |
| if (bb_to_omp_idx[for_bb->index] != 0) |
| inner = true; |
| } |
| |
  /* If the dispatcher has been created already, then there are basic
     blocks with abnormal edges to it, so all we need is the new edge
     to FOR_BB made at the end of this function.  Otherwise, create
     the dispatcher now.  */
| if (*dispatcher == NULL) |
| { |
| /* Check if there are any basic blocks that need to have |
| abnormal edges to this dispatcher. If there are none, return |
| early. */ |
| if (bb_to_omp_idx == NULL) |
| { |
| if (bbs->is_empty ()) |
| return; |
| } |
| else |
| { |
| FOR_EACH_VEC_ELT (*bbs, idx, bb) |
| if (bb_to_omp_idx[bb->index] == bb_to_omp_idx[for_bb->index]) |
| break; |
| if (bb == NULL) |
| return; |
| } |
| |
| /* Create the dispatcher bb. */ |
| *dispatcher = create_basic_block (NULL, for_bb); |
| if (computed_goto) |
| { |
| /* Factor computed gotos into a common computed goto site. Also |
| record the location of that site so that we can un-factor the |
| gotos after we have converted back to normal form. */ |
| gimple_stmt_iterator gsi = gsi_start_bb (*dispatcher); |
| |
| /* Create the destination of the factored goto. Each original |
| computed goto will put its desired destination into this |
| variable and jump to the label we create immediately below. */ |
| tree var = create_tmp_var (ptr_type_node, "gotovar"); |
| |
| /* Build a label for the new block which will contain the |
| factored computed goto. */ |
| tree factored_label_decl |
| = create_artificial_label (UNKNOWN_LOCATION); |
| gimple *factored_computed_goto_label |
| = gimple_build_label (factored_label_decl); |
| gsi_insert_after (&gsi, factored_computed_goto_label, GSI_NEW_STMT); |
| |
| /* Build our new computed goto. */ |
| gimple *factored_computed_goto = gimple_build_goto (var); |
| gsi_insert_after (&gsi, factored_computed_goto, GSI_NEW_STMT); |
| |
| FOR_EACH_VEC_ELT (*bbs, idx, bb) |
| { |
| if (bb_to_omp_idx |
| && bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index]) |
| continue; |
| |
| gsi = gsi_last_bb (bb); |
| gimple *last = gsi_stmt (gsi); |
| |
| gcc_assert (computed_goto_p (last)); |
| |
| /* Copy the original computed goto's destination into VAR. */ |
| gimple *assignment |
| = gimple_build_assign (var, gimple_goto_dest (last)); |
| gsi_insert_before (&gsi, assignment, GSI_SAME_STMT); |
| |
| edge e = make_edge (bb, *dispatcher, EDGE_FALLTHRU); |
| e->goto_locus = gimple_location (last); |
| gsi_remove (&gsi, true); |
| } |
| } |
| else |
| { |
| tree arg = inner ? boolean_true_node : boolean_false_node; |
| gimple *g = gimple_build_call_internal (IFN_ABNORMAL_DISPATCHER, |
| 1, arg); |
| gimple_stmt_iterator gsi = gsi_after_labels (*dispatcher); |
| gsi_insert_after (&gsi, g, GSI_NEW_STMT); |
| |
| /* Create predecessor edges of the dispatcher. */ |
| FOR_EACH_VEC_ELT (*bbs, idx, bb) |
| { |
| if (bb_to_omp_idx |
| && bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index]) |
| continue; |
| make_edge (bb, *dispatcher, EDGE_ABNORMAL); |
| } |
| } |
| } |
| |
| make_edge (*dispatcher, for_bb, EDGE_ABNORMAL); |
| } |
| |
/* Creates outgoing edges for BB.  Returns 1 when it ends with a
   computed goto, returns 2 when it ends with a statement that
   might return to this function via a nonlocal goto, otherwise
   returns 0.  Updates *PCUR_REGION with the OMP region this BB is in.  */
| |
| static int |
| make_edges_bb (basic_block bb, struct omp_region **pcur_region, int *pomp_index) |
| { |
| gimple *last = last_stmt (bb); |
| bool fallthru = false; |
| int ret = 0; |
| |
| if (!last) |
| return ret; |
| |
| switch (gimple_code (last)) |
| { |
| case GIMPLE_GOTO: |
| if (make_goto_expr_edges (bb)) |
| ret = 1; |
| fallthru = false; |
| break; |
| case GIMPLE_RETURN: |
| { |
| edge e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0); |
| e->goto_locus = gimple_location (last); |
| fallthru = false; |
| } |
| break; |
| case GIMPLE_COND: |
| make_cond_expr_edges (bb); |
| fallthru = false; |
| break; |
| case GIMPLE_SWITCH: |
| make_gimple_switch_edges (as_a <gswitch *> (last), bb); |
| fallthru = false; |
| break; |
| case GIMPLE_RESX: |
| make_eh_edges (last); |
| fallthru = false; |
| break; |
| case GIMPLE_EH_DISPATCH: |
| fallthru = make_eh_dispatch_edges (as_a <geh_dispatch *> (last)); |
| break; |
| |
| case GIMPLE_CALL: |
| /* If this function receives a nonlocal goto, then we need to |
| make edges from this call site to all the nonlocal goto |
| handlers. */ |
| if (stmt_can_make_abnormal_goto (last)) |
| ret = 2; |
| |
| /* If this statement has reachable exception handlers, then |
| create abnormal edges to them. */ |
| make_eh_edges (last); |
| |
      /* BUILT_IN_RETURN is really a return statement.  */
| if (gimple_call_builtin_p (last, BUILT_IN_RETURN)) |
| { |
| make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0); |
| fallthru = false; |
| } |
| /* Some calls are known not to return. */ |
| else |
| fallthru = !gimple_call_noreturn_p (last); |
| break; |
| |
| case GIMPLE_ASSIGN: |
| /* A GIMPLE_ASSIGN may throw internally and thus be considered |
| control-altering. */ |
| if (is_ctrl_altering_stmt (last)) |
| make_eh_edges (last); |
| fallthru = true; |
| break; |
| |
| case GIMPLE_ASM: |
| make_gimple_asm_edges (bb); |
| fallthru = true; |
| break; |
| |
| CASE_GIMPLE_OMP: |
| fallthru = omp_make_gimple_edges (bb, pcur_region, pomp_index); |
| break; |
| |
| case GIMPLE_TRANSACTION: |
| { |
| gtransaction *txn = as_a <gtransaction *> (last); |
| tree label1 = gimple_transaction_label_norm (txn); |
| tree label2 = gimple_transaction_label_uninst (txn); |
| |
| if (label1) |
| make_edge (bb, label_to_block (cfun, label1), EDGE_FALLTHRU); |
| if (label2) |
| make_edge (bb, label_to_block (cfun, label2), |
| EDGE_TM_UNINSTRUMENTED | (label1 ? 0 : EDGE_FALLTHRU)); |
| |
| tree label3 = gimple_transaction_label_over (txn); |
| if (gimple_transaction_subcode (txn) |
| & (GTMA_HAVE_ABORT | GTMA_IS_OUTER)) |
| make_edge (bb, label_to_block (cfun, label3), EDGE_TM_ABORT); |
| |
| fallthru = false; |
| } |
| break; |
| |
| default: |
| gcc_assert (!stmt_ends_bb_p (last)); |
| fallthru = true; |
| break; |
| } |
| |
| if (fallthru) |
| make_edge (bb, bb->next_bb, EDGE_FALLTHRU); |
| |
| return ret; |
| } |
| |
| /* Join all the blocks in the flowgraph. */ |
| |
| static void |
| make_edges (void) |
| { |
| basic_block bb; |
| struct omp_region *cur_region = NULL; |
| auto_vec<basic_block> ab_edge_goto; |
| auto_vec<basic_block> ab_edge_call; |
| int *bb_to_omp_idx = NULL; |
| int cur_omp_region_idx = 0; |
| |
| /* Create an edge from entry to the first block with executable |
| statements in it. */ |
| make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), |
| BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS), |
| EDGE_FALLTHRU); |
| |
| /* Traverse the basic block array placing edges. */ |
| FOR_EACH_BB_FN (bb, cfun) |
| { |
| int mer; |
| |
| if (bb_to_omp_idx) |
| bb_to_omp_idx[bb->index] = cur_omp_region_idx; |
| |
| mer = make_edges_bb (bb, &cur_region, &cur_omp_region_idx); |
| if (mer == 1) |
| ab_edge_goto.safe_push (bb); |
| else if (mer == 2) |
| ab_edge_call.safe_push (bb); |
| |
| if (cur_region && bb_to_omp_idx == NULL) |
| bb_to_omp_idx = XCNEWVEC (int, n_basic_blocks_for_fn (cfun)); |
| } |
| |
| /* Computed gotos are hell to deal with, especially if there are |
| lots of them with a large number of destinations. So we factor |
| them to a common computed goto location before we build the |
| edge list. After we convert back to normal form, we will un-factor |
| the computed gotos since factoring introduces an unwanted jump. |
     Similarly, for non-local gotos and for abnormal edges from calls
     to calls that return twice or to forced labels, factor the
     abnormal edges too: all abnormal edges from the calls go to a
     common artificial basic block with an ABNORMAL_DISPATCHER internal
     call, and abnormal edges lead from that basic block to all forced
     labels and calls returning twice.
     We do this per OpenMP structured block, because those regions
     are guaranteed to be single entry single exit by the standard,
     so it is not allowed to enter or exit such regions abnormally this
     way, and thus all computed gotos, non-local gotos and
     setjmp/longjmp calls must not transfer control across SESE region
     boundaries.  */
| if (!ab_edge_goto.is_empty () || !ab_edge_call.is_empty ()) |
| { |
| gimple_stmt_iterator gsi; |
| basic_block dispatcher_bb_array[2] = { NULL, NULL }; |
| basic_block *dispatcher_bbs = dispatcher_bb_array; |
| int count = n_basic_blocks_for_fn (cfun); |
| |
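      /* When OpenMP regions are present, reserve two dispatcher slots
	 per possible region index, so that dispatchers are not shared
	 across OMP SESE region boundaries (see handle_abnormal_edges).  */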
| if (bb_to_omp_idx) |
| dispatcher_bbs = XCNEWVEC (basic_block, 2 * count); |
| |
| FOR_EACH_BB_FN (bb, cfun) |
| { |
| for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi)); |
| tree target; |
| |
| if (!label_stmt) |
| break; |
| |
| target = gimple_label_label (label_stmt); |
| |
| /* Make an edge to every label block that has been marked as a |
| potential target for a computed goto or a non-local goto. */ |
| if (FORCED_LABEL (target)) |
| handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx, |
| &ab_edge_goto, true); |
| if (DECL_NONLOCAL (target)) |
| { |
| handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx, |
| &ab_edge_call, false); |
| break; |
| } |
| } |
| |
| if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi))) |
| gsi_next_nondebug (&gsi); |
| if (!gsi_end_p (gsi)) |
| { |
| /* Make an edge to every setjmp-like call. */ |
| gimple *call_stmt = gsi_stmt (gsi); |
| if (is_gimple_call (call_stmt) |
| && ((gimple_call_flags (call_stmt) & ECF_RETURNS_TWICE) |
| || gimple_call_builtin_p (call_stmt, |
| BUILT_IN_SETJMP_RECEIVER))) |
| handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx, |
| &ab_edge_call, false); |
| } |
| } |
| |
| if (bb_to_omp_idx) |
| XDELETE (dispatcher_bbs); |
| } |
| |
| XDELETE (bb_to_omp_idx); |
| |
| omp_free_regions (); |
| } |
| |
/* Add SEQ after GSI.  Start new bb after GSI, and create further bbs as
| needed. Returns true if new bbs were created. |
| Note: This is transitional code, and should not be used for new code. We |
| should be able to get rid of this by rewriting all target va-arg |
| gimplification hooks to use an interface gimple_build_cond_value as described |
| in https://gcc.gnu.org/ml/gcc-patches/2015-02/msg01194.html. */ |
| |
| bool |
| gimple_find_sub_bbs (gimple_seq seq, gimple_stmt_iterator *gsi) |
| { |
| gimple *stmt = gsi_stmt (*gsi); |
| basic_block bb = gimple_bb (stmt); |
| basic_block lastbb, afterbb; |
| int old_num_bbs = n_basic_blocks_for_fn (cfun); |
| edge e; |
| lastbb = make_blocks_1 (seq, bb); |
| if (old_num_bbs == n_basic_blocks_for_fn (cfun)) |
| return false; |
| e = split_block (bb, stmt); |
| /* Move e->dest to come after the new basic blocks. */ |
| afterbb = e->dest; |
| unlink_block (afterbb); |
| link_block (afterbb, lastbb); |
| redirect_edge_succ (e, bb->next_bb); |
| bb = bb->next_bb; |
| while (bb != afterbb) |
| { |
| struct omp_region *cur_region = NULL; |
| profile_count cnt = profile_count::zero (); |
| bool all = true; |
| |
| int cur_omp_region_idx = 0; |
| int mer = make_edges_bb (bb, &cur_region, &cur_omp_region_idx); |
| gcc_assert (!mer && !cur_region); |
| add_bb_to_loop (bb, afterbb->loop_father); |
| |
| edge e; |
| edge_iterator ei; |
| FOR_EACH_EDGE (e, ei, bb->preds) |
| { |
| if (e->count ().initialized_p ()) |
| cnt += e->count (); |
| else |
| all = false; |
| } |
| tree_guess_outgoing_edge_probabilities (bb); |
| if (all || profile_status_for_fn (cfun) == PROFILE_READ) |
| bb->count = cnt; |
| |
| bb = bb->next_bb; |
| } |
| return true; |
| } |
| |
/* Find the next available discriminator value for source line LINE.
   The discriminator distinguishes among several basic blocks that
   share a common locus, allowing for more accurate sample-based
   profiling.  */
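/* For example, when both arms of a conditional end up on the same
   source line as the condition, giving their blocks distinct
   discriminators lets a sample-based profiler (such as AutoFDO)
   attribute execution counts to each arm separately.  */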
| |
| static int |
| next_discriminator_for_locus (int line) |
| { |
| struct locus_discrim_map item; |
| struct locus_discrim_map **slot; |
| |
| item.location_line = line; |
| item.discriminator = 0; |
| slot = discriminator_per_locus->find_slot_with_hash (&item, line, INSERT); |
| gcc_assert (slot); |
| if (*slot == HTAB_EMPTY_ENTRY) |
| { |
| *slot = XNEW (struct locus_discrim_map); |
| gcc_assert (*slot); |
| (*slot)->location_line = line; |
| (*slot)->discriminator = 0; |
| } |
| (*slot)->discriminator++; |
| return (*slot)->discriminator; |
| } |
| |
/* Return TRUE if LOCUS1 and LOCUS2 refer to the same source line.
   FROM is LOCUS1 already expanded via expand_location, passed in to
   avoid expanding it repeatedly.  */
| |
| static bool |
| same_line_p (location_t locus1, expanded_location *from, location_t locus2) |
| { |
| expanded_location to; |
| |
| if (locus1 == locus2) |
| return true; |
| |
| to = expand_location (locus2); |
| |
| if (from->line != to.line) |
| return false; |
| if (from->file == to.file) |
| return true; |
| return (from->file != NULL |
| && to.file != NULL |
| && filename_cmp (from->file, to.file) == 0); |
| } |
| |
| /* Assign discriminators to each basic block. */ |
| |
| static void |
| assign_discriminators (void) |
| { |
| basic_block bb; |
| |
| FOR_EACH_BB_FN (bb, cfun) |
| { |
| edge e; |
| edge_iterator ei; |
| gimple *last = last_stmt (bb); |
| location_t locus = last ? gimple_location (last) : UNKNOWN_LOCATION; |
| |
| if (locus == UNKNOWN_LOCATION) |
| continue; |
| |
| expanded_location locus_e = expand_location (locus); |
| |
| FOR_EACH_EDGE (e, ei, bb->succs) |
| { |
| gimple *first = first_non_label_stmt (e->dest); |
| gimple *last = last_stmt (e->dest); |
| if ((first && same_line_p (locus, &locus_e, |
| gimple_location (first))) |
| || (last && same_line_p (locus, &locus_e, |
| gimple_location (last)))) |
| { |
| if (e->dest->discriminator != 0 && bb->discriminator == 0) |
| bb->discriminator |
| = next_discriminator_for_locus (locus_e.line); |
| else |
| e->dest->discriminator |
| = next_discriminator_for_locus (locus_e.line); |
| } |
| } |
| } |
| } |
| |
| /* Create the edges for a GIMPLE_COND starting at block BB. */ |
| |
| static void |
| make_cond_expr_edges (basic_block bb) |
| { |
| gcond *entry = as_a <gcond *> (last_stmt (bb)); |
| gimple *then_stmt, *else_stmt; |
| basic_block then_bb, else_bb; |
| tree then_label, else_label; |
| edge e; |
| |
| gcc_assert (entry); |
| gcc_assert (gimple_code (entry) == GIMPLE_COND); |
| |
| /* Entry basic blocks for each component. */ |
| then_label = gimple_cond_true_label (entry); |
| else_label = gimple_cond_false_label (entry); |
| then_bb = label_to_block (cfun, then_label); |
| else_bb = label_to_block (cfun, else_label); |
| then_stmt = first_stmt (then_bb); |
| else_stmt = first_stmt (else_bb); |
| |
| e = make_edge (bb, then_bb, EDGE_TRUE_VALUE); |
| e->goto_locus = gimple_location (then_stmt); |
| e = make_edge (bb, else_bb, EDGE_FALSE_VALUE); |
| if (e) |
| e->goto_locus = gimple_location (else_stmt); |
| |
| /* We do not need the labels anymore. */ |
| gimple_cond_set_true_label (entry, NULL_TREE); |
| gimple_cond_set_false_label (entry, NULL_TREE); |
| } |
| |
| |
| /* Called for each element in the hash table (P) as we delete the |
| edge to cases hash table. |
| |
| Clear all the CASE_CHAINs to prevent problems with copying of |
| SWITCH_EXPRs and structure sharing rules, then free the hash table |
| element. */ |
| |
| bool |
| edge_to_cases_cleanup (edge const &, tree const &value, void *) |
| { |
| tree t, next; |
| |
| for (t = value; t; t = next) |
| { |
| next = CASE_CHAIN (t); |
| CASE_CHAIN (t) = NULL; |
| } |
| |
| return true; |
| } |
| |
| /* Start recording information mapping edges to case labels. */ |
| |
| void |
| start_recording_case_labels (void) |
| { |
| gcc_assert (edge_to_cases == NULL); |
| edge_to_cases = new hash_map<edge, tree>; |
| touched_switch_bbs = BITMAP_ALLOC (NULL); |
| } |
| |
| /* Return nonzero if we are recording information for case labels. */ |
| |
| static bool |
| recording_case_labels_p (void) |
| { |
| return (edge_to_cases != NULL); |
| } |
| |
| /* Stop recording information mapping edges to case labels and |
| remove any information we have recorded. */ |
| void |
| end_recording_case_labels (void) |
| { |
| bitmap_iterator bi; |
| unsigned i; |
| edge_to_cases->traverse<void *, edge_to_cases_cleanup> (NULL); |
| delete edge_to_cases; |
| edge_to_cases = NULL; |
| EXECUTE_IF_SET_IN_BITMAP (touched_switch_bbs, 0, i, bi) |
| { |
| basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i); |
| if (bb) |
| { |
| gimple *stmt = last_stmt (bb); |
| if (stmt && gimple_code (stmt) == GIMPLE_SWITCH) |
| group_case_labels_stmt (as_a <gswitch *> (stmt)); |
| } |
| } |
| BITMAP_FREE (touched_switch_bbs); |
| } |
| |
| /* If we are inside a {start,end}_recording_cases block, then return |
| a chain of CASE_LABEL_EXPRs from T which reference E. |
| |
| Otherwise return NULL. */ |
| |
| static tree |
| get_cases_for_edge (edge e, gswitch *t) |
| { |
| tree *slot; |
| size_t i, n; |
| |
| /* If we are not recording cases, then we do not have CASE_LABEL_EXPR |
| chains available. Return NULL so the caller can detect this case. */ |
| if (!recording_case_labels_p ()) |
| return NULL; |
| |
| slot = edge_to_cases->get (e); |
| if (slot) |
| return *slot; |
| |
| /* If we did not find E in the hash table, then this must be the first |
| time we have been queried for information about E & T. Add all the |
| elements from T to the hash table then perform the query again. */ |
| |
| n = gimple_switch_num_labels (t); |
| for (i = 0; i < n; i++) |
| { |
| tree elt = gimple_switch_label (t, i); |
| tree lab = CASE_LABEL (elt); |
| basic_block label_bb = label_to_block (cfun, lab); |
| edge this_edge = find_edge (e->src, label_bb); |
| |
| /* Add it to the chain of CASE_LABEL_EXPRs referencing E, or create |
| a new chain. */ |
| tree &s = edge_to_cases->get_or_insert (this_edge); |
| CASE_CHAIN (elt) = s; |
| s = elt; |
| } |
| |
| return *edge_to_cases->get (e); |
| } |
| |
| /* Create the edges for a GIMPLE_SWITCH starting at block BB. */ |
| |
| static void |
| make_gimple_switch_edges (gswitch *entry, basic_block bb) |
| { |
| size_t i, n; |
| |
| n = gimple_switch_num_labels (entry); |
| |
| for (i = 0; i < n; ++i) |
| { |
| basic_block label_bb = gimple_switch_label_bb (cfun, entry, i); |
| make_edge (bb, label_bb, 0); |
| } |
| } |
| |
| |
| /* Return the basic block holding label DEST. */ |
| |
| basic_block |
| label_to_block (struct function *ifun, tree dest) |
| { |
| int uid = LABEL_DECL_UID (dest); |
| |
  /* We would die hard when faced with an undefined label.  Emit a label to
| the very first basic block. This will hopefully make even the dataflow |
| and undefined variable warnings quite right. */ |
| if (seen_error () && uid < 0) |
| { |
| gimple_stmt_iterator gsi = |
| gsi_start_bb (BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS)); |
| gimple *stmt; |
| |
| stmt = gimple_build_label (dest); |
| gsi_insert_before (&gsi, stmt, GSI_NEW_STMT); |
| uid = LABEL_DECL_UID (dest); |
| } |
| if (vec_safe_length (ifun->cfg->x_label_to_block_map) <= (unsigned int) uid) |
| return NULL; |
| return (*ifun->cfg->x_label_to_block_map)[uid]; |
| } |
| |
| /* Create edges for a goto statement at block BB. Returns true |
| if abnormal edges should be created. */ |
| |
| static bool |
| make_goto_expr_edges (basic_block bb) |
| { |
| gimple_stmt_iterator last = gsi_last_bb (bb); |
| gimple *goto_t = gsi_stmt (last); |
| |
| /* A simple GOTO creates normal edges. */ |
| if (simple_goto_p (goto_t)) |
| { |
| tree dest = gimple_goto_dest (goto_t); |
| basic_block label_bb = label_to_block (cfun, dest); |
| edge e = make_edge (bb, label_bb, EDGE_FALLTHRU); |
| e->goto_locus = gimple_location (goto_t); |
| gsi_remove (&last, true); |
| return false; |
| } |
| |
| /* A computed GOTO creates abnormal edges. */ |
| return true; |
| } |
| |
| /* Create edges for an asm statement with labels at block BB. */ |
| |
| static void |
| make_gimple_asm_edges (basic_block bb) |
| { |
| gasm *stmt = as_a <gasm *> (last_stmt (bb)); |
| int i, n = gimple_asm_nlabels (stmt); |
| |
| for (i = 0; i < n; ++i) |
| { |
| tree label = TREE_VALUE (gimple_asm_label_op (stmt, i)); |
| basic_block label_bb = label_to_block (cfun, label); |
| make_edge (bb, label_bb, 0); |
| } |
| } |
| |
| /*--------------------------------------------------------------------------- |
| Flowgraph analysis |
| ---------------------------------------------------------------------------*/ |
| |
| /* Cleanup useless labels in basic blocks. This is something we wish |
| to do early because it allows us to group case labels before creating |
| the edges for the CFG, and it speeds up block statement iterators in |
| all passes later on. |
   We rerun this pass after the CFG is created, to get rid of the labels
   that are no longer referenced.  After that we do not run it any more,
   since (almost) no new labels should be created.  */
| |
| /* A map from basic block index to the leading label of that block. */ |
| struct label_record |
| { |
| /* The label. */ |
| tree label; |
| |
| /* True if the label is referenced from somewhere. */ |
| bool used; |
| }; |
| |
| /* Given LABEL return the first label in the same basic block. */ |
| |
| static tree |
| main_block_label (tree label, label_record *label_for_bb) |
| { |
| basic_block bb = label_to_block (cfun, label); |
| tree main_label = label_for_bb[bb->index].label; |
| |
  /* label_to_block may have inserted an undefined label into the chain.  */
| if (!main_label) |
| { |
| label_for_bb[bb->index].label = label; |
| main_label = label; |
| } |
| |
| label_for_bb[bb->index].used = true; |
| return main_label; |
| } |
| |
| /* Clean up redundant labels within the exception tree. */ |
| |
| static void |
| cleanup_dead_labels_eh (label_record *label_for_bb) |
| { |
| eh_landing_pad lp; |
| eh_region r; |
| tree lab; |
| int i; |
| |
| if (cfun->eh == NULL) |
| return; |
| |
| for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i) |
| if (lp && lp->post_landing_pad) |
| { |
| lab = main_block_label (lp->post_landing_pad, label_for_bb); |
| if (lab != lp->post_landing_pad) |
| { |
| EH_LANDING_PAD_NR (lp->post_landing_pad) = 0; |
| EH_LANDING_PAD_NR (lab) = lp->index; |
| } |
| } |
| |
| FOR_ALL_EH_REGION (r) |
| switch (r->type) |
| { |
| case ERT_CLEANUP: |
| case ERT_MUST_NOT_THROW: |
| break; |
| |
| case ERT_TRY: |
| { |
| eh_catch c; |
| for (c = r->u.eh_try.first_catch; c ; c = c->next_catch) |
| { |
| lab = c->label; |
| if (lab) |
| c->label = main_block_label (lab, label_for_bb); |
| } |
| } |
| break; |
| |
| case ERT_ALLOWED_EXCEPTIONS: |
| lab = r->u.allowed.label; |
| if (lab) |
| r->u.allowed.label = main_block_label (lab, label_for_bb); |
| break; |
| } |
| } |
| |
| |
| /* Cleanup redundant labels. This is a three-step process: |
| 1) Find the leading label for each block. |
| 2) Redirect all references to labels to the leading labels. |
| 3) Cleanup all useless labels. */ |
| |
| void |
| cleanup_dead_labels (void) |
| { |
| basic_block bb; |
| label_record *label_for_bb = XCNEWVEC (struct label_record, |
| last_basic_block_for_fn (cfun)); |
| |
| /* Find a suitable label for each block. We use the first user-defined |
| label if there is one, or otherwise just the first label we see. */ |
| FOR_EACH_BB_FN (bb, cfun) |
| { |
| gimple_stmt_iterator i; |
| |
| for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i)) |
| { |
| tree label; |
| glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (i)); |
| |
| if (!label_stmt) |
| break; |
| |
| label = gimple_label_label (label_stmt); |
| |
| /* If we have not yet seen a label for the current block, |
| remember this one and see if there are more labels. */ |
| if (!label_for_bb[bb->index].label) |
| { |
| label_for_bb[bb->index].label = label; |
| continue; |
| } |
| |
	  /* If we did see a label for the current block already, but it
	     is an artificially created label, replace it if the current
	     label is a user-defined label.  */
| if (!DECL_ARTIFICIAL (label) |
| && DECL_ARTIFICIAL (label_for_bb[bb->index].label)) |
| { |
| label_for_bb[bb->index].label = label; |
| break; |
| } |
| } |
| } |
| |
| /* Now redirect all jumps/branches to the selected label. |
| First do so for each block ending in a control statement. */ |
| FOR_EACH_BB_FN (bb, cfun) |
| { |
| gimple *stmt = last_stmt (bb); |
| tree label, new_label; |
| |
| if (!stmt) |
| continue; |
| |
| switch (gimple_code (stmt)) |
| { |
| case GIMPLE_COND: |
| { |
| gcond *cond_stmt = as_a <gcond *> (stmt); |
| label = gimple_cond_true_label (cond_stmt); |
| if (label) |
| { |
| new_label = main_block_label (label, label_for_bb); |
| if (new_label != label) |
| gimple_cond_set_true_label (cond_stmt, new_label); |
| } |
| |
| label = gimple_cond_false_label (cond_stmt); |
| if (label) |
| { |
| new_label = main_block_label (label, label_for_bb); |
| if (new_label != label) |
| gimple_cond_set_false_label (cond_stmt, new_label); |
| } |
| } |
| break; |
| |
| case GIMPLE_SWITCH: |
| { |
| gswitch *switch_stmt = as_a <gswitch *> (stmt); |
| size_t i, n = gimple_switch_num_labels (switch_stmt); |
| |
| /* Replace all destination labels. */ |
| for (i = 0; i < n; ++i) |
| { |
| tree case_label = gimple_switch_label (switch_stmt, i); |
| label = CASE_LABEL (case_label); |
| new_label = main_block_label (label, label_for_bb); |
| if (new_label != label) |
| CASE_LABEL (case_label) = new_label; |
| } |
| break; |
| } |
| |
| case GIMPLE_ASM: |
| { |
| gasm *asm_stmt = as_a <gasm *> (stmt); |
| int i, n = gimple_asm_nlabels (asm_stmt); |
| |
| for (i = 0; i < n; ++i) |
| { |
| tree cons = gimple_asm_label_op (asm_stmt, i); |
| tree label = main_block_label (TREE_VALUE (cons), label_for_bb); |
| TREE_VALUE (cons) = label; |
| } |
| break; |
| } |
| |
| /* We have to handle gotos until they're removed, and we don't |
| remove them until after we've created the CFG edges. */ |
| case GIMPLE_GOTO: |
| if (!computed_goto_p (stmt)) |
| { |
| ggoto *goto_stmt = as_a <ggoto *> (stmt); |
| label = gimple_goto_dest (goto_stmt); |
| new_label = main_block_label (label, label_for_bb); |
| if (new_label != label) |
| gimple_goto_set_dest (goto_stmt, new_label); |
| } |
| break; |
| |
| case GIMPLE_TRANSACTION: |
| { |
| gtransaction *txn = as_a <gtransaction *> (stmt); |
| |
| label = gimple_transaction_label_norm (txn); |
| if (label) |
| { |
| new_label = main_block_label (label, label_for_bb); |
| if (new_label != label) |
| gimple_transaction_set_label_norm (txn, new_label); |
| } |
| |
| label = gimple_transaction_label_uninst (txn); |
| if (label) |
| { |
| new_label = main_block_label (label, label_for_bb); |
| if (new_label != label) |
| gimple_transaction_set_label_uninst (txn, new_label); |
| } |
| |
| label = gimple_transaction_label_over (txn); |
| if (label) |
| { |
| new_label = main_block_label (label, label_for_bb); |
| if (new_label != label) |
| gimple_transaction_set_label_over (txn, new_label); |
| } |
| } |
| break; |
| |
| default: |
| break; |
| } |
| } |
| |
| /* Do the same for the exception region tree labels. */ |
| cleanup_dead_labels_eh (label_for_bb); |
| |
| /* Finally, purge dead labels. All user-defined labels and labels that |
| can be the target of non-local gotos and labels which have their |
| address taken are preserved. */ |
| FOR_EACH_BB_FN (bb, cfun) |
| { |
| gimple_stmt_iterator i; |
| tree label_for_this_bb = label_for_bb[bb->index].label; |
| |
| if (!label_for_this_bb) |
| continue; |
| |
| /* If the main label of the block is unused, we may still remove it. */ |
| if (!label_for_bb[bb->index].used) |
| label_for_this_bb = NULL; |
| |
| for (i = gsi_start_bb (bb); !gsi_end_p (i); ) |
| { |
| tree label; |
| glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (i)); |
| |
| if (!label_stmt) |
| break; |
| |
| label = gimple_label_label (label_stmt); |
| |
| if (label == label_for_this_bb |
| || !DECL_ARTIFICIAL (label) |
| || DECL_NONLOCAL (label) |
| || FORCED_LABEL (label)) |
| gsi_next (&i); |
| else |
| gsi_remove (&i, true); |
| } |
| } |
| |
| free (label_for_bb); |
| } |
| |
| /* Scan the sorted vector of cases in STMT (a GIMPLE_SWITCH) and combine |
| the ones jumping to the same label. |
   E.g. three separate entries 1: 2: 3: become one entry 1..3:  */
| |
| bool |
| group_case_labels_stmt (gswitch *stmt) |
| { |
| int old_size = gimple_switch_num_labels (stmt); |
| int i, next_index, new_size; |
| basic_block default_bb = NULL; |
| hash_set<tree> *removed_labels = NULL; |
| |
| default_bb = gimple_switch_default_bb (cfun, stmt); |
| |
| /* Look for possible opportunities to merge cases. */ |
| new_size = i = 1; |
| while (i < old_size) |
| { |
| tree base_case, base_high; |
| basic_block base_bb; |
| |
| base_case = gimple_switch_label (stmt, i); |
| |
| gcc_assert (base_case); |
| base_bb = label_to_block (cfun, CASE_LABEL (base_case)); |
| |
| /* Discard cases that have the same destination as the default case or |
| whose destination blocks have already been removed as unreachable. */ |
| if (base_bb == NULL |
| || base_bb == default_bb |
| || (removed_labels |
| && removed_labels->contains (CASE_LABEL (base_case)))) |
| { |
| i++; |
| continue; |
| } |
| |
| base_high = CASE_HIGH (base_case) |
| ? CASE_HIGH (base_case) |
| : CASE_LOW (base_case); |
| next_index = i + 1; |
| |
| /* Try to merge case labels. Break out when we reach the end |
| of the label vector or when we cannot merge the next case |
| label with the current one. */ |
| while (next_index < old_size) |
| { |
| tree merge_case = gimple_switch_label (stmt, next_index); |
| basic_block merge_bb = label_to_block (cfun, CASE_LABEL (merge_case)); |
| wide_int bhp1 = wi::to_wide (base_high) + 1; |
| |
| /* Merge the cases if they jump to the same place, |
| and their ranges are consecutive. */ |
| if (merge_bb == base_bb |
| && (removed_labels == NULL |
| || !removed_labels->contains (CASE_LABEL (merge_case))) |
| && wi::to_wide (CASE_LOW (merge_case)) == bhp1) |
| { |
| base_high |
| = (CASE_HIGH (merge_case) |
| ? CASE_HIGH (merge_case) : CASE_LOW (merge_case)); |
| CASE_HIGH (base_case) = base_high; |
| next_index++; |
| } |
| else |
| break; |
| } |
| |
| /* Discard cases that have an unreachable destination block. */ |
| if (EDGE_COUNT (base_bb->succs) == 0 |
| && gimple_seq_unreachable_p (bb_seq (base_bb)) |
| /* Don't optimize this if __builtin_unreachable () is the |
| implicitly added one by the C++ FE too early, before |
| -Wreturn-type can be diagnosed. We'll optimize it later |
| during switchconv pass or any other cfg cleanup. */ |
| && (gimple_in_ssa_p (cfun) |
| || (LOCATION_LOCUS (gimple_location (last_stmt (base_bb))) |
| != BUILTINS_LOCATION))) |
| { |
| edge base_edge = find_edge (gimple_bb (stmt), base_bb); |
| if (base_edge != NULL) |
| { |
| for (gimple_stmt_iterator gsi = gsi_start_bb (base_bb); |
| !gsi_end_p (gsi); gsi_next (&gsi)) |
| if (glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi))) |
| { |
| if (FORCED_LABEL (gimple_label_label (stmt)) |
| || DECL_NONLOCAL (gimple_label_label (stmt))) |
| { |
| /* Forced/non-local labels aren't going to be removed, |
| but they will be moved to some neighbouring basic |
| block. If some later case label refers to one of |
| those labels, we should throw that case away rather |
		     than keeping it around and referring to some random
| other basic block without an edge to it. */ |
| if (removed_labels == NULL) |
| removed_labels = new hash_set<tree>; |
| removed_labels->add (gimple_label_label (stmt)); |
| } |
| } |
| else |
| break; |
| remove_edge_and_dominated_blocks (base_edge); |
| } |
| i = next_index; |
| continue; |
| } |
| |
| if (new_size < i) |
| gimple_switch_set_label (stmt, new_size, |
| gimple_switch_label (stmt, i)); |
| i = next_index; |
| new_size++; |
| } |
| |
| gcc_assert (new_size <= old_size); |
| |
| if (new_size < old_size) |
| gimple_switch_set_num_labels (stmt, new_size); |
| |
| delete removed_labels; |
| return new_size < old_size; |
| } |
| |
| /* Look for blocks ending in a multiway branch (a GIMPLE_SWITCH), |
| and scan the sorted vector of cases. Combine the ones jumping to the |
| same label. */ |
| |
| bool |
| group_case_labels (void) |
| { |
| basic_block bb; |
| bool changed = false; |
| |
| FOR_EACH_BB_FN (bb, cfun) |
| { |
| gimple *stmt = last_stmt (bb); |
| if (stmt && gimple_code (stmt) == GIMPLE_SWITCH) |
| changed |= group_case_labels_stmt (as_a <gswitch *> (stmt)); |
| } |
| |
| return changed; |
| } |
| |
| /* Checks whether we can merge block B into block A. */ |
| |
| static bool |
| gimple_can_merge_blocks_p (basic_block a, basic_block b) |
| { |
| gimple *stmt; |
| |
| if (!single_succ_p (a)) |
| return false; |
| |
| if (single_succ_edge (a)->flags & EDGE_COMPLEX) |
| return false; |
| |
| if (single_succ (a) != b) |
| return false; |
| |
| if (!single_pred_p (b)) |
| return false; |
| |
| if (a == ENTRY_BLOCK_PTR_FOR_FN (cfun) |
| || b == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
| return false; |
| |
| /* If A ends with a statement causing exceptions or something similar, we |
| cannot merge the blocks. */ |
| stmt = last_stmt (a); |
| if (stmt && stmt_ends_bb_p (stmt)) |
| return false; |
| |
| /* Do not allow a block with only a non-local label to be merged. */ |
| if (stmt) |
| if (glabel *label_stmt = dyn_cast <glabel *> (stmt)) |
| if (DECL_NONLOCAL (gimple_label_label (label_stmt))) |
| return false; |
| |
| /* Examine the labels at the beginning of B. */ |
| for (gimple_stmt_iterator gsi = gsi_start_bb (b); !gsi_end_p (gsi); |
| gsi_next (&gsi)) |
| { |
| tree lab; |
| glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi)); |
| if (!label_stmt) |
| break; |
| lab = gimple_label_label (label_stmt); |
| |
| /* Do not remove user forced labels or for -O0 any user labels. */ |
| if (!DECL_ARTIFICIAL (lab) && (!optimize || FORCED_LABEL (lab))) |
| return false; |
| } |
| |
| /* Protect simple loop latches. We only want to avoid merging |
| the latch with the loop header or with a block in another |
| loop in this case. */ |
| if (current_loops |
| && b->loop_father->latch == b |
| && loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES) |
| && (b->loop_father->header == a |
| || b->loop_father != a->loop_father)) |
| return false; |
| |
| /* It must be possible to eliminate all phi nodes in B. If ssa form |
| is not up-to-date and a name-mapping is registered, we cannot eliminate |
| any phis. Symbols marked for renaming are never a problem though. */ |
| for (gphi_iterator gsi = gsi_start_phis (b); !gsi_end_p (gsi); |
| gsi_next (&gsi)) |
| { |
| gphi *phi = gsi.phi (); |
| /* Technically only new names matter. */ |
| if (name_registered_for_update_p (PHI_RESULT (phi))) |
| return false; |
| } |
| |
| /* When not optimizing, don't merge if we'd lose goto_locus. */ |
| if (!optimize |
| && single_succ_edge (a)->goto_locus != UNKNOWN_LOCATION) |
| { |
| location_t goto_locus = single_succ_edge (a)->goto_locus; |
| gimple_stmt_iterator prev, next; |
| prev = gsi_last_nondebug_bb (a); |
| next = gsi_after_labels (b); |
| if (!gsi_end_p (next) && is_gimple_debug (gsi_stmt (next))) |
| gsi_next_nondebug (&next); |
| if ((gsi_end_p (prev) |
| || gimple_location (gsi_stmt (prev)) != goto_locus) |
| && (gsi_end_p (next) |
| || gimple_location (gsi_stmt (next)) != goto_locus)) |
| return false; |
| } |
| |
| return true; |
| } |
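| |
| /* For example (illustrative): if A's only successor is B via a plain |
| fallthru edge and A is B's only predecessor, the merge is allowed; |
| if A instead ends in a GIMPLE_COND or another statement for which |
| stmt_ends_bb_p is true, the merge is rejected above. */ |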
| |
| /* Replaces all uses of NAME by VAL. */ |
| |
| void |
| replace_uses_by (tree name, tree val) |
| { |
| imm_use_iterator imm_iter; |
| use_operand_p use; |
| gimple *stmt; |
| edge e; |
| |
| FOR_EACH_IMM_USE_STMT (stmt, imm_iter, name) |
| { |
| /* Mark the block if we change the last stmt in it. */ |
| if (cfgcleanup_altered_bbs |
| && stmt_ends_bb_p (stmt)) |
| bitmap_set_bit (cfgcleanup_altered_bbs, gimple_bb (stmt)->index); |
| |
| FOR_EACH_IMM_USE_ON_STMT (use, imm_iter) |
| { |
| replace_exp (use, val); |
| |
| if (gimple_code (stmt) == GIMPLE_PHI) |
| { |
| e = gimple_phi_arg_edge (as_a <gphi *> (stmt), |
| PHI_ARG_INDEX_FROM_USE (use)); |
| if (e->flags & EDGE_ABNORMAL |
| && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val)) |
| { |
| /* This can only occur for virtual operands, since |
| for the real ones SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name) |
| would prevent replacement. */ |
| gcc_checking_assert (virtual_operand_p (name)); |
| SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val) = 1; |
| } |
| } |
| } |
| |
| if (gimple_code (stmt) != GIMPLE_PHI) |
| { |
| gimple_stmt_iterator gsi = gsi_for_stmt (stmt); |
| gimple *orig_stmt = stmt; |
| size_t i; |
| |
| /* FIXME. It shouldn't be required to keep TREE_CONSTANT |
| on ADDR_EXPRs up-to-date on GIMPLE. Propagation will |
| only change something from non-invariant to invariant, and only |
| when propagating constants. */ |
| if (is_gimple_min_invariant (val)) |
| for (i = 0; i < gimple_num_ops (stmt); i++) |
| { |
| tree op = gimple_op (stmt, i); |
| /* Operands may be empty here. For example, the labels |
| of a GIMPLE_COND are nulled out following the creation |
| of the corresponding CFG edges. */ |
| if (op && TREE_CODE (op) == ADDR_EXPR) |
| recompute_tree_invariant_for_addr_expr (op); |
| } |
| |
| if (fold_stmt (&gsi)) |
| stmt = gsi_stmt (gsi); |
| |
| if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt)) |
| gimple_purge_dead_eh_edges (gimple_bb (stmt)); |
| |
| update_stmt (stmt); |
| } |
| } |
| |
| gcc_checking_assert (has_zero_uses (name)); |
| |
| /* Also update the trees stored in loop structures. */ |
| if (current_loops) |
| { |
| class loop *loop; |
| |
| FOR_EACH_LOOP (loop, 0) |
| { |
| substitute_in_loop_info (loop, name, val); |
| } |
| } |
| } |
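| |
| /* An illustrative GIMPLE example (hypothetical SSA names): calling |
| replace_uses_by (a_1, b_2) on |
| |
| c_3 = a_1 + 1; |
| if (a_1 > c_3) ... |
| |
| rewrites both uses, folds and updates each touched statement, and |
| leaves "c_3 = b_2 + 1; if (b_2 > c_3) ...". */ |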
| |
| /* Merge block B into block A. */ |
| |
| static void |
| gimple_merge_blocks (basic_block a, basic_block b) |
| { |
| gimple_stmt_iterator last, gsi; |
| gphi_iterator psi; |
| |
| if (dump_file) |
| fprintf (dump_file, "Merging blocks %d and %d\n", a->index, b->index); |
| |
| /* Remove all single-valued PHI nodes from block B of the form |
| V_i = PHI <V_j> by propagating V_j to all the uses of V_i. */ |
| gsi = gsi_last_bb (a); |
| for (psi = gsi_start_phis (b); !gsi_end_p (psi); ) |
| { |
| gimple *phi = gsi_stmt (psi); |
| tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0); |
| gimple *copy; |
| bool may_replace_uses = (virtual_operand_p (def) |
| || may_propagate_copy (def, use)); |
| |
| /* In case we maintain loop closed ssa form, do not propagate arguments |
| of loop exit phi nodes. */ |
| if (current_loops |
| && loops_state_satisfies_p (LOOP_CLOSED_SSA) |
| && !virtual_operand_p (def) |
| && TREE_CODE (use) == SSA_NAME |
| && a->loop_father != b->loop_father) |
| may_replace_uses = false; |
| |
| if (!may_replace_uses) |
| { |
| gcc_assert (!virtual_operand_p (def)); |
| |
| /* Note that just emitting the copies is fine -- there is no problem |
| with ordering of phi nodes. This is because A is the single |
| predecessor of B, therefore results of the phi nodes cannot |
| appear as arguments of the phi nodes. */ |
| copy = gimple_build_assign (def, use); |
| gsi_insert_after (&gsi, copy, GSI_NEW_STMT); |
| remove_phi_node (&psi, false); |
| } |
| else |
| { |
| /* If we deal with a PHI for virtual operands, we can simply |
| propagate these without fussing with folding or updating |
| the stmt. */ |
| if (virtual_operand_p (def)) |
| { |
| imm_use_iterator iter; |
| use_operand_p use_p; |
| gimple *stmt; |
| |
| FOR_EACH_IMM_USE_STMT (stmt, iter, def) |
| FOR_EACH_IMM_USE_ON_STMT (use_p, iter) |
| SET_USE (use_p, use); |
| |
| if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)) |
| SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use) = 1; |
| } |
| else |
| replace_uses_by (def, use); |
| |
| remove_phi_node (&psi, true); |
| } |
| } |
| |
| /* Ensure that B follows A. */ |
| move_block_after (b, a); |
| |
| gcc_assert (single_succ_edge (a)->flags & EDGE_FALLTHRU); |
| gcc_assert (!last_stmt (a) || !stmt_ends_bb_p (last_stmt (a))); |
| |
| /* Remove labels from B and set gimple_bb to A for other statements. */ |
| for (gsi = gsi_start_bb (b); !gsi_end_p (gsi);) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| if (glabel *label_stmt = dyn_cast <glabel *> (stmt)) |
| { |
| tree label = gimple_label_label (label_stmt); |
| int lp_nr; |
| |
| gsi_remove (&gsi, false); |
| |
| /* Now that we can thread computed gotos, we might have |
| a situation where we have a forced label in block B. |
| However, the label at the start of block B might still be |
| used in other ways (think about the runtime checking for |
| Fortran assigned gotos). So we cannot just delete the |
| label. Instead we move the label to the start of block A. */ |
| if (FORCED_LABEL (label)) |
| { |
| gimple_stmt_iterator dest_gsi = gsi_start_bb (a); |
| tree first_label = NULL_TREE; |
| if (!gsi_end_p (dest_gsi)) |
| if (glabel *first_label_stmt |
| = dyn_cast <glabel *> (gsi_stmt (dest_gsi))) |
| first_label = gimple_label_label (first_label_stmt); |
| if (first_label |
| && (DECL_NONLOCAL (first_label) |
| || EH_LANDING_PAD_NR (first_label) != 0)) |
| gsi_insert_after (&dest_gsi, stmt, GSI_NEW_STMT); |
| else |
| gsi_insert_before (&dest_gsi, stmt, GSI_NEW_STMT); |
| } |
| /* Other user labels are kept around in the form of a debug stmt. */ |
| else if (!DECL_ARTIFICIAL (label) && MAY_HAVE_DEBUG_BIND_STMTS) |
| { |
| gimple *dbg = gimple_build_debug_bind (label, |
| integer_zero_node, |
| stmt); |
| gimple_debug_bind_reset_value (dbg); |
| gsi_insert_before (&gsi, dbg, GSI_SAME_STMT); |
| } |
| |
| lp_nr = EH_LANDING_PAD_NR (label); |
| if (lp_nr) |
| { |
| eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr); |
| lp->post_landing_pad = NULL; |
| } |
| } |
| else |
| { |
| gimple_set_bb (stmt, a); |
| gsi_next (&gsi); |
| } |
| } |
| |
| /* When merging two BBs, if their counts are different, the larger count |
| is selected as the new bb count. This is to handle inconsistent |
| profiles. */ |
| if (a->loop_father == b->loop_father) |
| { |
| a->count = a->count.merge (b->count); |
| } |
| |
| /* Merge the sequences. */ |
| last = gsi_last_bb (a); |
| gsi_insert_seq_after (&last, bb_seq (b), GSI_NEW_STMT); |
| set_bb_seq (b, NULL); |
| |
| if (cfgcleanup_altered_bbs) |
| bitmap_set_bit (cfgcleanup_altered_bbs, a->index); |
| } |
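| |
| /* Illustrative example (hypothetical SSA names): when B starts with |
| the single-valued PHI |
| |
| # x_1 = PHI <y_2(A)> |
| |
| the PHI is removed and uses of x_1 are replaced by y_2; if the copy |
| cannot be propagated, the assignment "x_1 = y_2;" is appended to A |
| instead, after which B's remaining statements are moved into A. */ |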
| |
| |
| /* Of the two successors of BB, return the one that is not reachable |
| through a complex edge, if there is one. Otherwise, return BB. We |
| use this in optimizations that use post-dominators for their |
| heuristics, to catch the cases in C++ where function calls are |
| involved. */ |
| |
| basic_block |
| single_noncomplex_succ (basic_block bb) |
| { |
| edge e0, e1; |
| if (EDGE_COUNT (bb->succs) != 2) |
| return bb; |
| |
| e0 = EDGE_SUCC (bb, 0); |
| e1 = EDGE_SUCC (bb, 1); |
| if (e0->flags & EDGE_COMPLEX) |
| return e1->dest; |
| if (e1->flags & EDGE_COMPLEX) |
| return e0->dest; |
| |
| return bb; |
| } |
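| |
| /* For instance (illustrative): a block whose last statement is a call |
| that can throw internally has a fallthru successor and an EH |
| successor; the EH edge is EDGE_COMPLEX, so this returns the |
| fallthru destination. */ |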
| |
| /* CALL is a GIMPLE_CALL. Set the current_function_calls_* flags. */ |
| |
| void |
| notice_special_calls (gcall *call) |
| { |
| int flags = gimple_call_flags (call); |
| |
| if (flags & ECF_MAY_BE_ALLOCA) |
| cfun->calls_alloca = true; |
| if (flags & ECF_RETURNS_TWICE) |
| cfun->calls_setjmp = true; |
| } |
| |
| |
| /* Clear flags set by notice_special_calls. Used by dead code removal |
| to update the flags. */ |
| |
| void |
| clear_special_calls (void) |
| { |
| cfun->calls_alloca = false; |
| cfun->calls_setjmp = false; |
| } |
| |
| /* Remove PHI nodes associated with basic block BB and all edges out of BB. */ |
| |
| static void |
| remove_phi_nodes_and_edges_for_unreachable_block (basic_block bb) |
| { |
| /* Since this block is no longer reachable, we can just delete all |
| of its PHI nodes. */ |
| remove_phi_nodes (bb); |
| |
| /* Remove edges to BB's successors. */ |
| while (EDGE_COUNT (bb->succs) > 0) |
| remove_edge (EDGE_SUCC (bb, 0)); |
| } |
| |
| |
| /* Remove statements of basic block BB. */ |
| |
| static void |
| remove_bb (basic_block bb) |
| { |
| gimple_stmt_iterator i; |
| |
| if (dump_file) |
| { |
| fprintf (dump_file, "Removing basic block %d\n", bb->index); |
| if (dump_flags & TDF_DETAILS) |
| { |
| dump_bb (dump_file, bb, 0, TDF_BLOCKS); |
| fprintf (dump_file, "\n"); |
| } |
| } |
| |
| if (current_loops) |
| { |
| class loop *loop = bb->loop_father; |
| |
| /* If a loop gets removed, clean up the information associated |
| with it. */ |
| if (loop->latch == bb |
| || loop->header == bb) |
| free_numbers_of_iterations_estimates (loop); |
| } |
| |
| /* Remove all the instructions in the block. */ |
| if (bb_seq (bb) != NULL) |
| { |
| /* Walk backwards so as to get a chance to substitute all |
| released DEFs into debug stmts. See |
| eliminate_unnecessary_stmts() in tree-ssa-dce.c for more |
| details. */ |
| for (i = gsi_last_bb (bb); !gsi_end_p (i);) |
| { |
| gimple *stmt = gsi_stmt (i); |
| glabel *label_stmt = dyn_cast <glabel *> (stmt); |
| if (label_stmt |
| && (FORCED_LABEL (gimple_label_label (label_stmt)) |
| || DECL_NONLOCAL (gimple_label_label (label_stmt)))) |
| { |
| basic_block new_bb; |
| gimple_stmt_iterator new_gsi; |
| |
| /* A non-reachable non-local label may still be referenced. |
| But it no longer needs to carry the extra semantics of |
| non-locality. */ |
| if (DECL_NONLOCAL (gimple_label_label (label_stmt))) |
| { |
| DECL_NONLOCAL (gimple_label_label (label_stmt)) = 0; |
| FORCED_LABEL (gimple_label_label (label_stmt)) = 1; |
| } |
| |
| new_bb = bb->prev_bb; |
| /* Don't move any labels into ENTRY block. */ |
| if (new_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)) |
| { |
| new_bb = single_succ (new_bb); |
| gcc_assert (new_bb != bb); |
| } |
| new_gsi = gsi_after_labels (new_bb); |
| gsi_remove (&i, false); |
| gsi_insert_before (&new_gsi, stmt, GSI_NEW_STMT); |
| } |
| else |
| { |
| /* Release SSA definitions. */ |
| release_defs (stmt); |
| gsi_remove (&i, true); |
| } |
| |
| if (gsi_end_p (i)) |
| i = gsi_last_bb (bb); |
| else |
| gsi_prev (&i); |
| } |
| } |
| |
| remove_phi_nodes_and_edges_for_unreachable_block (bb); |
| bb->il.gimple.seq = NULL; |
| bb->il.gimple.phi_nodes = NULL; |
| } |
| |
| |
| /* Given a basic block BB and a value VAL for use in the final statement |
| of the block (if a GIMPLE_COND, GIMPLE_SWITCH, or computed goto), return |
| the edge that will be taken out of the block. |
| If VAL is NULL_TREE, then the current value of the final statement's |
| predicate or index is used. |
| If the value does not match a unique edge, NULL is returned. */ |
| |
| edge |
| find_taken_edge (basic_block bb, tree val) |
| { |
| gimple *stmt; |
| |
| stmt = last_stmt (bb); |
| |
| /* Handle ENTRY and EXIT. */ |
| if (!stmt) |
| return NULL; |
| |
| if (gimple_code (stmt) == GIMPLE_COND) |
| return find_taken_edge_cond_expr (as_a <gcond *> (stmt), val); |
| |
| if (gimple_code (stmt) == GIMPLE_SWITCH) |
| return find_taken_edge_switch_expr (as_a <gswitch *> (stmt), val); |
| |
| if (computed_goto_p (stmt)) |
| { |
| /* Only optimize if the argument is a label; if the argument is |
| not a label, we cannot construct a proper CFG. |
| |
| It may be the case that we only need to allow the LABEL_REF to |
| appear inside an ADDR_EXPR, but we also allow the LABEL_REF to |
| appear inside a LABEL_EXPR just to be safe. */ |
| if (val |
| && (TREE_CODE (val) == ADDR_EXPR || TREE_CODE (val) == LABEL_EXPR) |
| && TREE_CODE (TREE_OPERAND (val, 0)) == LABEL_DECL) |
| return find_taken_edge_computed_goto (bb, TREE_OPERAND (val, 0)); |
| } |
| |
| /* Otherwise we only know the taken successor edge if it's unique. */ |
| return single_succ_p (bb) ? single_succ_edge (bb) : NULL; |
| } |
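| |
| /* Illustrative examples: for a block ending in the folded condition |
| "if (0 != 0)" this returns the EDGE_FALSE_VALUE successor, and for |
| "switch (7)" it returns the edge leading to the case label whose |
| range covers 7. */ |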
| |
| /* Given a constant value VAL (a LABEL_DECL) and a basic block BB that |
| ends in a computed GOTO, determine which outgoing edge will be taken |
| out of the block. Return NULL if the edge cannot be determined. */ |
| |
| static edge |
| find_taken_edge_computed_goto (basic_block bb, tree val) |
| { |
| basic_block dest; |
| edge e = NULL; |
| |
| dest = label_to_block (cfun, val); |
| if (dest) |
| e = find_edge (bb, dest); |
| |
| /* It's possible for find_edge to return NULL here on invalid code |
| that abuses the labels-as-values extension (e.g. code that attempts to |
| jump *between* functions via stored labels-as-values; PR 84136). |
| If so, then we simply return that NULL for the edge. |
| We don't currently have a way of detecting such invalid code, so we |
| can't assert that it was the case when a NULL edge occurs here. */ |
| |
| return e; |
| } |
| |
| /* Given COND_STMT and a constant value VAL for use as the predicate, |
| determine which of the two edges will be taken out of |
| the statement's block. Return NULL if either edge may be taken. |
| If VAL is NULL_TREE, then the current value of COND_STMT's predicate |
| is used. */ |
| |
| static edge |
| find_taken_edge_cond_expr (const gcond *cond_stmt, tree val) |
| { |
| edge true_edge, false_edge; |
| |
| if (val == NULL_TREE) |
| { |
| /* Use the current value of the predicate. */ |
| if (gimple_cond_true_p (cond_stmt)) |
| val = integer_one_node; |
| else if (gimple_cond_false_p (cond_stmt)) |
| val = integer_zero_node; |
| else |
| return NULL; |
| } |
| else if (TREE_CODE (val) != INTEGER_CST) |
| return NULL; |
| |
| extract_true_false_edges_from_block (gimple_bb (cond_stmt), |
| &true_edge, &false_edge); |
| |
| return (integer_zerop (val) ? false_edge : true_edge); |
| } |
| |
| /* Given SWITCH_STMT and an INTEGER_CST VAL for use as the index, determine |
| which edge will be taken out of the statement's block. Return NULL if any |
| edge may be taken. |
| If VAL is NULL_TREE, then the current value of SWITCH_STMT's index |
| is used. */ |
| |
| edge |
| find_taken_edge_switch_expr (const gswitch *switch_stmt, tree val) |
| { |
| basic_block dest_bb; |
| edge e; |
| tree taken_case; |
| |
| if (gimple_switch_num_labels (switch_stmt) == 1) |
| taken_case = gimple_switch_default_label (switch_stmt); |
| else |
| { |
| if (val == NULL_TREE) |
| val = gimple_switch_index (switch_stmt); |
| if (TREE_CODE (val) != INTEGER_CST) |
| return NULL; |
| else |
| taken_case = find_case_label_for_value (switch_stmt, val); |
| } |
| dest_bb = label_to_block (cfun, CASE_LABEL (taken_case)); |
| |
| e = find_edge (gimple_bb (switch_stmt), dest_bb); |
| gcc_assert (e); |
| return e; |
| } |
| |
| |
| /* Return the CASE_LABEL_EXPR that SWITCH_STMT will take for VAL. |
| Since the case labels are sorted, we can do a binary search for |
| a case matching VAL. */ |
| |
| tree |
| find_case_label_for_value (const gswitch *switch_stmt, tree val) |
| { |
| size_t low, high, n = gimple_switch_num_labels (switch_stmt); |
| tree default_case = gimple_switch_default_label (switch_stmt); |
| |
| for (low = 0, high = n; high - low > 1; ) |
| { |
| size_t i = (high + low) / 2; |
| tree t = gimple_switch_label (switch_stmt, i); |
| int cmp; |
| |
| /* Cache the result of comparing CASE_LOW and val. */ |
| cmp = tree_int_cst_compare (CASE_LOW (t), val); |
| |
| if (cmp > 0) |
| high = i; |
| else |
| low = i; |
| |
| if (CASE_HIGH (t) == NULL) |
| { |
| /* A single-valued case label. */ |
| if (cmp == 0) |
| return t; |
| } |
| else |
| { |
| /* A case range. We can only handle integer ranges. */ |
| if (cmp <= 0 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0) |
| return t; |
| } |
| } |
| |
| return default_case; |
| } |
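| |
| /* A worked example (illustrative): with the sorted label vector |
| { default, case 1, case 5 ... 10, case 20 } and VAL == 7, the first |
| probe lands on "case 5 ... 10"; CASE_LOW (5) <= 7 <= CASE_HIGH (10), |
| so that range label is returned rather than the default. */ |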
| |
| |
| /* Dump a basic block on stderr. */ |
| |
| void |
| gimple_debug_bb (basic_block bb) |
| { |
| dump_bb (stderr, bb, 0, TDF_VOPS|TDF_MEMSYMS|TDF_BLOCKS); |
| } |
| |
| |
| /* Dump basic block with index N on stderr. */ |
| |
| basic_block |
| gimple_debug_bb_n (int n) |
| { |
| gimple_debug_bb (BASIC_BLOCK_FOR_FN (cfun, n)); |
| return BASIC_BLOCK_FOR_FN (cfun, n); |
| } |
| |
| |
| /* Dump the CFG on stderr. |
| |
| FLAGS are the same used by the tree dumping functions |
| (see TDF_* in dumpfile.h). */ |
| |
| void |
| gimple_debug_cfg (dump_flags_t flags) |
| { |
| gimple_dump_cfg (stderr, flags); |
| } |
| |
| |
| /* Dump the program showing basic block boundaries on the given FILE. |
| |
| FLAGS are the same used by the tree dumping functions (see TDF_* in |
| tree.h). */ |
| |
| void |
| gimple_dump_cfg (FILE *file, dump_flags_t flags) |
| { |
| if (flags & TDF_DETAILS) |
| { |
| dump_function_header (file, current_function_decl, flags); |
| fprintf (file, ";; \n%d basic blocks, %d edges, last basic block %d.\n\n", |
| n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun), |
| last_basic_block_for_fn (cfun)); |
| |
| brief_dump_cfg (file, flags); |
| fprintf (file, "\n"); |
| } |
| |
| if (flags & TDF_STATS) |
| dump_cfg_stats (file); |
| |
| dump_function_to_file (current_function_decl, file, flags | TDF_BLOCKS); |
| } |
| |
| |
| /* Dump CFG statistics on FILE. */ |
| |
| void |
| dump_cfg_stats (FILE *file) |
| { |
| static long max_num_merged_labels = 0; |
| unsigned long size, total = 0; |
| long num_edges; |
| basic_block bb; |
| const char * const fmt_str = "%-30s%-13s%12s\n"; |
| const char * const fmt_str_1 = "%-30s%13d" PRsa (11) "\n"; |
| const char * const fmt_str_2 = "%-30s%13ld" PRsa (11) "\n"; |
| const char * const fmt_str_3 = "%-43s" PRsa (11) "\n"; |
| const char *funcname = current_function_name (); |
| |
| fprintf (file, "\nCFG Statistics for %s\n\n", funcname); |
| |
| fprintf (file, "---------------------------------------------------------\n"); |
| fprintf (file, fmt_str, "", " Number of ", "Memory"); |
| fprintf (file, fmt_str, "", " instances ", "used "); |
| fprintf (file, "---------------------------------------------------------\n"); |
| |
| size = n_basic_blocks_for_fn (cfun) * sizeof (struct basic_block_def); |
| total += size; |
| fprintf (file, fmt_str_1, "Basic blocks", n_basic_blocks_for_fn (cfun), |
| SIZE_AMOUNT (size)); |
| |
| num_edges = 0; |
| FOR_EACH_BB_FN (bb, cfun) |
| num_edges += EDGE_COUNT (bb->succs); |
| size = num_edges * sizeof (class edge_def); |
| total += size; |
| fprintf (file, fmt_str_2, "Edges", num_edges, SIZE_AMOUNT (size)); |
| |
| fprintf (file, "---------------------------------------------------------\n"); |
| fprintf (file, fmt_str_3, "Total memory used by CFG data", |
| SIZE_AMOUNT (total)); |
| fprintf (file, "---------------------------------------------------------\n"); |
| fprintf (file, "\n"); |
| |
| if (cfg_stats.num_merged_labels > max_num_merged_labels) |
| max_num_merged_labels = cfg_stats.num_merged_labels; |
| |
| fprintf (file, "Coalesced label blocks: %ld (Max so far: %ld)\n", |
| cfg_stats.num_merged_labels, max_num_merged_labels); |
| |
| fprintf (file, "\n"); |
| } |
| |
| |
| /* Dump CFG statistics on stderr. Keep extern so that it's always |
| linked in the final executable. */ |
| |
| DEBUG_FUNCTION void |
| debug_cfg_stats (void) |
| { |
| dump_cfg_stats (stderr); |
| } |
| |
| /*--------------------------------------------------------------------------- |
| Miscellaneous helpers |
| ---------------------------------------------------------------------------*/ |
| |
| /* Return true if T, a GIMPLE_CALL, can make an abnormal transfer of control |
| flow. Transfers of control flow associated with EH are excluded. */ |
| |
| static bool |
| call_can_make_abnormal_goto (gimple *t) |
| { |
| /* If the function has no non-local labels, then a call cannot make an |
| abnormal transfer of control. */ |
| if (!cfun->has_nonlocal_label |
| && !cfun->calls_setjmp) |
| return false; |
| |
| /* Likewise if the call has no side effects. */ |
| if (!gimple_has_side_effects (t)) |
| return false; |
| |
| /* Likewise if the called function is leaf. */ |
| if (gimple_call_flags (t) & ECF_LEAF) |
| return false; |
| |
| return true; |
| } |
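| |
| /* Illustrative example: in a function that calls setjmp, an ordinary |
| call "foo ();" with side effects and without the leaf attribute may |
| return abnormally via longjmp, so it is considered able to make an |
| abnormal goto; a const/pure or leaf call is not. */ |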
| |
| |
| /* Return true if T can make an abnormal transfer of control flow. |
| Transfers of control flow associated with EH are excluded. */ |
| |
| bool |
| stmt_can_make_abnormal_goto (gimple *t) |
| { |
| if (computed_goto_p (t)) |
| return true; |
| if (is_gimple_call (t)) |
| return call_can_make_abnormal_goto (t); |
| return false; |
| } |
| |
| |
| /* Return true if T represents a stmt that always transfers control. */ |
| |
| bool |
| is_ctrl_stmt (gimple *t) |
| { |
| switch (gimple_code (t)) |
| { |
| case GIMPLE_COND: |
| case GIMPLE_SWITCH: |
| case GIMPLE_GOTO: |
| case GIMPLE_RETURN: |
| case GIMPLE_RESX: |
| return true; |
| default: |
| return false; |
| } |
| } |
| |
| |
| /* Return true if T is a statement that may alter the flow of control |
| (e.g., a call to a non-returning function). */ |
| |
| bool |
| is_ctrl_altering_stmt (gimple *t) |
| { |
| gcc_assert (t); |
| |
| switch (gimple_code (t)) |
| { |
| case GIMPLE_CALL: |
| /* Per stmt call flag indicates whether the call could alter |
| control flow. */ |
| if (gimple_call_ctrl_altering_p (t)) |
| return true; |
| break; |
| |
| case GIMPLE_EH_DISPATCH: |
| /* EH_DISPATCH branches to the individual catch handlers at |
| this level of a try or allowed-exceptions region. It can |
| fallthru to the next statement as well. */ |
| return true; |
| |
| case GIMPLE_ASM: |
| if (gimple_asm_nlabels (as_a <gasm *> (t)) > 0) |
| return true; |
| break; |
| |
| CASE_GIMPLE_OMP: |
| /* OpenMP directives alter control flow. */ |
| return true; |
| |
| case GIMPLE_TRANSACTION: |
| /* A transaction start alters control flow. */ |
| return true; |
| |
| default: |
| break; |
| } |
| |
| /* If a statement can throw, it alters control flow. */ |
| return stmt_can_throw_internal (cfun, t); |
| } |
| |
| |
| /* Return true if T is a simple local goto. */ |
| |
| bool |
| simple_goto_p (gimple *t) |
| { |
| return (gimple_code (t) == GIMPLE_GOTO |
| && TREE_CODE (gimple_goto_dest (t)) == LABEL_DECL); |
| } |
| |
| |
| /* Return true if STMT should start a new basic block. PREV_STMT is |
| the statement preceding STMT. It is used when STMT is a label or a |
| case label. Labels should only start a new basic block if their |
| previous statement wasn't a label. Otherwise, a sequence of labels |
| would generate unnecessary basic blocks that only contain a single |
| label. */ |
| |
| static inline bool |
| stmt_starts_bb_p (gimple *stmt, gimple *prev_stmt) |
| { |
| if (stmt == NULL) |
| return false; |
| |
| /* PREV_STMT is only set to a debug stmt if the debug stmt is before |
| any nondebug stmts in the block. We don't want to start another |
| block in this case: the debug stmt will already have started the |
| one STMT would start if we weren't outputting debug stmts. */ |
| if (prev_stmt && is_gimple_debug (prev_stmt)) |
| return false; |
| |
| /* Labels start a new basic block only if the preceding statement |
| wasn't a label of the same type. This prevents the creation of |
| consecutive blocks that have nothing but a single label. */ |
| if (glabel *label_stmt = dyn_cast <glabel *> (stmt)) |
| { |
| /* Nonlocal and computed GOTO targets always start a new block. */ |
| if (DECL_NONLOCAL (gimple_label_label (label_stmt)) |
| || FORCED_LABEL (gimple_label_label (label_stmt))) |
| return true; |
| |
| if (glabel *plabel = safe_dyn_cast <glabel *> (prev_stmt)) |
| { |
| if (DECL_NONLOCAL (gimple_label_label (plabel)) |
| || !DECL_ARTIFICIAL (gimple_label_label (plabel))) |
| return true; |
| |
| cfg_stats.num_merged_labels++; |
| return false; |
| } |
| else |
| return true; |
| } |
| else if (gimple_code (stmt) == GIMPLE_CALL) |
| { |
| if (gimple_call_flags (stmt) & ECF_RETURNS_TWICE) |
| /* setjmp acts similarly to a nonlocal GOTO target and thus should |
| start a new block. */ |
| return true; |
| if (gimple_call_internal_p (stmt, IFN_PHI) |
| && prev_stmt |
| && gimple_code (prev_stmt) != GIMPLE_LABEL |
| && (gimple_code (prev_stmt) != GIMPLE_CALL |
| || ! gimple_call_internal_p (prev_stmt, IFN_PHI))) |
| /* PHI nodes start a new block unless preceded by a label |
| or another PHI. */ |
| return true; |
| } |
| |
| return false; |
| } |
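| |
| /* Illustrative example: in the sequence |
| |
| <artificial L1>: |
| <artificial L2>: |
| x = 1; |
| |
| only L1 starts a new block; L2 is merged into it (and counted in |
| cfg_stats.num_merged_labels) because the preceding statement is an |
| artificial, local label and L2 is neither nonlocal nor forced. */ |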
| |
| |
| /* Return true if T should end a basic block. */ |
| |
| bool |
| stmt_ends_bb_p (gimple *t) |
| { |
| return is_ctrl_stmt (t) || is_ctrl_altering_stmt (t); |
| } |
| |
| /* Remove block annotations and other data structures. */ |
| |
| void |
| delete_tree_cfg_annotations (struct function *fn) |
| { |
| vec_free (label_to_block_map_for_fn (fn)); |
| } |
| |
| /* Return the virtual PHI node in BB, or NULL if there is none. */ |
| |
| gphi * |
| get_virtual_phi (basic_block bb) |
| { |
| for (gphi_iterator gsi = gsi_start_phis (bb); |
| !gsi_end_p (gsi); |
| gsi_next (&gsi)) |
| { |
| gphi *phi = gsi.phi (); |
| |
| if (virtual_operand_p (PHI_RESULT (phi))) |
| return phi; |
| } |
| |
| return NULL; |
| } |
| |
| /* Return the first statement in basic block BB. */ |
| |
| gimple * |
| first_stmt (basic_block bb) |
| { |
| gimple_stmt_iterator i = gsi_start_bb (bb); |
| gimple *stmt = NULL; |
| |
| while (!gsi_end_p (i) && is_gimple_debug ((stmt = gsi_stmt (i)))) |
| { |
| gsi_next (&i); |
| stmt = NULL; |
| } |
| return stmt; |
| } |
| |
| /* Return the first non-label statement in basic block BB. */ |
| |
| static gimple * |
| first_non_label_stmt (basic_block bb) |
| { |
| gimple_stmt_iterator i = gsi_start_bb (bb); |
| while (!gsi_end_p (i) && gimple_code (gsi_stmt (i)) == GIMPLE_LABEL) |
| gsi_next (&i); |
| return !gsi_end_p (i) ? gsi_stmt (i) : NULL; |
| } |
| |
| /* Return the last statement in basic block BB. */ |
| |
| gimple * |
| last_stmt (basic_block bb) |
| { |
| gimple_stmt_iterator i = gsi_last_bb (bb); |
| gimple *stmt = NULL; |
| |
| while (!gsi_end_p (i) && is_gimple_debug ((stmt = gsi_stmt (i)))) |
| { |
| gsi_prev (&i); |
| stmt = NULL; |
| } |
| return stmt; |
| } |
| |
| /* Return the last statement of an otherwise empty block. Return NULL |
| if the block is totally empty, or if it contains more than one |
| executable statement. */ |
| |
| gimple * |
| last_and_only_stmt (basic_block bb) |
| { |
| gimple_stmt_iterator i = gsi_last_nondebug_bb (bb); |
| gimple *last, *prev; |
| |
| if (gsi_end_p (i)) |
| return NULL; |
| |
| last = gsi_stmt (i); |
| gsi_prev_nondebug (&i); |
| if (gsi_end_p (i)) |
| return last; |
| |
| /* Empty statements should no longer appear in the instruction stream. |
| Everything that might have appeared before should be deleted by |
| remove_useless_stmts, and the optimizers should just gsi_remove |
| instead of smashing with build_empty_stmt. |
| |
| Thus the only thing that should appear here in a block containing |
| one executable statement is a label. */ |
| prev = gsi_stmt (i); |
| if (gimple_code (prev) == GIMPLE_LABEL) |
| return last; |
| else |
| return NULL; |
| } |
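| |
| /* For example (illustrative): for a block containing just |
| "L: return x_1;" this returns the GIMPLE_RETURN, since the only |
| other statement is a label; a block with two executable statements |
| yields NULL. */ |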
| |
| /* Reinstall those PHI arguments queued in OLD_EDGE to NEW_EDGE. */ |
| |
| static void |
| reinstall_phi_args (edge new_edge, edge old_edge) |
| { |
| edge_var_map *vm; |
| int i; |
| gphi_iterator phis; |
| |
| vec<edge_var_map> *v = redirect_edge_var_map_vector (old_edge); |
| if (!v) |
| return; |
| |
| for (i = 0, phis = gsi_start_phis (new_edge->dest); |
| v->iterate (i, &vm) && !gsi_end_p (phis); |
| i++, gsi_next (&phis)) |
| { |
| gphi *phi = phis.phi (); |
| tree result = redirect_edge_var_map_result (vm); |
| tree arg = redirect_edge_var_map_def (vm); |
| |
| gcc_assert (result == gimple_phi_result (phi)); |
| |
| add_phi_arg (phi, arg, new_edge, redirect_edge_var_map_location (vm)); |
| } |
| |
| redirect_edge_var_map_clear (old_edge); |
| } |
| |
| /* Returns the basic block after which the new basic block created |
| by splitting edge EDGE_IN should be placed. Tries to keep the new block |
| near its "logical" location. This is of most help to humans looking |
| at debugging dumps. */ |
| |
| basic_block |
| split_edge_bb_loc (edge edge_in) |
| { |
| basic_block dest = edge_in->dest; |
| basic_block dest_prev = dest->prev_bb; |
| |
| if (dest_prev) |
| { |
| edge e = find_edge (dest_prev, dest); |
| if (e && !(e->flags & EDGE_COMPLEX)) |
| return edge_in->src; |
| } |
| return dest_prev; |
| } |
| |
| /* Split a (typically critical) edge EDGE_IN. Return the new block. |
| Abort on abnormal edges. */ |
| |
| static basic_block |
| gimple_split_edge (edge edge_in) |
| { |
| basic_block new_bb, after_bb, dest; |
| edge new_edge, e; |
| |
| /* Abnormal edges cannot be split. */ |
| gcc_assert (!(edge_in->flags & EDGE_ABNORMAL)); |
| |
| dest = edge_in->dest; |
| |
| after_bb = split_edge_bb_loc (edge_in); |
| |
| new_bb = create_empty_bb (after_bb); |
| new_bb->count = edge_in->count (); |
| |
| e = redirect_edge_and_branch (edge_in, new_bb); |
| gcc_assert (e == edge_in); |
| |
| new_edge = make_single_succ_edge (new_bb, dest, EDGE_FALLTHRU); |
| reinstall_phi_args (new_edge, e); |
| |
| return new_bb; |
| } |
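| |
| /* Illustrative example: splitting the critical edge 3->5 when block 5 |
| also has the predecessor 4 creates a new empty block N, redirects |
| the branch in 3 to N, and adds the single fallthru edge N->5; any |
| PHI arguments queued on the redirected edge are reinstalled on |
| N->5 by reinstall_phi_args. */ |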
| |
| |
| /* Verify properties of the address expression T whose base should be |
| TREE_ADDRESSABLE if VERIFY_ADDRESSABLE is true. */ |
| |
| static bool |
| verify_address (tree t, bool verify_addressable) |
| { |
| bool old_constant; |
| bool old_side_effects; |
| bool new_constant; |
| bool new_side_effects; |
| |
| old_constant = TREE_CONSTANT (t); |
| old_side_effects = TREE_SIDE_EFFECTS (t); |
| |
| recompute_tree_invariant_for_addr_expr (t); |
| new_side_effects = TREE_SIDE_EFFECTS (t); |
| new_constant = TREE_CONSTANT (t); |
| |
| if (old_constant != new_constant) |
| { |
| error ("constant not recomputed when %<ADDR_EXPR%> changed"); |
| return true; |
| } |
| if (old_side_effects != new_side_effects) |
| { |
| error ("side effects not recomputed when %<ADDR_EXPR%> changed"); |
| return true; |
| } |
| |
| tree base = TREE_OPERAND (t, 0); |
| while (handled_component_p (base)) |
| base = TREE_OPERAND (base, 0); |
| |
| if (!(VAR_P (base) |
| || TREE_CODE (base) == PARM_DECL |
| || TREE_CODE (base) == RESULT_DECL)) |
| return false; |
| |
| if (DECL_GIMPLE_REG_P (base)) |
| { |
| error ("%<DECL_GIMPLE_REG_P%> set on a variable with address taken"); |
| return true; |
| } |
| |
| if (verify_addressable && !TREE_ADDRESSABLE (base)) |
| { |
| error ("address taken but %<TREE_ADDRESSABLE%> bit not set"); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| |
| /* Verify if EXPR is either a GIMPLE ID or a GIMPLE indirect reference. |
| Returns true if there is an error, otherwise false. */ |
| |
| static bool |
| verify_types_in_gimple_min_lval (tree expr) |
| { |
| tree op; |
| |
| if (is_gimple_id (expr)) |
| return false; |
| |
| if (TREE_CODE (expr) != TARGET_MEM_REF |
| && TREE_CODE (expr) != MEM_REF) |
| { |
| error ("invalid expression for min lvalue"); |
| return true; |
| } |
| |
| /* TARGET_MEM_REFs are strange beasts. */ |
| if (TREE_CODE (expr) == TARGET_MEM_REF) |
| return false; |
| |
| op = TREE_OPERAND (expr, 0); |
| if (!is_gimple_val (op)) |
| { |
| error ("invalid operand in indirect reference"); |
| debug_generic_stmt (op); |
| return true; |
| } |
| /* Memory references now generally can involve a value conversion. */ |
| |
| return false; |
| } |
| |
| /* Verify if EXPR is a valid GIMPLE reference expression. If |
| REQUIRE_LVALUE is true verifies it is an lvalue. Returns true |
| if there is an error, otherwise false. */ |
| |
| static bool |
| verify_types_in_gimple_reference (tree expr, bool require_lvalue) |
| { |
| const char *code_name = get_tree_code_name (TREE_CODE (expr)); |
| |
| if (TREE_CODE (expr) == REALPART_EXPR |
| || TREE_CODE (expr) == IMAGPART_EXPR |
| || TREE_CODE (expr) == BIT_FIELD_REF) |
| { |
| tree op = TREE_OPERAND (expr, 0); |
| if (!is_gimple_reg_type (TREE_TYPE (expr))) |
| { |
| error ("non-scalar %qs", code_name); |
| return true; |
| } |
| |
| if (TREE_CODE (expr) == BIT_FIELD_REF) |
| { |
| tree t1 = TREE_OPERAND (expr, 1); |
| tree t2 = TREE_OPERAND (expr, 2); |
| poly_uint64 size, bitpos; |
| if (!poly_int_tree_p (t1, &size) |
| || !poly_int_tree_p (t2, &bitpos) |
| || !types_compatible_p (bitsizetype, TREE_TYPE (t1)) |
| || !types_compatible_p (bitsizetype, TREE_TYPE (t2))) |
| { |
| error ("invalid position or size operand to %qs", code_name); |
| return true; |
| } |
| if (INTEGRAL_TYPE_P (TREE_TYPE (expr)) |
| && maybe_ne (TYPE_PRECISION (TREE_TYPE (expr)), size)) |
| { |
| error ("integral result type precision does not match " |
| "field size of %qs", code_name); |
| return true; |
| } |
| else if (!INTEGRAL_TYPE_P (TREE_TYPE (expr)) |
| && TYPE_MODE (TREE_TYPE (expr)) != BLKmode |
| && maybe_ne (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (expr))), |
| size)) |
| { |
| error ("mode size of non-integral result does not " |
| "match field size of %qs", |
| code_name); |
| return true; |
| } |
| if (INTEGRAL_TYPE_P (TREE_TYPE (op)) |
| && !type_has_mode_precision_p (TREE_TYPE (op))) |
| { |
| error ("%qs of non-mode-precision operand", code_name); |
| return true; |
| } |
| if (!AGGREGATE_TYPE_P (TREE_TYPE (op)) |
| && maybe_gt (size + bitpos, |
| tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (op))))) |
| { |
| error ("position plus size exceeds size of referenced object in " |
| "%qs", code_name); |
| return true; |
| } |
| } |
| |
| if ((TREE_CODE (expr) == REALPART_EXPR |
| || TREE_CODE (expr) == IMAGPART_EXPR) |
| && !useless_type_conversion_p (TREE_TYPE (expr), |
| TREE_TYPE (TREE_TYPE (op)))) |
| { |
| error ("type mismatch in %qs reference", code_name); |
| debug_generic_stmt (TREE_TYPE (expr)); |
| debug_generic_stmt (TREE_TYPE (TREE_TYPE (op))); |
| return true; |
| } |
| expr = op; |
| } |
| |
| while (handled_component_p (expr)) |
| { |
| code_name = get_tree_code_name (TREE_CODE (expr)); |
| |
| if (TREE_CODE (expr) == REALPART_EXPR |
| || TREE_CODE (expr) == IMAGPART_EXPR |
| || TREE_CODE (expr) == BIT_FIELD_REF) |
| { |
| error ("non-top-level %qs", code_name); |
| return true; |
| } |
| |
| tree op = TREE_OPERAND (expr, 0); |
| |
| if (TREE_CODE (expr) == ARRAY_REF |
| || TREE_CODE (expr) == ARRAY_RANGE_REF) |
| { |
| if (!is_gimple_val (TREE_OPERAND (expr, 1)) |
| || (TREE_OPERAND (expr, 2) |
| && !is_gimple_val (TREE_OPERAND (expr, 2))) |
| || (TREE_OPERAND (expr, 3) |
| && !is_gimple_val (TREE_OPERAND (expr, 3)))) |
| { |
| error ("invalid operands to %qs", code_name); |
| debug_generic_stmt (expr); |
| return true; |
| } |
| } |
| |
| /* Verify if the reference array element types are compatible. */ |
| if (TREE_CODE (expr) == ARRAY_REF |
| && !useless_type_conversion_p (TREE_TYPE (expr), |
| TREE_TYPE (TREE_TYPE (op)))) |
| { |
| error ("type mismatch in %qs", code_name); |
| debug_generic_stmt (TREE_TYPE (expr)); |
| debug_generic_stmt (TREE_TYPE (TREE_TYPE (op))); |
| return true; |
| } |
| if (TREE_CODE (expr) == ARRAY_RANGE_REF |
| && !useless_type_conversion_p (TREE_TYPE (TREE_TYPE (expr)), |
| TREE_TYPE (TREE_TYPE (op)))) |
| { |
| error ("type mismatch in %qs", code_name); |
| debug_generic_stmt (TREE_TYPE (TREE_TYPE (expr))); |
| debug_generic_stmt (TREE_TYPE (TREE_TYPE (op))); |
| return true; |
| } |
| |
| if (TREE_CODE (expr) == COMPONENT_REF) |
| { |
| if (TREE_OPERAND (expr, 2) |
| && !is_gimple_val (TREE_OPERAND (expr, 2))) |
| { |
| error ("invalid %qs offset operator", code_name); |
| return true; |
| } |
| if (!useless_type_conversion_p (TREE_TYPE (expr), |
| TREE_TYPE (TREE_OPERAND (expr, 1)))) |
| { |
| error ("type mismatch in %qs", code_name); |
| debug_generic_stmt (TREE_TYPE (expr)); |
| debug_generic_stmt (TREE_TYPE (TREE_OPERAND (expr, 1))); |
| return true; |
| } |
| } |
| |
| if (TREE_CODE (expr) == VIEW_CONVERT_EXPR) |
| { |
| /* For VIEW_CONVERT_EXPRs which are allowed here too, we only check |
| that their operand is not an SSA name or an invariant when |
| requiring an lvalue (this usually means there is an SRA or IPA-SRA |
| bug). Otherwise there is nothing to verify, gross mismatches at |
| most invoke undefined behavior. */ |
| if (require_lvalue |
| && (TREE_CODE (op) == SSA_NAME |
| || is_gimple_min_invariant (op))) |
| { |
| error ("conversion of %qs on the left hand side of %qs", |
| get_tree_code_name (TREE_CODE (op)), code_name); |
| debug_generic_stmt (expr); |
| return true; |
| } |
| else if (TREE_CODE (op) == SSA_NAME |
| && TYPE_SIZE (TREE_TYPE (expr)) != TYPE_SIZE (TREE_TYPE (op))) |
| { |
| error ("conversion of register to a different size in %qs", |
| code_name); |
| debug_generic_stmt (expr); |
| return true; |
| } |
| else if (!handled_component_p (op)) |
| return false; |
| } |
| |
| expr = op; |
| } |
| |
| code_name = get_tree_code_name (TREE_CODE (expr)); |
| |
| if (TREE_CODE (expr) == MEM_REF) |
| { |
| if (!is_gimple_mem_ref_addr (TREE_OPERAND (expr, 0)) |
| || (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR |
| && verify_address (TREE_OPERAND (expr, 0), false))) |
| { |
| error ("invalid address operand in %qs", code_name); |
| debug_generic_stmt (expr); |
| return true; |
| } |
| if (!poly_int_tree_p (TREE_OPERAND (expr, 1)) |
| || !POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1)))) |
| { |
| error ("invalid offset operand in %qs", code_name); |
| debug_generic_stmt (expr); |
| return true; |
| } |
| if (MR_DEPENDENCE_CLIQUE (expr) != 0 |
| && MR_DEPENDENCE_CLIQUE (expr) > cfun->last_clique) |
| { |
| error ("invalid clique in %qs", code_name); |
| debug_generic_stmt (expr); |
| return true; |
| } |
| } |
| else if (TREE_CODE (expr) == TARGET_MEM_REF) |
| { |
| if (!TMR_BASE (expr) |
| || !is_gimple_mem_ref_addr (TMR_BASE (expr)) |
| || (TREE_CODE (TMR_BASE (expr)) == ADDR_EXPR |
| && verify_address (TMR_BASE (expr), false))) |
| { |
| error ("invalid address operand in %qs", code_name); |
| return true; |
| } |
| if (!TMR_OFFSET (expr) |
| || !poly_int_tree_p (TMR_OFFSET (expr)) |
| || !POINTER_TYPE_P (TREE_TYPE (TMR_OFFSET (expr)))) |
| { |
| error ("invalid offset operand in %qs", code_name); |
| debug_generic_stmt (expr); |