| /* Control flow functions for trees. |
| Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 |
| Free Software Foundation, Inc. |
| Contributed by Diego Novillo <dnovillo@redhat.com> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "tm.h" |
| #include "tree.h" |
| #include "rtl.h" |
| #include "tm_p.h" |
| #include "hard-reg-set.h" |
| #include "basic-block.h" |
| #include "output.h" |
| #include "flags.h" |
| #include "function.h" |
| #include "expr.h" |
| #include "ggc.h" |
| #include "langhooks.h" |
| #include "diagnostic.h" |
| #include "tree-flow.h" |
| #include "timevar.h" |
| #include "tree-dump.h" |
| #include "tree-pass.h" |
| #include "toplev.h" |
| #include "except.h" |
| #include "cfgloop.h" |
| #include "cfglayout.h" |
| #include "tree-ssa-propagate.h" |
| #include "value-prof.h" |
| #include "pointer-set.h" |
| #include "tree-inline.h" |
| |
| /* This file contains functions for building the Control Flow Graph (CFG) |
| for a function tree. */ |
| |
| /* Local declarations. */ |
| |
| /* Initial capacity for the basic block array. */ |
| static const int initial_cfg_capacity = 20; |
| |
| /* This hash table allows us to efficiently look up all CASE_LABEL_EXPRs |
| which use a particular edge. The CASE_LABEL_EXPRs are chained together |
| via their TREE_CHAIN field, which we clear after we're done with the |
| hash table to prevent problems with duplication of GIMPLE_SWITCHes. |
| |
| Access to this list of CASE_LABEL_EXPRs allows us to efficiently |
| update the case vector in response to edge redirections. |
| |
| Right now this table is set up and torn down at key points in the |
| compilation process. It would be nice if we could make the table |
| more persistent. The key is getting notification of changes to |
| the CFG (particularly edge removal, creation and redirection). */ |
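| |
| /* For example (an illustrative sketch, not actual compiler output): |
| given |
| |
| switch (x) { case 1: case 2: goto L1; default: goto L2; } |
| |
| the edge reaching L1's block maps to the chain CASE_2 -> CASE_1 |
| (linked through TREE_CHAIN), so redirecting that edge updates both |
| cases without rescanning the whole case vector. */ |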
| |
| static struct pointer_map_t *edge_to_cases; |
| |
| /* CFG statistics. */ |
| struct cfg_stats_d |
| { |
| long num_merged_labels; |
| }; |
| |
| static struct cfg_stats_d cfg_stats; |
| |
| /* True if we found a computed goto while building basic blocks. */ |
| static bool found_computed_goto; |
| |
| /* Basic blocks and flowgraphs. */ |
| static void make_blocks (gimple_seq); |
| static void factor_computed_gotos (void); |
| |
| /* Edges. */ |
| static void make_edges (void); |
| static void make_cond_expr_edges (basic_block); |
| static void make_gimple_switch_edges (basic_block); |
| static void make_goto_expr_edges (basic_block); |
| static edge gimple_redirect_edge_and_branch (edge, basic_block); |
| static edge gimple_try_redirect_by_replacing_jump (edge, basic_block); |
| static unsigned int split_critical_edges (void); |
| |
| /* Various helpers. */ |
| static inline bool stmt_starts_bb_p (gimple, gimple); |
| static int gimple_verify_flow_info (void); |
| static void gimple_make_forwarder_block (edge); |
| static void gimple_cfg2vcg (FILE *); |
| |
| /* Flowgraph optimization and cleanup. */ |
| static void gimple_merge_blocks (basic_block, basic_block); |
| static bool gimple_can_merge_blocks_p (basic_block, basic_block); |
| static void remove_bb (basic_block); |
| static edge find_taken_edge_computed_goto (basic_block, tree); |
| static edge find_taken_edge_cond_expr (basic_block, tree); |
| static edge find_taken_edge_switch_expr (basic_block, tree); |
| static tree find_case_label_for_value (gimple, tree); |
| |
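| /* Build an empty CFG for function FN: just the fixed ENTRY_BLOCK and |
| EXIT_BLOCK, linked to each other, plus the label-to-block map. */ |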
| void |
| init_empty_tree_cfg_for_function (struct function *fn) |
| { |
| /* Initialize the basic block array. */ |
| init_flow (fn); |
| profile_status_for_function (fn) = PROFILE_ABSENT; |
| n_basic_blocks_for_function (fn) = NUM_FIXED_BLOCKS; |
| last_basic_block_for_function (fn) = NUM_FIXED_BLOCKS; |
| basic_block_info_for_function (fn) |
| = VEC_alloc (basic_block, gc, initial_cfg_capacity); |
| VEC_safe_grow_cleared (basic_block, gc, |
| basic_block_info_for_function (fn), |
| initial_cfg_capacity); |
| |
| /* Build a mapping of labels to their associated blocks. */ |
| label_to_block_map_for_function (fn) |
| = VEC_alloc (basic_block, gc, initial_cfg_capacity); |
| VEC_safe_grow_cleared (basic_block, gc, |
| label_to_block_map_for_function (fn), |
| initial_cfg_capacity); |
| |
| SET_BASIC_BLOCK_FOR_FUNCTION (fn, ENTRY_BLOCK, |
| ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)); |
| SET_BASIC_BLOCK_FOR_FUNCTION (fn, EXIT_BLOCK, |
| EXIT_BLOCK_PTR_FOR_FUNCTION (fn)); |
| |
| ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->next_bb |
| = EXIT_BLOCK_PTR_FOR_FUNCTION (fn); |
| EXIT_BLOCK_PTR_FOR_FUNCTION (fn)->prev_bb |
| = ENTRY_BLOCK_PTR_FOR_FUNCTION (fn); |
| } |
| |
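| /* Build an empty CFG for the current function (cfun). */ |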
| void |
| init_empty_tree_cfg (void) |
| { |
| init_empty_tree_cfg_for_function (cfun); |
| } |
| |
| /*--------------------------------------------------------------------------- |
| Create basic blocks |
| ---------------------------------------------------------------------------*/ |
| |
| /* Entry point to the CFG builder for trees. SEQ is the sequence of |
| statements to be added to the flowgraph. */ |
| |
| static void |
| build_gimple_cfg (gimple_seq seq) |
| { |
| /* Register specific gimple functions. */ |
| gimple_register_cfg_hooks (); |
| |
| memset ((void *) &cfg_stats, 0, sizeof (cfg_stats)); |
| |
| init_empty_tree_cfg (); |
| |
| found_computed_goto = false; |
| make_blocks (seq); |
| |
| /* Computed gotos are hell to deal with, especially if there are |
| lots of them with a large number of destinations. So we factor |
| them to a common computed goto location before we build the |
| edge list. After we convert back to normal form, we will un-factor |
| the computed gotos since factoring introduces an unwanted jump. */ |
| if (found_computed_goto) |
| factor_computed_gotos (); |
| |
| /* Make sure there is always at least one block, even if it's empty. */ |
| if (n_basic_blocks == NUM_FIXED_BLOCKS) |
| create_empty_bb (ENTRY_BLOCK_PTR); |
| |
| /* Adjust the size of the array. */ |
| if (VEC_length (basic_block, basic_block_info) < (size_t) n_basic_blocks) |
| VEC_safe_grow_cleared (basic_block, gc, basic_block_info, n_basic_blocks); |
| |
| /* To speed up statement iterator walks, we first purge dead labels. */ |
| cleanup_dead_labels (); |
| |
| /* Group case nodes to reduce the number of edges. |
| We do this after cleaning up dead labels because otherwise we miss |
| a lot of obvious case merging opportunities. */ |
| group_case_labels (); |
| |
| /* Create the edges of the flowgraph. */ |
| make_edges (); |
| cleanup_dead_labels (); |
| |
| /* Debugging dumps. */ |
| |
| /* Write the flowgraph to a VCG file. */ |
| { |
| int local_dump_flags; |
| FILE *vcg_file = dump_begin (TDI_vcg, &local_dump_flags); |
| if (vcg_file) |
| { |
| gimple_cfg2vcg (vcg_file); |
| dump_end (TDI_vcg, vcg_file); |
| } |
| } |
| |
| #ifdef ENABLE_CHECKING |
| verify_stmts (); |
| #endif |
| } |
| |
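| /* Pass entry point: build the CFG for the current function's GIMPLE |
| body and discard the now-redundant body sequence. */ |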
| static unsigned int |
| execute_build_cfg (void) |
| { |
| gimple_seq body = gimple_body (current_function_decl); |
| |
| build_gimple_cfg (body); |
| gimple_set_body (current_function_decl, NULL); |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Scope blocks:\n"); |
| dump_scope_blocks (dump_file, dump_flags); |
| } |
| return 0; |
| } |
| |
| struct gimple_opt_pass pass_build_cfg = |
| { |
| { |
| GIMPLE_PASS, |
| "cfg", /* name */ |
| NULL, /* gate */ |
| execute_build_cfg, /* execute */ |
| NULL, /* sub */ |
| NULL, /* next */ |
| 0, /* static_pass_number */ |
| TV_TREE_CFG, /* tv_id */ |
| PROP_gimple_leh, /* properties_required */ |
| PROP_cfg, /* properties_provided */ |
| 0, /* properties_destroyed */ |
| 0, /* todo_flags_start */ |
| TODO_verify_stmts | TODO_cleanup_cfg |
| | TODO_dump_func /* todo_flags_finish */ |
| } |
| }; |
| |
| |
| /* Return true if T is a computed goto. */ |
| |
| static bool |
| computed_goto_p (gimple t) |
| { |
| return (gimple_code (t) == GIMPLE_GOTO |
| && TREE_CODE (gimple_goto_dest (t)) != LABEL_DECL); |
| } |
| |
| |
| /* Search the CFG for any computed gotos. If found, factor them to a |
| common computed goto site. Also record the location of that site so |
| that we can un-factor the gotos after we have converted back to |
| normal form. */ |
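| |
| /* As a rough illustration (a sketch, not actual dump output), two |
| blocks ending in |
| |
| goto *p1; goto *p2; |
| |
| become |
| |
| gotovar = p1; gotovar = p2; |
| goto <factored>; goto <factored>; |
| |
| with a single factored block |
| |
| <factored>: goto *gotovar; |
| |
| that alone carries edges to every possible destination label. */ |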
| |
| static void |
| factor_computed_gotos (void) |
| { |
| basic_block bb; |
| tree factored_label_decl = NULL; |
| tree var = NULL; |
| gimple factored_computed_goto_label = NULL; |
| gimple factored_computed_goto = NULL; |
| |
| /* We know there are one or more computed gotos in this function. |
| Examine the last statement in each basic block to see if the block |
| ends with a computed goto. */ |
| |
| FOR_EACH_BB (bb) |
| { |
| gimple_stmt_iterator gsi = gsi_last_bb (bb); |
| gimple last; |
| |
| if (gsi_end_p (gsi)) |
| continue; |
| |
| last = gsi_stmt (gsi); |
| |
| /* Ignore the computed goto we create when we factor the original |
| computed gotos. */ |
| if (last == factored_computed_goto) |
| continue; |
| |
| /* If the last statement is a computed goto, factor it. */ |
| if (computed_goto_p (last)) |
| { |
| gimple assignment; |
| |
| /* The first time we find a computed goto we need to create |
| the factored goto block and the variable each original |
| computed goto will use for its goto destination. */ |
| if (!factored_computed_goto) |
| { |
| basic_block new_bb = create_empty_bb (bb); |
| gimple_stmt_iterator new_gsi = gsi_start_bb (new_bb); |
| |
| /* Create the destination of the factored goto. Each original |
| computed goto will put its desired destination into this |
| variable and jump to the label we create immediately |
| below. */ |
| var = create_tmp_var (ptr_type_node, "gotovar"); |
| |
| /* Build a label for the new block which will contain the |
| factored computed goto. */ |
| factored_label_decl = create_artificial_label (); |
| factored_computed_goto_label |
| = gimple_build_label (factored_label_decl); |
| gsi_insert_after (&new_gsi, factored_computed_goto_label, |
| GSI_NEW_STMT); |
| |
| /* Build our new computed goto. */ |
| factored_computed_goto = gimple_build_goto (var); |
| gsi_insert_after (&new_gsi, factored_computed_goto, GSI_NEW_STMT); |
| } |
| |
| /* Copy the original computed goto's destination into VAR. */ |
| assignment = gimple_build_assign (var, gimple_goto_dest (last)); |
| gsi_insert_before (&gsi, assignment, GSI_SAME_STMT); |
| |
| /* And re-vector the computed goto to the new destination. */ |
| gimple_goto_set_dest (last, factored_label_decl); |
| } |
| } |
| } |
| |
| |
| /* Build a flowgraph for the sequence of stmts SEQ. */ |
| |
| static void |
| make_blocks (gimple_seq seq) |
| { |
| gimple_stmt_iterator i = gsi_start (seq); |
| gimple stmt = NULL; |
| bool start_new_block = true; |
| bool first_stmt_of_seq = true; |
| basic_block bb = ENTRY_BLOCK_PTR; |
| |
| while (!gsi_end_p (i)) |
| { |
| gimple prev_stmt; |
| |
| prev_stmt = stmt; |
| stmt = gsi_stmt (i); |
| |
| /* If the statement starts a new basic block or if we have determined |
| in a previous pass that we need to create a new block for STMT, do |
| so now. */ |
| if (start_new_block || stmt_starts_bb_p (stmt, prev_stmt)) |
| { |
| if (!first_stmt_of_seq) |
| seq = gsi_split_seq_before (&i); |
| bb = create_basic_block (seq, NULL, bb); |
| start_new_block = false; |
| } |
| |
| /* Now add STMT to BB and create the subgraphs for special statement |
| codes. */ |
| gimple_set_bb (stmt, bb); |
| |
| if (computed_goto_p (stmt)) |
| found_computed_goto = true; |
| |
| /* If STMT is a basic block terminator, set START_NEW_BLOCK for the |
| next iteration. */ |
| if (stmt_ends_bb_p (stmt)) |
| start_new_block = true; |
| |
| gsi_next (&i); |
| first_stmt_of_seq = false; |
| } |
| } |
| |
| |
| /* Create and return a new empty basic block after bb AFTER. */ |
| |
| static basic_block |
| create_bb (void *h, void *e, basic_block after) |
| { |
| basic_block bb; |
| |
| gcc_assert (!e); |
| |
| /* Create and initialize a new basic block. Since alloc_block uses |
| ggc_alloc_cleared to allocate a basic block, we do not have to |
| clear the newly allocated basic block here. */ |
| bb = alloc_block (); |
| |
| bb->index = last_basic_block; |
| bb->flags = BB_NEW; |
| bb->il.gimple = GGC_CNEW (struct gimple_bb_info); |
| set_bb_seq (bb, h ? (gimple_seq) h : gimple_seq_alloc ()); |
| |
| /* Add the new block to the linked list of blocks. */ |
| link_block (bb, after); |
| |
| /* Grow the basic block array if needed. */ |
| if ((size_t) last_basic_block == VEC_length (basic_block, basic_block_info)) |
| { |
| size_t new_size = last_basic_block + (last_basic_block + 3) / 4; |
| VEC_safe_grow_cleared (basic_block, gc, basic_block_info, new_size); |
| } |
| |
| /* Add the newly created block to the array. */ |
| SET_BASIC_BLOCK (last_basic_block, bb); |
| |
| n_basic_blocks++; |
| last_basic_block++; |
| |
| return bb; |
| } |
| |
| |
| /*--------------------------------------------------------------------------- |
| Edge creation |
| ---------------------------------------------------------------------------*/ |
| |
| /* Fold the condition of each GIMPLE_COND statement. */ |
| |
| void |
| fold_cond_expr_cond (void) |
| { |
| basic_block bb; |
| |
| FOR_EACH_BB (bb) |
| { |
| gimple stmt = last_stmt (bb); |
| |
| if (stmt && gimple_code (stmt) == GIMPLE_COND) |
| { |
| tree cond; |
| bool zerop, onep; |
| |
| fold_defer_overflow_warnings (); |
| cond = fold_binary (gimple_cond_code (stmt), boolean_type_node, |
| gimple_cond_lhs (stmt), gimple_cond_rhs (stmt)); |
| if (cond) |
| { |
| zerop = integer_zerop (cond); |
| onep = integer_onep (cond); |
| } |
| else |
| zerop = onep = false; |
| |
| fold_undefer_overflow_warnings (zerop || onep, |
| stmt, |
| WARN_STRICT_OVERFLOW_CONDITIONAL); |
| if (zerop) |
| gimple_cond_make_false (stmt); |
| else if (onep) |
| gimple_cond_make_true (stmt); |
| } |
| } |
| } |
| |
| /* Join all the blocks in the flowgraph. */ |
| |
| static void |
| make_edges (void) |
| { |
| basic_block bb; |
| struct omp_region *cur_region = NULL; |
| |
| /* Create an edge from entry to the first block with executable |
| statements in it. */ |
| make_edge (ENTRY_BLOCK_PTR, BASIC_BLOCK (NUM_FIXED_BLOCKS), EDGE_FALLTHRU); |
| |
| /* Traverse the basic block array placing edges. */ |
| FOR_EACH_BB (bb) |
| { |
| gimple last = last_stmt (bb); |
| bool fallthru; |
| |
| if (last) |
| { |
| enum gimple_code code = gimple_code (last); |
| switch (code) |
| { |
| case GIMPLE_GOTO: |
| make_goto_expr_edges (bb); |
| fallthru = false; |
| break; |
| case GIMPLE_RETURN: |
| make_edge (bb, EXIT_BLOCK_PTR, 0); |
| fallthru = false; |
| break; |
| case GIMPLE_COND: |
| make_cond_expr_edges (bb); |
| fallthru = false; |
| break; |
| case GIMPLE_SWITCH: |
| make_gimple_switch_edges (bb); |
| fallthru = false; |
| break; |
| case GIMPLE_RESX: |
| make_eh_edges (last); |
| fallthru = false; |
| break; |
| |
| case GIMPLE_CALL: |
| /* If this function receives a nonlocal goto, then we need to |
| make edges from this call site to all the nonlocal goto |
| handlers. */ |
| if (stmt_can_make_abnormal_goto (last)) |
| make_abnormal_goto_edges (bb, true); |
| |
| /* If this statement has reachable exception handlers, then |
| create abnormal edges to them. */ |
| make_eh_edges (last); |
| |
| /* Some calls are known not to return. */ |
| fallthru = !(gimple_call_flags (last) & ECF_NORETURN); |
| break; |
| |
| case GIMPLE_ASSIGN: |
| /* A GIMPLE_ASSIGN may throw internally and thus be considered |
| control-altering. */ |
| if (is_ctrl_altering_stmt (last)) |
| { |
| make_eh_edges (last); |
| } |
| fallthru = true; |
| break; |
| |
| case GIMPLE_OMP_PARALLEL: |
| case GIMPLE_OMP_TASK: |
| case GIMPLE_OMP_FOR: |
| case GIMPLE_OMP_SINGLE: |
| case GIMPLE_OMP_MASTER: |
| case GIMPLE_OMP_ORDERED: |
| case GIMPLE_OMP_CRITICAL: |
| case GIMPLE_OMP_SECTION: |
| cur_region = new_omp_region (bb, code, cur_region); |
| fallthru = true; |
| break; |
| |
| case GIMPLE_OMP_SECTIONS: |
| cur_region = new_omp_region (bb, code, cur_region); |
| fallthru = true; |
| break; |
| |
| case GIMPLE_OMP_SECTIONS_SWITCH: |
| fallthru = false; |
| break; |
| |
| case GIMPLE_OMP_ATOMIC_LOAD: |
| case GIMPLE_OMP_ATOMIC_STORE: |
| fallthru = true; |
| break; |
| |
| case GIMPLE_OMP_RETURN: |
| /* In the case of a GIMPLE_OMP_SECTION, the edge will go |
| somewhere other than the next block. This will be |
| created later. */ |
| cur_region->exit = bb; |
| fallthru = cur_region->type != GIMPLE_OMP_SECTION; |
| cur_region = cur_region->outer; |
| break; |
| |
| case GIMPLE_OMP_CONTINUE: |
| cur_region->cont = bb; |
| switch (cur_region->type) |
| { |
| case GIMPLE_OMP_FOR: |
| /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE |
| successor edges as abnormal to prevent splitting |
| them. */ |
| single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL; |
| /* Make the loopback edge. */ |
| make_edge (bb, single_succ (cur_region->entry), |
| EDGE_ABNORMAL); |
| |
| /* Create an edge from GIMPLE_OMP_FOR to exit, which |
| corresponds to the case that the body of the loop |
| is not executed at all. */ |
| make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL); |
| make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL); |
| fallthru = false; |
| break; |
| |
| case GIMPLE_OMP_SECTIONS: |
| /* Wire up the edges into and out of the nested sections. */ |
| { |
| basic_block switch_bb = single_succ (cur_region->entry); |
| |
| struct omp_region *i; |
| for (i = cur_region->inner; i ; i = i->next) |
| { |
| gcc_assert (i->type == GIMPLE_OMP_SECTION); |
| make_edge (switch_bb, i->entry, 0); |
| make_edge (i->exit, bb, EDGE_FALLTHRU); |
| } |
| |
| /* Make the loopback edge to the block with |
| GIMPLE_OMP_SECTIONS_SWITCH. */ |
| make_edge (bb, switch_bb, 0); |
| |
| /* Make the edge from the switch to exit. */ |
| make_edge (switch_bb, bb->next_bb, 0); |
| fallthru = false; |
| } |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| break; |
| |
| default: |
| gcc_assert (!stmt_ends_bb_p (last)); |
| fallthru = true; |
| } |
| } |
| else |
| fallthru = true; |
| |
| if (fallthru) |
| make_edge (bb, bb->next_bb, EDGE_FALLTHRU); |
| } |
| |
| if (root_omp_region) |
| free_omp_regions (); |
| |
| /* Fold the condition of each GIMPLE_COND statement. */ |
| fold_cond_expr_cond (); |
| } |
| |
| |
| /* Create the edges for a GIMPLE_COND starting at block BB. */ |
| |
| static void |
| make_cond_expr_edges (basic_block bb) |
| { |
| gimple entry = last_stmt (bb); |
| gimple then_stmt, else_stmt; |
| basic_block then_bb, else_bb; |
| tree then_label, else_label; |
| edge e; |
| |
| gcc_assert (entry); |
| gcc_assert (gimple_code (entry) == GIMPLE_COND); |
| |
| /* Entry basic blocks for each component. */ |
| then_label = gimple_cond_true_label (entry); |
| else_label = gimple_cond_false_label (entry); |
| then_bb = label_to_block (then_label); |
| else_bb = label_to_block (else_label); |
| then_stmt = first_stmt (then_bb); |
| else_stmt = first_stmt (else_bb); |
| |
| e = make_edge (bb, then_bb, EDGE_TRUE_VALUE); |
| e->goto_locus = gimple_location (then_stmt); |
| if (e->goto_locus) |
| e->goto_block = gimple_block (then_stmt); |
| e = make_edge (bb, else_bb, EDGE_FALSE_VALUE); |
| if (e) |
| { |
| e->goto_locus = gimple_location (else_stmt); |
| if (e->goto_locus) |
| e->goto_block = gimple_block (else_stmt); |
| } |
| |
| /* We do not need the labels anymore. */ |
| gimple_cond_set_true_label (entry, NULL_TREE); |
| gimple_cond_set_false_label (entry, NULL_TREE); |
| } |
| |
| |
| /* Called for each element in the edge-to-cases hash table as we |
| delete that table. |
| |
| Clear all the TREE_CHAINs to prevent problems with copying of |
| SWITCH_EXPRs and structure sharing rules, then free the hash table |
| element. */ |
| |
| static bool |
| edge_to_cases_cleanup (const void *key ATTRIBUTE_UNUSED, void **value, |
| void *data ATTRIBUTE_UNUSED) |
| { |
| tree t, next; |
| |
| for (t = (tree) *value; t; t = next) |
| { |
| next = TREE_CHAIN (t); |
| TREE_CHAIN (t) = NULL; |
| } |
| |
| *value = NULL; |
| return false; |
| } |
| |
| /* Start recording information mapping edges to case labels. */ |
| |
| void |
| start_recording_case_labels (void) |
| { |
| gcc_assert (edge_to_cases == NULL); |
| edge_to_cases = pointer_map_create (); |
| } |
| |
| /* Return true if we are recording information for case labels. */ |
| |
| static bool |
| recording_case_labels_p (void) |
| { |
| return (edge_to_cases != NULL); |
| } |
| |
| /* Stop recording information mapping edges to case labels and |
| remove any information we have recorded. */ |
| void |
| end_recording_case_labels (void) |
| { |
| pointer_map_traverse (edge_to_cases, edge_to_cases_cleanup, NULL); |
| pointer_map_destroy (edge_to_cases); |
| edge_to_cases = NULL; |
| } |
| |
| /* If we are inside a {start,end}_recording_case_labels block, then return |
| a chain of CASE_LABEL_EXPRs from T which reference E. |
| |
| Otherwise return NULL. */ |
| |
| static tree |
| get_cases_for_edge (edge e, gimple t) |
| { |
| void **slot; |
| size_t i, n; |
| |
| /* If we are not recording cases, then we do not have CASE_LABEL_EXPR |
| chains available. Return NULL so the caller can detect this case. */ |
| if (!recording_case_labels_p ()) |
| return NULL; |
| |
| slot = pointer_map_contains (edge_to_cases, e); |
| if (slot) |
| return (tree) *slot; |
| |
| /* If we did not find E in the hash table, then this must be the first |
| time we have been queried for information about E & T. Add all the |
| elements from T to the hash table then perform the query again. */ |
| |
| n = gimple_switch_num_labels (t); |
| for (i = 0; i < n; i++) |
| { |
| tree elt = gimple_switch_label (t, i); |
| tree lab = CASE_LABEL (elt); |
| basic_block label_bb = label_to_block (lab); |
| edge this_edge = find_edge (e->src, label_bb); |
| |
| /* Add it to the chain of CASE_LABEL_EXPRs referencing E, or create |
| a new chain. */ |
| slot = pointer_map_insert (edge_to_cases, this_edge); |
| TREE_CHAIN (elt) = (tree) *slot; |
| *slot = elt; |
| } |
| |
| return (tree) *pointer_map_contains (edge_to_cases, e); |
| } |
| |
| /* Create the edges for a GIMPLE_SWITCH starting at block BB. */ |
| |
| static void |
| make_gimple_switch_edges (basic_block bb) |
| { |
| gimple entry = last_stmt (bb); |
| size_t i, n; |
| |
| n = gimple_switch_num_labels (entry); |
| |
| for (i = 0; i < n; ++i) |
| { |
| tree lab = CASE_LABEL (gimple_switch_label (entry, i)); |
| basic_block label_bb = label_to_block (lab); |
| make_edge (bb, label_bb, 0); |
| } |
| } |
| |
| |
| /* Return the basic block holding label DEST. */ |
| |
| basic_block |
| label_to_block_fn (struct function *ifun, tree dest) |
| { |
| int uid = LABEL_DECL_UID (dest); |
| |
| /* We would die hard if faced with an undefined label. Emit a label to |
| the very first basic block. This will hopefully make even the dataflow |
| and undefined variable warnings come out right. */ |
| if ((errorcount || sorrycount) && uid < 0) |
| { |
| gimple_stmt_iterator gsi = gsi_start_bb (BASIC_BLOCK (NUM_FIXED_BLOCKS)); |
| gimple stmt; |
| |
| stmt = gimple_build_label (dest); |
| gsi_insert_before (&gsi, stmt, GSI_NEW_STMT); |
| uid = LABEL_DECL_UID (dest); |
| } |
| if (VEC_length (basic_block, ifun->cfg->x_label_to_block_map) |
| <= (unsigned int) uid) |
| return NULL; |
| return VEC_index (basic_block, ifun->cfg->x_label_to_block_map, uid); |
| } |
| |
| /* Create edges for an abnormal goto statement at block BB. If FOR_CALL |
| is true, the source statement is a CALL_EXPR instead of a GOTO_EXPR. */ |
| |
| void |
| make_abnormal_goto_edges (basic_block bb, bool for_call) |
| { |
| basic_block target_bb; |
| gimple_stmt_iterator gsi; |
| |
| FOR_EACH_BB (target_bb) |
| for (gsi = gsi_start_bb (target_bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple label_stmt = gsi_stmt (gsi); |
| tree target; |
| |
| if (gimple_code (label_stmt) != GIMPLE_LABEL) |
| break; |
| |
| target = gimple_label_label (label_stmt); |
| |
| /* Make an edge to every label block that has been marked as a |
| potential target for a computed goto or a non-local goto. */ |
| if ((FORCED_LABEL (target) && !for_call) |
| || (DECL_NONLOCAL (target) && for_call)) |
| { |
| make_edge (bb, target_bb, EDGE_ABNORMAL); |
| break; |
| } |
| } |
| } |
| |
| /* Create edges for a goto statement at block BB. */ |
| |
| static void |
| make_goto_expr_edges (basic_block bb) |
| { |
| gimple_stmt_iterator last = gsi_last_bb (bb); |
| gimple goto_t = gsi_stmt (last); |
| |
| /* A simple GOTO creates normal edges. */ |
| if (simple_goto_p (goto_t)) |
| { |
| tree dest = gimple_goto_dest (goto_t); |
| edge e = make_edge (bb, label_to_block (dest), EDGE_FALLTHRU); |
| e->goto_locus = gimple_location (goto_t); |
| if (e->goto_locus) |
| e->goto_block = gimple_block (goto_t); |
| gsi_remove (&last, true); |
| return; |
| } |
| |
| /* A computed GOTO creates abnormal edges. */ |
| make_abnormal_goto_edges (bb, false); |
| } |
| |
| |
| /*--------------------------------------------------------------------------- |
| Flowgraph analysis |
| ---------------------------------------------------------------------------*/ |
| |
| /* Clean up useless labels in basic blocks. This is something we wish |
| to do early because it allows us to group case labels before creating |
| the edges for the CFG, and it speeds up block statement iterators in |
| all passes later on. |
| We rerun this pass after the CFG is created, to get rid of the labels |
| that are no longer referenced. After that we do not run it any more, |
| since (almost) no new labels should be created. */ |
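| |
| /* For instance (sketch): if a block starts with |
| |
| L1: L2: L3: x = 1; |
| |
| we pick one leading label as the block's main label, redirect all |
| references to the other labels to it, and delete the now-unreferenced |
| artificial labels. */ |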
| |
| /* A map from basic block index to the leading label of that block. */ |
| static struct label_record |
| { |
| /* The label. */ |
| tree label; |
| |
| /* True if the label is referenced from somewhere. */ |
| bool used; |
| } *label_for_bb; |
| |
| /* Callback for for_each_eh_region. Helper for cleanup_dead_labels. */ |
| static void |
| update_eh_label (struct eh_region *region) |
| { |
| tree old_label = get_eh_region_tree_label (region); |
| if (old_label) |
| { |
| tree new_label; |
| basic_block bb = label_to_block (old_label); |
| |
| /* ??? After optimizing, there may be EH regions with labels |
| that have already been removed from the function body, so |
| there is no basic block for them. */ |
| if (! bb) |
| return; |
| |
| new_label = label_for_bb[bb->index].label; |
| label_for_bb[bb->index].used = true; |
| set_eh_region_tree_label (region, new_label); |
| } |
| } |
| |
| |
| /* Given LABEL, return the first label in the same basic block. */ |
| |
| static tree |
| main_block_label (tree label) |
| { |
| basic_block bb = label_to_block (label); |
| tree main_label = label_for_bb[bb->index].label; |
| |
| /* label_to_block possibly inserted an undefined label into the chain. */ |
| if (!main_label) |
| { |
| label_for_bb[bb->index].label = label; |
| main_label = label; |
| } |
| |
| label_for_bb[bb->index].used = true; |
| return main_label; |
| } |
| |
| /* Clean up redundant labels. This is a three-step process: |
| 1) Find the leading label for each block. |
| 2) Redirect all references to labels to the leading labels. |
| 3) Cleanup all useless labels. */ |
| |
| void |
| cleanup_dead_labels (void) |
| { |
| basic_block bb; |
| label_for_bb = XCNEWVEC (struct label_record, last_basic_block); |
| |
| /* Find a suitable label for each block. We use the first user-defined |
| label if there is one, or otherwise just the first label we see. */ |
| FOR_EACH_BB (bb) |
| { |
| gimple_stmt_iterator i; |
| |
| for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i)) |
| { |
| tree label; |
| gimple stmt = gsi_stmt (i); |
| |
| if (gimple_code (stmt) != GIMPLE_LABEL) |
| break; |
| |
| label = gimple_label_label (stmt); |
| |
| /* If we have not yet seen a label for the current block, |
| remember this one and see if there are more labels. */ |
| if (!label_for_bb[bb->index].label) |
| { |
| label_for_bb[bb->index].label = label; |
| continue; |
| } |
| |
| /* If we have already seen a label for the current block, but it |
| is an artificially created label, replace it if the current |
| label is a user-defined label. */ |
| if (!DECL_ARTIFICIAL (label) |
| && DECL_ARTIFICIAL (label_for_bb[bb->index].label)) |
| { |
| label_for_bb[bb->index].label = label; |
| break; |
| } |
| } |
| } |
| |
| /* Now redirect all jumps/branches to the selected label. |
| First do so for each block ending in a control statement. */ |
| FOR_EACH_BB (bb) |
| { |
| gimple stmt = last_stmt (bb); |
| if (!stmt) |
| continue; |
| |
| switch (gimple_code (stmt)) |
| { |
| case GIMPLE_COND: |
| { |
| tree true_label = gimple_cond_true_label (stmt); |
| tree false_label = gimple_cond_false_label (stmt); |
| |
| if (true_label) |
| gimple_cond_set_true_label (stmt, main_block_label (true_label)); |
| if (false_label) |
| gimple_cond_set_false_label (stmt, main_block_label (false_label)); |
| break; |
| } |
| |
| case GIMPLE_SWITCH: |
| { |
| size_t i, n = gimple_switch_num_labels (stmt); |
| |
| /* Replace all destination labels. */ |
| for (i = 0; i < n; ++i) |
| { |
| tree case_label = gimple_switch_label (stmt, i); |
| tree label = main_block_label (CASE_LABEL (case_label)); |
| CASE_LABEL (case_label) = label; |
| } |
| break; |
| } |
| |
| /* We have to handle gotos until they're removed, and we don't |
| remove them until after we've created the CFG edges. */ |
| case GIMPLE_GOTO: |
| if (!computed_goto_p (stmt)) |
| { |
| tree new_dest = main_block_label (gimple_goto_dest (stmt)); |
| gimple_goto_set_dest (stmt, new_dest); |
| break; |
| } |
| /* FALLTHRU */ |
| |
| default: |
| break; |
| } |
| } |
| |
| for_each_eh_region (update_eh_label); |
| |
| /* Finally, purge dead labels. All user-defined labels, labels that |
| can be the target of non-local gotos, and labels which have their |
| address taken are preserved. */ |
| FOR_EACH_BB (bb) |
| { |
| gimple_stmt_iterator i; |
| tree label_for_this_bb = label_for_bb[bb->index].label; |
| |
| if (!label_for_this_bb) |
| continue; |
| |
| /* If the main label of the block is unused, we may still remove it. */ |
| if (!label_for_bb[bb->index].used) |
| label_for_this_bb = NULL; |
| |
| for (i = gsi_start_bb (bb); !gsi_end_p (i); ) |
| { |
| tree label; |
| gimple stmt = gsi_stmt (i); |
| |
| if (gimple_code (stmt) != GIMPLE_LABEL) |
| break; |
| |
| label = gimple_label_label (stmt); |
| |
| if (label == label_for_this_bb |
| || !DECL_ARTIFICIAL (label) |
| || DECL_NONLOCAL (label) |
| || FORCED_LABEL (label)) |
| gsi_next (&i); |
| else |
| gsi_remove (&i, true); |
| } |
| } |
| |
| free (label_for_bb); |
| } |
| |
| /* Look for blocks ending in a multiway branch (a GIMPLE_SWITCH), |
| and scan the sorted vector of cases. Combine the ones jumping to the |
| same label. |
| E.g., three separate entries 1: 2: 3: become one entry 1..3: */ |
| |
| void |
| group_case_labels (void) |
| { |
| basic_block bb; |
| |
| FOR_EACH_BB (bb) |
| { |
| gimple stmt = last_stmt (bb); |
| if (stmt && gimple_code (stmt) == GIMPLE_SWITCH) |
| { |
| int old_size = gimple_switch_num_labels (stmt); |
| int i, j, new_size = old_size; |
| tree default_case = NULL_TREE; |
| tree default_label = NULL_TREE; |
| bool has_default; |
| |
| /* The default label is always the first case in a switch |
| statement after gimplification if it was not optimized |
| away. */ |
| if (!CASE_LOW (gimple_switch_default_label (stmt)) |
| && !CASE_HIGH (gimple_switch_default_label (stmt))) |
| { |
| default_case = gimple_switch_default_label (stmt); |
| default_label = CASE_LABEL (default_case); |
| has_default = true; |
| } |
| else |
| has_default = false; |
| |
| /* Look for possible opportunities to merge cases. */ |
| if (has_default) |
| i = 1; |
| else |
| i = 0; |
| while (i < old_size) |
| { |
| tree base_case, base_label, base_high; |
| base_case = gimple_switch_label (stmt, i); |
| |
| gcc_assert (base_case); |
| base_label = CASE_LABEL (base_case); |
| |
| /* Discard cases that have the same destination as the |
| default case. */ |
| if (base_label == default_label) |
| { |
| gimple_switch_set_label (stmt, i, NULL_TREE); |
| i++; |
| new_size--; |
| continue; |
| } |
| |
| base_high = CASE_HIGH (base_case) |
| ? CASE_HIGH (base_case) |
| : CASE_LOW (base_case); |
| i++; |
| |
| /* Try to merge case labels. Break out when we reach the end |
| of the label vector or when we cannot merge the next case |
| label with the current one. */ |
| while (i < old_size) |
| { |
| tree merge_case = gimple_switch_label (stmt, i); |
| tree merge_label = CASE_LABEL (merge_case); |
| tree t = int_const_binop (PLUS_EXPR, base_high, |
| integer_one_node, 1); |
| |
| /* Merge the cases if they jump to the same place, |
| and their ranges are consecutive. */ |
| if (merge_label == base_label |
| && tree_int_cst_equal (CASE_LOW (merge_case), t)) |
| { |
| base_high = CASE_HIGH (merge_case) ? |
| CASE_HIGH (merge_case) : CASE_LOW (merge_case); |
| CASE_HIGH (base_case) = base_high; |
| gimple_switch_set_label (stmt, i, NULL_TREE); |
| new_size--; |
| i++; |
| } |
| else |
| break; |
| } |
| } |
| |
| /* Compress the case labels in the label vector, and adjust the |
| length of the vector. */ |
| for (i = 0, j = 0; i < new_size; i++) |
| { |
| while (! gimple_switch_label (stmt, j)) |
| j++; |
| gimple_switch_set_label (stmt, i, |
| gimple_switch_label (stmt, j++)); |
| } |
| |
| gcc_assert (new_size <= old_size); |
| gimple_switch_set_num_labels (stmt, new_size); |
| } |
| } |
| } |
| |
| /* Return true if we can merge block B into block A. */ |
| |
| static bool |
| gimple_can_merge_blocks_p (basic_block a, basic_block b) |
| { |
| gimple stmt; |
| gimple_stmt_iterator gsi; |
| gimple_seq phis; |
| |
| if (!single_succ_p (a)) |
| return false; |
| |
| if (single_succ_edge (a)->flags & EDGE_ABNORMAL) |
| return false; |
| |
| if (single_succ (a) != b) |
| return false; |
| |
| if (!single_pred_p (b)) |
| return false; |
| |
| if (b == EXIT_BLOCK_PTR) |
| return false; |
| |
| /* If A ends by a statement causing exceptions or something similar, we |
| cannot merge the blocks. */ |
| stmt = last_stmt (a); |
| if (stmt && stmt_ends_bb_p (stmt)) |
| return false; |
| |
| /* Do not allow a block with only a non-local label to be merged. */ |
| if (stmt |
| && gimple_code (stmt) == GIMPLE_LABEL |
| && DECL_NONLOCAL (gimple_label_label (stmt))) |
| return false; |
| |
| /* It must be possible to eliminate all phi nodes in B. If ssa form |
| is not up-to-date, we cannot eliminate any phis; however, if only |
| some symbols as a whole are marked for renaming, this is not a problem, |
| as phi nodes for those symbols are irrelevant in updating anyway. */ |
| phis = phi_nodes (b); |
| if (!gimple_seq_empty_p (phis)) |
| { |
| gimple_stmt_iterator i; |
| |
| if (name_mappings_registered_p ()) |
| return false; |
| |
| for (i = gsi_start (phis); !gsi_end_p (i); gsi_next (&i)) |
| { |
| gimple phi = gsi_stmt (i); |
| |
| if (!is_gimple_reg (gimple_phi_result (phi)) |
| && !may_propagate_copy (gimple_phi_result (phi), |
| gimple_phi_arg_def (phi, 0))) |
| return false; |
| } |
| } |
| |
| /* Do not remove user labels. */ |
| for (gsi = gsi_start_bb (b); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| stmt = gsi_stmt (gsi); |
| if (gimple_code (stmt) != GIMPLE_LABEL) |
| break; |
| if (!DECL_ARTIFICIAL (gimple_label_label (stmt))) |
| return false; |
| } |
| |
| /* Protect the loop latches. */ |
| if (current_loops |
| && b->loop_father->latch == b) |
| return false; |
| |
| return true; |
| } |
| |
| /* Replace all uses of NAME by VAL. */ |
| |
| void |
| replace_uses_by (tree name, tree val) |
| { |
| imm_use_iterator imm_iter; |
| use_operand_p use; |
| gimple stmt; |
| edge e; |
| |
| FOR_EACH_IMM_USE_STMT (stmt, imm_iter, name) |
| { |
| if (gimple_code (stmt) != GIMPLE_PHI) |
| push_stmt_changes (&stmt); |
| |
| FOR_EACH_IMM_USE_ON_STMT (use, imm_iter) |
| { |
| replace_exp (use, val); |
| |
| if (gimple_code (stmt) == GIMPLE_PHI) |
| { |
| e = gimple_phi_arg_edge (stmt, PHI_ARG_INDEX_FROM_USE (use)); |
| if (e->flags & EDGE_ABNORMAL) |
| { |
| /* This can only occur for virtual operands, since |
| for the real ones SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name) |
| would prevent replacement. */ |
| gcc_assert (!is_gimple_reg (name)); |
| SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val) = 1; |
| } |
| } |
| } |
| |
| if (gimple_code (stmt) != GIMPLE_PHI) |
| { |
| size_t i; |
| |
| fold_stmt_inplace (stmt); |
| if (cfgcleanup_altered_bbs) |
| bitmap_set_bit (cfgcleanup_altered_bbs, gimple_bb (stmt)->index); |
| |
| /* FIXME. This should go in pop_stmt_changes. */ |
| for (i = 0; i < gimple_num_ops (stmt); i++) |
| { |
| tree op = gimple_op (stmt, i); |
| /* Operands may be empty here. For example, the labels |
| of a GIMPLE_COND are nulled out following the creation |
| of the corresponding CFG edges. */ |
| if (op && TREE_CODE (op) == ADDR_EXPR) |
| recompute_tree_invariant_for_addr_expr (op); |
| } |
| |
| maybe_clean_or_replace_eh_stmt (stmt, stmt); |
| |
| pop_stmt_changes (&stmt); |
| } |
| } |
| |
| gcc_assert (has_zero_uses (name)); |
| |
| /* Also update the trees stored in loop structures. */ |
| if (current_loops) |
| { |
| struct loop *loop; |
| loop_iterator li; |
| |
| FOR_EACH_LOOP (li, loop, 0) |
| { |
| substitute_in_loop_info (loop, name, val); |
| } |
| } |
| } |
| |
| /* Merge block B into block A. */ |
| |
| static void |
| gimple_merge_blocks (basic_block a, basic_block b) |
| { |
| gimple_stmt_iterator last, gsi, psi; |
| gimple_seq phis = phi_nodes (b); |
| |
| if (dump_file) |
| fprintf (dump_file, "Merging blocks %d and %d\n", a->index, b->index); |
| |
| /* Remove all single-valued PHI nodes from block B of the form |
| V_i = PHI <V_j> by propagating V_j to all the uses of V_i. */ |
| gsi = gsi_last_bb (a); |
| for (psi = gsi_start (phis); !gsi_end_p (psi); ) |
| { |
| gimple phi = gsi_stmt (psi); |
| tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0); |
| gimple copy; |
| bool may_replace_uses = !is_gimple_reg (def) |
| || may_propagate_copy (def, use); |
| |
| /* In case we maintain loop closed ssa form, do not propagate arguments |
| of loop exit phi nodes. */ |
| if (current_loops |
| && loops_state_satisfies_p (LOOP_CLOSED_SSA) |
| && is_gimple_reg (def) |
| && TREE_CODE (use) == SSA_NAME |
| && a->loop_father != b->loop_father) |
| may_replace_uses = false; |
| |
| if (!may_replace_uses) |
| { |
| gcc_assert (is_gimple_reg (def)); |
| |
| /* Note that just emitting the copies is fine -- there is no problem |
| with ordering of phi nodes. This is because A is the single |
| predecessor of B, therefore results of the phi nodes cannot |
| appear as arguments of the phi nodes. */ |
| copy = gimple_build_assign (def, use); |
| gsi_insert_after (&gsi, copy, GSI_NEW_STMT); |
| remove_phi_node (&psi, false); |
| } |
| else |
| { |
| /* If we deal with a PHI for virtual operands, we can simply |
| propagate these without fussing with folding or updating |
| the stmt. */ |
| if (!is_gimple_reg (def)) |
| { |
| imm_use_iterator iter; |
| use_operand_p use_p; |
| gimple stmt; |
| |
| FOR_EACH_IMM_USE_STMT (stmt, iter, def) |
| FOR_EACH_IMM_USE_ON_STMT (use_p, iter) |
| SET_USE (use_p, use); |
| } |
| else |
| replace_uses_by (def, use); |
| |
| remove_phi_node (&psi, true); |
| } |
| } |
| |
| /* Ensure that B follows A. */ |
| move_block_after (b, a); |
| |
| gcc_assert (single_succ_edge (a)->flags & EDGE_FALLTHRU); |
| gcc_assert (!last_stmt (a) || !stmt_ends_bb_p (last_stmt (a))); |
| |
| /* Remove labels from B and set gimple_bb to A for other statements. */ |
| for (gsi = gsi_start_bb (b); !gsi_end_p (gsi);) |
| { |
| if (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL) |
| { |
| gimple label = gsi_stmt (gsi); |
| |
| gsi_remove (&gsi, false); |
| |
| /* Now that we can thread computed gotos, we might have |
| a situation where we have a forced label in block B. |
| However, the label at the start of block B might still be |
| used in other ways (think about the runtime checking for |
| Fortran assigned gotos). So we cannot just delete the |
| label. Instead we move the label to the start of block A. */ |
| if (FORCED_LABEL (gimple_label_label (label))) |
| { |
| gimple_stmt_iterator dest_gsi = gsi_start_bb (a); |
| gsi_insert_before (&dest_gsi, label, GSI_NEW_STMT); |
| } |
| } |
| else |
| { |
| gimple_set_bb (gsi_stmt (gsi), a); |
| gsi_next (&gsi); |
| } |
| } |
| |
| /* Merge the sequences. */ |
| last = gsi_last_bb (a); |
| gsi_insert_seq_after (&last, bb_seq (b), GSI_NEW_STMT); |
| set_bb_seq (b, NULL); |
| |
| if (cfgcleanup_altered_bbs) |
| bitmap_set_bit (cfgcleanup_altered_bbs, a->index); |
| } |
| |
| |
| /* Return the one of two successors of BB that is not reachable by a |
| complex edge, if there is one. Else, return BB. We use |
| this in optimizations that use post-dominators for their heuristics, |
| to catch the cases in C++ where function calls are involved. */ |
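| |
| /* E.g. (a sketch): if BB ends in a call that may throw, it has a |
| fallthru successor and an EH successor; the EH edge is marked |
| EDGE_COMPLEX, so we return the fallthru destination. */ |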
| |
| basic_block |
| single_noncomplex_succ (basic_block bb) |
| { |
| edge e0, e1; |
| if (EDGE_COUNT (bb->succs) != 2) |
| return bb; |
| |
| e0 = EDGE_SUCC (bb, 0); |
| e1 = EDGE_SUCC (bb, 1); |
| if (e0->flags & EDGE_COMPLEX) |
| return e1->dest; |
| if (e1->flags & EDGE_COMPLEX) |
| return e0->dest; |
| |
| return bb; |
| } |
| |
| |
| /* Walk the function tree removing unnecessary statements. |
| |
| * Empty statement nodes are removed |
| |
| * Unnecessary TRY_FINALLY and TRY_CATCH blocks are removed |
| |
| * Unnecessary COND_EXPRs are removed |
| |
| * Some unnecessary BIND_EXPRs are removed |
| |
| * GOTO_EXPRs immediately preceding their destination are removed. |
| |
| Clearly more work could be done. The trick is doing the analysis |
| and removal fast enough to be a net improvement in compile times. |
| |
| Note that when we remove a control structure such as a COND_EXPR, |
| BIND_EXPR, or TRY block, we will need to repeat this optimization pass |
| to ensure we eliminate all the useless code. */ |
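| |
| /* For instance (illustrative only), |
| |
| goto L; L: x = 1; |
| |
| becomes |
| |
| L: x = 1; |
| |
| and a conditional whose predicate folds to a constant collapses |
| into a plain goto to the surviving arm. */ |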
| |
| struct rus_data |
| { |
| bool repeat; |
| bool may_throw; |
| bool may_branch; |
| bool has_label; |
| bool last_was_goto; |
| gimple_stmt_iterator last_goto_gsi; |
| }; |
| |
| |
| static void remove_useless_stmts_1 (gimple_stmt_iterator *gsi, struct rus_data *); |
| |
| /* Given a statement sequence, find the first executable statement with |
| location information, and warn that it is unreachable. When searching, |
| descend into containers in execution order. Return true if a warning |
| was issued. */ |
| |
| static bool |
| remove_useless_stmts_warn_notreached (gimple_seq stmts) |
| { |
| gimple_stmt_iterator gsi; |
| |
| for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple stmt = gsi_stmt (gsi); |
| |
| if (gimple_has_location (stmt)) |
| { |
| location_t loc = gimple_location (stmt); |
| if (LOCATION_LINE (loc) > 0) |
| { |
| warning (OPT_Wunreachable_code, "%Hwill never be executed", &loc); |
| return true; |
| } |
| } |
| |
| switch (gimple_code (stmt)) |
| { |
| /* Unfortunately, we need the CFG now to detect unreachable |
| branches in a conditional, so conditionals are not handled here. */ |
| |
| case GIMPLE_TRY: |
| if (remove_useless_stmts_warn_notreached (gimple_try_eval (stmt))) |
| return true; |
| if (remove_useless_stmts_warn_notreached (gimple_try_cleanup (stmt))) |
| return true; |
| break; |
| |
| case GIMPLE_CATCH: |
| return remove_useless_stmts_warn_notreached (gimple_catch_handler (stmt)); |
| |
| case GIMPLE_EH_FILTER: |
| return remove_useless_stmts_warn_notreached (gimple_eh_filter_failure (stmt)); |
| |
| case GIMPLE_BIND: |
| return remove_useless_stmts_warn_notreached (gimple_bind_body (stmt)); |
| |
| default: |
| break; |
| } |
| } |
| |
| return false; |
| } |
| |
| /* Helper for remove_useless_stmts_1. Handle GIMPLE_COND statements. */ |
| |
| static void |
| remove_useless_stmts_cond (gimple_stmt_iterator *gsi, struct rus_data *data) |
| { |
| gimple stmt = gsi_stmt (*gsi); |
| |
| /* The folded result must still be a conditional statement. */ |
| fold_stmt_inplace (stmt); |
| |
| data->may_branch = true; |
| |
| /* Replace trivial conditionals with gotos. */ |
| if (gimple_cond_true_p (stmt)) |
| { |
| /* Goto THEN label. */ |
| tree then_label = gimple_cond_true_label (stmt); |
| |
| gsi_replace (gsi, gimple_build_goto (then_label), false); |
| data->last_goto_gsi = *gsi; |
| data->last_was_goto = true; |
| data->repeat = true; |
| } |
| else if (gimple_cond_false_p (stmt)) |
| { |
| /* Goto ELSE label. */ |
| tree else_label = gimple_cond_false_label (stmt); |
| |
| gsi_replace (gsi, gimple_build_goto (else_label), false); |
| data->last_goto_gsi = *gsi; |
| data->last_was_goto = true; |
| data->repeat = true; |
| } |
| else |
| { |
| tree then_label = gimple_cond_true_label (stmt); |
| tree else_label = gimple_cond_false_label (stmt); |
| |
| if (then_label == else_label) |
| { |
| /* Goto common destination. */ |
| gsi_replace (gsi, gimple_build_goto (then_label), false); |
| data->last_goto_gsi = *gsi; |
| data->last_was_goto = true; |
| data->repeat = true; |
| } |
| else |
| /* The conditional was not replaced by a goto, so the last |
| statement seen is not a goto. Do not reset the flag on |
| the other paths, or the goto/label collapse performed by |
| remove_useless_stmts_label could never trigger for the |
| gotos created above. */ |
| data->last_was_goto = false; |
| } |
| |
| gsi_next (gsi); |
| } |
| |
| /* Helper for remove_useless_stmts_1. |
| Handle the try-finally case for GIMPLE_TRY statements. */ |
| |
| static void |
| remove_useless_stmts_tf (gimple_stmt_iterator *gsi, struct rus_data *data) |
| { |
| bool save_may_branch, save_may_throw; |
| bool this_may_branch, this_may_throw; |
| |
| gimple_seq eval_seq, cleanup_seq; |
| gimple_stmt_iterator eval_gsi, cleanup_gsi; |
| |
| gimple stmt = gsi_stmt (*gsi); |
| |
| /* Collect may_branch and may_throw information for the body only. */ |
| save_may_branch = data->may_branch; |
| save_may_throw = data->may_throw; |
| data->may_branch = false; |
| data->may_throw = false; |
| data->last_was_goto = false; |
| |
| eval_seq = gimple_try_eval (stmt); |
| eval_gsi = gsi_start (eval_seq); |
| remove_useless_stmts_1 (&eval_gsi, data); |
| |
| this_may_branch = data->may_branch; |
| this_may_throw = data->may_throw; |
| data->may_branch |= save_may_branch; |
| data->may_throw |= save_may_throw; |
| data->last_was_goto = false; |
| |
| cleanup_seq = gimple_try_cleanup (stmt); |
| cleanup_gsi = gsi_start (cleanup_seq); |
| remove_useless_stmts_1 (&cleanup_gsi, data); |
| |
| /* If the body is empty, then we can emit the FINALLY block without |
| the enclosing TRY_FINALLY_EXPR. */ |
| if (gimple_seq_empty_p (eval_seq)) |
| { |
| gsi_insert_seq_before (gsi, cleanup_seq, GSI_SAME_STMT); |
| gsi_remove (gsi, false); |
| data->repeat = true; |
| } |
| |
| /* If the handler is empty, then we can emit the TRY block without |
| the enclosing TRY_FINALLY_EXPR. */ |
| else if (gimple_seq_empty_p (cleanup_seq)) |
| { |
| gsi_insert_seq_before (gsi, eval_seq, GSI_SAME_STMT); |
| gsi_remove (gsi, false); |
| data->repeat = true; |
| } |
| |
| /* If the body neither throws, nor branches, then we can safely |
| string the TRY and FINALLY blocks together. */ |
| else if (!this_may_branch && !this_may_throw) |
| { |
| gsi_insert_seq_before (gsi, eval_seq, GSI_SAME_STMT); |
| gsi_insert_seq_before (gsi, cleanup_seq, GSI_SAME_STMT); |
| gsi_remove (gsi, false); |
| data->repeat = true; |
| } |
| else |
| gsi_next (gsi); |
| } |
| |
| /* Helper for remove_useless_stmts_1. |
| Handle the try-catch case for GIMPLE_TRY statements. */ |
| |
| static void |
| remove_useless_stmts_tc (gimple_stmt_iterator *gsi, struct rus_data *data) |
| { |
| bool save_may_throw, this_may_throw; |
| |
| gimple_seq eval_seq, cleanup_seq, handler_seq, failure_seq; |
| gimple_stmt_iterator eval_gsi, cleanup_gsi, handler_gsi, failure_gsi; |
| |
| gimple stmt = gsi_stmt (*gsi); |
| |
| /* Collect may_throw information for the body only. */ |
| save_may_throw = data->may_throw; |
| data->may_throw = false; |
| data->last_was_goto = false; |
| |
| eval_seq = gimple_try_eval (stmt); |
| eval_gsi = gsi_start (eval_seq); |
| remove_useless_stmts_1 (&eval_gsi, data); |
| |
| this_may_throw = data->may_throw; |
| data->may_throw = save_may_throw; |
| |
| cleanup_seq = gimple_try_cleanup (stmt); |
| |
| /* If the body cannot throw, then we can drop the entire TRY_CATCH_EXPR. */ |
| if (!this_may_throw) |
| { |
| if (warn_notreached) |
| { |
| remove_useless_stmts_warn_notreached (cleanup_seq); |
| } |
| gsi_insert_seq_before (gsi, eval_seq, GSI_SAME_STMT); |
| gsi_remove (gsi, false); |
| data->repeat = true; |
| return; |
| } |
| |
| /* Process the catch clause specially. We may be able to tell that |
| no exceptions propagate past this point. */ |
| |
| this_may_throw = true; |
| cleanup_gsi = gsi_start (cleanup_seq); |
| stmt = gsi_stmt (cleanup_gsi); |
| data->last_was_goto = false; |
| |
| switch (gimple_code (stmt)) |
| { |
| case GIMPLE_CATCH: |
| /* If the first element is a catch, they all must be. */ |
| while (!gsi_end_p (cleanup_gsi)) |
| { |
| stmt = gsi_stmt (cleanup_gsi); |
| /* If we catch all exceptions, then the body does not |
| propagate exceptions past this point. */ |
| if (gimple_catch_types (stmt) == NULL) |
| this_may_throw = false; |
| data->last_was_goto = false; |
| handler_seq = gimple_catch_handler (stmt); |
| handler_gsi = gsi_start (handler_seq); |
| remove_useless_stmts_1 (&handler_gsi, data); |
| gsi_next (&cleanup_gsi); |
| } |
| gsi_next (gsi); |
| break; |
| |
| case GIMPLE_EH_FILTER: |
| /* If the first element is an eh_filter, it should stand alone. */ |
| if (gimple_eh_filter_must_not_throw (stmt)) |
| this_may_throw = false; |
| else if (gimple_eh_filter_types (stmt) == NULL) |
| this_may_throw = false; |
| failure_seq = gimple_eh_filter_failure (stmt); |
| failure_gsi = gsi_start (failure_seq); |
| remove_useless_stmts_1 (&failure_gsi, data); |
| gsi_next (gsi); |
| break; |
| |
| default: |
| /* Otherwise this is a list of cleanup statements. */ |
| remove_useless_stmts_1 (&cleanup_gsi, data); |
| |
| /* If the cleanup is empty, then we can emit the TRY block without |
| the enclosing TRY_CATCH_EXPR. */ |
| if (gimple_seq_empty_p (cleanup_seq)) |
| { |
| gsi_insert_seq_before (gsi, eval_seq, GSI_SAME_STMT); |
| gsi_remove (gsi, false); |
| data->repeat = true; |
| } |
| else |
| gsi_next (gsi); |
| break; |
| } |
| |
| data->may_throw |= this_may_throw; |
| } |
| |
| /* Helper for remove_useless_stmts_1. Handle GIMPLE_BIND statements. */ |
| |
| static void |
| remove_useless_stmts_bind (gimple_stmt_iterator *gsi, struct rus_data *data) |
| { |
| tree block; |
| gimple_seq body_seq, fn_body_seq; |
| gimple_stmt_iterator body_gsi; |
| |
| gimple stmt = gsi_stmt (*gsi); |
| |
| /* First remove anything underneath the GIMPLE_BIND. */ |
| |
| body_seq = gimple_bind_body (stmt); |
| body_gsi = gsi_start (body_seq); |
| remove_useless_stmts_1 (&body_gsi, data); |
| |
| /* If the GIMPLE_BIND has no variables, then we can pull everything |
| up one level and remove the GIMPLE_BIND, unless this is the toplevel |
| GIMPLE_BIND for the current function or an inlined function. |
| |
| When this situation occurs we will want to apply this |
| optimization again. */ |
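| |
| /* E.g. (sketch): an inner scope { { x = 1; } } that declares no |
| variables is flattened to { x = 1; }. */ |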
| block = gimple_bind_block (stmt); |
| fn_body_seq = gimple_body (current_function_decl); |
| if (gimple_bind_vars (stmt) == NULL_TREE |
| && (gimple_seq_empty_p (fn_body_seq) |
| || stmt != gimple_seq_first_stmt (fn_body_seq)) |
| && (! block |
| || ! BLOCK_ABSTRACT_ORIGIN (block) |
| || (TREE_CODE (BLOCK_ABSTRACT_ORIGIN (block)) |
| != FUNCTION_DECL))) |
| { |
| tree var = NULL_TREE; |
| /* Even if there are no gimple_bind_vars, there might be other |
| decls in BLOCK_VARS rendering the GIMPLE_BIND not useless. */ |
| if (block && !BLOCK_NUM_NONLOCALIZED_VARS (block)) |
| for (var = BLOCK_VARS (block); var; var = TREE_CHAIN (var)) |
| if (TREE_CODE (var) == IMPORTED_DECL) |
| break; |
| if (var || (block && BLOCK_NUM_NONLOCALIZED_VARS (block))) |
| gsi_next (gsi); |
| else |
| { |
| gsi_insert_seq_before (gsi, body_seq, GSI_SAME_STMT); |
| gsi_remove (gsi, false); |
| data->repeat = true; |
| } |
| } |
| else |
| gsi_next (gsi); |
| } |
| |
| /* Helper for remove_useless_stmts_1. Handle GIMPLE_GOTO statements. */ |
| |
| static void |
| remove_useless_stmts_goto (gimple_stmt_iterator *gsi, struct rus_data *data) |
| { |
| gimple stmt = gsi_stmt (*gsi); |
| |
| tree dest = gimple_goto_dest (stmt); |
| |
| data->may_branch = true; |
| data->last_was_goto = false; |
| |
| /* Record iterator for last goto expr, so that we can delete it if unnecessary. */ |
| if (TREE_CODE (dest) == LABEL_DECL) |
| { |
| data->last_goto_gsi = *gsi; |
| data->last_was_goto = true; |
| } |
| |
| gsi_next (gsi); |
| } |
| |
| /* Helper for remove_useless_stmts_1. Handle GIMPLE_LABEL statements. */ |
| |
| static void |
| remove_useless_stmts_label (gimple_stmt_iterator *gsi, struct rus_data *data) |
| { |
| gimple stmt = gsi_stmt (*gsi); |
| |
| tree label = gimple_label_label (stmt); |
| |
| data->has_label = true; |
| |
| /* We do want to jump across non-local label receiver code. */ |
| if (DECL_NONLOCAL (label)) |
| data->last_was_goto = false; |
| |
| else if (data->last_was_goto |
| && gimple_goto_dest (gsi_stmt (data->last_goto_gsi)) == label) |
| { |
| /* Replace the preceding GIMPLE_GOTO statement with |
| a GIMPLE_NOP, which will be subsequently removed. |
| In this way, we avoid invalidating other iterators |
| active on the statement sequence. */ |
| gsi_replace (&data->last_goto_gsi, gimple_build_nop (), false); |
| data->last_was_goto = false; |
| data->repeat = true; |
| } |
| |
| /* ??? Add something here to delete unused labels. */ |
| |
| gsi_next (gsi); |
| } |
| |
| |
| /* CALL is a GIMPLE_CALL. Set the cfun->calls_* flags accordingly. */ |
| |
| void |
| notice_special_calls (gimple call) |
| { |
| int flags = gimple_call_flags (call); |
| |
| if (flags & ECF_MAY_BE_ALLOCA) |
| cfun->calls_alloca = true; |
| if (flags & ECF_RETURNS_TWICE) |
| cfun->calls_setjmp = true; |
| } |
| |
| |
| /* Clear flags set by notice_special_calls. Used by dead code removal |
| to update the flags. */ |
| |
| void |
| clear_special_calls (void) |
| { |
| cfun->calls_alloca = false; |
| cfun->calls_setjmp = false; |
| } |
| |
| /* Remove useless statements from a statement sequence, and perform |
| some preliminary simplifications. */ |
| |
| static void |
| remove_useless_stmts_1 (gimple_stmt_iterator *gsi, struct rus_data *data) |
| { |
| while (!gsi_end_p (*gsi)) |
| { |
| gimple stmt = gsi_stmt (*gsi); |
| |
| switch (gimple_code (stmt)) |
| { |
| case GIMPLE_COND: |
| remove_useless_stmts_cond (gsi, data); |
| break; |
| |
| case GIMPLE_GOTO: |
| remove_useless_stmts_goto (gsi, data); |
| break; |
| |
| case GIMPLE_LABEL: |
| remove_useless_stmts_label (gsi, data); |
| break; |
| |
| case GIMPLE_ASSIGN: |
| fold_stmt (gsi); |
| stmt = gsi_stmt (*gsi); |
| data->last_was_goto = false; |
| if (stmt_could_throw_p (stmt)) |
| data->may_throw = true; |
| gsi_next (gsi); |
| break; |
| |
| case GIMPLE_ASM: |
| fold_stmt (gsi); |
| data->last_was_goto = false; |
| gsi_next (gsi); |
| break; |
| |
| case GIMPLE_CALL: |
| fold_stmt (gsi); |
| stmt = gsi_stmt (*gsi); |
| data->last_was_goto = false; |
| if (is_gimple_call (stmt)) |
| notice_special_calls (stmt); |
| |
| /* We used to call update_gimple_call_flags here, |
| which copied side-effects and nothrows status |
| from the function decl to the call. In the new |
| tuplified GIMPLE, the accessors for this information |
| always consult the function decl, so this copying |
| is no longer necessary. */ |
| if (stmt_could_throw_p (stmt)) |
| data->may_throw = true; |
| gsi_next (gsi); |
| break; |
| |
| case GIMPLE_RETURN: |
| fold_stmt (gsi); |
| data->last_was_goto = false; |
| data->may_branch = true; |
| gsi_next (gsi); |
| break; |
| |
| case GIMPLE_BIND: |
| remove_useless_stmts_bind (gsi, data); |
| break; |
| |
| case GIMPLE_TRY: |
| if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH) |
| remove_useless_stmts_tc (gsi, data); |
| else if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY) |
| remove_useless_stmts_tf (gsi, data); |
| else |
| gcc_unreachable (); |
| break; |
| |
| case GIMPLE_CATCH: |
| gcc_unreachable (); |
| break; |
| |
| case GIMPLE_NOP: |
| gsi_remove (gsi, false); |
| break; |
| |
| case GIMPLE_OMP_FOR: |
| { |
| gimple_seq pre_body_seq = gimple_omp_for_pre_body (stmt); |
| gimple_stmt_iterator pre_body_gsi = gsi_start (pre_body_seq); |
| |
| remove_useless_stmts_1 (&pre_body_gsi, data); |
| data->last_was_goto = false; |
| } |
| /* FALLTHROUGH */ |
| case GIMPLE_OMP_CRITICAL: |
| case GIMPLE_OMP_CONTINUE: |
| case GIMPLE_OMP_MASTER: |
| case GIMPLE_OMP_ORDERED: |
| case GIMPLE_OMP_SECTION: |
| case GIMPLE_OMP_SECTIONS: |
| case GIMPLE_OMP_SINGLE: |
| { |
| gimple_seq body_seq = gimple_omp_body (stmt); |
| gimple_stmt_iterator body_gsi = gsi_start (body_seq); |
| |
| remove_useless_stmts_1 (&body_gsi, data); |
| data->last_was_goto = false; |
| gsi_next (gsi); |
| } |
| break; |
| |
| case GIMPLE_OMP_PARALLEL: |
| case GIMPLE_OMP_TASK: |
| { |
| /* Make sure the outermost GIMPLE_BIND isn't removed |
| as useless. */ |
| gimple_seq body_seq = gimple_omp_body (stmt); |
| gimple bind = gimple_seq_first_stmt (body_seq); |
| gimple_seq bind_seq = gimple_bind_body (bind); |
| gimple_stmt_iterator bind_gsi = gsi_start (bind_seq); |
| |
| remove_useless_stmts_1 (&bind_gsi, data); |
| data->last_was_goto = false; |
| gsi_next (gsi); |
| } |
| break; |
| |
| case GIMPLE_CHANGE_DYNAMIC_TYPE: |
|       /* If not optimizing, remove GIMPLE_CHANGE_DYNAMIC_TYPE statements |
|          here: expansion is confused by them, and otherwise they are |
|          only removed during alias computation, which does not run when |
|          not optimizing.  */ |
| if (!optimize) |
| { |
| data->last_was_goto = false; |
| gsi_remove (gsi, false); |
| break; |
| } |
| /* Fallthru. */ |
| |
| default: |
| data->last_was_goto = false; |
| gsi_next (gsi); |
| break; |
| } |
| } |
| } |
| |
| /* Walk the function tree, removing useless statements and performing |
| some preliminary simplifications. */ |
| |
| static unsigned int |
| remove_useless_stmts (void) |
| { |
| struct rus_data data; |
| |
| clear_special_calls (); |
| |
| do |
| { |
| gimple_stmt_iterator gsi; |
| |
| gsi = gsi_start (gimple_body (current_function_decl)); |
| memset (&data, 0, sizeof (data)); |
| remove_useless_stmts_1 (&gsi, &data); |
| } |
| while (data.repeat); |
| return 0; |
| } |
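| |
| /* The walk runs to a fixed point: replacing a goto with a GIMPLE_NOP |
|    sets data.repeat, and the next iteration deletes the NOP (see the |
|    GIMPLE_NOP case in remove_useless_stmts_1), which in turn can expose |
|    further simplifications, e.g. a GIMPLE_BIND or GIMPLE_TRY body that |
|    has become empty.  */ |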
| |
| |
| struct gimple_opt_pass pass_remove_useless_stmts = |
| { |
| { |
| GIMPLE_PASS, |
| "useless", /* name */ |
| NULL, /* gate */ |
| remove_useless_stmts, /* execute */ |
| NULL, /* sub */ |
| NULL, /* next */ |
| 0, /* static_pass_number */ |
| 0, /* tv_id */ |
| PROP_gimple_any, /* properties_required */ |
| 0, /* properties_provided */ |
| 0, /* properties_destroyed */ |
| 0, /* todo_flags_start */ |
| TODO_dump_func /* todo_flags_finish */ |
| } |
| }; |
| |
| /* Remove PHI nodes associated with basic block BB and all edges out of BB. */ |
| |
| static void |
| remove_phi_nodes_and_edges_for_unreachable_block (basic_block bb) |
| { |
| /* Since this block is no longer reachable, we can just delete all |
| of its PHI nodes. */ |
| remove_phi_nodes (bb); |
| |
| /* Remove edges to BB's successors. */ |
| while (EDGE_COUNT (bb->succs) > 0) |
| remove_edge (EDGE_SUCC (bb, 0)); |
| } |
| |
| |
| /* Remove statements of basic block BB. */ |
| |
| static void |
| remove_bb (basic_block bb) |
| { |
| gimple_stmt_iterator i; |
| source_location loc = UNKNOWN_LOCATION; |
| |
| if (dump_file) |
| { |
| fprintf (dump_file, "Removing basic block %d\n", bb->index); |
| if (dump_flags & TDF_DETAILS) |
| { |
| dump_bb (bb, dump_file, 0); |
| fprintf (dump_file, "\n"); |
| } |
| } |
| |
| if (current_loops) |
| { |
| struct loop *loop = bb->loop_father; |
| |
| /* If a loop gets removed, clean up the information associated |
| with it. */ |
| if (loop->latch == bb |
| || loop->header == bb) |
| free_numbers_of_iterations_estimates_loop (loop); |
| } |
| |
| /* Remove all the instructions in the block. */ |
| if (bb_seq (bb) != NULL) |
| { |
| for (i = gsi_start_bb (bb); !gsi_end_p (i);) |
| { |
| gimple stmt = gsi_stmt (i); |
| if (gimple_code (stmt) == GIMPLE_LABEL |
| && (FORCED_LABEL (gimple_label_label (stmt)) |
| || DECL_NONLOCAL (gimple_label_label (stmt)))) |
| { |
| basic_block new_bb; |
| gimple_stmt_iterator new_gsi; |
| |
| /* A non-reachable non-local label may still be referenced. |
| But it no longer needs to carry the extra semantics of |
| non-locality. */ |
| if (DECL_NONLOCAL (gimple_label_label (stmt))) |
| { |
| DECL_NONLOCAL (gimple_label_label (stmt)) = 0; |
| FORCED_LABEL (gimple_label_label (stmt)) = 1; |
| } |
| |
| new_bb = bb->prev_bb; |
| new_gsi = gsi_start_bb (new_bb); |
| gsi_remove (&i, false); |
| gsi_insert_before (&new_gsi, stmt, GSI_NEW_STMT); |
| } |
| else |
| { |
| /* Release SSA definitions if we are in SSA. Note that we |
| may be called when not in SSA. For example, |
| final_cleanup calls this function via |
| cleanup_tree_cfg. */ |
| if (gimple_in_ssa_p (cfun)) |
| release_defs (stmt); |
| |
| gsi_remove (&i, true); |
| } |
| |
| /* Don't warn for removed gotos. Gotos are often removed due to |
| jump threading, thus resulting in bogus warnings. Not great, |
| since this way we lose warnings for gotos in the original |
| program that are indeed unreachable. */ |
| if (gimple_code (stmt) != GIMPLE_GOTO |
| && gimple_has_location (stmt) |
| && !loc) |
| loc = gimple_location (stmt); |
| } |
| } |
| |
|   /* If requested, give a warning that the first statement in the |
|      block is unreachable.  The loop above walks forward and records |
|      only the first location it sees, so LOC refers to the first |
|      located statement in the block.  */ |
| if (loc > BUILTINS_LOCATION && LOCATION_LINE (loc) > 0) |
| warning (OPT_Wunreachable_code, "%Hwill never be executed", &loc); |
| |
| remove_phi_nodes_and_edges_for_unreachable_block (bb); |
| bb->il.gimple = NULL; |
| } |
| |
| |
| /* Given a basic block BB ending with COND_EXPR or SWITCH_EXPR, and a |
| predicate VAL, return the edge that will be taken out of the block. |
| If VAL does not match a unique edge, NULL is returned. */ |
| |
| edge |
| find_taken_edge (basic_block bb, tree val) |
| { |
| gimple stmt; |
| |
| stmt = last_stmt (bb); |
| |
| gcc_assert (stmt); |
| gcc_assert (is_ctrl_stmt (stmt)); |
| |
| if (val == NULL) |
| return NULL; |
| |
| if (!is_gimple_min_invariant (val)) |
| return NULL; |
| |
| if (gimple_code (stmt) == GIMPLE_COND) |
| return find_taken_edge_cond_expr (bb, val); |
| |
| if (gimple_code (stmt) == GIMPLE_SWITCH) |
| return find_taken_edge_switch_expr (bb, val); |
| |
| if (computed_goto_p (stmt)) |
| { |
|       /* Only optimize if the argument is a label; if the argument is |
|          not a label we cannot construct a proper CFG. |
| |
| It may be the case that we only need to allow the LABEL_REF to |
| appear inside an ADDR_EXPR, but we also allow the LABEL_REF to |
| appear inside a LABEL_EXPR just to be safe. */ |
| if ((TREE_CODE (val) == ADDR_EXPR || TREE_CODE (val) == LABEL_EXPR) |
| && TREE_CODE (TREE_OPERAND (val, 0)) == LABEL_DECL) |
| return find_taken_edge_computed_goto (bb, TREE_OPERAND (val, 0)); |
| return NULL; |
| } |
| |
| gcc_unreachable (); |
| } |
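| |
| /* A minimal usage sketch (hypothetical caller): once the controlling |
|    predicate of BB has been folded to a constant CST, a cleanup pass |
|    can prune the dead successor edges: |
| |
|      edge e, taken = find_taken_edge (bb, cst); |
|      edge_iterator ei; |
| |
|      if (taken) |
|        for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) |
|          if (e != taken) |
|            remove_edge (e); |
|          else |
|            ei_next (&ei); |
| |
|    This is the pattern the CFG cleanup code uses when a condition |
|    becomes constant.  */ |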
| |
| /* Given a constant destination label VAL of a computed GOTO at the end |
|    of basic block BB, determine which outgoing edge will be taken. |
|    Return NULL if the destination block cannot be determined.  */ |
| |
| static edge |
| find_taken_edge_computed_goto (basic_block bb, tree val) |
| { |
| basic_block dest; |
| edge e = NULL; |
| |
| dest = label_to_block (val); |
| if (dest) |
| { |
| e = find_edge (bb, dest); |
| gcc_assert (e != NULL); |
| } |
| |
| return e; |
| } |
| |
| /* Given a constant value VAL and the entry block BB to a COND_EXPR |
| statement, determine which of the two edges will be taken out of the |
| block. Return NULL if either edge may be taken. */ |
| |
| static edge |
| find_taken_edge_cond_expr (basic_block bb, tree val) |
| { |
| edge true_edge, false_edge; |
| |
| extract_true_false_edges_from_block (bb, &true_edge, &false_edge); |
| |
| gcc_assert (TREE_CODE (val) == INTEGER_CST); |
| return (integer_zerop (val) ? false_edge : true_edge); |
| } |
| |
| /* Given an INTEGER_CST VAL and the entry block BB to a SWITCH_EXPR |
| statement, determine which edge will be taken out of the block. Return |
| NULL if any edge may be taken. */ |
| |
| static edge |
| find_taken_edge_switch_expr (basic_block bb, tree val) |
| { |
| basic_block dest_bb; |
| edge e; |
| gimple switch_stmt; |
| tree taken_case; |
| |
| switch_stmt = last_stmt (bb); |
| taken_case = find_case_label_for_value (switch_stmt, val); |
| dest_bb = label_to_block (CASE_LABEL (taken_case)); |
| |
| e = find_edge (bb, dest_bb); |
| gcc_assert (e); |
| return e; |
| } |
| |
| |
| /* Return the CASE_LABEL_EXPR that SWITCH_STMT will take for VAL. |
| We can make optimal use here of the fact that the case labels are |
| sorted: We can do a binary search for a case matching VAL. */ |
| |
| static tree |
| find_case_label_for_value (gimple switch_stmt, tree val) |
| { |
| size_t low, high, n = gimple_switch_num_labels (switch_stmt); |
| tree default_case = gimple_switch_default_label (switch_stmt); |
| |
| for (low = 0, high = n; high - low > 1; ) |
| { |
| size_t i = (high + low) / 2; |
| tree t = gimple_switch_label (switch_stmt, i); |
| int cmp; |
| |
| /* Cache the result of comparing CASE_LOW and val. */ |
| cmp = tree_int_cst_compare (CASE_LOW (t), val); |
| |
| if (cmp > 0) |
| high = i; |
| else |
| low = i; |
| |
| if (CASE_HIGH (t) == NULL) |
| { |
|           /* A single-valued case label.  */ |
| if (cmp == 0) |
| return t; |
| } |
| else |
| { |
| /* A case range. We can only handle integer ranges. */ |
| if (cmp <= 0 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0) |
| return t; |
| } |
| } |
| |
| return default_case; |
| } |
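| |
| /* For example, for a switch with the sorted label vector |
| |
|      [0] default:   [1] case 1:   [2] case 4 ... 6:   [3] case 9: |
| |
|    and VAL == 5, the search starts with [low, high) = [0, 4), probes |
|    index 2, finds CASE_LOW (4) <= 5 <= CASE_HIGH (6), and returns the |
|    range label.  With VAL == 7 the window shrinks to [2, 3) without a |
|    match, and the default label is returned.  */ |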
| |
| |
| /* Dump a basic block on stderr. */ |
| |
| void |
| gimple_debug_bb (basic_block bb) |
| { |
| gimple_dump_bb (bb, stderr, 0, TDF_VOPS|TDF_MEMSYMS); |
| } |
| |
| |
| /* Dump basic block with index N on stderr. */ |
| |
| basic_block |
| gimple_debug_bb_n (int n) |
| { |
| gimple_debug_bb (BASIC_BLOCK (n)); |
| return BASIC_BLOCK (n); |
| } |
| |
| |
| /* Dump the CFG on stderr. |
| |
| FLAGS are the same used by the tree dumping functions |
| (see TDF_* in tree-pass.h). */ |
| |
| void |
| gimple_debug_cfg (int flags) |
| { |
| gimple_dump_cfg (stderr, flags); |
| } |
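| |
| /* Usage note: like the other gimple_debug_* routines, this is intended |
|    to be invoked manually from the debugger, e.g. |
| |
|      (gdb) call gimple_debug_cfg (0) |
|      (gdb) call gimple_debug_bb_n (2) |
| |
|    rather than from compiler code.  */ |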
| |
| |
| /* Dump the program showing basic block boundaries on the given FILE. |
| |
| FLAGS are the same used by the tree dumping functions (see TDF_* in |
|    tree-pass.h).  */ |
| |
| void |
| gimple_dump_cfg (FILE *file, int flags) |
| { |
| if (flags & TDF_DETAILS) |
| { |
| const char *funcname |
| = lang_hooks.decl_printable_name (current_function_decl, 2); |
| |
| fputc ('\n', file); |
| fprintf (file, ";; Function %s\n\n", funcname); |
| fprintf (file, ";; \n%d basic blocks, %d edges, last basic block %d.\n\n", |
| n_basic_blocks, n_edges, last_basic_block); |
| |
| brief_dump_cfg (file); |
| fprintf (file, "\n"); |
| } |
| |
| if (flags & TDF_STATS) |
| dump_cfg_stats (file); |
| |
| dump_function_to_file (current_function_decl, file, flags | TDF_BLOCKS); |
| } |
| |
| |
| /* Dump CFG statistics on FILE. */ |
| |
| void |
| dump_cfg_stats (FILE *file) |
| { |
| static long max_num_merged_labels = 0; |
| unsigned long size, total = 0; |
| long num_edges; |
| basic_block bb; |
| const char * const fmt_str = "%-30s%-13s%12s\n"; |
| const char * const fmt_str_1 = "%-30s%13d%11lu%c\n"; |
| const char * const fmt_str_2 = "%-30s%13ld%11lu%c\n"; |
| const char * const fmt_str_3 = "%-43s%11lu%c\n"; |
| const char *funcname |
| = lang_hooks.decl_printable_name (current_function_decl, 2); |
| |
| |
| fprintf (file, "\nCFG Statistics for %s\n\n", funcname); |
| |
| fprintf (file, "---------------------------------------------------------\n"); |
| fprintf (file, fmt_str, "", " Number of ", "Memory"); |
| fprintf (file, fmt_str, "", " instances ", "used "); |
| fprintf (file, "---------------------------------------------------------\n"); |
| |
| size = n_basic_blocks * sizeof (struct basic_block_def); |
| total += size; |
| fprintf (file, fmt_str_1, "Basic blocks", n_basic_blocks, |
| SCALE (size), LABEL (size)); |
| |
| num_edges = 0; |
| FOR_EACH_BB (bb) |
| num_edges += EDGE_COUNT (bb->succs); |
| size = num_edges * sizeof (struct edge_def); |
| total += size; |
| fprintf (file, fmt_str_2, "Edges", num_edges, SCALE (size), LABEL (size)); |
| |
| fprintf (file, "---------------------------------------------------------\n"); |
| fprintf (file, fmt_str_3, "Total memory used by CFG data", SCALE (total), |
| LABEL (total)); |
| fprintf (file, "---------------------------------------------------------\n"); |
| fprintf (file, "\n"); |
| |
| if (cfg_stats.num_merged_labels > max_num_merged_labels) |
| max_num_merged_labels = cfg_stats.num_merged_labels; |
| |
| fprintf (file, "Coalesced label blocks: %ld (Max so far: %ld)\n", |
| cfg_stats.num_merged_labels, max_num_merged_labels); |
| |
| fprintf (file, "\n"); |
| } |
| |
| |
| /* Dump CFG statistics on stderr. Keep extern so that it's always |
| linked in the final executable. */ |
| |
| void |
| debug_cfg_stats (void) |
| { |
| dump_cfg_stats (stderr); |
| } |
| |
| |
| /* Dump the flowgraph to a .vcg FILE. */ |
| |
| static void |
| gimple_cfg2vcg (FILE *file) |
| { |
| edge e; |
| edge_iterator ei; |
| basic_block bb; |
| const char *funcname |
| = lang_hooks.decl_printable_name (current_function_decl, 2); |
| |
| /* Write the file header. */ |
| fprintf (file, "graph: { title: \"%s\"\n", funcname); |
| fprintf (file, "node: { title: \"ENTRY\" label: \"ENTRY\" }\n"); |
| fprintf (file, "node: { title: \"EXIT\" label: \"EXIT\" }\n"); |
| |
| /* Write blocks and edges. */ |
| FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) |
| { |
| fprintf (file, "edge: { sourcename: \"ENTRY\" targetname: \"%d\"", |
| e->dest->index); |
| |
| if (e->flags & EDGE_FAKE) |
| fprintf (file, " linestyle: dotted priority: 10"); |
| else |
| fprintf (file, " linestyle: solid priority: 100"); |
| |
| fprintf (file, " }\n"); |
| } |
| fputc ('\n', file); |
| |
| FOR_EACH_BB (bb) |
| { |
| enum gimple_code head_code, end_code; |
| const char *head_name, *end_name; |
| int head_line = 0; |
| int end_line = 0; |
| gimple first = first_stmt (bb); |
| gimple last = last_stmt (bb); |
| |
| if (first) |
| { |
| head_code = gimple_code (first); |
| head_name = gimple_code_name[head_code]; |
| head_line = get_lineno (first); |
| } |
| else |
| head_name = "no-statement"; |
| |
| if (last) |
| { |
| end_code = gimple_code (last); |
| end_name = gimple_code_name[end_code]; |
| end_line = get_lineno (last); |
| } |
| else |
| end_name = "no-statement"; |
| |
| fprintf (file, "node: { title: \"%d\" label: \"#%d\\n%s (%d)\\n%s (%d)\"}\n", |
| bb->index, bb->index, head_name, head_line, end_name, |
| end_line); |
| |
| FOR_EACH_EDGE (e, ei, bb->succs) |
| { |
| if (e->dest == EXIT_BLOCK_PTR) |
| fprintf (file, "edge: { sourcename: \"%d\" targetname: \"EXIT\"", bb->index); |
| else |
| fprintf (file, "edge: { sourcename: \"%d\" targetname: \"%d\"", bb->index, e->dest->index); |
| |
| if (e->flags & EDGE_FAKE) |
| fprintf (file, " priority: 10 linestyle: dotted"); |
| else |
| fprintf (file, " priority: 100 linestyle: solid"); |
| |
| fprintf (file, " }\n"); |
| } |
| |
| if (bb->next_bb != EXIT_BLOCK_PTR) |
| fputc ('\n', file); |
| } |
| |
| fputs ("}\n\n", file); |
| } |
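| |
| /* The output is plain VCG syntax; for a function "foo" with a single |
|    body block it looks roughly like this (statement names and line |
|    numbers illustrative): |
| |
|      graph: { title: "foo" |
|      node: { title: "ENTRY" label: "ENTRY" } |
|      node: { title: "EXIT" label: "EXIT" } |
|      edge: { sourcename: "ENTRY" targetname: "2" linestyle: solid priority: 100 } |
| |
|      node: { title: "2" label: "#2\ngimple_assign (3)\ngimple_return (4)"} |
|      edge: { sourcename: "2" targetname: "EXIT" priority: 100 linestyle: solid } |
|      } |
| |
|    and can be laid out with a VCG-compatible viewer such as xvcg.  */ |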
| |
| |
| |
| /*--------------------------------------------------------------------------- |
| Miscellaneous helpers |
| ---------------------------------------------------------------------------*/ |
| |
| /* Return true if T represents a stmt that always transfers control. */ |
| |
| bool |
| is_ctrl_stmt (gimple t) |
| { |
| return gimple_code (t) == GIMPLE_COND |
| || gimple_code (t) == GIMPLE_SWITCH |
| || gimple_code (t) == GIMPLE_GOTO |
| || gimple_code (t) == GIMPLE_RETURN |
| || gimple_code (t) == GIMPLE_RESX; |
| } |
| |
| |
| /* Return true if T is a statement that may alter the flow of control |
| (e.g., a call to a non-returning function). */ |
| |
| bool |
| is_ctrl_altering_stmt (gimple t) |
| { |
| gcc_assert (t); |
| |
| if (is_gimple_call (t)) |
| { |
| int flags = gimple_call_flags (t); |
| |
| /* A non-pure/const call alters flow control if the current |
| function has nonlocal labels. */ |
| if (!(flags & (ECF_CONST | ECF_PURE)) |
| && cfun->has_nonlocal_label) |
| return true; |
| |
| /* A call also alters control flow if it does not return. */ |
|       if (flags & ECF_NORETURN) |
| return true; |
| } |
| |
| /* OpenMP directives alter control flow. */ |
| if (is_gimple_omp (t)) |
| return true; |
| |
| /* If a statement can throw, it alters control flow. */ |
| return stmt_can_throw_internal (t); |
| } |
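| |
| /* For example, in |
| |
|      foo ();    <-- control-altering if foo may throw internally |
|      abort ();  <-- control-altering: abort is ECF_NORETURN |
| |
|    each call makes stmt_ends_bb_p return true, so make_blocks starts a |
|    new basic block after it.  */ |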
| |
| |
| /* Return true if T is a simple local goto. */ |
| |
| bool |
| simple_goto_p (gimple t) |
| { |
| return (gimple_code (t) == GIMPLE_GOTO |
| && TREE_CODE (gimple_goto_dest (t)) == LABEL_DECL); |
| } |
| |
| |
| /* Return true if T can make an abnormal transfer of control flow. |
| Transfers of control flow associated with EH are excluded. */ |
| |
| bool |
| stmt_can_make_abnormal_goto (gimple t) |
| { |
| if (computed_goto_p (t)) |
| return true; |
| if (is_gimple_call (t)) |
| return gimple_has_side_effects (t) && cfun->has_nonlocal_label; |
| return false; |
| } |
| |
| |
| /* Return true if STMT should start a new basic block. PREV_STMT is |
| the statement preceding STMT. It is used when STMT is a label or a |
| case label. Labels should only start a new basic block if their |
|    previous statement wasn't a label.  Otherwise, a sequence of labels |
| would generate unnecessary basic blocks that only contain a single |
| label. */ |
| |
| static inline bool |
| stmt_starts_bb_p (gimple stmt, gimple prev_stmt) |
| { |
| if (stmt == NULL) |
| return false; |
| |
| /* Labels start a new basic block only if the preceding statement |
| wasn't a label of the same type. This prevents the creation of |
| consecutive blocks that have nothing but a single label. */ |
| if (gimple_code (stmt) == GIMPLE_LABEL) |
| { |
| /* Nonlocal and computed GOTO targets always start a new block. */ |
| if (DECL_NONLOCAL (gimple_label_label (stmt)) |
| || FORCED_LABEL (gimple_label_label (stmt))) |
| return true; |
| |
| if (prev_stmt && gimple_code (prev_stmt) == GIMPLE_LABEL) |
| { |
| if (DECL_NONLOCAL (gimple_label_label (prev_stmt))) |
| return true; |
| |
| cfg_stats.num_merged_labels++; |
| return false; |
| } |
| else |
| return true; |
| } |
| |
| return false; |
| } |
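| |
| /* For example, the sequence |
| |
|    L1: |
|    L2: |
|    L3: |
|      x = 1; |
| |
|    yields a single basic block: only L1 starts a block, while L2 and L3 |
|    are merged into it (and counted in cfg_stats.num_merged_labels).  If |
|    L2 were DECL_NONLOCAL or FORCED_LABEL, it would start a block of its |
|    own.  */ |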
| |
| |
| /* Return true if T should end a basic block. */ |
| |
| bool |
| stmt_ends_bb_p (gimple t) |
| { |
| return is_ctrl_stmt (t) || is_ctrl_altering_stmt (t); |
| } |
| |
| /* Remove block annotations and other data structures. */ |
| |
| void |
| delete_tree_cfg_annotations (void) |
| { |
| label_to_block_map = NULL; |
| } |
| |
| |
| /* Return the first statement in basic block BB. */ |
| |
| gimple |
| first_stmt (basic_block bb) |
| { |
| gimple_stmt_iterator i = gsi_start_bb (bb); |
| return !gsi_end_p (i) ? gsi_stmt (i) : NULL; |
| } |
| |
| /* Return the last statement in basic block BB. */ |
| |
| gimple |
| last_stmt (basic_block bb) |
| { |
| gimple_stmt_iterator b = gsi_last_bb (bb); |
| return !gsi_end_p (b) ? gsi_stmt (b) : NULL; |
| } |
| |
| /* Return the last statement of an otherwise empty block. Return NULL |
| if the block is totally empty, or if it contains more than one |
| statement. */ |
| |
| gimple |
| last_and_only_stmt (basic_block bb) |
| { |
| gimple_stmt_iterator i = gsi_last_bb (bb); |
| gimple last, prev; |
| |
| if (gsi_end_p (i)) |
| return NULL; |
| |
| last = gsi_stmt (i); |
| gsi_prev (&i); |
| if (gsi_end_p (i)) |
| return last; |
| |
| /* Empty statements should no longer appear in the instruction stream. |
| Everything that might have appeared before should be deleted by |
| remove_useless_stmts, and the optimizers should just gsi_remove |
| instead of smashing with build_empty_stmt. |
| |
| Thus the only thing that should appear here in a block containing |
| one executable statement is a label. */ |
| prev = gsi_stmt (i); |
| if (gimple_code (prev) == GIMPLE_LABEL) |
| return last; |
| else |
| return NULL; |
| } |
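| |
| /* A typical use (hypothetical sketch) is forwarder-block detection, |
|    asking whether a block does nothing but branch: |
| |
|      gimple stmt = last_and_only_stmt (bb); |
|      if (stmt && gimple_code (stmt) == GIMPLE_GOTO) |
|        ... BB is a candidate for bypassing ... |
| |
|    A leading label does not disqualify BB; any other extra statement |
|    makes the function return NULL.  */ |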
| |
| /* Reinstall those PHI arguments queued in OLD_EDGE to NEW_EDGE. */ |
| |
| static void |
| reinstall_phi_args (edge new_edge, edge old_edge) |
| { |
| edge_var_map_vector v; |
| edge_var_map *vm; |
| int i; |
| gimple_stmt_iterator phis; |
| |
| v = redirect_edge_var_map_vector (old_edge); |
| if (!v) |
| return; |
| |
| for (i = 0, phis = gsi_start_phis (new_edge->dest); |
| VEC_iterate (edge_var_map, v, i, vm) && !gsi_end_p (phis); |
| i++, gsi_next (&phis)) |
| { |
| gimple phi = gsi_stmt (phis); |
| tree result = redirect_edge_var_map_result (vm); |
| tree arg = redirect_edge_var_map_def (vm); |
| |
| gcc_assert (result == gimple_phi_result (phi)); |
| |
| add_phi_arg (phi, arg, new_edge); |
| } |
| |
| redirect_edge_var_map_clear (old_edge); |
| } |
| |
| /* Returns the basic block after which the new basic block created |
| by splitting edge EDGE_IN should be placed. Tries to keep the new block |
| near its "logical" location. This is of most help to humans looking |
| at debugging dumps. */ |
| |
| static basic_block |
| split_edge_bb_loc (edge edge_in) |
| { |
| basic_block dest = edge_in->dest; |
| |
| if (dest->prev_bb && find_edge (dest->prev_bb, dest)) |
| return edge_in->src; |
| else |
| return dest->prev_bb; |
| } |
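| |
| /* For example, when splitting a back edge from block 7 to block 3, |
|    where block 2 (= 3->prev_bb) also branches into block 3, the new |
|    block is placed after block 7, leaving the existing layout around |
|    block 3 undisturbed.  If no edge reaches DEST from its layout |
|    predecessor, the new block is slotted in directly before DEST |
|    instead.  */ |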
| |
| /* Split a (typically critical) edge EDGE_IN. Return the new block. |
| Abort on abnormal edges. */ |
| |
| static basic_block |
| gimple_split_edge (edge edge_in) |
| { |
| basic_block new_bb, after_bb, dest; |
| edge new_edge, e; |
| |
| /* Abnormal edges cannot be split. */ |
| gcc_assert (!(edge_in->flags & EDGE_ABNORMAL)); |
| |
| dest = edge_in->dest; |
| |
| after_bb = split_edge_bb_loc (edge_in); |
| |
| new_bb = create_empty_bb (after_bb); |
| new_bb->frequency = EDGE_FREQUENCY (edge_in); |
| new_bb->count = edge_in->count; |
| new_edge = make_edge (new_bb, dest, EDGE_FALLTHRU); |
| new_edge->probability = REG_BR_PROB_BASE; |
| new_edge->count = edge_in->count; |
| |
| e = redirect_edge_and_branch (edge_in, new_bb); |
| gcc_assert (e == edge_in); |
| reinstall_phi_args (new_edge, e); |
| |
| return new_bb; |
| } |
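| |
| /* Usage sketch: passes normally reach this through the generic CFG |
|    hook, e.g. |
| |
|      basic_block new_bb = split_edge (e); |
| |
|    after which E has been redirected to NEW_BB, NEW_BB falls through to |
|    the old destination on an EDGE_FALLTHRU edge, and the PHI arguments |
|    queued on E by the redirection are reinstalled on that fallthru |
|    edge.  */ |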
| |
| /* Callback for walk_tree, check that all elements with address taken are |
|    properly noticed as such.  DATA is unused.  */ |
| |
| static tree |
| verify_expr (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED) |
| { |
| tree t = *tp, x; |
| |
| if (TYPE_P (t)) |
| *walk_subtrees = 0; |
| |
| /* Check operand N for being valid GIMPLE and give error MSG if not. */ |
| #define CHECK_OP(N, MSG) \ |
| do { if (!is_gimple_val (TREE_OPERAND (t, N))) \ |
| { error (MSG); return TREE_OPERAND (t, N); }} while (0) |
| |
| switch (TREE_CODE (t)) |
| { |
| case SSA_NAME: |
| if (SSA_NAME_IN_FREE_LIST (t)) |
| { |
| error ("SSA name in freelist but still referenced"); |
| return *tp; |
| } |
| break; |
| |
| case INDIRECT_REF: |
| x = TREE_OPERAND (t, 0); |
| if (!is_gimple_reg (x) && !is_gimple_min_invariant (x)) |
| { |
|           error ("indirect reference's operand is not a register or a constant"); |
| return x; |
| } |
| break; |
| |
| case ASSERT_EXPR: |
| x = fold (ASSERT_EXPR_COND (t)); |
| if (x == boolean_false_node) |
| { |
| error ("ASSERT_EXPR with an always-false condition"); |
| return *tp; |
| } |
| break; |
| |
| case MODIFY_EXPR: |
|       error ("MODIFY_EXPR not expected while having tuples"); |
| return *tp; |
| |
| case ADDR_EXPR: |
| { |
| bool old_constant; |
| bool old_side_effects; |
| bool new_constant; |
| bool new_side_effects; |
| |
| gcc_assert (is_gimple_address (t)); |
| |
| old_constant = TREE_CONSTANT (t); |
| old_side_effects = TREE_SIDE_EFFECTS (t); |
| |
| recompute_tree_invariant_for_addr_expr (t); |
| new_side_effects = TREE_SIDE_EFFECTS (t); |
| new_constant = TREE_CONSTANT (t); |
| |
| if (old_constant != new_constant) |
| { |
| error ("constant not recomputed when ADDR_EXPR changed"); |
| return t; |
| } |
| if (old_side_effects != new_side_effects) |
| { |
| error ("side effects not recomputed when ADDR_EXPR changed"); |
| return t; |
| } |
| |
| /* Skip any references (they will be checked when we recurse down the |
| tree) and ensure that any variable used as a prefix is marked |
| addressable. */ |
| for (x = TREE_OPERAND (t, 0); |
| handled_component_p (x); |
| x = TREE_OPERAND (x, 0)) |
| ; |
| |
| if (TREE_CODE (x) != VAR_DECL && TREE_CODE (x) != PARM_DECL) |
| return NULL; |
| if (!TREE_ADDRESSABLE (x)) |
| { |
| error ("address taken, but ADDRESSABLE bit not set"); |
| return x; |
| } |
| |
| break; |
| } |
| |
| case COND_EXPR: |
| x = COND_EXPR_COND (t); |
| if (!INTEGRAL_TYPE_P (TREE_TYPE (x))) |
| { |
|           error ("non-integral type used in condition"); |
| return x; |
| } |
| if (!is_gimple_condexpr (x)) |
| { |
| error ("invalid conditional operand"); |
| return x; |
| } |
| break; |
| |
| case NON_LVALUE_EXPR: |
| gcc_unreachable (); |
| |
| CASE_CONVERT: |
| case FIX_TRUNC_EXPR: |
| case FLOAT_EXPR: |
| case NEGATE_EXPR: |
| case ABS_EXPR: |
| case BIT_NOT_EXPR: |
| case TRUTH_NOT_EXPR: |
| CHECK_OP (0, "invalid operand to unary operator"); |
| break; |
| |
| case REALPART_EXPR: |
| case IMAGPART_EXPR: |
| case COMPONENT_REF: |
| case ARRAY_REF: |
| case ARRAY_RANGE_REF: |
| case BIT_FIELD_REF: |
| case VIEW_CONVERT_EXPR: |
| /* We have a nest of references. Verify that each of the operands |
| that determine where to reference is either a constant or a variable, |
| verify that the base is valid, and then show we've already checked |
| the subtrees. */ |
| while (handled_component_p (t)) |
| { |
| if (TREE_CODE (t) == COMPONENT_REF && TREE_OPERAND (t, 2)) |
| CHECK_OP (2, "invalid COMPONENT_REF offset operator"); |
| else if (TREE_CODE (t) == ARRAY_REF |
| || TREE_CODE (t) == ARRAY_RANGE_REF) |
| { |
| CHECK_OP (1, "invalid array index"); |
| if (TREE_OPERAND (t, 2)) |
| CHECK_OP (2, "invalid array lower bound"); |
| if (TREE_OPERAND (t, 3)) |
| CHECK_OP (3, "invalid array stride"); |
| } |
| else if (TREE_CODE (t) == BIT_FIELD_REF) |
| { |
| if (!host_integerp (TREE_OPERAND (t, 1), 1) |
| || !host_integerp (TREE_OPERAND (t, 2), 1)) |
| { |
| error ("invalid position or size operand to BIT_FIELD_REF"); |
| return t; |
| } |
| else if (INTEGRAL_TYPE_P (TREE_TYPE (t)) |
| && (TYPE_PRECISION (TREE_TYPE (t)) |
| != TREE_INT_CST_LOW (TREE_OPERAND (t, 1)))) |
| { |
| error ("integral result type precision does not match " |
| "field size of BIT_FIELD_REF"); |
| return t; |
| } |
| if (!INTEGRAL_TYPE_P (TREE_TYPE (t)) |
| && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (t))) |
| != TREE_INT_CST_LOW (TREE_OPERAND (t, 1)))) |
| { |
| error ("mode precision of non-integral result does not " |
| "match field size of BIT_FIELD_REF"); |
| return t; |
| } |
| } |
| |
| t = TREE_OPERAND (t, 0); |
| } |
| |
| if (!is_gimple_min_invariant (t) && !is_gimple_lvalue (t)) |
| { |
| error ("invalid reference prefix"); |
| return t; |
| } |
| *walk_subtrees = 0; |
| break; |
| case PLUS_EXPR: |
| case MINUS_EXPR: |
|       /* PLUS_EXPR and MINUS_EXPR don't work on pointers; pointer |
|          arithmetic must be done using POINTER_PLUS_EXPR.  */ |
| if (POINTER_TYPE_P (TREE_TYPE (t))) |
| { |
| error ("invalid operand to plus/minus, type is a pointer"); |
| return t; |
| } |
| CHECK_OP (0, "invalid operand to binary operator"); |
| CHECK_OP (1, "invalid operand to binary operator"); |
| break; |
| |
| case POINTER_PLUS_EXPR: |
| /* Check to make sure the first operand is a pointer or reference type. */ |
| if (!POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))) |
| { |
| error ("invalid operand to pointer plus, first operand is not a pointer"); |
| return t; |
| } |
| /* Check to make sure the second operand is an integer with type of |
| sizetype. */ |
| if (!useless_type_conversion_p (sizetype, |
| TREE_TYPE (TREE_OPERAND (t, 1)))) |
| { |
| error ("invalid operand to pointer plus, second operand is not an " |
|                "integer of type sizetype"); |
| return t; |
| } |
| /* FALLTHROUGH */ |
| case LT_EXPR: |
| case LE_EXPR: |
| case GT_EXPR: |
| case GE_EXPR: |
| case EQ_EXPR: |
| case NE_EXPR: |
| case UNORDERED_EXPR: |
| case ORDERED_EXPR: |
| case UNLT_EXPR: |
| case UNLE_EXPR: |
| case UNGT_EXPR: |
| case UNGE_EXPR: |
| case UNEQ_EXPR: |
| case LTGT_EXPR: |
| case MULT_EXPR: |
| case TRUNC_DIV_EXPR: |
| case CEIL_DIV_EXPR: |
| case FLOOR_DIV_EXPR: |
| case ROUND_DIV_EXPR: |
| case TRUNC_MOD_EXPR: |
| case CEIL_MOD_EXPR: |
| case FLOOR_MOD_EXPR: |
| case ROUND_MOD_EXPR: |
| case RDIV_EXPR: |
| case EXACT_DIV_EXPR: |
| case MIN_EXPR: |
| case MAX_EXPR: |
| case LSHIFT_EXPR: |
| case RSHIFT_EXPR: |
| case LROTATE_EXPR: |
| case RROTATE_EXPR: |
| case BIT_IOR_EXPR: |
| case BIT_XOR_EXPR: |
| case BIT_AND_EXPR: |
| CHECK_OP (0, "invalid operand to binary operator"); |
| CHECK_OP (1, "invalid operand to binary operator"); |
| break; |
| |
| case CONSTRUCTOR: |
| if (TREE_CONSTANT (t) && TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE) |
| *walk_subtrees = 0; |
| break; |
| |
| default: |
| break; |
| } |
| return NULL; |
| |
| #undef CHECK_OP |
| } |
| |
| |
| /* Verify if EXPR is either a GIMPLE ID or a GIMPLE indirect reference. |
| Returns true if there is an error, otherwise false. */ |
| |
| static bool |
| verify_types_in_gimple_min_lval (tree expr) |
| { |
| tree op; |
| |
| if (is_gimple_id (expr)) |
| return false; |
| |
| if (!INDIRECT_REF_P (expr) |
| && TREE_CODE (expr) != TARGET_MEM_REF) |
| { |
| error ("invalid expression for min lvalue"); |
| return true; |
| } |
| |
| /* TARGET_MEM_REFs are strange beasts. */ |
| if (TREE_CODE (expr) == TARGET_MEM_REF) |
| return false; |
| |
| op = TREE_OPERAND (expr, 0); |
| if (!is_gimple_val (op)) |
| { |
| error ("invalid operand in indirect reference"); |
| debug_generic_stmt (op); |
| return true; |
| } |
| if (!useless_type_conversion_p (TREE_TYPE (expr), |
| TREE_TYPE (TREE_TYPE (op)))) |
| { |
| error ("type mismatch in indirect reference"); |
| debug_generic_stmt (TREE_TYPE (expr)); |
| debug_generic_stmt (TREE_TYPE (TREE_TYPE (op))); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Verify if EXPR is a valid GIMPLE reference expression. Returns true |
| if there is an error, otherwise false. */ |
| |
| static bool |
| verify_types_in_gimple_reference (tree expr) |
| { |
| while (handled_component_p (expr)) |
| { |
| tree op = TREE_OPERAND (expr, 0); |
| |
| if (TREE_CODE (expr) == ARRAY_REF |
| || TREE_CODE (expr) == ARRAY_RANGE_REF) |
| { |
| if (!is_gimple_val (TREE_OPERAND (expr, 1)) |
| || (TREE_OPERAND (expr, 2) |
| && !is_gimple_val (TREE_OPERAND (expr, 2))) |
| || (TREE_OPERAND (expr, 3) |
| && !is_gimple_val (TREE_OPERAND (expr, 3)))) |
| { |
| error ("invalid operands to array reference"); |
| debug_generic_stmt (expr); |
| return true; |
| } |
| } |
| |
| /* Verify if the reference array element types are compatible. */ |
| if (TREE_CODE (expr) == ARRAY_REF |
| && !useless_type_conversion_p (TREE_TYPE (expr), |
| TREE_TYPE (TREE_TYPE (op)))) |
| { |
| error ("type mismatch in array reference"); |
| debug_generic_stmt (TREE_TYPE (expr)); |
| debug_generic_stmt (TREE_TYPE (TREE_TYPE (op))); |
| return true; |
| } |
| if (TREE_CODE (expr) == ARRAY_RANGE_REF |
| && !useless_type_conversion_p (TREE_TYPE (TREE_TYPE (expr)), |
| TREE_TYPE (TREE_TYPE (op)))) |
| { |
| error ("type mismatch in array range reference"); |
| debug_generic_stmt (TREE_TYPE (TREE_TYPE (expr))); |
| debug_generic_stmt (TREE_TYPE (TREE_TYPE (op))); |
| return true; |
| } |
| |
| if ((TREE_CODE (expr) == REALPART_EXPR |
| || TREE_CODE (expr) == IMAGPART_EXPR) |
| && !useless_type_conversion_p (TREE_TYPE (expr), |
| TREE_TYPE (TREE_TYPE (op)))) |
| { |
| error ("type mismatch in real/imagpart reference"); |
| debug_generic_stmt (TREE_TYPE (expr)); |
| debug_generic_stmt (TREE_TYPE (TREE_TYPE (op))); |
| return true; |
| } |
| |
| if (TREE_CODE (expr) == COMPONENT_REF |
| && !useless_type_conversion_p (TREE_TYPE (expr), |
| TREE_TYPE (TREE_OPERAND (expr, 1)))) |
| { |
| error ("type mismatch in component reference"); |
| debug_generic_stmt (TREE_TYPE (expr)); |
| debug_generic_stmt (TREE_TYPE (TREE_OPERAND (expr, 1))); |
| return true; |
| } |
| |
| /* For VIEW_CONVERT_EXPRs which are allowed here, too, there |
| is nothing to verify. Gross mismatches at most invoke |
| undefined behavior. */ |
| if (TREE_CODE (expr) == VIEW_CONVERT_EXPR |
| && !handled_component_p (op)) |
| return false; |
| |
| expr = op; |
| } |
| |
| return verify_types_in_gimple_min_lval (expr); |
| } |
| |
| /* Return true if at least one pointer type in the TYPE_POINTER_TO |
|    (SRC_OBJ) list of pointer-to types is trivially convertible to |
|    DEST, or if SRC_OBJ has no pointer-to types at all.  */ |
| |
| static bool |
| one_pointer_to_useless_type_conversion_p (tree dest, tree src_obj) |
| { |
| tree src; |
| |
| if (!TYPE_POINTER_TO (src_obj)) |
| return true; |
| |
| for (src = TYPE_POINTER_TO (src_obj); src; src = TYPE_NEXT_PTR_TO (src)) |
| if (useless_type_conversion_p (dest, src)) |
| return true; |
| |
| return false; |
| } |
| |
| /* Return true if TYPE1 is a fixed-point type and if conversions to and |
| from TYPE2 can be handled by FIXED_CONVERT_EXPR. */ |
| |
| static bool |
| valid_fixed_convert_types_p (tree type1, tree type2) |
| { |
| return (FIXED_POINT_TYPE_P (type1) |
| && (INTEGRAL_TYPE_P (type2) |
| || SCALAR_FLOAT_TYPE_P (type2) |
| || FIXED_POINT_TYPE_P (type2))); |
| } |
| |
| /* Verify the contents of a GIMPLE_CALL STMT. Returns true when there |
| is a problem, otherwise false. */ |
| |
| static bool |
| verify_gimple_call (gimple stmt) |
| { |
| tree fn = gimple_call_fn (stmt); |
| tree fntype; |
| |
| if (!POINTER_TYPE_P (TREE_TYPE (fn)) |
| || (TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != FUNCTION_TYPE |
| && TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != METHOD_TYPE)) |
| { |
| error ("non-function in gimple call"); |
| return true; |
| } |
| |
| if (gimple_call_lhs (stmt) |
| && !is_gimple_lvalue (gimple_call_lhs (stmt))) |
| { |
| error ("invalid LHS in gimple call"); |
| return true; |
| } |
| |
| fntype = TREE_TYPE (TREE_TYPE (fn)); |
| if (gimple_call_lhs (stmt) |
| && !useless_type_conversion_p (TREE_TYPE (gimple_call_lhs (stmt)), |
| TREE_TYPE (fntype)) |
| /* ??? At least C++ misses conversions at assignments from |
| void * call results. |
| ??? Java is completely off. Especially with functions |
| returning java.lang.Object. |
| For now simply allow arbitrary pointer type conversions. */ |
| && !(POINTER_TYPE_P (TREE_TYPE (gimple_call_lhs (stmt))) |
| && POINTER_TYPE_P (TREE_TYPE (fntype)))) |
| { |
| error ("invalid conversion in gimple call"); |
| debug_generic_stmt (TREE_TYPE (gimple_call_lhs (stmt))); |
| debug_generic_stmt (TREE_TYPE (fntype)); |
| return true; |
| } |
| |
| /* ??? The C frontend passes unpromoted arguments in case it |
| didn't see a function declaration before the call. So for now |
| leave the call arguments unverified. Once we gimplify |
| unit-at-a-time we have a chance to fix this. */ |
| |
| return false; |
| } |
| |
| /* Verifies the gimple comparison with the result type TYPE and |
| the operands OP0 and OP1. */ |
| |
| static bool |
| verify_gimple_comparison (tree type, tree op0, tree op1) |
| { |
| tree op0_type = TREE_TYPE (op0); |
| tree op1_type = TREE_TYPE (op1); |
| |
| if (!is_gimple_val (op0) || !is_gimple_val (op1)) |
| { |
| error ("invalid operands in gimple comparison"); |
| return true; |
| } |
| |
| /* For comparisons we do not have the operations type as the |
| effective type the comparison is carried out in. Instead |
| we require that either the first operand is trivially |
| convertible into the second, or the other way around. |
| The resulting type of a comparison may be any integral type. |
| Because we special-case pointers to void we allow |
| comparisons of pointers with the same mode as well. */ |
| if ((!useless_type_conversion_p (op0_type, op1_type) |
| && !useless_type_conversion_p (op1_type, op0_type) |
| && (!POINTER_TYPE_P (op0_type) |
| || !POINTER_TYPE_P (op1_type) |
| || TYPE_MODE (op0_type) != TYPE_MODE (op1_type))) |
| || !INTEGRAL_TYPE_P (type)) |
| { |
| error ("type mismatch in comparison expression"); |
| debug_generic_expr (type); |
| debug_generic_expr (op0_type); |
| debug_generic_expr (op1_type); |
| return true; |
| } |
| |
| return false; |
| } |
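| |
| /* For example, a comparison of "int" with "unsigned int" is rejected |
|    here because the conversion between the operand types is not useless |
|    in either direction, whereas comparing "void *" with "char *" is |
|    accepted because both operands are pointers of the same mode.  The |
|    result type must be integral, typically boolean_type_node.  */ |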
| |
| /* Verify a gimple assignment statement STMT with an unary rhs. |
| Returns true if anything is wrong. */ |
| |
| static bool |
| verify_gimple_assign_unary (gimple stmt) |
| { |
| enum tree_code rhs_code = gimple_assign_rhs_code (stmt); |
| tree lhs = gimple_assign_lhs (stmt); |
| tree lhs_type = TREE_TYPE (lhs); |
| tree rhs1 = gimple_assign_rhs1 (stmt); |
| tree rhs1_type = TREE_TYPE (rhs1); |
| |
| if (!is_gimple_reg (lhs) |
| && !(optimize == 0 |
| && TREE_CODE (lhs_type) == COMPLEX_TYPE)) |
| { |
| error ("non-register as LHS of unary operation"); |
| return true; |
| } |
| |
| if (!is_gimple_val (rhs1)) |
| { |
| error ("invalid operand in unary operation"); |
| return true; |
| } |
| |
| /* First handle conversions. */ |
| switch (rhs_code) |
| { |
| CASE_CONVERT: |
| { |
| /* Allow conversions between integral types and pointers only if |
| there is no sign or zero extension involved. |
|          For targets where the precision of sizetype doesn't match that |
| of pointers we need to allow arbitrary conversions from and |
| to sizetype. */ |
| if ((POINTER_TYPE_P (lhs_type) |
| && INTEGRAL_TYPE_P (rhs1_type) |
| && (TYPE_PRECISION (lhs_type) >= TYPE_PRECISION (rhs1_type) |
| || rhs1_type == sizetype)) |
| || (POINTER_TYPE_P (rhs1_type) |
| && INTEGRAL_TYPE_P (lhs_type) |
| && (TYPE_PRECISION (rhs1_type) >= TYPE_PRECISION (lhs_type) |
| || lhs_type == sizetype))) |
| return false; |
| |
| /* Allow conversion from integer to offset type and vice versa. */ |
| if ((TREE_CODE (lhs_type) == OFFSET_TYPE |
| && TREE_CODE (rhs1_type) == INTEGER_TYPE) |
| || (TREE_CODE (lhs_type) == INTEGER_TYPE |
| && TREE_CODE (rhs1_type) == OFFSET_TYPE)) |
| return false; |
| |
| /* Otherwise assert we are converting between types of the |
| same kind. */ |
| if (INTEGRAL_TYPE_P (lhs_type) != INTEGRAL_TYPE_P (rhs1_type)) |
| { |
| error ("invalid types in nop conversion"); |
| debug_generic_expr (lhs_type); |
| debug_generic_expr (rhs1_type); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| case FIXED_CONVERT_EXPR: |
| { |
| if (!valid_fixed_convert_types_p (lhs_type, rhs1_type) |
| && !valid_fixed_convert_types_p (rhs1_type, lhs_type)) |
| { |
| error ("invalid types in fixed-point conversion"); |
| debug_generic_expr (lhs_type); |
| debug_generic_expr (rhs1_type); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| case FLOAT_EXPR: |
| { |
| if (!INTEGRAL_TYPE_P (rhs1_type) || !
|