| /* Exception handling semantics and decomposition for trees. |
| Copyright (C) 2003-2024 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "cfghooks.h" |
| #include "tree-pass.h" |
| #include "ssa.h" |
| #include "cgraph.h" |
| #include "diagnostic-core.h" |
| #include "fold-const.h" |
| #include "calls.h" |
| #include "except.h" |
| #include "cfganal.h" |
| #include "cfgcleanup.h" |
| #include "tree-eh.h" |
| #include "gimple-iterator.h" |
| #include "tree-cfg.h" |
| #include "tree-into-ssa.h" |
| #include "tree-ssa.h" |
| #include "tree-inline.h" |
| #include "langhooks.h" |
| #include "cfgloop.h" |
| #include "gimple-low.h" |
| #include "stringpool.h" |
| #include "attribs.h" |
| #include "asan.h" |
| #include "gimplify.h" |
| |
| /* In some instances a tree and a gimple need to be stored in the same |
| table, i.e. in hash tables. This union makes that possible. */ |
| typedef union {tree *tp; tree t; gimple *g;} treemple; |
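| |
| /* For example, the finally_tree table below stores either a LABEL_DECL |
| or a GIMPLE_TRY in the same CHILD field; since every member is just a |
| pointer, hashing and comparison simply use the tree view of the union, |
| regardless of which member was actually stored. */ |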
| |
| /* Misc functions used in this file. */ |
| |
| /* Remember and look up EH landing pad data for arbitrary statements. |
| Really this means any statement that could_throw_p. We could |
| stuff this information into the stmt_ann data structure, but: |
| |
| (1) We absolutely rely on this information being kept until |
| we get to rtl. Once we're done with lowering here, if we lose |
| the information there's no way to recover it! |
| |
| (2) There are many more statements that *cannot* throw as |
| compared to those that can. We should be saving some amount |
| of space by only allocating memory for those that can throw. */ |
| |
| /* Add statement T in function IFUN to landing pad NUM. */ |
| |
| static void |
| add_stmt_to_eh_lp_fn (struct function *ifun, gimple *t, int num) |
| { |
| gcc_assert (num != 0); |
| |
| if (!get_eh_throw_stmt_table (ifun)) |
| set_eh_throw_stmt_table (ifun, hash_map<gimple *, int>::create_ggc (31)); |
| |
| gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num)); |
| } |
| |
| /* Add statement T in the current function (cfun) to EH landing pad NUM. */ |
| |
| void |
| add_stmt_to_eh_lp (gimple *t, int num) |
| { |
| add_stmt_to_eh_lp_fn (cfun, t, num); |
| } |
| |
| /* Add statement T to the single EH landing pad in REGION. */ |
| |
| static void |
| record_stmt_eh_region (eh_region region, gimple *t) |
| { |
| if (region == NULL) |
| return; |
| if (region->type == ERT_MUST_NOT_THROW) |
| add_stmt_to_eh_lp_fn (cfun, t, -region->index); |
| else |
| { |
| eh_landing_pad lp = region->landing_pads; |
| if (lp == NULL) |
| lp = gen_eh_landing_pad (region); |
| else |
| gcc_assert (lp->next_lp == NULL); |
| add_stmt_to_eh_lp_fn (cfun, t, lp->index); |
| } |
| } |
| |
| |
| /* Remove statement T in function IFUN from its EH landing pad. */ |
| |
| bool |
| remove_stmt_from_eh_lp_fn (struct function *ifun, gimple *t) |
| { |
| if (!get_eh_throw_stmt_table (ifun)) |
| return false; |
| |
| if (!get_eh_throw_stmt_table (ifun)->get (t)) |
| return false; |
| |
| get_eh_throw_stmt_table (ifun)->remove (t); |
| return true; |
| } |
| |
| |
| /* Remove statement T in the current function (cfun) from its |
| EH landing pad. */ |
| |
| bool |
| remove_stmt_from_eh_lp (gimple *t) |
| { |
| return remove_stmt_from_eh_lp_fn (cfun, t); |
| } |
| |
| /* Determine if statement T is inside an EH region in function IFUN. |
| Positive numbers indicate a landing pad index; negative numbers |
| indicate a MUST_NOT_THROW region index; zero indicates that the |
| statement is not recorded in the region table. */ |
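| /* For example, a return value of 2 means the statement may throw to |
| landing pad 2, a value of -3 means it lies within the MUST_NOT_THROW |
| region with index 3, and 0 means the statement is not in the table. */ |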
| |
| int |
| lookup_stmt_eh_lp_fn (struct function *ifun, const gimple *t) |
| { |
| if (ifun->eh->throw_stmt_table == NULL) |
| return 0; |
| |
| int *lp_nr = ifun->eh->throw_stmt_table->get (const_cast <gimple *> (t)); |
| return lp_nr ? *lp_nr : 0; |
| } |
| |
| /* Likewise, but always use the current function. */ |
| |
| int |
| lookup_stmt_eh_lp (const gimple *t) |
| { |
| /* We can get called from initialized data when -fnon-call-exceptions |
| is on; prevent crash. */ |
| if (!cfun) |
| return 0; |
| return lookup_stmt_eh_lp_fn (cfun, t); |
| } |
| |
| /* First pass of EH node decomposition. Build up a tree of GIMPLE_TRY_FINALLY |
| nodes and LABEL_DECL nodes. We will use this during the second phase to |
| determine if a goto leaves the body of a TRY_FINALLY_EXPR node. */ |
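| |
| /* As a purely illustrative sketch (the labels here are made up), given |
| |
| try { L1: ...; if (c) goto L1; goto L2; } finally { cleanup (); } |
| L2: ... |
| |
| L1 is recorded with the enclosing GIMPLE_TRY as its parent, so walking up |
| from L1 reaches that GIMPLE_TRY and the first goto is seen to stay inside, |
| while walking up from L2 never reaches it, so the second goto is seen to |
| leave the try_finally body and must run the finally block on the way. */ |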
| |
| struct finally_tree_node |
| { |
| /* When storing a GIMPLE_TRY, we have to record a gimple. However |
| when deciding whether a GOTO to a certain LABEL_DECL (which is a |
| tree) leaves the TRY block, it's necessary to record a tree in |
| this field. Thus a treemple is used. */ |
| treemple child; |
| gtry *parent; |
| }; |
| |
| /* Hashtable helpers. */ |
| |
| struct finally_tree_hasher : free_ptr_hash <finally_tree_node> |
| { |
| static inline hashval_t hash (const finally_tree_node *); |
| static inline bool equal (const finally_tree_node *, |
| const finally_tree_node *); |
| }; |
| |
| inline hashval_t |
| finally_tree_hasher::hash (const finally_tree_node *v) |
| { |
| return (intptr_t)v->child.t >> 4; |
| } |
| |
| inline bool |
| finally_tree_hasher::equal (const finally_tree_node *v, |
| const finally_tree_node *c) |
| { |
| return v->child.t == c->child.t; |
| } |
| |
| /* Note that this table is *not* marked GTY. It is short-lived. */ |
| static hash_table<finally_tree_hasher> *finally_tree; |
| |
| static void |
| record_in_finally_tree (treemple child, gtry *parent) |
| { |
| struct finally_tree_node *n; |
| finally_tree_node **slot; |
| |
| n = XNEW (struct finally_tree_node); |
| n->child = child; |
| n->parent = parent; |
| |
| slot = finally_tree->find_slot (n, INSERT); |
| gcc_assert (!*slot); |
| *slot = n; |
| } |
| |
| static void |
| collect_finally_tree (gimple *stmt, gtry *region); |
| |
| /* Go through the gimple sequence. Works with collect_finally_tree to |
| record all GIMPLE_LABEL and GIMPLE_TRY statements. */ |
| |
| static void |
| collect_finally_tree_1 (gimple_seq seq, gtry *region) |
| { |
| gimple_stmt_iterator gsi; |
| |
| for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi)) |
| collect_finally_tree (gsi_stmt (gsi), region); |
| } |
| |
| static void |
| collect_finally_tree (gimple *stmt, gtry *region) |
| { |
| treemple temp; |
| |
| switch (gimple_code (stmt)) |
| { |
| case GIMPLE_LABEL: |
| temp.t = gimple_label_label (as_a <glabel *> (stmt)); |
| record_in_finally_tree (temp, region); |
| break; |
| |
| case GIMPLE_TRY: |
| if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY) |
| { |
| temp.g = stmt; |
| record_in_finally_tree (temp, region); |
| collect_finally_tree_1 (gimple_try_eval (stmt), |
| as_a <gtry *> (stmt)); |
| collect_finally_tree_1 (gimple_try_cleanup (stmt), region); |
| } |
| else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH) |
| { |
| collect_finally_tree_1 (gimple_try_eval (stmt), region); |
| collect_finally_tree_1 (gimple_try_cleanup (stmt), region); |
| } |
| break; |
| |
| case GIMPLE_CATCH: |
| collect_finally_tree_1 (gimple_catch_handler ( |
| as_a <gcatch *> (stmt)), |
| region); |
| break; |
| |
| case GIMPLE_EH_FILTER: |
| collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region); |
| break; |
| |
| case GIMPLE_EH_ELSE: |
| { |
| geh_else *eh_else_stmt = as_a <geh_else *> (stmt); |
| collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region); |
| collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region); |
| } |
| break; |
| |
| default: |
| /* A type, a decl, or some kind of statement that we're not |
| interested in. Don't walk them. */ |
| break; |
| } |
| } |
| |
| |
| /* Use the finally tree to determine if a jump from START to TARGET |
| would leave the try_finally node that START lives in. */ |
| |
| static bool |
| outside_finally_tree (treemple start, gimple *target) |
| { |
| struct finally_tree_node n, *p; |
| |
| do |
| { |
| n.child = start; |
| p = finally_tree->find (&n); |
| if (!p) |
| return true; |
| start.g = p->parent; |
| } |
| while (start.g != target); |
| |
| return false; |
| } |
| |
| /* Second pass of EH node decomposition. Actually transform the GIMPLE_TRY |
| nodes into a set of gotos, magic labels, and eh regions. |
| The eh region creation is straightforward, but frobbing all the gotos |
| and such into shape isn't. */ |
| |
| /* The sequence into which we record all EH stuff. This will be |
| placed at the end of the function when we're all done. */ |
| static gimple_seq eh_seq; |
| |
| /* Record whether an EH region contains something that can throw, |
| indexed by EH region number. */ |
| static bitmap eh_region_may_contain_throw_map; |
| |
| /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN |
| statements that are seen to escape this GIMPLE_TRY_FINALLY node. |
| The idea is to record a gimple statement for everything except for |
| the conditionals, which get their labels recorded. Since labels are |
| of type 'tree', we need this node to store both gimple and tree |
| objects. REPL_STMT is the sequence used to replace the goto/return |
| statement. CONT_STMT is used to store the statement that allows |
| the return/goto to jump to the original destination. */ |
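| |
| /* As an illustrative sketch (hypothetical source), for |
| |
| try { ...; return x; } finally { cleanup (); } |
| |
| the GIMPLE_RETURN is queued with index -1. During lowering, REPL_STMT is |
| filled with a goto to the label that starts the finally code, and |
| CONT_STMT holds the original return, which the lowering re-emits after |
| (a copy of) the finally block when control can continue past it, so the |
| original destination is still reached. */ |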
| |
| struct goto_queue_node |
| { |
| treemple stmt; |
| location_t location; |
| gimple_seq repl_stmt; |
| gimple *cont_stmt; |
| int index; |
| /* This is used when index >= 0 to indicate that stmt is a label (as |
| opposed to a goto stmt). */ |
| int is_label; |
| }; |
| |
| /* State of the world while lowering. */ |
| |
| struct leh_state |
| { |
| /* What's "current" while constructing the eh region tree. These |
| correspond to variables of the same name in cfun->eh, which we |
| don't have easy access to. */ |
| eh_region cur_region; |
| |
| /* What's "current" for the purposes of __builtin_eh_pointer. For |
| a CATCH, this is the associated TRY. For an EH_FILTER, this is |
| the associated ALLOWED_EXCEPTIONS, etc. */ |
| eh_region ehp_region; |
| |
| /* Processing of TRY_FINALLY requires a bit more state. This is |
| split out into a separate structure so that we don't have to |
| copy so much when processing other nodes. */ |
| struct leh_tf_state *tf; |
| |
| /* Outer non-cleanup region. */ |
| eh_region outer_non_cleanup; |
| }; |
| |
| struct leh_tf_state |
| { |
| /* Pointer to the GIMPLE_TRY_FINALLY node under discussion. The |
| try_finally_expr is the original GIMPLE_TRY_FINALLY. We need to retain |
| this so that outside_finally_tree can reliably reference the tree used |
| in the collect_finally_tree data structures. */ |
| gtry *try_finally_expr; |
| gtry *top_p; |
| |
| /* While lowering a top_p, it is usually expanded into multiple statements; |
| the following field stores them. */ |
| gimple_seq top_p_seq; |
| |
| /* The state outside this try_finally node. */ |
| struct leh_state *outer; |
| |
| /* The exception region created for it. */ |
| eh_region region; |
| |
| /* The goto queue. */ |
| struct goto_queue_node *goto_queue; |
| size_t goto_queue_size; |
| size_t goto_queue_active; |
| |
| /* Pointer map to help in searching goto_queue when it is large. */ |
| hash_map<gimple *, goto_queue_node *> *goto_queue_map; |
| |
| /* The set of unique labels seen as entries in the goto queue. */ |
| vec<tree> dest_array; |
| |
| /* A label to be added at the end of the completed transformed |
| sequence. It will be set if may_fallthru was true *at one time*, |
| though subsequent transformations may have cleared that flag. */ |
| tree fallthru_label; |
| |
| /* True if it is possible to fall out the bottom of the try block. |
| Cleared if the fallthru is converted to a goto. */ |
| bool may_fallthru; |
| |
| /* True if any entry in goto_queue is a GIMPLE_RETURN. */ |
| bool may_return; |
| |
| /* True if the finally block can receive an exception edge. |
| Cleared if the exception case is handled by code duplication. */ |
| bool may_throw; |
| }; |
| |
| static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *); |
| |
| /* Search for STMT in the goto queue. Return the replacement, |
| or null if the statement isn't in the queue. */ |
| |
| #define LARGE_GOTO_QUEUE 20 |
| |
| static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq); |
| |
| static gimple_seq |
| find_goto_replacement (struct leh_tf_state *tf, treemple stmt) |
| { |
| unsigned int i; |
| |
| if (tf->goto_queue_active < LARGE_GOTO_QUEUE) |
| { |
| for (i = 0; i < tf->goto_queue_active; i++) |
| if (tf->goto_queue[i].stmt.g == stmt.g) |
| return tf->goto_queue[i].repl_stmt; |
| return NULL; |
| } |
| |
| /* If we have a large number of entries in the goto_queue, create a |
| pointer map and use that for searching. */ |
| |
| if (!tf->goto_queue_map) |
| { |
| tf->goto_queue_map = new hash_map<gimple *, goto_queue_node *>; |
| for (i = 0; i < tf->goto_queue_active; i++) |
| { |
| bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g, |
| &tf->goto_queue[i]); |
| gcc_assert (!existed); |
| } |
| } |
| |
| goto_queue_node **slot = tf->goto_queue_map->get (stmt.g); |
| if (slot != NULL) |
| return ((*slot)->repl_stmt); |
| |
| return NULL; |
| } |
| |
| /* A subroutine of replace_goto_queue_1. Handles the sub-clauses of a |
| lowered GIMPLE_COND. If, by chance, the replacement is a simple goto, |
| then we can just splat it in, otherwise we add the new stmts immediately |
| after the GIMPLE_COND and redirect. */ |
| |
| static void |
| replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf, |
| gimple_stmt_iterator *gsi) |
| { |
| tree label; |
| gimple_seq new_seq; |
| treemple temp; |
| location_t loc = gimple_location (gsi_stmt (*gsi)); |
| |
| temp.tp = tp; |
| new_seq = find_goto_replacement (tf, temp); |
| if (!new_seq) |
| return; |
| |
| if (gimple_seq_singleton_p (new_seq) |
| && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO) |
| { |
| *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq)); |
| return; |
| } |
| |
| label = create_artificial_label (loc); |
| /* Set the new label for the GIMPLE_COND. */ |
| *tp = label; |
| |
| gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING); |
| gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING); |
| } |
| |
| /* The real work of replace_goto_queue. Returns with GSI updated to |
| point to the next statement. */ |
| |
| static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *); |
| |
| static void |
| replace_goto_queue_1 (gimple *stmt, struct leh_tf_state *tf, |
| gimple_stmt_iterator *gsi) |
| { |
| gimple_seq seq; |
| treemple temp; |
| temp.g = NULL; |
| |
| switch (gimple_code (stmt)) |
| { |
| case GIMPLE_GOTO: |
| case GIMPLE_RETURN: |
| temp.g = stmt; |
| seq = find_goto_replacement (tf, temp); |
| if (seq) |
| { |
| gimple_stmt_iterator i; |
| seq = gimple_seq_copy (seq); |
| for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i)) |
| gimple_set_location (gsi_stmt (i), gimple_location (stmt)); |
| gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT); |
| gsi_remove (gsi, false); |
| return; |
| } |
| break; |
| |
| case GIMPLE_COND: |
| replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi); |
| replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi); |
| break; |
| |
| case GIMPLE_TRY: |
| replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf); |
| replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf); |
| break; |
| case GIMPLE_CATCH: |
| replace_goto_queue_stmt_list (gimple_catch_handler_ptr ( |
| as_a <gcatch *> (stmt)), |
| tf); |
| break; |
| case GIMPLE_EH_FILTER: |
| replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf); |
| break; |
| case GIMPLE_EH_ELSE: |
| { |
| geh_else *eh_else_stmt = as_a <geh_else *> (stmt); |
| replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt), |
| tf); |
| replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt), |
| tf); |
| } |
| break; |
| |
| default: |
| /* These won't have gotos in them. */ |
| break; |
| } |
| |
| gsi_next (gsi); |
| } |
| |
| /* A subroutine of replace_goto_queue. Handles GIMPLE_SEQ. */ |
| |
| static void |
| replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf) |
| { |
| gimple_stmt_iterator gsi = gsi_start (*seq); |
| |
| while (!gsi_end_p (gsi)) |
| replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi); |
| } |
| |
| /* Replace all goto queue members. */ |
| |
| static void |
| replace_goto_queue (struct leh_tf_state *tf) |
| { |
| if (tf->goto_queue_active == 0) |
| return; |
| replace_goto_queue_stmt_list (&tf->top_p_seq, tf); |
| replace_goto_queue_stmt_list (&eh_seq, tf); |
| } |
| |
| /* Add a new record to the goto queue contained in TF. NEW_STMT is the |
| data to be added, IS_LABEL indicates whether NEW_STMT is a label or |
| a gimple return. */ |
| |
| static void |
| record_in_goto_queue (struct leh_tf_state *tf, |
| treemple new_stmt, |
| int index, |
| bool is_label, |
| location_t location) |
| { |
| size_t active, size; |
| struct goto_queue_node *q; |
| |
| gcc_assert (!tf->goto_queue_map); |
| |
| active = tf->goto_queue_active; |
| size = tf->goto_queue_size; |
| if (active >= size) |
| { |
| size = (size ? size * 2 : 32); |
| tf->goto_queue_size = size; |
| tf->goto_queue |
| = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size); |
| } |
| |
| q = &tf->goto_queue[active]; |
| tf->goto_queue_active = active + 1; |
| |
| memset (q, 0, sizeof (*q)); |
| q->stmt = new_stmt; |
| q->index = index; |
| q->location = location; |
| q->is_label = is_label; |
| } |
| |
| /* Record the LABEL label in the goto queue contained in TF. |
| TF is not null. */ |
| |
| static void |
| record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label, |
| location_t location) |
| { |
| int index; |
| treemple temp, new_stmt; |
| |
| if (!label) |
| return; |
| |
| /* Computed and non-local gotos do not get processed. Given |
| their nature we can neither tell whether we've escaped the |
| finally block nor redirect them if we knew. */ |
| if (TREE_CODE (label) != LABEL_DECL) |
| return; |
| |
| /* No need to record gotos that don't leave the try block. */ |
| temp.t = label; |
| if (!outside_finally_tree (temp, tf->try_finally_expr)) |
| return; |
| |
| if (! tf->dest_array.exists ()) |
| { |
| tf->dest_array.create (10); |
| tf->dest_array.quick_push (label); |
| index = 0; |
| } |
| else |
| { |
| int n = tf->dest_array.length (); |
| for (index = 0; index < n; ++index) |
| if (tf->dest_array[index] == label) |
| break; |
| if (index == n) |
| tf->dest_array.safe_push (label); |
| } |
| |
| /* In the case of a GOTO we want to record the destination label, |
| since with a GIMPLE_COND we have easy access to the then/else |
| labels. */ |
| new_stmt = stmt; |
| record_in_goto_queue (tf, new_stmt, index, true, location); |
| } |
| |
| /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally |
| node, and if so record that fact in the goto queue associated with that |
| try_finally node. */ |
| |
| static void |
| maybe_record_in_goto_queue (struct leh_state *state, gimple *stmt) |
| { |
| struct leh_tf_state *tf = state->tf; |
| treemple new_stmt; |
| |
| if (!tf) |
| return; |
| |
| switch (gimple_code (stmt)) |
| { |
| case GIMPLE_COND: |
| { |
| gcond *cond_stmt = as_a <gcond *> (stmt); |
| new_stmt.tp = gimple_op_ptr (cond_stmt, 2); |
| record_in_goto_queue_label (tf, new_stmt, |
| gimple_cond_true_label (cond_stmt), |
| EXPR_LOCATION (*new_stmt.tp)); |
| new_stmt.tp = gimple_op_ptr (cond_stmt, 3); |
| record_in_goto_queue_label (tf, new_stmt, |
| gimple_cond_false_label (cond_stmt), |
| EXPR_LOCATION (*new_stmt.tp)); |
| } |
| break; |
| case GIMPLE_GOTO: |
| new_stmt.g = stmt; |
| record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt), |
| gimple_location (stmt)); |
| break; |
| |
| case GIMPLE_RETURN: |
| tf->may_return = true; |
| new_stmt.g = stmt; |
| record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt)); |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| |
| #if CHECKING_P |
| /* We do not process GIMPLE_SWITCHes for now. As long as the original source |
| was in fact structured, and we've not yet done jump threading, then none |
| of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this. */ |
| |
| static void |
| verify_norecord_switch_expr (struct leh_state *state, |
| gswitch *switch_expr) |
| { |
| struct leh_tf_state *tf = state->tf; |
| size_t i, n; |
| |
| if (!tf) |
| return; |
| |
| n = gimple_switch_num_labels (switch_expr); |
| |
| for (i = 0; i < n; ++i) |
| { |
| treemple temp; |
| tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i)); |
| temp.t = lab; |
| gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr)); |
| } |
| } |
| #else |
| #define verify_norecord_switch_expr(state, switch_expr) |
| #endif |
| |
| /* Redirect a RETURN_EXPR pointed to by Q to FINLAB. If MOD is |
| non-null, insert it before the new branch. */ |
| |
| static void |
| do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod) |
| { |
| gimple *x; |
| |
| /* In the case of a return, the queue node must be a gimple statement. */ |
| gcc_assert (!q->is_label); |
| |
| /* Note that the return value may have already been computed, e.g., |
| |
| int x; |
| int foo (void) |
| { |
| x = 0; |
| try { |
| return x; |
| } finally { |
| x++; |
| } |
| } |
| |
| should return 0, not 1. We don't have to do anything to make |
| this happen because the return value has been placed in the |
| RESULT_DECL already. */ |
| |
| q->cont_stmt = q->stmt.g; |
| |
| if (mod) |
| gimple_seq_add_seq (&q->repl_stmt, mod); |
| |
| x = gimple_build_goto (finlab); |
| gimple_set_location (x, q->location); |
| gimple_seq_add_stmt (&q->repl_stmt, x); |
| } |
| |
| /* Similar, but easier, for GIMPLE_GOTO. */ |
| |
| static void |
| do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod, |
| struct leh_tf_state *tf) |
| { |
| ggoto *x; |
| |
| gcc_assert (q->is_label); |
| |
| q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]); |
| |
| if (mod) |
| gimple_seq_add_seq (&q->repl_stmt, mod); |
| |
| x = gimple_build_goto (finlab); |
| gimple_set_location (x, q->location); |
| gimple_seq_add_stmt (&q->repl_stmt, x); |
| } |
| |
| /* Emit a standard landing pad sequence into SEQ for REGION. */ |
| |
| static void |
| emit_post_landing_pad (gimple_seq *seq, eh_region region) |
| { |
| eh_landing_pad lp = region->landing_pads; |
| glabel *x; |
| |
| if (lp == NULL) |
| lp = gen_eh_landing_pad (region); |
| |
| lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION); |
| EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index; |
| |
| x = gimple_build_label (lp->post_landing_pad); |
| gimple_seq_add_stmt (seq, x); |
| } |
| |
| /* Emit a RESX statement into SEQ for REGION. */ |
| |
| static void |
| emit_resx (gimple_seq *seq, eh_region region) |
| { |
| gresx *x = gimple_build_resx (region->index); |
| gimple_seq_add_stmt (seq, x); |
| if (region->outer) |
| record_stmt_eh_region (region->outer, x); |
| } |
| |
| /* Note that the current EH region may contain a throw, or a |
| call to a function which itself may contain a throw. */ |
| |
| static void |
| note_eh_region_may_contain_throw (eh_region region) |
| { |
| while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index)) |
| { |
| if (region->type == ERT_MUST_NOT_THROW) |
| break; |
| region = region->outer; |
| if (region == NULL) |
| break; |
| } |
| } |
| |
| /* Check if REGION has been marked as containing a throw. If REGION is |
| NULL, this predicate is false. */ |
| |
| static inline bool |
| eh_region_may_contain_throw (eh_region r) |
| { |
| return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index); |
| } |
| |
| /* We want to transform |
| try { body; } catch { stuff; } |
| to |
| normal_sequence: |
| body; |
| over: |
| eh_sequence: |
| landing_pad: |
| stuff; |
| goto over; |
| |
| TP is a GIMPLE_TRY node. REGION is the region whose post_landing_pad |
| should be placed before the second operand, or NULL. OVER is |
| an existing label that should be put at the exit, or NULL. */ |
| |
| static gimple_seq |
| frob_into_branch_around (gtry *tp, eh_region region, tree over) |
| { |
| gimple *x; |
| gimple_seq cleanup, result; |
| location_t loc = gimple_location (tp); |
| |
| cleanup = gimple_try_cleanup (tp); |
| result = gimple_try_eval (tp); |
| |
| if (region) |
| emit_post_landing_pad (&eh_seq, region); |
| |
| if (gimple_seq_may_fallthru (cleanup)) |
| { |
| if (!over) |
| over = create_artificial_label (loc); |
| x = gimple_build_goto (over); |
| gimple_set_location (x, loc); |
| gimple_seq_add_stmt (&cleanup, x); |
| } |
| gimple_seq_add_seq (&eh_seq, cleanup); |
| |
| if (over) |
| { |
| x = gimple_build_label (over); |
| gimple_seq_add_stmt (&result, x); |
| } |
| return result; |
| } |
| |
| /* A subroutine of lower_try_finally. Duplicate the gimple sequence SEQ. |
| Make sure to record all new labels found. */ |
| |
| static gimple_seq |
| lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state, |
| location_t loc) |
| { |
| gtry *region = NULL; |
| gimple_seq new_seq; |
| gimple_stmt_iterator gsi; |
| |
| new_seq = copy_gimple_seq_and_replace_locals (seq); |
| |
| for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION) |
| { |
| tree block = gimple_block (stmt); |
| gimple_set_location (stmt, loc); |
| gimple_set_block (stmt, block); |
| } |
| } |
| |
| if (outer_state->tf) |
| region = outer_state->tf->try_finally_expr; |
| collect_finally_tree_1 (new_seq, region); |
| |
| return new_seq; |
| } |
| |
| /* A subroutine of lower_try_finally. Create a fallthru label for |
| the given try_finally state. The only tricky bit here is that |
| we have to make sure to record the label in our outer context. */ |
| |
| static tree |
| lower_try_finally_fallthru_label (struct leh_tf_state *tf) |
| { |
| tree label = tf->fallthru_label; |
| treemple temp; |
| |
| if (!label) |
| { |
| label = create_artificial_label (gimple_location (tf->try_finally_expr)); |
| tf->fallthru_label = label; |
| if (tf->outer->tf) |
| { |
| temp.t = label; |
| record_in_finally_tree (temp, tf->outer->tf->try_finally_expr); |
| } |
| } |
| return label; |
| } |
| |
| /* A subroutine of lower_try_finally. If FINALLY consists of a |
| GIMPLE_EH_ELSE node, return it. */ |
| |
| static inline geh_else * |
| get_eh_else (gimple_seq finally) |
| { |
| gimple *x = gimple_seq_first_stmt (finally); |
| if (x && gimple_code (x) == GIMPLE_EH_ELSE) |
| { |
| gcc_assert (gimple_seq_singleton_p (finally)); |
| return as_a <geh_else *> (x); |
| } |
| return NULL; |
| } |
| |
| /* A subroutine of lower_try_finally. If the eh_protect_cleanup_actions |
| langhook returns non-null, then the language requires that the exception |
| path out of a try_finally be treated specially. To wit: the code within |
| the finally block may not itself throw an exception. We have two choices |
| here. First we can duplicate the finally block and wrap it in a |
| must_not_throw region. Second, we can generate code like |
| |
| try { |
| finally_block; |
| } catch { |
| if (fintmp == eh_edge) |
| protect_cleanup_actions; |
| } |
| |
| where "fintmp" is the temporary used in the switch statement generation |
| alternative considered below. For the nonce, we always choose the first |
| option. |
| |
| THIS_STATE may be null if this is a try-cleanup, not a try-finally. */ |
| |
| static void |
| honor_protect_cleanup_actions (struct leh_state *outer_state, |
| struct leh_state *this_state, |
| struct leh_tf_state *tf) |
| { |
| gimple_seq finally = gimple_try_cleanup (tf->top_p); |
| |
| /* EH_ELSE doesn't come from user code; only compiler generated stuff. |
| It does need to be handled here, so as to separate the (different) |
| EH path from the normal path. But we should not attempt to wrap |
| it with a must-not-throw node (which indeed gets in the way). */ |
| if (geh_else *eh_else = get_eh_else (finally)) |
| { |
| gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else)); |
| finally = gimple_eh_else_e_body (eh_else); |
| |
| /* Let the ELSE see the exception that's being processed, but |
| since the cleanup is outside the try block, process it with |
| outer_state, otherwise it may be used as a cleanup for |
| itself, and Bad Things (TM) ensue. */ |
| eh_region save_ehp = outer_state->ehp_region; |
| outer_state->ehp_region = this_state->cur_region; |
| lower_eh_constructs_1 (outer_state, &finally); |
| outer_state->ehp_region = save_ehp; |
| } |
| else |
| { |
| /* First check for nothing to do. */ |
| if (lang_hooks.eh_protect_cleanup_actions == NULL) |
| return; |
| tree actions = lang_hooks.eh_protect_cleanup_actions (); |
| if (actions == NULL) |
| return; |
| |
| if (this_state) |
| finally = lower_try_finally_dup_block (finally, outer_state, |
| gimple_location (tf->try_finally_expr)); |
| |
| /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP |
| set, the handler of the TRY_CATCH_EXPR is another cleanup which ought |
| to be in an enclosing scope, but needs to be implemented at this level |
| to avoid a nesting violation (see wrap_temporary_cleanups in |
| cp/decl.cc). Since it's logically at an outer level, we should call |
| terminate before we get to it, so strip it away before adding the |
| MUST_NOT_THROW filter. */ |
| gimple_stmt_iterator gsi = gsi_start (finally); |
| gimple *x = gsi_stmt (gsi); |
| if (gimple_code (x) == GIMPLE_TRY |
| && gimple_try_kind (x) == GIMPLE_TRY_CATCH |
| && gimple_try_catch_is_cleanup (x)) |
| { |
| gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT); |
| gsi_remove (&gsi, false); |
| } |
| |
| /* Wrap the block with protect_cleanup_actions as the action. */ |
| geh_mnt *eh_mnt = gimple_build_eh_must_not_throw (actions); |
| gtry *try_stmt = gimple_build_try (finally, |
| gimple_seq_alloc_with_stmt (eh_mnt), |
| GIMPLE_TRY_CATCH); |
| finally = lower_eh_must_not_throw (outer_state, try_stmt); |
| } |
| |
| /* Drop all of this into the exception sequence. */ |
| emit_post_landing_pad (&eh_seq, tf->region); |
| gimple_seq_add_seq (&eh_seq, finally); |
| if (gimple_seq_may_fallthru (finally)) |
| emit_resx (&eh_seq, tf->region); |
| |
| /* Having now been handled, EH isn't to be considered with |
| the rest of the outgoing edges. */ |
| tf->may_throw = false; |
| } |
| |
| /* A subroutine of lower_try_finally. We have determined that there is |
| no fallthru edge out of the finally block. This means that there is |
| no outgoing edge corresponding to any incoming edge. Restructure the |
| try_finally node for this special case. */ |
| |
| static void |
| lower_try_finally_nofallthru (struct leh_state *state, |
| struct leh_tf_state *tf) |
| { |
| tree lab; |
| gimple *x; |
| geh_else *eh_else; |
| gimple_seq finally; |
| struct goto_queue_node *q, *qe; |
| |
| lab = create_artificial_label (gimple_location (tf->try_finally_expr)); |
| |
| /* We expect that tf->top_p is a GIMPLE_TRY. */ |
| finally = gimple_try_cleanup (tf->top_p); |
| tf->top_p_seq = gimple_try_eval (tf->top_p); |
| |
| x = gimple_build_label (lab); |
| gimple_seq_add_stmt (&tf->top_p_seq, x); |
| |
| q = tf->goto_queue; |
| qe = q + tf->goto_queue_active; |
| for (; q < qe; ++q) |
| if (q->index < 0) |
| do_return_redirection (q, lab, NULL); |
| else |
| do_goto_redirection (q, lab, NULL, tf); |
| |
| replace_goto_queue (tf); |
| |
| /* Emit the finally block into the stream. Lower EH_ELSE at this time. */ |
| eh_else = get_eh_else (finally); |
| if (eh_else) |
| { |
| finally = gimple_eh_else_n_body (eh_else); |
| lower_eh_constructs_1 (state, &finally); |
| gimple_seq_add_seq (&tf->top_p_seq, finally); |
| |
| if (tf->may_throw) |
| { |
| finally = gimple_eh_else_e_body (eh_else); |
| lower_eh_constructs_1 (state, &finally); |
| |
| emit_post_landing_pad (&eh_seq, tf->region); |
| gimple_seq_add_seq (&eh_seq, finally); |
| } |
| } |
| else |
| { |
| lower_eh_constructs_1 (state, &finally); |
| gimple_seq_add_seq (&tf->top_p_seq, finally); |
| |
| if (tf->may_throw) |
| { |
| emit_post_landing_pad (&eh_seq, tf->region); |
| |
| x = gimple_build_goto (lab); |
| gimple_set_location (x, gimple_location (tf->try_finally_expr)); |
| gimple_seq_add_stmt (&eh_seq, x); |
| } |
| } |
| } |
| |
| /* A subroutine of lower_try_finally. We have determined that there is |
| exactly one destination of the finally block. Restructure the |
| try_finally node for this special case. */ |
| |
| static void |
| lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf) |
| { |
| struct goto_queue_node *q, *qe; |
| geh_else *eh_else; |
| glabel *label_stmt; |
| gimple *x; |
| gimple_seq finally; |
| gimple_stmt_iterator gsi; |
| tree finally_label; |
| location_t loc = gimple_location (tf->try_finally_expr); |
| |
| finally = gimple_try_cleanup (tf->top_p); |
| tf->top_p_seq = gimple_try_eval (tf->top_p); |
| |
| /* Since there's only one destination, and the destination edge can only |
| either be EH or non-EH, that implies that all of our incoming edges |
| are of the same type. Therefore we can lower EH_ELSE immediately. */ |
| eh_else = get_eh_else (finally); |
| if (eh_else) |
| { |
| if (tf->may_throw) |
| finally = gimple_eh_else_e_body (eh_else); |
| else |
| finally = gimple_eh_else_n_body (eh_else); |
| } |
| |
| lower_eh_constructs_1 (state, &finally); |
| |
| for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION) |
| { |
| tree block = gimple_block (stmt); |
| gimple_set_location (stmt, gimple_location (tf->try_finally_expr)); |
| gimple_set_block (stmt, block); |
| } |
| } |
| |
| if (tf->may_throw) |
| { |
| /* Only reachable via the exception edge. Add the given label to |
| the head of the FINALLY block. Append a RESX at the end. */ |
| emit_post_landing_pad (&eh_seq, tf->region); |
| gimple_seq_add_seq (&eh_seq, finally); |
| emit_resx (&eh_seq, tf->region); |
| return; |
| } |
| |
| if (tf->may_fallthru) |
| { |
| /* Only reachable via the fallthru edge. Do nothing but let |
| the two blocks run together; we'll fall out the bottom. */ |
| gimple_seq_add_seq (&tf->top_p_seq, finally); |
| return; |
| } |
| |
| finally_label = create_artificial_label (loc); |
| label_stmt = gimple_build_label (finally_label); |
| gimple_seq_add_stmt (&tf->top_p_seq, label_stmt); |
| |
| gimple_seq_add_seq (&tf->top_p_seq, finally); |
| |
| q = tf->goto_queue; |
| qe = q + tf->goto_queue_active; |
| |
| if (tf->may_return) |
| { |
| /* Reachable by return expressions only. Redirect them. */ |
| for (; q < qe; ++q) |
| do_return_redirection (q, finally_label, NULL); |
| replace_goto_queue (tf); |
| } |
| else |
| { |
| /* Reachable by goto expressions only. Redirect them. */ |
| for (; q < qe; ++q) |
| do_goto_redirection (q, finally_label, NULL, tf); |
| replace_goto_queue (tf); |
| |
| if (tf->dest_array[0] == tf->fallthru_label) |
| { |
| /* Reachable by goto to fallthru label only. Redirect it |
| to the new label (already created, sadly), and do not |
| emit the final branch out, or the fallthru label. */ |
| tf->fallthru_label = NULL; |
| return; |
| } |
| } |
| |
| /* Place the original return/goto to the original destination |
| immediately after the finally block. */ |
| x = tf->goto_queue[0].cont_stmt; |
| gimple_seq_add_stmt (&tf->top_p_seq, x); |
| maybe_record_in_goto_queue (state, x); |
| } |
| |
| /* A subroutine of lower_try_finally. There are multiple edges incoming |
| and outgoing from the finally block. Implement this by duplicating the |
| finally block for every destination. */ |
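| |
| /* Roughly, for a try_finally with a fallthru edge and one goto escaping |
| to label L, the result looks like (an illustrative sketch, not the exact |
| generated GIMPLE): |
| |
| <try body, with the escaping goto redirected to redirected_lab> |
| cleanup (); goto <fallthru_label>; |
| redirected_lab: cleanup (); goto L; |
| |
| plus, if the region may throw, one more copy of the cleanup in EH_SEQ |
| followed by a GIMPLE_RESX. */ |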
| |
| static void |
| lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf) |
| { |
| gimple_seq finally; |
| gimple_seq new_stmt; |
| gimple_seq seq; |
| gimple *x; |
| geh_else *eh_else; |
| tree tmp; |
| location_t tf_loc = gimple_location (tf->try_finally_expr); |
| |
| finally = gimple_try_cleanup (tf->top_p); |
| |
| /* Notice EH_ELSE, and simplify some of the remaining code |
| by considering FINALLY to be the normal return path only. */ |
| eh_else = get_eh_else (finally); |
| if (eh_else) |
| finally = gimple_eh_else_n_body (eh_else); |
| |
| tf->top_p_seq = gimple_try_eval (tf->top_p); |
| new_stmt = NULL; |
| |
| if (tf->may_fallthru) |
| { |
| seq = lower_try_finally_dup_block (finally, state, tf_loc); |
| lower_eh_constructs_1 (state, &seq); |
| gimple_seq_add_seq (&new_stmt, seq); |
| |
| tmp = lower_try_finally_fallthru_label (tf); |
| x = gimple_build_goto (tmp); |
| gimple_set_location (x, tf_loc); |
| gimple_seq_add_stmt (&new_stmt, x); |
| } |
| |
| if (tf->may_throw) |
| { |
| /* We don't need to copy the EH path of EH_ELSE, |
| since it is only emitted once. */ |
| if (eh_else) |
| seq = gimple_eh_else_e_body (eh_else); |
| else |
| seq = lower_try_finally_dup_block (finally, state, tf_loc); |
| lower_eh_constructs_1 (state, &seq); |
| |
| emit_post_landing_pad (&eh_seq, tf->region); |
| gimple_seq_add_seq (&eh_seq, seq); |
| emit_resx (&eh_seq, tf->region); |
| } |
| |
| if (tf->goto_queue) |
| { |
| struct goto_queue_node *q, *qe; |
| int return_index, index; |
| struct labels_s |
| { |
| struct goto_queue_node *q; |
| tree label; |
| } *labels; |
| |
| return_index = tf->dest_array.length (); |
| labels = XCNEWVEC (struct labels_s, return_index + 1); |
| |
| q = tf->goto_queue; |
| qe = q + tf->goto_queue_active; |
| for (; q < qe; q++) |
| { |
| index = q->index < 0 ? return_index : q->index; |
| |
| if (!labels[index].q) |
| labels[index].q = q; |
| } |
| |
| for (index = 0; index < return_index + 1; index++) |
| { |
| tree lab; |
| |
| q = labels[index].q; |
| if (! q) |
| continue; |
| |
| lab = labels[index].label |
| = create_artificial_label (tf_loc); |
| |
| if (index == return_index) |
| do_return_redirection (q, lab, NULL); |
| else |
| do_goto_redirection (q, lab, NULL, tf); |
| |
| x = gimple_build_label (lab); |
| gimple_seq_add_stmt (&new_stmt, x); |
| |
| seq = lower_try_finally_dup_block (finally, state, q->location); |
| lower_eh_constructs_1 (state, &seq); |
| gimple_seq_add_seq (&new_stmt, seq); |
| |
| gimple_seq_add_stmt (&new_stmt, q->cont_stmt); |
| maybe_record_in_goto_queue (state, q->cont_stmt); |
| } |
| |
| for (q = tf->goto_queue; q < qe; q++) |
| { |
| tree lab; |
| |
| index = q->index < 0 ? return_index : q->index; |
| |
| if (labels[index].q == q) |
| continue; |
| |
| lab = labels[index].label; |
| |
| if (index == return_index) |
| do_return_redirection (q, lab, NULL); |
| else |
| do_goto_redirection (q, lab, NULL, tf); |
| } |
| |
| replace_goto_queue (tf); |
| free (labels); |
| } |
| |
| /* Need to link new stmts after running replace_goto_queue due |
| to not wanting to process the same goto stmts twice. */ |
| gimple_seq_add_seq (&tf->top_p_seq, new_stmt); |
| } |
| |
| /* A subroutine of lower_try_finally. There are multiple edges incoming |
| and outgoing from the finally block. Implement this by instrumenting |
| each incoming edge and creating a switch statement at the end of the |
| finally block that branches to the appropriate destination. */ |
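| |
| /* Schematically (an illustrative sketch; the index values are computed |
| below and the labels are made up), each incoming edge sets a temporary |
| before branching to the single copy of the finally block, and a switch |
| dispatches afterwards: |
| |
| finally_tmp = <fallthru_index>; goto finally_lab; (on the fallthru path) |
| finally_tmp = <q->index>; goto finally_lab; (on a redirected goto/return) |
| finally_lab: cleanup (); |
| switch (finally_tmp) |
| { |
| case <fallthru_index>: goto <fallthru_label>; |
| case <q->index>: goto <original destination>; |
| } */ |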
| |
| static void |
| lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf) |
| { |
| struct goto_queue_node *q, *qe; |
| tree finally_tmp, finally_label; |
| int return_index, eh_index, fallthru_index; |
| int nlabels, ndests, j, last_case_index; |
| tree last_case; |
| auto_vec<tree> case_label_vec; |
| gimple_seq switch_body = NULL; |
| gimple *x; |
| geh_else *eh_else; |
| tree tmp; |
| gimple *switch_stmt; |
| gimple_seq finally; |
| hash_map<tree, gimple *> *cont_map = NULL; |
| /* The location of the TRY_FINALLY stmt. */ |
| location_t tf_loc = gimple_location (tf->try_finally_expr); |
| /* The location of the finally block. */ |
| location_t finally_loc; |
| |
| finally = gimple_try_cleanup (tf->top_p); |
| eh_else = get_eh_else (finally); |
| |
| /* Mash the TRY block to the head of the chain. */ |
| tf->top_p_seq = gimple_try_eval (tf->top_p); |
| |
| /* The location of the finally is either the last stmt in the finally |
| block or the location of the TRY_FINALLY itself. */ |
| x = gimple_seq_last_stmt (finally); |
| finally_loc = x ? gimple_location (x) : tf_loc; |
| |
| /* Prepare for switch statement generation. */ |
| nlabels = tf->dest_array.length (); |
| return_index = nlabels; |
| eh_index = return_index + tf->may_return; |
| fallthru_index = eh_index + (tf->may_throw && !eh_else); |
| ndests = fallthru_index + tf->may_fallthru; |
| |
| finally_tmp = create_tmp_var (integer_type_node, "finally_tmp"); |
| finally_label = create_artificial_label (finally_loc); |
| |
| /* We use vec::quick_push on case_label_vec throughout this function, |
| since we know the size in advance and allocate precisely as much |
| space as needed. */ |
| case_label_vec.create (ndests); |
| last_case = NULL; |
| last_case_index = 0; |
| |
| /* Begin inserting code for getting to the finally block. Things |
| are done in this order to correspond to the sequence in which the |
| code is laid out. */ |
| |
| if (tf->may_fallthru) |
| { |
| x = gimple_build_assign (finally_tmp, |
| build_int_cst (integer_type_node, |
| fallthru_index)); |
| gimple_set_location (x, finally_loc); |
| gimple_seq_add_stmt (&tf->top_p_seq, x); |
| |
| tmp = build_int_cst (integer_type_node, fallthru_index); |
| last_case = build_case_label (tmp, NULL, |
| create_artificial_label (finally_loc)); |
| case_label_vec.quick_push (last_case); |
| last_case_index++; |
| |
| x = gimple_build_label (CASE_LABEL (last_case)); |
| gimple_seq_add_stmt (&switch_body, x); |
| |
| tmp = lower_try_finally_fallthru_label (tf); |
| x = gimple_build_goto (tmp); |
| gimple_set_location (x, finally_loc); |
| gimple_seq_add_stmt (&switch_body, x); |
| } |
| |
| /* For EH_ELSE, emit the exception path (plus resx) now, then |
| subsequently we only need consider the normal path. */ |
| if (eh_else) |
| { |
| if (tf->may_throw) |
| { |
| finally = gimple_eh_else_e_body (eh_else); |
| lower_eh_constructs_1 (state, &finally); |
| |
| emit_post_landing_pad (&eh_seq, tf->region); |
| gimple_seq_add_seq (&eh_seq, finally); |
| emit_resx (&eh_seq, tf->region); |
| } |
| |
| finally = gimple_eh_else_n_body (eh_else); |
| } |
| else if (tf->may_throw) |
| { |
| emit_post_landing_pad (&eh_seq, tf->region); |
| |
| x = gimple_build_assign (finally_tmp, |
| build_int_cst (integer_type_node, eh_index)); |
| gimple_seq_add_stmt (&eh_seq, x); |
| |
| x = gimple_build_goto (finally_label); |
| gimple_set_location (x, tf_loc); |
| gimple_seq_add_stmt (&eh_seq, x); |
| |
| tmp = build_int_cst (integer_type_node, eh_index); |
| last_case = build_case_label (tmp, NULL, |
| create_artificial_label (tf_loc)); |
| case_label_vec.quick_push (last_case); |
| last_case_index++; |
| |
| x = gimple_build_label (CASE_LABEL (last_case)); |
| gimple_seq_add_stmt (&eh_seq, x); |
| emit_resx (&eh_seq, tf->region); |
| } |
| |
| x = gimple_build_label (finally_label); |
| gimple_seq_add_stmt (&tf->top_p_seq, x); |
| |
| lower_eh_constructs_1 (state, &finally); |
| gimple_seq_add_seq (&tf->top_p_seq, finally); |
| |
| /* Redirect each incoming goto edge. */ |
| q = tf->goto_queue; |
| qe = q + tf->goto_queue_active; |
| j = last_case_index + tf->may_return; |
| /* Prepare the assignments to finally_tmp that are executed upon the |
| entrance through a particular edge. */ |
| for (; q < qe; ++q) |
| { |
| gimple_seq mod = NULL; |
| int switch_id; |
| unsigned int case_index; |
| |
| if (q->index < 0) |
| { |
| x = gimple_build_assign (finally_tmp, |
| build_int_cst (integer_type_node, |
| return_index)); |
| gimple_seq_add_stmt (&mod, x); |
| do_return_redirection (q, finally_label, mod); |
| switch_id = return_index; |
| } |
| else |
| { |
| x = gimple_build_assign (finally_tmp, |
| build_int_cst (integer_type_node, q->index)); |
| gimple_seq_add_stmt (&mod, x); |
| do_goto_redirection (q, finally_label, mod, tf); |
| switch_id = q->index; |
| } |
| |
| case_index = j + q->index; |
| if (case_label_vec.length () <= case_index || !case_label_vec[case_index]) |
| { |
| tree case_lab; |
| tmp = build_int_cst (integer_type_node, switch_id); |
| case_lab = build_case_label (tmp, NULL, |
| create_artificial_label (tf_loc)); |
| /* We store the cont_stmt in the pointer map, so that we can recover |
| it in the loop below. */ |
| if (!cont_map) |
| cont_map = new hash_map<tree, gimple *>; |
| cont_map->put (case_lab, q->cont_stmt); |
| case_label_vec.quick_push (case_lab); |
| } |
| } |
| for (j = last_case_index; j < last_case_index + nlabels; j++) |
| { |
| gimple *cont_stmt; |
| |
| last_case = case_label_vec[j]; |
| |
| gcc_assert (last_case); |
| gcc_assert (cont_map); |
| |
| cont_stmt = *cont_map->get (last_case); |
| |
| x = gimple_build_label (CASE_LABEL (last_case)); |
| gimple_seq_add_stmt (&switch_body, x); |
| gimple_seq_add_stmt (&switch_body, cont_stmt); |
| maybe_record_in_goto_queue (state, cont_stmt); |
| } |
| if (cont_map) |
| delete cont_map; |
| |
| replace_goto_queue (tf); |
| |
| /* Make sure that the last case is the default label, as one is required. |
| Then sort the labels, which is also required in GIMPLE. */ |
| CASE_LOW (last_case) = NULL; |
| tree tem = case_label_vec.pop (); |
| gcc_assert (tem == last_case); |
| sort_case_labels (case_label_vec); |
| |
| /* Build the switch statement, setting last_case to be the default |
| label. */ |
| switch_stmt = gimple_build_switch (finally_tmp, last_case, |
| case_label_vec); |
| gimple_set_location (switch_stmt, finally_loc); |
| |
| /* Need to link SWITCH_STMT after running replace_goto_queue |
| due to not wanting to process the same goto stmts twice. */ |
| gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt); |
| gimple_seq_add_seq (&tf->top_p_seq, switch_body); |
| } |
| |
| /* Decide whether or not we are going to duplicate the finally block. |
| There are several considerations. |
| |
| In particular, we'd like to prevent egregious code growth. One way to |
| do this is to estimate the size of the finally block, multiply |
| that by the number of copies we'd need to make, and compare against |
| the estimate of the size of the switch machinery we'd have to add. */ |
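| |
| /* For instance, with purely made-up numbers: a finally block that |
| estimate_num_insns_seq sizes at 5 insns with 3 destinations gives |
| f_estimate = (5 + 1) * 3 = 18 versus sw_estimate = 10 + 2 * 3 = 16, so |
| when optimizing for size the switch form wins, while at -O2 the copies |
| are still preferred because f_estimate < 100. */ |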
| |
| static bool |
| decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally) |
| { |
| int f_estimate, sw_estimate; |
| geh_else *eh_else; |
| |
| /* If there's an EH_ELSE involved, the exception path is separate |
| and really doesn't come into play for this computation. */ |
| eh_else = get_eh_else (finally); |
| if (eh_else) |
| { |
| ndests -= may_throw; |
| finally = gimple_eh_else_n_body (eh_else); |
| } |
| |
| if (!optimize) |
| { |
| gimple_stmt_iterator gsi; |
| |
| if (ndests == 1) |
| return true; |
| |
| for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| /* Duplicate __builtin_stack_restore in the hope of eliminating it |
| on the EH paths and, consequently, useless cleanups. */ |
| gimple *stmt = gsi_stmt (gsi); |
| if (!is_gimple_debug (stmt) |
| && !gimple_clobber_p (stmt) |
| && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE)) |
| return false; |
| } |
| return true; |
| } |
| |
| /* Finally estimate N times, plus N gotos. */ |
| f_estimate = estimate_num_insns_seq (finally, &eni_size_weights); |
| f_estimate = (f_estimate + 1) * ndests; |
| |
| /* Switch statement (cost 10), N variable assignments, N gotos. */ |
| sw_estimate = 10 + 2 * ndests; |
| |
| /* Optimize for size clearly wants our best guess. */ |
| if (optimize_function_for_size_p (cfun)) |
| return f_estimate < sw_estimate; |
| |
| /* ??? These numbers are completely made up so far. */ |
| if (optimize > 1) |
| return f_estimate < 100 || f_estimate < sw_estimate * 2; |
| else |
| return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3; |
| } |
| |
| /* STATE describes the current LEH state; its outer_non_cleanup field |
| is the enclosing region for a possible cleanup region, or that region |
| itself. Returns TRUE if such a cleanup region would be unreachable. |
| |
| Cleanup regions within a must-not-throw region aren't actually reachable |
| even if there are throwing stmts within them, because the personality |
| routine will call terminate before unwinding. */ |
| |
| static bool |
| cleanup_is_dead_in (leh_state *state) |
| { |
| if (flag_checking) |
| { |
| eh_region reg = state->cur_region; |
| while (reg && reg->type == ERT_CLEANUP) |
| reg = reg->outer; |
| |
| gcc_assert (reg == state->outer_non_cleanup); |
| } |
| |
| eh_region reg = state->outer_non_cleanup; |
| return (reg && reg->type == ERT_MUST_NOT_THROW); |
| } |
| |
| /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_FINALLY node |
| to a sequence of labels and blocks, plus the exception region trees |
| that record all the magic. This is complicated by the need to |
| arrange for the FINALLY block to be executed on all exits. */ |
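| |
| /* In outline, the code below builds the EH region (if the cleanup is |
| reachable at all), lowers the try body, counts how many distinct |
| destinations the finally block must reach, and then picks one of four |
| shapes: drop the finally entirely (no destinations), |
| lower_try_finally_nofallthru, lower_try_finally_onedest, or, for several |
| destinations, either per-destination copies or a switch, as chosen by |
| decide_copy_try_finally. */ |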
| |
| static gimple_seq |
| lower_try_finally (struct leh_state *state, gtry *tp) |
| { |
| struct leh_tf_state this_tf; |
| struct leh_state this_state; |
| int ndests; |
| gimple_seq old_eh_seq; |
| |
| /* Process the try block. */ |
| |
| memset (&this_tf, 0, sizeof (this_tf)); |
| this_tf.try_finally_expr = tp; |
| this_tf.top_p = tp; |
| this_tf.outer = state; |
| if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state)) |
| { |
| this_tf.region = gen_eh_region_cleanup (state->cur_region); |
| this_state.cur_region = this_tf.region; |
| } |
| else |
| { |
| this_tf.region = NULL; |
| this_state.cur_region = state->cur_region; |
| } |
| |
| this_state.outer_non_cleanup = state->outer_non_cleanup; |
| this_state.ehp_region = state->ehp_region; |
| this_state.tf = &this_tf; |
| |
| old_eh_seq = eh_seq; |
| eh_seq = NULL; |
| |
| lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp)); |
| |
| /* Determine if the try block is escaped through the bottom. */ |
| this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp)); |
| |
| /* Determine if any exceptions are possible within the try block. */ |
| if (this_tf.region) |
| this_tf.may_throw = eh_region_may_contain_throw (this_tf.region); |
| if (this_tf.may_throw) |
| honor_protect_cleanup_actions (state, &this_state, &this_tf); |
| |
| /* Determine how many edges (still) reach the finally block. Or rather, |
| how many destinations are reached by the finally block. Use this to |
| determine how we process the finally block itself. */ |
| |
| ndests = this_tf.dest_array.length (); |
| ndests += this_tf.may_fallthru; |
| ndests += this_tf.may_return; |
| ndests += this_tf.may_throw; |
| |
| /* If the FINALLY block is not reachable, dike it out. */ |
| if (ndests == 0) |
| { |
| gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp)); |
| gimple_try_set_cleanup (tp, NULL); |
| } |
| /* If the finally block doesn't fall through, then any destination |
| we might try to impose there isn't reached either. There may be |
| some minor amount of cleanup and redirection still needed. */ |
| else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp))) |
| lower_try_finally_nofallthru (state, &this_tf); |
| |
| /* We can easily special-case redirection to a single destination. */ |
| else if (ndests == 1) |
| lower_try_finally_onedest (state, &this_tf); |
| else if (decide_copy_try_finally (ndests, this_tf.may_throw, |
| gimple_try_cleanup (tp))) |
| lower_try_finally_copy (state, &this_tf); |
| else |
| lower_try_finally_switch (state, &this_tf); |
| |
| /* If someone requested we add a label at the end of the transformed |
| block, do so. */ |
| if (this_tf.fallthru_label) |
| { |
| /* This must be reached only if ndests == 0. */ |
| gimple *x = gimple_build_label (this_tf.fallthru_label); |
| gimple_seq_add_stmt (&this_tf.top_p_seq, x); |
| } |
| |
| this_tf.dest_array.release (); |
| free (this_tf.goto_queue); |
| if (this_tf.goto_queue_map) |
| delete this_tf.goto_queue_map; |
| |
| /* If there was an old (aka outer) eh_seq, append the current eh_seq. |
| If there was no old eh_seq, then the append is trivially already done. */ |
| if (old_eh_seq) |
| { |
| if (eh_seq == NULL) |
| eh_seq = old_eh_seq; |
| else |
| { |
| gimple_seq new_eh_seq = eh_seq; |
| eh_seq = old_eh_seq; |
| gimple_seq_add_seq (&eh_seq, new_eh_seq); |
| } |
| } |
| |
| return this_tf.top_p_seq; |
| } |
| |
| /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_CATCH with a |
| list of GIMPLE_CATCH to a sequence of labels and blocks, plus the |
| exception region trees that record all the magic. */ |
| |
| static gimple_seq |
| lower_catch (struct leh_state *state, gtry *tp) |
| { |
| eh_region try_region = NULL; |
| struct leh_state this_state = *state; |
| gimple_stmt_iterator gsi; |
| tree out_label; |
| gimple_seq new_seq, cleanup; |
| gimple *x; |
| geh_dispatch *eh_dispatch; |
| location_t try_catch_loc = gimple_location (tp); |
| location_t catch_loc = UNKNOWN_LOCATION; |
| |
| if (flag_exceptions) |
| { |
| try_region = gen_eh_region_try (state->cur_region); |
| this_state.cur_region = try_region; |
| this_state.outer_non_cleanup = this_state.cur_region; |
| } |
| |
| lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp)); |
| |
| if (!eh_region_may_contain_throw (try_region)) |
| return gimple_try_eval (tp); |
| |
| new_seq = NULL; |
| eh_dispatch = gimple_build_eh_dispatch (try_region->index); |
| gimple_seq_add_stmt (&new_seq, eh_dispatch); |
| emit_resx (&new_seq, try_region); |
| |
| this_state.cur_region = state->cur_region; |
| this_state.outer_non_cleanup = state->outer_non_cleanup; |
| this_state.ehp_region = try_region; |
| |
| /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup |
| itself, so that e.g. for coverage purposes the nested cleanups don't |
| appear before the cleanup body. See PR64634 for details. */ |
| gimple_seq old_eh_seq = eh_seq; |
| eh_seq = NULL; |
| |
| out_label = NULL; |
| cleanup = gimple_try_cleanup (tp); |
| for (gsi = gsi_start (cleanup); |
| !gsi_end_p (gsi); |
| gsi_next (&gsi)) |
| { |
| eh_catch c; |
| gcatch *catch_stmt; |
| gimple_seq handler; |
| |
| catch_stmt = as_a <gcatch *> (gsi_stmt (gsi)); |
| if (catch_loc == UNKNOWN_LOCATION) |
| catch_loc = gimple_location (catch_stmt); |
| c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt)); |
| |
| handler = gimple_catch_handler (catch_stmt); |
| lower_eh_constructs_1 (&this_state, &handler); |
| |
| c->label = create_artificial_label (UNKNOWN_LOCATION); |
| x = gimple_build_label (c->label); |
| gimple_seq_add_stmt (&new_seq, x); |
| |
| gimple_seq_add_seq (&new_seq, handler); |
| |
| if (gimple_seq_may_fallthru (new_seq)) |
| { |
| if (!out_label) |
| out_label = create_artificial_label (try_catch_loc); |
| |
| x = gimple_build_goto (out_label); |
| gimple_seq_add_stmt (&new_seq, x); |
| } |
| if (!c->type_list) |
| break; |
| } |
| |
| /* Try to set a location on the dispatching construct to avoid inheriting |
| the location of the previous statement. */ |
| gimple_set_location (eh_dispatch, catch_loc); |
| |
| gimple_try_set_cleanup (tp, new_seq); |
| |
| gimple_seq new_eh_seq = eh_seq; |
| eh_seq = old_eh_seq; |
| gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label); |
| gimple_seq_add_seq (&eh_seq, new_eh_seq); |
| return ret_seq; |
| } |
| |
| /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a |
| GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception |
| region trees that record all the magic. */ |
| |
| static gimple_seq |
| lower_eh_filter (struct leh_state *state, gtry *tp) |
| { |
| struct leh_state this_state = *state; |
| eh_region this_region = NULL; |
| gimple *inner, *x; |
| gimple_seq new_seq; |
| |
| inner = gimple_seq_first_stmt (gimple_try_cleanup (tp)); |
| |
| if (flag_exceptions) |
| { |
| this_region = gen_eh_region_allowed (state->cur_region, |
| gimple_eh_filter_types (inner)); |
| this_state.cur_region = this_region; |
| this_state.outer_non_cleanup = this_state.cur_region; |
| } |
| |
| lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp)); |
| |
| if (!eh_region_may_contain_throw (this_region)) |
| return gimple_try_eval (tp); |
| |
| this_state.cur_region = state->cur_region; |
| this_state.ehp_region = this_region; |
| |
| new_seq = NULL; |
| x = gimple_build_eh_dispatch (this_region->index); |
| gimple_set_location (x, gimple_location (tp)); |
| gimple_seq_add_stmt (&new_seq, x); |
| emit_resx (&new_seq, this_region); |
| |
| this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION); |
| x = gimple_build_label (this_region->u.allowed.label); |
| gimple_seq_add_stmt (&new_seq, x); |
| |
| lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner)); |
| gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner)); |
| |
| gimple_try_set_cleanup (tp, new_seq); |
| |
| return frob_into_branch_around (tp, this_region, NULL); |
| } |
| |
| /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with |
| a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks, |
| plus the exception region trees that record all the magic. */ |
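| |
| /* Such regions guard code from which the language does not allow an |
| exception to escape, e.g. (illustratively) a C++ "noexcept" function |
| or a cleanup that runs while another exception is being unwound. If |
| something throws anyway, control is transferred to the failure decl |
| recorded by the front end, commonly std::terminate for C++. */ |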
| |
| static gimple_seq |
| lower_eh_must_not_throw (struct leh_state *state, gtry *tp) |
| { |
| struct leh_state this_state = *state; |
| |
| if (flag_exceptions) |
| { |
| gimple *inner = gimple_seq_first_stmt (gimple_try_cleanup (tp)); |
| eh_region this_region; |
| |
| this_region = gen_eh_region_must_not_throw (state->cur_region); |
| this_region->u.must_not_throw.failure_decl |
| = gimple_eh_must_not_throw_fndecl ( |
| as_a <geh_mnt *> (inner)); |
| this_region->u.must_not_throw.failure_loc |
| = LOCATION_LOCUS (gimple_location (tp)); |
| |
| /* In order to get mangling applied to this decl, we must mark it |
| used now. Otherwise, pass_ipa_free_lang_data won't think it |
| needs to happen. */ |
| TREE_USED (this_region->u.must_not_throw.failure_decl) = 1; |
| |
| this_state.cur_region = this_region; |
| this_state.outer_non_cleanup = this_state.cur_region; |
| } |
| |
| lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp)); |
| |
| return gimple_try_eval (tp); |
| } |
| |
| /* Implement a cleanup expression. This is similar to try-finally, |
| except that we only execute the cleanup block for exception edges. */ |
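| |
| /* Such cleanup-only regions typically originate from EH-only cleanups |
| pushed during gimplification (CLEANUP_EH_ONLY). An illustrative C++ |
| example is |
| |
|      p = new T (args); |
| |
| where the matching operator delete must be invoked only if T's |
| constructor throws; on the normal path no cleanup runs at all. */ |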
| |
| static gimple_seq |
| lower_cleanup (struct leh_state *state, gtry *tp) |
| { |
| struct leh_state this_state = *state; |
| eh_region this_region = NULL; |
| struct leh_tf_state fake_tf; |
| gimple_seq result; |
| bool cleanup_dead = cleanup_is_dead_in (state); |
| |
| if (flag_exceptions && !cleanup_dead) |
| { |
| this_region = gen_eh_region_cleanup (state->cur_region); |
| this_state.cur_region = this_region; |
| this_state.outer_non_cleanup = state->outer_non_cleanup; |
| } |
| |
| lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp)); |
| |
| if (cleanup_dead || !eh_region_may_contain_throw (this_region)) |
| return gimple_try_eval (tp); |
| |
| /* Build enough of a try-finally state so that we can reuse |
| honor_protect_cleanup_actions. */ |
| memset (&fake_tf, 0, sizeof (fake_tf)); |
| fake_tf.top_p = fake_tf.try_finally_expr = tp; |
| fake_tf.outer = state; |
| fake_tf.region = this_region; |
| fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp)); |
| fake_tf.may_throw = true; |
| |
| honor_protect_cleanup_actions (state, NULL, &fake_tf); |
| |
| if (fake_tf.may_throw) |
| { |
| /* In this case honor_protect_cleanup_actions had nothing to do, |
| and we should process this normally. */ |
| lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp)); |
| result = frob_into_branch_around (tp, this_region, |
| fake_tf.fallthru_label); |
| } |
| else |
| { |
| /* In this case honor_protect_cleanup_actions did nearly all of |
| the work. All we have left is to append the fallthru_label. */ |
| |
| result = gimple_try_eval (tp); |
| if (fake_tf.fallthru_label) |
| { |
| gimple *x = gimple_build_label (fake_tf.fallthru_label); |
| gimple_seq_add_stmt (&result, x); |
| } |
| } |
| return result; |
| } |
| |
| /* Main loop for lowering eh constructs. Also moves gsi to the next |
| statement. */ |
| |
| static void |
| lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi) |
| { |
| gimple_seq replace; |
| gimple *x; |
| gimple *stmt = gsi_stmt (*gsi); |
| |
| switch (gimple_code (stmt)) |
| { |
| case GIMPLE_CALL: |
| { |
| tree fndecl = gimple_call_fndecl (stmt); |
| tree rhs, lhs; |
| |
| if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)) |
| switch (DECL_FUNCTION_CODE (fndecl)) |
| { |
| case BUILT_IN_EH_POINTER: |
| /* The front end may have generated a call to |
| __builtin_eh_pointer (0) within a catch region. Replace |
| this zero argument with the current catch region number. */ |
| if (state->ehp_region) |
| { |
| tree nr = build_int_cst (integer_type_node, |
| state->ehp_region->index); |
| gimple_call_set_arg (stmt, 0, nr); |
| } |
| else |
| { |
| /* The user has done something silly. Remove it. */ |
| rhs = null_pointer_node; |
| goto do_replace; |
| } |
| break; |
| |
| case BUILT_IN_EH_FILTER: |
| /* ??? This should never appear, but since it's a builtin it |
| is accessible to abuse by users. Just remove it and |
| replace the use with the arbitrary value zero. */ |
| rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0); |
| do_replace: |
| lhs = gimple_call_lhs (stmt); |
| x = gimple_build_assign (lhs, rhs); |
| gsi_insert_before (gsi, x, GSI_SAME_STMT); |
| /* FALLTHRU */ |
| |
| case BUILT_IN_EH_COPY_VALUES: |
| /* Likewise this should not appear. Remove it. */ |
| gsi_remove (gsi, true); |
| return; |
| |
| default: |
| break; |
| } |
| } |
| /* FALLTHRU */ |
| |
| case GIMPLE_ASSIGN: |
| /* If the stmt can throw, use a new temporary for the assignment |
| to a LHS. This makes sure the old value of the LHS is |
| available on the EH edge. Only do so for statements that |
| potentially fall through (no noreturn calls e.g.), otherwise |
| this new assignment might create fake fallthru regions. */ |
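| /* For example (illustrative), |
| |
|      x = throwing_call (); |
| |
|    becomes |
| |
|      tmp = throwing_call (); |
|      x = tmp; |
| |
|    so that X still holds its previous value along the EH edge. */ |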
| if (stmt_could_throw_p (cfun, stmt) |
| && gimple_has_lhs (stmt) |
| && gimple_stmt_may_fallthru (stmt) |
| && !tree_could_throw_p (gimple_get_lhs (stmt)) |
| && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt)))) |
| { |
| tree lhs = gimple_get_lhs (stmt); |
| tree tmp = create_tmp_var (TREE_TYPE (lhs)); |
| gimple *s = gimple_build_assign (lhs, tmp); |
| gimple_set_location (s, gimple_location (stmt)); |
| gimple_set_block (s, gimple_block (stmt)); |
| gimple_set_lhs (stmt, tmp); |
| gsi_insert_after (gsi, s, GSI_SAME_STMT); |
| } |
| /* Look for things that can throw exceptions, and record them. */ |
| if (state->cur_region && stmt_could_throw_p (cfun, stmt)) |
| { |
| record_stmt_eh_region (state->cur_region, stmt); |
| note_eh_region_may_contain_throw (state->cur_region); |
| } |
| break; |
| |
| case GIMPLE_COND: |
| case GIMPLE_GOTO: |
| case GIMPLE_RETURN: |
| maybe_record_in_goto_queue (state, stmt); |
| break; |
| |
| case GIMPLE_SWITCH: |
| verify_norecord_switch_expr (state, as_a <gswitch *> (stmt)); |
| break; |
| |
| case GIMPLE_TRY: |
| { |
| gtry *try_stmt = as_a <gtry *> (stmt); |
| if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY) |
| replace = lower_try_finally (state, try_stmt); |
| else |
| { |
| x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt)); |
| if (!x) |
| { |
| replace = gimple_try_eval (try_stmt); |
| lower_eh_constructs_1 (state, &replace); |
| } |
| else |
| switch (gimple_code (x)) |
| { |
| case GIMPLE_CATCH: |
| replace = lower_catch (state, try_stmt); |
| break; |
| case GIMPLE_EH_FILTER: |
| replace = lower_eh_filter (state, try_stmt); |
| break; |
| case GIMPLE_EH_MUST_NOT_THROW: |
| replace = lower_eh_must_not_throw (state, try_stmt); |
| break; |
| case GIMPLE_EH_ELSE: |
| /* This code is only valid with GIMPLE_TRY_FINALLY. */ |
| gcc_unreachable (); |
| default: |
| replace = lower_cleanup (state, try_stmt); |
| break; |
| } |
| } |
| } |
| |
| /* Remove the old stmt and insert the transformed sequence |
| instead. */ |
| gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT); |
| gsi_remove (gsi, true); |
| |
| /* Return since we don't want gsi_next (). */ |
| return; |
| |
| case GIMPLE_EH_ELSE: |
| /* We should be eliminating this in lower_try_finally et al. */ |
| gcc_unreachable (); |
| |
| default: |
| /* A type, a decl, or some kind of statement that we're not |
| interested in. Don't walk them. */ |
| break; |
| } |
| |
| gsi_next (gsi); |
| } |
| |
| /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */ |
| |
| static void |
| lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq) |
| { |
| gimple_stmt_iterator gsi; |
| for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);) |
| lower_eh_constructs_2 (state, &gsi); |
| } |
| |
| namespace { |
| |
| const pass_data pass_data_lower_eh = |
| { |
| GIMPLE_PASS, /* type */ |
| "eh", /* name */ |
| OPTGROUP_NONE, /* optinfo_flags */ |
| TV_TREE_EH, /* tv_id */ |
| PROP_gimple_lcf, /* properties_required */ |
| PROP_gimple_leh, /* properties_provided */ |
| 0, /* properties_destroyed */ |
| 0, /* todo_flags_start */ |
| 0, /* todo_flags_finish */ |
| }; |
| |
| class pass_lower_eh : public gimple_opt_pass |
| { |
| public: |
| pass_lower_eh (gcc::context *ctxt) |
| : gimple_opt_pass (pass_data_lower_eh, ctxt) |
| {} |
| |
| /* opt_pass methods: */ |
| unsigned int execute (function *) final override; |
| |
| }; // class pass_lower_eh |
| |
| unsigned int |
| pass_lower_eh::execute (function *fun) |
| { |
| struct leh_state null_state; |
| gimple_seq bodyp; |
| |
| bodyp = gimple_body (current_function_decl); |
| if (bodyp == NULL) |
| return 0; |
| |
| finally_tree = new hash_table<finally_tree_hasher> (31); |
| eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL); |
| memset (&null_state, 0, sizeof (null_state)); |
| |
| collect_finally_tree_1 (bodyp, NULL); |
| lower_eh_constructs_1 (&null_state, &bodyp); |
| gimple_set_body (current_function_decl, bodyp); |
| |
| /* We assume there's a return statement, or something, at the end of |
| the function, and thus plopping the EH sequence afterward won't |
| change anything. */ |
| gcc_assert (!gimple_seq_may_fallthru (bodyp)); |
| gimple_seq_add_seq (&bodyp, eh_seq); |
| |
| /* We assume that since BODYP already existed, adding EH_SEQ to it |
| didn't change its value, and we don't have to re-set the function. */ |
| gcc_assert (bodyp == gimple_body (current_function_decl)); |
| |
| delete finally_tree; |
| finally_tree = NULL; |
| BITMAP_FREE (eh_region_may_contain_throw_map); |
| eh_seq = NULL; |
| |
| /* If this function needs a language specific EH personality routine |
| and the frontend didn't already set one, do so now. */ |
| if (function_needs_eh_personality (fun) == eh_personality_lang |
| && !DECL_FUNCTION_PERSONALITY (current_function_decl)) |
| DECL_FUNCTION_PERSONALITY (current_function_decl) |
| = lang_hooks.eh_personality (); |
| |
| return 0; |
| } |
| |
| } // anon namespace |
| |
| gimple_opt_pass * |
| make_pass_lower_eh (gcc::context *ctxt) |
| { |
| return new pass_lower_eh (ctxt); |
| } |
| |
| /* Create the multiple edges from an EH_DISPATCH statement to all of |
| the possible handlers for its EH region. Return true if there's |
| no fallthru edge; false if there is. */ |
| |
| bool |
| make_eh_dispatch_edges (geh_dispatch *stmt) |
| { |
| eh_region r; |
| eh_catch c; |
| basic_block src, dst; |
| |
| r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt)); |
| src = gimple_bb (stmt); |
| |
| switch (r->type) |
| { |
| case ERT_TRY: |
| for (c = r->u.eh_try.first_catch; c ; c = c->next_catch) |
| { |
| dst = label_to_block (cfun, c->label); |
| make_edge (src, dst, 0); |
| |
| /* A catch-all handler doesn't have a fallthru. */ |
| if (c->type_list == NULL) |
| return false; |
| } |
| break; |
| |
| case ERT_ALLOWED_EXCEPTIONS: |
| dst = label_to_block (cfun, r->u.allowed.label); |
| make_edge (src, dst, 0); |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| |
| return true; |
| } |
| |
| /* Create the single EH edge from STMT to its nearest landing pad, |
| if there is such a landing pad within the current function. */ |
| |
| edge |
| make_eh_edge (gimple *stmt) |
| { |
| basic_block src, dst; |
| eh_landing_pad lp; |
| int lp_nr; |
| |
| lp_nr = lookup_stmt_eh_lp (stmt); |
| if (lp_nr <= 0) |
| return NULL; |
| |
| lp = get_eh_landing_pad_from_number (lp_nr); |
| gcc_assert (lp != NULL); |
| |
| src = gimple_bb (stmt); |
| dst = label_to_block (cfun, lp->post_landing_pad); |
| return make_edge (src, dst, EDGE_EH); |
| } |
| |
| /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree; |
| do not actually perform the final edge redirection. |
| |
| CHANGE_REGION is true when we're being called from cleanup_empty_eh and |
| we intend to change the destination EH region as well; this means |
| EH_LANDING_PAD_NR must already be set on the destination block label. |
| If false, we're being called from generic cfg manipulation code and we |
| should preserve our place within the region tree. */ |
| |
| static void |
| redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region) |
| { |
| eh_landing_pad old_lp, new_lp; |
| basic_block old_bb; |
| gimple *throw_stmt; |
| int old_lp_nr, new_lp_nr; |
| tree old_label, new_label; |
| edge_iterator ei; |
| edge e; |
| |
| old_bb = edge_in->dest; |
| old_label = gimple_block_label (old_bb); |
| old_lp_nr = EH_LANDING_PAD_NR (old_label); |
| gcc_assert (old_lp_nr > 0); |
| old_lp = get_eh_landing_pad_from_number (old_lp_nr); |
| |
| throw_stmt = *gsi_last_bb (edge_in->src); |
| gcc_checking_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr); |
| |
| new_label = gimple_block_label (new_bb); |
| |
| /* Look for an existing region that might be using NEW_BB already. */ |
| new_lp_nr = EH_LANDING_PAD_NR (new_label); |
| if (new_lp_nr) |
| { |
| new_lp = get_eh_landing_pad_from_number (new_lp_nr); |
| gcc_assert (new_lp); |
| |
| /* Unless CHANGE_REGION is true, the new and old landing pad |
| had better be associated with the same EH region. */ |
| gcc_assert (change_region || new_lp->region == old_lp->region); |
| } |
| else |
| { |
| new_lp = NULL; |
| gcc_assert (!change_region); |
| } |
| |
| /* Notice when we redirect the last EH edge away from OLD_BB. */ |
| FOR_EACH_EDGE (e, ei, old_bb->preds) |
| if (e != edge_in && (e->flags & EDGE_EH)) |
| break; |
| |
| if (new_lp) |
| { |
| /* NEW_LP already exists. If there are still edges into OLD_LP, |
| there's nothing to do with the EH tree. If there are no more |
| edges into OLD_LP, then we want to remove OLD_LP as it is unused. |
| If CHANGE_REGION is true, then our caller is expecting to remove |
| the landing pad. */ |
| if (e == NULL && !change_region) |
| remove_eh_landing_pad (old_lp); |
| } |
| else |
| { |
| /* No correct landing pad exists. If there are no more edges |
| into OLD_LP, then we can simply re-use the existing landing pad. |
| Otherwise, we have to create a new landing pad. */ |
| if (e == NULL) |
| { |
| EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0; |
| new_lp = old_lp; |
| } |
| else |
| new_lp = gen_eh_landing_pad (old_lp->region); |
| new_lp->post_landing_pad = new_label; |
| EH_LANDING_PAD_NR (new_label) = new_lp->index; |
| } |
| |
| /* Maybe move the throwing statement to the new region. */ |
| if (old_lp != new_lp) |
| { |
| remove_stmt_from_eh_lp (throw_stmt); |
| add_stmt_to_eh_lp (throw_stmt, new_lp->index); |
| } |
| } |
| |
| /* Redirect EH edge E to NEW_BB. */ |
| |
| edge |
| redirect_eh_edge (edge edge_in, basic_block new_bb) |
| { |
| redirect_eh_edge_1 (edge_in, new_bb, false); |
| return ssa_redirect_edge (edge_in, new_bb); |
| } |
| |
| /* This is a subroutine of gimple_redirect_edge_and_branch. Update the |
| labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB. |
| The actual edge update will happen in the caller. */ |
| |
| void |
| redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb) |
| { |
| tree new_lab = gimple_block_label (new_bb); |
| bool any_changed = false; |
| basic_block old_bb; |
| eh_region r; |
| eh_catch c; |
| |
| r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt)); |
| switch (r->type) |
| { |
| case ERT_TRY: |
| for (c = r->u.eh_try.first_catch; c ; c = c->next_catch) |
| { |
| old_bb = label_to_block (cfun, c->label); |
| if (old_bb == e->dest) |
| { |
| c->label = new_lab; |
| any_changed = true; |
| } |
| } |
| break; |
| |
| case ERT_ALLOWED_EXCEPTIONS: |
| old_bb = label_to_block (cfun, r->u.allowed.label); |
| gcc_assert (old_bb == e->dest); |
| r->u.allowed.label = new_lab; |
| any_changed = true; |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| |
| gcc_assert (any_changed); |
| } |
| |
| /* Helper function for operation_could_trap_p and stmt_could_throw_p. */ |
| |
| bool |
| operation_could_trap_helper_p (enum tree_code op, |
| bool fp_operation, |
| bool honor_trapv, |
| bool honor_nans, |
| bool honor_snans, |
| tree divisor, |
| bool *handled) |
| { |
| *handled = true; |
| switch (op) |
| { |
| case TRUNC_DIV_EXPR: |
| case CEIL_DIV_EXPR: |
| case FLOOR_DIV_EXPR: |
| case ROUND_DIV_EXPR: |
| case EXACT_DIV_EXPR: |
| case CEIL_MOD_EXPR: |
| case FLOOR_MOD_EXPR: |
| case ROUND_MOD_EXPR: |
| case TRUNC_MOD_EXPR: |
| if (!TREE_CONSTANT (divisor) || integer_zerop (divisor)) |
| return true; |
| if (TREE_CODE (divisor) == VECTOR_CST) |
| { |
| /* Inspired by initializer_each_zero_or_onep. */ |
| unsigned HOST_WIDE_INT nelts = vector_cst_encoded_nelts (divisor); |
| if (VECTOR_CST_STEPPED_P (divisor) |
| && !TYPE_VECTOR_SUBPARTS (TREE_TYPE (divisor)) |
| .is_constant (&nelts)) |
| return true; |
| for (unsigned int i = 0; i < nelts; ++i) |
| { |
| tree elt = vector_cst_elt (divisor, i); |
| if (integer_zerop (elt)) |
| return true; |
| } |
| } |
| return false; |
| |
| case RDIV_EXPR: |
| if (fp_operation) |
| { |
| if (honor_snans) |
| return true; |
| return flag_trapping_math; |
| } |
| /* Fixed point operations also use RDIV_EXPR. */ |
| if (!TREE_CONSTANT (divisor) || fixed_zerop (divisor)) |
| return true; |
| return false; |
| |
| case LT_EXPR: |
| case LE_EXPR: |
| case GT_EXPR: |
| case GE_EXPR: |
| case LTGT_EXPR: |
| /* MIN/MAX are similar to LT/LE/GT/GE. */ |
| case MIN_EXPR: |
| case MAX_EXPR: |
| /* Some floating point comparisons may trap. */ |
| return honor_nans; |
| |
| case EQ_EXPR: |
| case NE_EXPR: |
| case UNORDERED_EXPR: |
| case ORDERED_EXPR: |
| case UNLT_EXPR: |
| case UNLE_EXPR: |
| case UNGT_EXPR: |
| case UNGE_EXPR: |
| case UNEQ_EXPR: |
| return honor_snans; |
| |
| case NEGATE_EXPR: |
| case ABS_EXPR: |
| case CONJ_EXPR: |
| /* These operations don't trap with floating point. */ |
| if (honor_trapv) |
| return true; |
| return false; |
| |
| case ABSU_EXPR: |
| /* ABSU_EXPR never traps. */ |
| return false; |
| |
| case PLUS_EXPR: |
| case MINUS_EXPR: |
| case MULT_EXPR: |
| /* Any floating arithmetic may trap. */ |
| if (fp_operation && flag_trapping_math) |
| return true; |
| if (honor_trapv) |
| return true; |
| return false; |
| |
| case COMPLEX_EXPR: |
| case CONSTRUCTOR: |
| /* Constructing an object cannot trap. */ |
| return false; |
| |
| case COND_EXPR: |
| case VEC_COND_EXPR: |
| /* Whether *COND_EXPR can trap depends on whether the |
| first argument can trap, so signal it as not handled. |
| Whether lhs is floating or not doesn't matter. */ |
| *handled = false; |
| return false; |
| |
| default: |
| /* Any floating arithmetic may trap. */ |
| if (fp_operation && flag_trapping_math) |
| return true; |
| |
| *handled = false; |
| return false; |
| } |
| } |
| |
| /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied |
| on floating-point values. HONOR_TRAPV is true if OP is applied on integer |
| type operands that may trap. If OP is a division operator, DIVISOR contains |
| the value of the divisor. */ |
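| |
| /* A couple of illustrative uses (not exhaustive): |
|      operation_could_trap_p (TRUNC_DIV_EXPR, false, false, div) |
|    is true unless DIV is a constant known to be nonzero, while |
|      operation_could_trap_p (PLUS_EXPR, true, false, NULL_TREE) |
|    is true exactly when trapping floating-point math is honored |
|    (-ftrapping-math). */ |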
| |
| bool |
| operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv, |
| tree divisor) |
| { |
| bool honor_nans = (fp_operation && flag_trapping_math |
| && !flag_finite_math_only); |
| bool honor_snans = fp_operation && flag_signaling_nans != 0; |
| bool handled; |
| |
| /* This function cannot tell whether or not COND_EXPR could trap, |
| because that depends on its condition op. */ |
| gcc_assert (op != COND_EXPR); |
| |
| if (TREE_CODE_CLASS (op) != tcc_comparison |
| && TREE_CODE_CLASS (op) != tcc_unary |
| && TREE_CODE_CLASS (op) != tcc_binary) |
| return false; |
| |
| return operation_could_trap_helper_p (op, fp_operation, honor_trapv, |
| honor_nans, honor_snans, divisor, |
| &handled); |
| } |
| |
| |
| /* Returns true if it is possible to prove that the index of |
| an array access REF (an ARRAY_REF expression) falls into the |
| array bounds. */ |
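| |
| /* For example, given "int a[10];", the access a[4] has a constant index |
| within the constant bounds [0, 9] and this returns true, whereas a[i] |
| with a variable I (or an array with unknown bounds) returns false. */ |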
| |
| static bool |
| in_array_bounds_p (tree ref) |
| { |
| tree idx = TREE_OPERAND (ref, 1); |
| tree min, max; |
| |
| if (TREE_CODE (idx) != INTEGER_CST) |
| return false; |
| |
| min = array_ref_low_bound (ref); |
| max = array_ref_up_bound (ref); |
| if (!min |
| || !max |
| || TREE_CODE (min) != INTEGER_CST |
| || TREE_CODE (max) != INTEGER_CST) |
| return false; |
| |
| if (tree_int_cst_lt (idx, min) |
| || tree_int_cst_lt (max, idx)) |
| return false; |
| |
| return true; |
| } |
| |
| /* Returns true if it is possible to prove that the range of |
| an array access REF (an ARRAY_RANGE_REF expression) falls |
| into the array bounds. */ |
| |
| static bool |
| range_in_array_bounds_p (tree ref) |
| { |
| tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref)); |
| tree range_min, range_max, min, max; |
| |
| range_min = TYPE_MIN_VALUE (domain_type); |
| range_max = TYPE_MAX_VALUE (domain_type); |
| if (!range_min |
| || !range_max |
| || TREE_CODE (range_min) != INTEGER_CST |
| || TREE_CODE (range_max) != INTEGER_CST) |
| return false; |
| |
| min = array_ref_low_bound (ref); |
| max = array_ref_up_bound (ref); |
| if (!min |
| || !max |
| || TREE_CODE (min) != INTEGER_CST |
| || TREE_CODE (max) != INTEGER_CST) |
| return false; |
| |
| if (tree_int_cst_lt (range_min, min) |
| || tree_int_cst_lt (max, range_max)) |
| return false; |
| |
| return true; |
| } |
| |
| /* Return true if EXPR can trap, as in dereferencing an invalid pointer |
| location or floating point arithmetic. C.f. the rtl version, may_trap_p. |
| This routine expects only GIMPLE lhs or rhs input. */ |
| |
| bool |
| tree_could_trap_p (tree expr) |
| { |
| enum tree_code code; |
| bool fp_operation = false; |
| bool honor_trapv = false; |
| tree t, base, div = NULL_TREE; |
| |
| if (!expr) |
| return false; |
| |
| /* In COND_EXPR and VEC_COND_EXPR only the condition may trap, but |
| they won't appear as operands in GIMPLE form, so this is just for the |
| GENERIC uses where it needs to recurse on the operands and so |
| *COND_EXPR itself doesn't trap. */ |
| if (TREE_CODE (expr) == COND_EXPR || TREE_CODE (expr) == VEC_COND_EXPR) |
| return false; |
| |
| code = TREE_CODE (expr); |
| t = TREE_TYPE (expr); |
| |
| if (t) |
| { |
| if (COMPARISON_CLASS_P (expr)) |
| fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0))); |
| else |
| fp_operation = FLOAT_TYPE_P (t); |
| honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t); |
| } |
| |
| if (TREE_CODE_CLASS (code) == tcc_binary) |
| div = TREE_OPERAND (expr, 1); |
| if (operation_could_trap_p (code, fp_operation, honor_trapv, div)) |
| return true; |
| |
| restart: |
| switch (code) |
| { |
| case COMPONENT_REF: |
| case REALPART_EXPR: |
| case IMAGPART_EXPR: |
| case BIT_FIELD_REF: |
| case VIEW_CONVERT_EXPR: |
| case WITH_SIZE_EXPR: |
| expr = TREE_OPERAND (expr, 0); |
| code = TREE_CODE (expr); |
| goto restart; |
| |
| case ARRAY_RANGE_REF: |
| base = TREE_OPERAND (expr, 0); |
| if (tree_could_trap_p (base)) |
| return true; |
| if (TREE_THIS_NOTRAP (expr)) |
| return false; |
| return !range_in_array_bounds_p (expr); |
| |
| case ARRAY_REF: |
| base = TREE_OPERAND (expr, 0); |
| if (tree_could_trap_p (base)) |
| return true; |
| if (TREE_THIS_NOTRAP (expr)) |
| return false; |
| return !in_array_bounds_p (expr); |
| |
| case TARGET_MEM_REF: |
| case MEM_REF: |
| if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR |
| && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0))) |
| return true; |
| if (TREE_THIS_NOTRAP (expr)) |
| return false; |
| /* We cannot prove that the access is in-bounds when we have |
| variable-index TARGET_MEM_REFs. */ |
| if (code == TARGET_MEM_REF |
| && (TMR_INDEX (expr) || TMR_INDEX2 (expr))) |
| return true; |
| if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR) |
| { |
| tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0); |
| poly_offset_int off = mem_ref_offset (expr); |
| if (maybe_lt (off, 0)) |
| return true; |
| if (TREE_CODE (base) == STRING_CST) |
| return maybe_le (TREE_STRING_LENGTH (base), off); |
| tree size = DECL_SIZE_UNIT (base); |
| if (size == NULL_TREE |
| || !poly_int_tree_p (size) |
| || maybe_le (wi::to_poly_offset (size), off)) |
| return true; |
| /* Now we are sure the first byte of the access is inside |
| the object. */ |
| return false; |
| } |
| return true; |
| |
| case INDIRECT_REF: |
| return !TREE_THIS_NOTRAP (expr); |
| |
| case ASM_EXPR: |
| return TREE_THIS_VOLATILE (expr); |
| |
| case CALL_EXPR: |
| /* Internal function calls do not trap. */ |
| if (CALL_EXPR_FN (expr) == NULL_TREE) |
| return false; |
| t = get_callee_fndecl (expr); |
| /* Assume that indirect calls and calls to weak functions may trap. */ |
| if (!t || !DECL_P (t)) |
| return true; |
| if (DECL_WEAK (t)) |
| return tree_could_trap_p (t); |
| return false; |
| |
| case FUNCTION_DECL: |
| /* Assume that accesses to weak functions may trap, unless we know |
| they are certainly defined in current TU or in some other |
| LTO partition. */ |
| if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr)) |
| { |
| cgraph_node *node = cgraph_node::get (expr); |
| if (node) |
| node = node->function_symbol (); |
| return !(node && node->in_other_partition); |
| } |
| return false; |
| |
| case VAR_DECL: |
| /* Assume that accesses to weak vars may trap, unless we know |
| they are certainly defined in current TU or in some other |
| LTO partition. */ |
| if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr)) |
| { |
| varpool_node *node = varpool_node::get (expr); |
| if (node) |
| node = node->ultimate_alias_target (); |
| return !(node && node->in_other_partition); |
| } |
| return false; |
| |
| default: |
| return false; |
| } |
| } |
| |
| /* Return non-NULL if there is an integer operation with trapping overflow |
| we can rewrite into non-trapping. Called via walk_tree from |
| rewrite_to_non_trapping_overflow. */ |
| |
| static tree |
| find_trapping_overflow (tree *tp, int *walk_subtrees, void *data) |
| { |
| if (EXPR_P (*tp) |
| && ANY_INTEGRAL_TYPE_P (TREE_TYPE (*tp)) |
| && !operation_no_trapping_overflow (TREE_TYPE (*tp), TREE_CODE (*tp))) |
| return *tp; |
| if (IS_TYPE_OR_DECL_P (*tp) |
| || (TREE_CODE (*tp) == SAVE_EXPR && data == NULL)) |
| *walk_subtrees = 0; |
| return NULL_TREE; |
| } |
| |
| /* Rewrite selected operations into unsigned arithmetic, so that they |
| don't trap on overflow. */ |
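| |
| /* For example, with -ftrapv a signed addition |
| |
|      a + b |
| |
|    is rewritten (assuming A and B are of type int in this illustration) as |
| |
|      (int) ((unsigned int) a + (unsigned int) b) |
| |
|    which yields the same bit pattern without trapping on overflow. */ |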
| |
| static tree |
| replace_trapping_overflow (tree *tp, int *walk_subtrees, void *data) |
| { |
| if (find_trapping_overflow (tp, walk_subtrees, data)) |
| { |
| tree type = TREE_TYPE (*tp); |
| tree utype = unsigned_type_for (type); |
| *walk_subtrees = 0; |
| int len = TREE_OPERAND_LENGTH (*tp); |
| for (int i = 0; i < len; ++i) |
| walk_tree (&TREE_OPERAND (*tp, i), replace_trapping_overflow, |
| data, (hash_set<tree> *) data); |
| |
| if (TREE_CODE (*tp) == ABS_EXPR) |
| { |
| TREE_SET_CODE (*tp, ABSU_EXPR); |
| TREE_TYPE (*tp) = utype; |
| *tp = fold_convert (type, *tp); |
| } |
| else |
| { |
| TREE_TYPE (*tp) = utype; |
| len = TREE_OPERAND_LENGTH (*tp); |
| for (int i = 0; i < len; ++i) |
| TREE_OPERAND (*tp, i) |
| = fold_convert (utype, TREE_OPERAND (*tp, i)); |
| *tp = fold_convert (type, *tp); |
| } |
| } |
| return NULL_TREE; |
| } |
| |
| /* If any subexpression of EXPR can trap due to -ftrapv, rewrite it |
| using unsigned arithmetic to avoid traps in it. */ |
| |
| tree |
| rewrite_to_non_trapping_overflow (tree expr) |
| { |
| if (!flag_trapv) |
| return expr; |
| hash_set<tree> pset; |
| if (!walk_tree (&expr, find_trapping_overflow, &pset, &pset)) |
| return expr; |
| expr = unshare_expr (expr); |
| pset.empty (); |
| walk_tree (&expr, replace_trapping_overflow, &pset, &pset); |
| return expr; |
| } |
| |
| /* Helper for stmt_could_throw_p. Return true if STMT (assumed to be |
| an assignment or a conditional) may throw. */ |
| |
| static bool |
| stmt_could_throw_1_p (gassign *stmt) |
| { |
| enum tree_code code = gimple_assign_rhs_code (stmt); |
| bool honor_nans = false; |
| bool honor_snans = false; |
| bool fp_operation = false; |
| bool honor_trapv = false; |
| tree t; |
| size_t i; |
| bool handled, ret; |
| |
| if (TREE_CODE_CLASS (code) == tcc_comparison |
| || TREE_CODE_CLASS (code) == tcc_unary |
| || TREE_CODE_CLASS (code) == tcc_binary) |
| { |
| if (TREE_CODE_CLASS (code) == tcc_comparison) |
| t = TREE_TYPE (gimple_assign_rhs1 (stmt)); |
| else |
| t = TREE_TYPE (gimple_assign_lhs (stmt)); |
| fp_operation = FLOAT_TYPE_P (t); |
| if (fp_operation) |
| { |
| honor_nans = flag_trapping_math && !flag_finite_math_only; |
| honor_snans = flag_signaling_nans != 0; |
| } |
| else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t)) |
| honor_trapv = true; |
| } |
| |
| /* First check the LHS. */ |
| if (tree_could_trap_p (gimple_assign_lhs (stmt))) |
| return true; |
| |
| /* Check if the main expression may trap. */ |
| ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv, |
| honor_nans, honor_snans, |
| gimple_assign_rhs2 (stmt), |
| &handled); |
| if (handled) |
| return ret; |
| |
| /* If the expression does not trap, see if any of the individual operands may |
| trap. */ |
| for (i = 1; i < gimple_num_ops (stmt); i++) |
| if (tree_could_trap_p (gimple_op (stmt, i))) |
| return true; |
| |
| return false; |
| } |
| |
| |
| /* Return true if statement STMT within FUN could throw an exception. */ |
| |
| bool |
| stmt_could_throw_p (function *fun, gimple *stmt) |
| { |
| if (!flag_exceptions) |
| return false; |
| |
| /* The only statements that can throw an exception are assignments, |
| conditionals, calls, resx, and asms. */ |
| switch (gimple_code (stmt)) |
| { |
| case GIMPLE_RESX: |
| return true; |
| |
| case GIMPLE_CALL: |
| return !gimple_call_nothrow_p (as_a <gcall *> (stmt)); |
| |
| case GIMPLE_COND: |
| { |
| if (fun && !fun->can_throw_non_call_exceptions) |
| return false; |
| gcond *cond = as_a <gcond *> (stmt); |
| tree lhs = gimple_cond_lhs (cond); |
| return operation_could_trap_p (gimple_cond_code (cond), |
| FLOAT_TYPE_P (TREE_TYPE (lhs)), |
| false, NULL_TREE); |
| } |
| |
| case GIMPLE_ASSIGN: |
| if ((fun && !fun->can_throw_non_call_exceptions) |
| || gimple_clobber_p (stmt)) |
| return false; |
| return stmt_could_throw_1_p (as_a <gassign *> (stmt)); |
| |
| case GIMPLE_ASM: |
| if (fun && !fun->can_throw_non_call_exceptions) |
| return false; |
| return gimple_asm_volatile_p (as_a <gasm *> (stmt)); |
| |
| default: |
| return false; |
| } |
| } |
| |
| /* Return true if STMT in function FUN must be assumed necessary because of |
| non-call exceptions. */ |
| |
| bool |
| stmt_unremovable_because_of_non_call_eh_p (function *fun, gimple *stmt) |
| { |
| return (fun->can_throw_non_call_exceptions |
| && !fun->can_delete_dead_exceptions |
| && stmt_could_throw_p (fun, stmt)); |
| } |
| |
| /* Return true if expression T could throw an exception. */ |
| |
| bool |
| tree_could_throw_p (tree t) |
| { |
| if (!flag_exceptions) |
| return false; |
| if (TREE_CODE (t) == MODIFY_EXPR) |
| { |
| if (cfun->can_throw_non_call_exceptions |
| && tree_could_trap_p (TREE_OPERAND (t, 0))) |
| return true; |
| t = TREE_OPERAND (t, 1); |
| } |
| |
| if (TREE_CODE (t) == WITH_SIZE_EXPR) |
| t = TREE_OPERAND (t, 0); |
| if (TREE_CODE (t) == CALL_EXPR) |
| return (call_expr_flags (t) & ECF_NOTHROW) == 0; |
| if (cfun->can_throw_non_call_exceptions) |
| return tree_could_trap_p (t); |
| return false; |
| } |
| |
| /* Return true if STMT can throw an exception that is not caught within its |
| function FUN. FUN may be NULL, in which case the answer is |
| conservatively true for any statement that could throw at all. */ |
| |
| bool |
| stmt_can_throw_external (function *fun, gimple *stmt) |
| { |
| int lp_nr; |
| |
| if (!stmt_could_throw_p (fun, stmt)) |
| return false; |
| if (!fun) |
| return true; |
| |
| lp_nr = lookup_stmt_eh_lp_fn (fun, stmt); |
| return lp_nr == 0; |
| } |
| |
| /* Return true if STMT can throw an exception that is caught within its |
| function FUN. */ |
| |
| bool |
| stmt_can_throw_internal (function *fun, gimple *stmt) |
| { |
| int lp_nr; |
| |
| gcc_checking_assert (fun); |
| if (!stmt_could_throw_p (fun, stmt)) |
| return false; |
| |
| lp_nr = lookup_stmt_eh_lp_fn (fun, stmt); |
| return lp_nr > 0; |
| } |
| |
| /* Given a statement STMT in IFUN, if STMT can no longer throw, then |
| remove any entry it might have from the EH table. Return true if |
| any change was made. */ |
| |
| bool |
| maybe_clean_eh_stmt_fn (struct function *ifun, gimple *stmt) |
| { |
| if (stmt_could_throw_p (ifun, stmt)) |
| return false; |
| return remove_stmt_from_eh_lp_fn (ifun, stmt); |
| } |
| |
| /* Likewise, but always use the current function. */ |
| |
| bool |
| maybe_clean_eh_stmt (gimple *stmt) |
| { |
| return maybe_clean_eh_stmt_fn (cfun, stmt); |
| } |
| |
| /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced |
| OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT |
| in the table if it should be in there. Return TRUE if a replacement was |
| done that may require an EH edge purge. */ |
| |
| bool |
| maybe_clean_or_replace_eh_stmt (gimple *old_stmt, gimple *new_stmt) |
| { |
| int lp_nr = lookup_stmt_eh_lp (old_stmt); |
| |
| if (lp_nr != 0) |
| { |
| bool new_stmt_could_throw = stmt_could_throw_p (cfun, new_stmt); |
| |
| if (new_stmt == old_stmt && new_stmt_could_throw) |
| return false; |
| |
| remove_stmt_from_eh_lp (old_stmt); |
| if (new_stmt_could_throw) |
| { |
| add_stmt_to_eh_lp (new_stmt, lp_nr); |
| return false; |
| } |
| else |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT |
| in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP |
| operand is the return value of duplicate_eh_regions. */ |
| |
| bool |
| maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple *new_stmt, |
| struct function *old_fun, gimple *old_stmt, |
| hash_map<void *, void *> *map, |
| int default_lp_nr) |
| { |
| int old_lp_nr, new_lp_nr; |
| |
| if (!stmt_could_throw_p (new_fun, new_stmt)) |
| return false; |
| |
| old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt); |
| if (old_lp_nr == 0) |
| { |
| if (default_lp_nr == 0) |
| return false; |
| new_lp_nr = default_lp_nr; |
| } |
| else if (old_lp_nr > 0) |
| { |
| eh_landing_pad old_lp, new_lp; |
| |
| old_lp = (*old_fun->eh->lp_array)[old_lp_nr]; |
| new_lp = static_cast<eh_landing_pad> (*map->get (old_lp)); |
| new_lp_nr = new_lp->index; |
| } |
| else |
| { |
| eh_region old_r, new_r; |
| |
| old_r = (*old_fun->eh->region_array)[-old_lp_nr]; |
| new_r = static_cast<eh_region> (*map->get (old_r)); |
| new_lp_nr = -new_r->index; |
| } |
| |
| add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr); |
| return true; |
| } |
| |
| /* Similar, but both OLD_STMT and NEW_STMT are within the current function, |
| and thus no remapping is required. */ |
| |
| bool |
| maybe_duplicate_eh_stmt (gimple *new_stmt, gimple *old_stmt) |
| { |
| int lp_nr; |
| |
| if (!stmt_could_throw_p (cfun, new_stmt)) |
| return false; |
| |
| lp_nr = lookup_stmt_eh_lp (old_stmt); |
| if (lp_nr == 0) |
| return false; |
| |
| add_stmt_to_eh_lp (new_stmt, lp_nr); |
| return true; |
| } |
| |
| /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of |
| GIMPLE_TRY) that are similar enough to be considered the same. Currently |
| this only handles handlers consisting of a single call, as that's the |
| important case for C++: a destructor call for a particular object showing |
| up in multiple handlers. */ |
| |
| static bool |
| same_handler_p (gimple_seq oneh, gimple_seq twoh) |
| { |
| gimple_stmt_iterator gsi; |
| gimple *ones, *twos; |
| unsigned int ai; |
| |
| gsi = gsi_start (oneh); |
| if (!gsi_one_before_end_p (gsi)) |
| return false; |
| ones = gsi_stmt (gsi); |
| |
| gsi = gsi_start (twoh); |
| if (!gsi_one_before_end_p (gsi)) |
| return false; |
| twos = gsi_stmt (gsi); |
| |
| if (!is_gimple_call (ones) |
| || !is_gimple_call (twos) |
| || gimple_call_lhs (ones) |
| || gimple_call_lhs (twos) |
| || gimple_call_chain (ones) |
| || gimple_call_chain (twos) |
| || !gimple_call_same_target_p (ones, twos) |
| || gimple_call_num_args (ones) != gimple_call_num_args (twos)) |
| return false; |
| |
| for (ai = 0; ai < gimple_call_num_args (ones); ++ai) |
| if (!operand_equal_p (gimple_call_arg (ones, ai), |
| gimple_call_arg (twos, ai), 0)) |
| return false; |
| |
| return true; |
| } |
| |
| /* Optimize |
| try { A() } finally { try { ~B() } catch { ~A() } } |
| try { ... } finally { ~A() } |
| into |
| try { A() } catch { ~B() } |
| try { ~B() ... } finally { ~A() } |
| |
| This occurs frequently in C++, where A is a local variable and B is a |
| temporary used in the initializer for A. */ |
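| |
| /* An illustrative C++ source shape for the pattern above is |
| |
|      A a = make_a (B ());   // B() is a temporary used to initialize a |
|      ... rest of the scope ... |
| |
|    where make_a is only a placeholder; the point is that ~B() and ~A() |
|    both appear as cleanups around the same code. */ |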
| |
| static void |
| optimize_double_finally (gtry *one, gtry *two) |
| { |
| gimple *oneh; |
| gimple_stmt_iterator gsi; |
| gimple_seq cleanup; |
| |
| cleanup = gimple_try_cleanup (one); |
| gsi = gsi_start (cleanup); |
| if (!gsi_one_before_end_p (gsi)) |
| return; |
| |
| oneh = gsi_stmt (gsi); |
| if (gimple_code (oneh) != GIMPLE_TRY |
| || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH) |
| return; |
| |
| if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two))) |
| { |
| gimple_seq seq = gimple_try_eval (oneh); |
| |
| gimple_try_set_cleanup (one, seq); |
| gimple_try_set_kind (one, GIMPLE_TRY_CATCH); |
| seq = copy_gimple_seq_and_replace_locals (seq); |
| gimple_seq_add_seq (&seq, gimple_try_eval (two)); |
| gimple_try_set_eval (two, seq); |
| } |
| } |
| |
| /* Perform EH refactoring optimizations that are simpler to do when code |
| flow has been lowered but EH structures haven't. */ |
| |
| static void |
| refactor_eh_r (gimple_seq seq) |
| { |
| gimple_stmt_iterator gsi; |
| gimple *one, *two; |
| |
| one = NULL; |
| two = NULL; |
| gsi = gsi_start (seq); |
| while (1) |
| { |
| one = two; |
| if (gsi_end_p (gsi)) |
| two = NULL; |
| else |
| two = gsi_stmt (gsi); |
| if (one && two) |
| if (gtry *try_one = dyn_cast <gtry *> (one)) |
| if (gtry *try_two = dyn_cast <gtry *> (two)) |
| if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY |
| && gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY) |
| optimize_double_finally (try_one, try_two); |
| if (one) |
| switch (gimple_code (one)) |
| { |
| case GIMPLE_TRY: |
| refactor_eh_r (gimple_try_eval (one)); |
| refactor_eh_r (gimple_try_cleanup (one)); |
| break; |
| case GIMPLE_CATCH: |
| refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one))); |
| break; |
| case GIMPLE_EH_FILTER: |
| refactor_eh_r (gimple_eh_filter_failure (one)); |
| break; |
| case GIMPLE_EH_ELSE: |
| { |
| geh_else *eh_else_stmt = as_a <geh_else *> (one); |
| refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt)); |
| refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt)); |
| } |
| break; |
| default: |
| break; |
| } |
| if (two) |
| gsi_next (&gsi); |
| else |
| break; |
| } |
| } |
| |
| namespace { |
| |
| const pass_data pass_data_refactor_eh = |
| { |
| GIMPLE_PASS, /* type */ |
| "ehopt", /* name */ |
| OPTGROUP_NONE, /* optinfo_flags */ |
| TV_TREE_EH, /* tv_id */ |
| PROP_gimple_lcf, /* properties_required */ |
| 0, /* properties_provided */ |
| 0, /* properties_destroyed */ |
| 0, /* todo_flags_start */ |
| 0, /* todo_flags_finish */ |
| }; |
| |
| class pass_refactor_eh : public gimple_opt_pass |
| { |
| public: |
| pass_refactor_eh (gcc::context *ctxt) |
| : gimple_opt_pass (pass_data_refactor_eh, ctxt) |
| {} |
| |
| /* opt_pass methods: */ |
| bool gate (function *) final override { return flag_exceptions != 0; } |
| unsigned int execute (function *) final override |
| { |
| refactor_eh_r (gimple_body (current_function_decl)); |
| return 0; |
| } |
| |
| }; // class pass_refactor_eh |
| |
| } // anon namespace |
| |
| gimple_opt_pass * |
| make_pass_refactor_eh (gcc::context *ctxt) |
| { |
| return new pass_refactor_eh (ctxt); |
| } |
| |
| /* At the end of gimple optimization, we can lower RESX. */ |
| |
| static bool |
| lower_resx (basic_block bb, gresx *stmt, |
| hash_map<eh_region, tree> *mnt_map) |
| { |
| int lp_nr; |
| eh_region src_r, dst_r; |
| gimple_stmt_iterator gsi; |
| gcall *x; |
| tree fn, src_nr; |
| bool ret = false; |
| |
| lp_nr = lookup_stmt_eh_lp (stmt); |
| if (lp_nr != 0) |
| dst_r = get_eh_region_from_lp_number (lp_nr); |
| else |
| dst_r = NULL; |
| |
| src_r = get_eh_region_from_number (gimple_resx_region (stmt)); |
| gsi = gsi_last_bb (bb); |
| |
| if (src_r == NULL) |
| { |
| /* We can wind up with no source region when pass_cleanup_eh shows |
| that there are no entries into an eh region and deletes it, but |
| then the block that contains the resx isn't removed. This can |
| happen without optimization when the switch statement created by |
| lower_try_finally_switch isn't simplified to remove the eh case. |
| |
| Resolve this by expanding the resx node to a trap. */ |
| |
| fn = builtin_decl_implicit (BUILT_IN_TRAP); |
| x = gimple_build_call (fn, 0); |
| gimple_call_set_ctrl_altering (x, true); |
| gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
| |
| while (EDGE_COUNT (bb->succs) > 0) |
| remove_edge (EDGE_SUCC (bb, 0)); |
| } |
| else if (dst_r) |
| { |
| /* When we have a destination region, we resolve this by copying |
| the excptr and filter values into place, and changing the edge |
| to immediately after the landing pad. */ |
| edge e; |
| |
| if (lp_nr < 0) |
| { |
| basic_block new_bb; |
| tree lab; |
| |
| /* We are resuming into a MUST_NOT_THROW region. Expand a call to |
| the failure decl into a new block, if needed. */ |
| gcc_assert (dst_r->type == ERT_MUST_NOT_THROW); |
| |
| tree *slot = mnt_map->get (dst_r); |
| if (slot == NULL) |
| { |
| gimple_stmt_iterator gsi2; |
| |
| new_bb = create_empty_bb (bb); |
| new_bb->count = bb->count; |
| add_bb_to_loop (new_bb, bb->loop_father); |
| lab = gimple_block_label (new_bb); |
| gsi2 = gsi_start_bb (new_bb); |
| |
| /* Handle failure fns that expect either no arguments or the |
| exception pointer. */ |
| fn = dst_r->u.must_not_throw.failure_decl; |
| if (TYPE_ARG_TYPES (TREE_TYPE (fn)) != void_list_node) |
| { |
| tree epfn = builtin_decl_implicit (BUILT_IN_EH_POINTER); |
| src_nr = build_int_cst (integer_type_node, src_r->index); |
| x = gimple_build_call (epfn, 1, src_nr); |
| tree var = create_tmp_var (ptr_type_node); |
| var = make_ssa_name (var, x); |
| gimple_call_set_lhs (x, var); |
| gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING); |
| x = gimple_build_call (fn, 1, var); |
| } |
| else |
| x = gimple_build_call (fn, 0); |
| gimple_set_location (x, dst_r->u.must_not_throw.failure_loc); |
| gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING); |
| |
| mnt_map->put (dst_r, lab); |
| } |
| else |
| { |
| lab = *slot; |
| new_bb = label_to_block (cfun, lab); |
| } |
| |
| gcc_assert (EDGE_COUNT (bb->succs) == 0); |
| e = make_single_succ_edge (bb, new_bb, EDGE_FALLTHRU); |
| } |
| else |
| { |
| edge_iterator ei; |
| tree dst_nr = build_int_cst (integer_type_node, dst_r->index); |
| |
| fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES); |
| src_nr = build_int_cst (integer_type_node, src_r->index); |
| x = gimple_build_call (fn, 2, dst_nr, src_nr); |
| gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
| |
| /* Update the flags for the outgoing edge. */ |
| e = single_succ_edge (bb); |
| gcc_assert (e->flags & EDGE_EH); |
| e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU; |
| e->probability = profile_probability::always (); |
| |
| /* If there are no more EH users of the landing pad, delete it. */ |
| FOR_EACH_EDGE (e, ei, e->dest->preds) |
| if (e->flags & EDGE_EH) |
| break; |
| if (e == NULL) |
| { |
| eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr); |
| remove_eh_landing_pad (lp); |
| } |
| } |
| |
| ret = true; |
| } |
| else |
| { |
| tree var; |
| |
| /* When we don't have a destination region, this exception escapes |
| up the call chain. We resolve this by generating a call to the |
| _Unwind_Resume library function. */ |
| |
| /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup |
| with no arguments for C++. Check for that. */ |
| if (src_r->use_cxa_end_cleanup) |
| { |
| fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP); |
| x = gimple_build_call (fn, 0); |
| gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
| } |
| else |
| { |
| fn = builtin_decl_implicit (BUILT_IN_EH_POINTER); |
| src_nr = build_int_cst (integer_type_node, src_r->index); |
| x = gimple_build_call (fn, 1, src_nr); |
| var = create_tmp_var (ptr_type_node); |
| var = make_ssa_name (var, x); |
| gimple_call_set_lhs (x, var); |
| gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
| |
| /* When exception handling is delegated to a caller function, we |
| have to guarantee that shadow memory variables living on stack |
| will be cleaned up before control is given to a parent function. */ |
| if (sanitize_flags_p (SANITIZE_ADDRESS)) |
| { |
| tree decl |
| = builtin_decl_implicit (BUILT_IN_ASAN_HANDLE_NO_RETURN); |
| gimple *g = gimple_build_call (decl, 0); |
| gimple_set_location (g, gimple_location (stmt)); |
| gsi_insert_before (&gsi, g, GSI_SAME_STMT); |
| } |
| |
| fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME); |
| x = gimple_build_call (fn, 1, var); |
| gimple_call_set_ctrl_altering (x, true); |
| gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
| } |
| |
| gcc_assert (EDGE_COUNT (bb->succs) == 0); |
| } |
| |
| gsi_remove (&gsi, true); |
| |
| return ret; |
| } |
| |
| namespace { |
| |
| const pass_data pass_data_lower_resx = |
| { |
| GIMPLE_PASS, /* type */ |
| "resx", /* name */ |
| OPTGROUP_NONE, /* optinfo_flags */ |
| TV_TREE_EH, /* tv_id */ |
| PROP_gimple_lcf, /* properties_required */ |
| 0, /* properties_provided */ |
| 0, /* properties_destroyed */ |
| 0, /* todo_flags_start */ |
| 0, /* todo_flags_finish */ |
| }; |
| |
| class pass_lower_resx : public gimple_opt_pass |
| { |
| public: |
| pass_lower_resx (gcc::context *ctxt) |
| : gimple_opt_pass (pass_data_lower_resx, ctxt) |
| {} |
| |
| /* opt_pass methods: */ |
| bool gate (function *) final override { return flag_exceptions != 0; } |
| unsigned int execute (function *) final override; |
| |
| }; // class pass_lower_resx |
| |
| unsigned |
| pass_lower_resx::execute (function *fun) |
| { |
| basic_block bb; |
| bool dominance_invalidated = false; |
| bool any_rewritten = false; |
| |
| hash_map<eh_region, tree> mnt_map; |
| |
| FOR_EACH_BB_FN (bb, fun) |
| { |
| if (gresx *last = safe_dyn_cast <gresx *> (*gsi_last_bb (bb))) |
| { |
| dominance_invalidated |= lower_resx (bb, last, &mnt_map); |
| any_rewritten = true; |
| } |
| } |
| |
| if (dominance_invalidated) |
| { |
| free_dominance_info (CDI_DOMINATORS); |
| free_dominance_info (CDI_POST_DOMINATORS); |
| } |
| |
| return any_rewritten ? TODO_update_ssa_only_virtuals : 0; |
| } |
| |
| } // anon namespace |
| |
| gimple_opt_pass * |
| make_pass_lower_resx (gcc::context *ctxt) |
| { |
| return new pass_lower_resx (ctxt); |
| } |
| |
| /* Try to optimize var = {v} {CLOBBER} stmts followed just by |
| external throw. */ |
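| |
| /* Illustrative shape of a block this cleans up: |
| |
|    <bb N> (reached only via EH edges): |
|      var1 = {v} {CLOBBER}; |
|      var2 = {v} {CLOBBER}; |
|      resx R;                 <- the exception escapes the function |
| |
|    The clobbers serve no purpose once the exception leaves the function, |
|    so they are simply deleted. */ |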
| |
| static void |
| optimize_clobbers (basic_block bb) |
| { |
| gimple_stmt_iterator gsi = gsi_last_bb (bb); |
| bool any_clobbers = false; |
| bool seen_stack_restore = false; |
| edge_iterator ei; |
| edge e; |
| |
| /* Only optimize anything if the bb contains at least one clobber, |
| ends with resx (checked by caller), optionally contains some |
| debug stmts or labels and at most one __builtin_stack_restore |
| call, and has an incoming EH edge. */ |
| for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| if (is_gimple_debug (stmt)) |
| continue; |
| if (gimple_clobber_p (stmt)) |
| { |
| any_clobbers = true; |
| continue; |
| } |
| if (!seen_stack_restore |
| && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE)) |
| { |
| seen_stack_restore = true; |
| continue; |
| } |
| if (gimple_code (stmt) == GIMPLE_LABEL) |
| break; |
| return; |
| } |
| if (!any_clobbers) |
| return; |
| FOR_EACH_EDGE (e, ei, bb->preds) |
| if (e->flags & EDGE_EH) |
| break; |
| if (e == NULL) |
| return; |
| gsi = gsi_last_bb (bb); |
| for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| if (!gimple_clobber_p (stmt)) |
| continue; |
| unlink_stmt_vdef (stmt); |
| gsi_remove (&gsi, true); |
| release_defs (stmt); |
| } |
| } |
| |
| /* Try to sink var = {v} {CLOBBER} stmts followed just by |
| internal throw to successor BB. |
| SUNK, if not NULL, is an array of sequences indexed by basic-block |
| index to sink to and to pick up sinking opportunities from. |
| If FOUND_OPPORTUNITY is not NULL then do not perform the optimization |
| but set *FOUND_OPPORTUNITY to true. */ |
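| |
| /* Illustrative shape (placeholder names): |
| |
|    <bb N> (all preds EH, single EH successor <bb M>): |
|      var = {v} {CLOBBER}; |
|      resx R;                 <- handled within this function |
| |
|    The clobber is moved to the start of <bb M>, or queued in SUNK for it, |
|    leaving <bb N> with just the resx. */ |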
| |
| static int |
| sink_clobbers (basic_block bb, |
| gimple_seq *sunk = NULL, bool *found_opportunity = NULL) |
| { |
| edge e; |
| edge_iterator ei; |
| gimple_stmt_iterator gsi, dgsi; |
| basic_block succbb; |
| bool any_clobbers = false; |
| unsigned todo = 0; |
| |
| /* Only optimize if BB has a single EH successor and |
| all predecessor edges are EH too. */ |
| if (!single_succ_p (bb) |
| || (single_succ_edge (bb)->flags & EDGE_EH) == 0) |
| return 0; |
| |
| FOR_EACH_EDGE (e, ei, bb->preds) |
| { |
| if ((e->flags & EDGE_EH) == 0) |
| return 0; |
| } |
| |
| /* And BB contains only CLOBBER stmts before the final |
| RESX. */ |
| gsi = gsi_last_bb (bb); |
| for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| if (is_gimple_debug (stmt)) |
| continue; |
| if (gimple_code (stmt) == GIMPLE_LABEL) |
| break; |
| if (!gimple_clobber_p (stmt)) |
| return 0; |
| any_clobbers = true; |
| } |
| if (!any_clobbers && (!sunk || gimple_seq_empty_p (sunk[bb->index]))) |
| return 0; |
| |
| /* If this was a dry run, tell it we found clobbers to sink. */ |
| if (found_opportunity) |
| { |
| *found_opportunity = true; |
| return 0; |
| } |
| |
| edge succe = single_succ_edge (bb); |
| succbb = succe->dest; |
| |
| /* See if there is a virtual PHI node to take an updated virtual |
| operand from. */ |
| gphi *vphi = NULL; |
| for (gphi_iterator gpi = gsi_start_phis (succbb); |
| !gsi_end_p (gpi); gsi_next (&gpi)) |
| { |
| tree res = gimple_phi_result (gpi.phi ()); |
| if (virtual_operand_p (res)) |
| { |
| vphi = gpi.phi (); |
| break; |
| } |
| } |
| |
| gimple *first_sunk = NULL; |
| gimple *last_sunk = NULL; |
| if (sunk && !(succbb->flags & BB_VISITED)) |
| dgsi = gsi_start (sunk[succbb->index]); |
| else |
| dgsi = gsi_after_labels (succbb); |
| gsi = gsi_last_bb (bb); |
| for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| tree lhs; |
| if (is_gimple_debug (stmt)) |
| continue; |
| if (gimple_code (stmt) == GIMPLE_LABEL) |
| break; |
| lhs = gimple_assign_lhs (stmt); |
| /* Unfortunately we don't have dominance info updated at this |
| point, so checking if |
| dominated_by_p (CDI_DOMINATORS, succbb, |
| gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))) |
| would be too costly. Thus, avoid sinking any clobbers that |
| refer to non-(D) SSA_NAMEs. */ |
| if (TREE_CODE (lhs) == MEM_REF |
| && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME |
| && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0))) |
| { |
| unlink_stmt_vdef (stmt); |
| gsi_remove (&gsi, true); |
| release_defs (stmt); |
| continue; |
| } |
| |
| /* As we do not change stmt order when sinking across a |
| forwarder edge we can keep virtual operands in place. */ |
| gsi_remove (&gsi, false); |
| gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT); |
| if (!first_sunk) |
| first_sunk = stmt; |
| last_sunk = stmt; |
| } |
| if (sunk && !gimple_seq_empty_p (sunk[bb->index])) |
| { |
| if (!first_sunk) |
| first_sunk = gsi_stmt (gsi_last (sunk[bb->index])); |
| last_sunk = gsi_stmt (gsi_start (sunk[bb->index])); |
| gsi_insert_seq_before_without_update (&dgsi, |
| sunk[bb->index], GSI_NEW_STMT); |
| sunk[bb->index] = NULL; |
| } |
| if (first_sunk) |
| { |
| /* Adjust virtual operands if we sunk across a virtual PHI. */ |
| if (vphi) |
| { |
| imm_use_iterator iter; |
| use_operand_p use_p; |
| gimple *use_stmt; |
| tree phi_def = gimple_phi_result (vphi); |
| FOR_EACH_IMM_USE_STMT (use_stmt, iter, phi_def) |
| FOR_EACH_IMM_USE_ON_STMT (use_p, iter) |
| SET_USE (use_p, gimple_vdef (first_sunk)); |
| if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def)) |
| { |
| SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (first_sunk)) = 1; |
| SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def) = 0; |
| } |
| SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), |
| gimple_vuse (last_sunk)); |
| SET_USE (gimple_vuse_op (last_sunk), phi_def); |
| } |
| /* If the successor doesn't have a single predecessor and there is |
| no virtual PHI node, arrange for virtual operands to be renamed. */ |
| else if (!single_pred_p (succbb) |
| && TREE_CODE (gimple_vuse (last_sunk)) == SSA_NAME) |
| { |
| mark_virtual_operand_for_renaming (gimple_vuse (last_sunk)); |
| todo |= TODO_update_ssa_only_virtuals; |
| } |
| } |
| |
| return todo; |
| } |
| |
| /* At the end of inlining, we can lower EH_DISPATCH. Return true when |
| we have found some duplicate labels and removed some edges. */ |
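| |
| /* Illustrative sketch for an ERT_TRY region with two handlers: |
| |
|      eh_dispatch R; |
| |
|    becomes roughly |
| |
|      filter = __builtin_eh_filter (R); |
|      switch (filter) |
|        { |
|        case <filter value of handler 1>: goto <label 1>; |
|        case <filter value of handler 2>: goto <label 2>; |
|        default: goto <catch-all or fallthru label>; |
|        } |
| |
|    while an ERT_ALLOWED_EXCEPTIONS region becomes a single equality test |
|    of the filter value. */ |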
| |
| static bool |
| lower_eh_dispatch (basic_block src, geh_dispatch *stmt) |
| { |
| gimple_stmt_iterator gsi; |
| int region_nr; |
| eh_region r; |
| tree filter, fn; |
| gimple *x; |
| bool redirected = false; |
| |
| region_nr = gimple_eh_dispatch_region (stmt); |
| r = get_eh_region_from_number (region_nr); |
| |
| gsi = gsi_last_bb (src); |
| |
| switch (r->type) |
| { |
| case ERT_TRY: |
| { |
| auto_vec<tree> labels; |
| tree default_label = NULL; |
| eh_catch c; |
| edge_iterator ei; |
| edge e; |
| hash_set<tree> seen_values; |
| |
| /* Collect the labels for a switch. Zero out the label field of each |
| handler because we'll no longer have anything keeping these labels |
| in existence and the optimizer will be free to merge these |
| blocks at will. */ |
| for (c = r->u.eh_try.first_catch; c ; c = c->next_catch) |
| { |
| tree tp_node, flt_node, lab = c->label; |
| bool have_label = false; |
| |
| c->label = NULL; |
| tp_node = c->type_list; |
| flt_node = c->filter_list; |
| |
| if (tp_node == NULL) |
| { |
| default_label = lab; |
| break; |
| } |
| do |
| { |
| /* Filter out duplicate labels that arise when this handler |
| is shadowed by an earlier one. When no labels are |
| attached to the handler anymore, we remove |
| the corresponding edge and then we delete unreachable |
| blocks at the end of this pass. */ |
| if (! seen_values.contains (TREE_VALUE (flt_node))) |
| { |
| tree t = build_case_label (TREE_VALUE (flt_node), |
| NULL, lab); |
| labels.safe_push (t); |
| seen_values.add (TREE_VALUE (flt_node)); |
| have_label = true; |
| } |
| |
| tp_node = TREE_CHAIN (tp_node); |
| flt_node = TREE_CHAIN (flt_node); |
| } |
| while (tp_node); |
| if (! have_label) |
| { |
| remove_edge (find_edge (src, label_to_block (cfun, lab))); |
| redirected = true; |
| } |
| } |
| |
| /* Clean up the edge flags. */ |
| FOR_EACH_EDGE (e, ei, src->succs) |
| { |
| if (e->flags & EDGE_FALLTHRU) |
| { |
| /* If there was no catch-all, use the fallthru edge. */ |
| if (default_label == NULL) |
| default_label = gimple_block_label (e->dest); |
| e->flags &= ~EDGE_FALLTHRU; |
| } |
| } |
| gcc_assert (default_label != NULL); |
| |
| /* Don't generate a switch if there's only a default case. |
| This is common in the form of try { A; } catch (...) { B; }. */ |
| if (!labels.exists ()) |
| { |
| e = single_succ_edge (src); |
| e->flags |= EDGE_FALLTHRU; |
| } |
| else |
| { |
| fn = builtin_decl_implicit (BUILT_IN_EH_FILTER); |
| x = gimple_build_call (fn, 1, build_int_cst (integer_type_node, |
| region_nr)); |
| filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn))); |
| filter = make_ssa_name (filter, x); |
| gimple_call_set_lhs (x, filter); |
| gimple_set_location (x, gimple_location (stmt)); |
| gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
| |
| /* Turn the default label into a default case. */ |
| default_label = build_case_label (NULL, NULL, default_label); |
| sort_case_labels (labels); |
| |
| x = gimple_build_switch (filter, default_label, labels); |
| gimple_set_location (x, gimple_location (stmt)); |
| gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
| } |
| } |
| break; |
| |
| case ERT_ALLOWED_EXCEPTIONS: |
| { |
| edge b_e = BRANCH_EDGE (src); |
| edge f_e = FALLTHRU_EDGE (src); |
| |
| fn = builtin_decl_implicit (BUILT_IN_EH_FILTER); |
| x = gimple_build_call (fn, 1, build_int_cst (integer_type_node, |
| region_nr)); |
| filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn))); |
| filter = make_ssa_name (filter, x); |
| gimple_call_set_lhs (x, filter); |
| gimple_set_location (x, gimple_location (stmt)); |
| gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
| |
| r->u.allowed.label = NULL; |
| x = gimple_build_cond (EQ_EXPR, filter, |
| build_int_cst (TREE_TYPE (filter), |
| r->u.allowed.filter), |
| NULL_TREE, NULL_TREE); |
| gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
| |
| b_e->flags = b_e->flags | EDGE_TRUE_VALUE; |
| f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE; |
| } |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| |
| /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */ |
| gsi_remove (&gsi, true); |
| return redirected; |
| } |
| |
| namespace { |
| |
| const pass_data pass_data_lower_eh_dispatch = |
| { |
| GIMPLE_PASS, /* type */ |
| "ehdisp", /* name */ |
| OPTGROUP_NONE, /* optinfo_flags */ |
| TV_TREE_EH, /* tv_id */ |
| PROP_gimple_lcf, /* properties_required */ |
| 0, /* properties_provided */ |
| 0, /* properties_destroyed */ |
| 0, /* todo_flags_start */ |
| 0, /* todo_flags_finish */ |
| }; |
| |
| class pass_lower_eh_dispatch : public gimple_opt_pass |
| { |
| public: |
| pass_lower_eh_dispatch (gcc::context *ctxt) |
| : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt) |
| {} |
| |
| /* opt_pass methods: */ |
| bool gate (function *fun) final override |
| { |
| return fun->eh->region_tree != NULL; |
| } |
| unsigned int execute (function *) final override; |
| |
| }; // class pass_lower_eh_dispatch |
| |
| unsigned |
| pass_lower_eh_dispatch::execute (function *fun) |
| { |
| basic_block bb; |
| int flags = 0; |
| bool redirected = false; |
| bool any_resx_to_process = false; |
| |
| assign_filter_values (); |
| |
| FOR_EACH_BB_FN (bb, fun) |
| { |
| gimple *last = *gsi_last_bb (bb); |
| if (last == NULL) |
| continue; |
| if (gimple_code (last) == GIMPLE_EH_DISPATCH) |
| { |
| redirected |= lower_eh_dispatch (bb, |
| as_a <geh_dispatch *> (last)); |
| flags |= TODO_update_ssa_only_virtuals; |
| } |
| else if (gimple_code (last) == GIMPLE_RESX) |
| { |
| if (stmt_can_throw_external (fun, last)) |
| optimize_clobbers (bb); |
| else if (!any_resx_to_process) |
| sink_clobbers (bb, NULL, &any_resx_to_process); |
| } |
| bb->flags &= ~BB_VISITED; |
| } |
| if (redirected) |
| { |
| free_dominance_info (CDI_DOMINATORS); |
| delete_unreachable_blocks (); |
| } |
| |
| if (any_resx_to_process) |
| { |
| /* Make sure to catch all secondary sinking opportunities by processing |
| blocks in RPO order and after all CFG modifications from lowering |
| and unreachable block removal. */ |
| int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (fun)); |
| int rpo_n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, false); |
| gimple_seq *sunk = XCNEWVEC (gimple_seq, last_basic_block_for_fn (fun)); |
| for (int i = 0; i < rpo_n; ++i) |
| { |
| bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]); |
| gimple *last = *gsi_last_bb (bb); |
| if (last |
| && gimple_code (last) == GIMPLE_RESX |
| && !stmt_can_throw_external (fun, last)) |
| flags |= sink_clobbers (bb, sunk); |
| /* If there were any clobbers sunk into this BB, insert them now. */ |
| if (!gimple_seq_empty_p (sunk[bb->index])) |
| { |
| gimple_stmt_iterator gsi = gsi_after_labels (bb); |
| gsi_insert_seq_before (&gsi, sunk[bb->index], GSI_NEW_STMT); |
| sunk[bb->index] = NULL; |
| } |
| bb->flags |= BB_VISITED; |
| } |
| free (rpo); |
| free (sunk); |
| } |
| |
| return flags; |
| } |
| |
| } // anon namespace |
| |
| gimple_opt_pass * |
| make_pass_lower_eh_dispatch (gcc::context *ctxt) |
| { |
| return new pass_lower_eh_dispatch (ctxt); |
| } |
| |
| /* Walk statements, see what regions and, optionally, landing pads |
| are really referenced. |
| |
| Returns in R_REACHABLEP an sbitmap with bits set for reachable regions, |
| and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads. |
| |
| Passing NULL for LP_REACHABLEP is valid; in this case only reachable |
| regions are marked. |
| |
| The caller is responsible for freeing the returned sbitmaps. */ |
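| |
| /* A typical use, as in remove_unreachable_handlers below, is |
| |
| sbitmap r_reachable, lp_reachable; |
| mark_reachable_handlers (&r_reachable, &lp_reachable); |
| ... test bitmap_bit_p (r_reachable, region->index) ... |
| sbitmap_free (r_reachable); |
| sbitmap_free (lp_reachable); */ |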
| |
| static void |
| mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep) |
| { |
| sbitmap r_reachable, lp_reachable; |
| basic_block bb; |
| bool mark_landing_pads = (lp_reachablep != NULL); |
| gcc_checking_assert (r_reachablep != NULL); |
| |
| r_reachable = sbitmap_alloc (cfun->eh->region_array->length ()); |
| bitmap_clear (r_reachable); |
| *r_reachablep = r_reachable; |
| |
| if (mark_landing_pads) |
| { |
| lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ()); |
| bitmap_clear (lp_reachable); |
| *lp_reachablep = lp_reachable; |
| } |
| else |
| lp_reachable = NULL; |
| |
| FOR_EACH_BB_FN (bb, cfun) |
| { |
| gimple_stmt_iterator gsi; |
| |
| for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| |
| if (mark_landing_pads) |
| { |
| int lp_nr = lookup_stmt_eh_lp (stmt); |
| |
| /* Negative LP numbers are MUST_NOT_THROW regions which |
| are not considered BB enders. */ |
| if (lp_nr < 0) |
| bitmap_set_bit (r_reachable, -lp_nr); |
| |
| /* Positive LP numbers are real landing pads, and BB enders. */ |
| else if (lp_nr > 0) |
| { |
| gcc_assert (gsi_one_before_end_p (gsi)); |
| eh_region region = get_eh_region_from_lp_number (lp_nr); |
| bitmap_set_bit (r_reachable, region->index); |
| bitmap_set_bit (lp_reachable, lp_nr); |
| } |
| } |
| |
| /* Avoid removing regions referenced from RESX/EH_DISPATCH. */ |
| switch (gimple_code (stmt)) |
| { |
| case GIMPLE_RESX: |
| bitmap_set_bit (r_reachable, |
| gimple_resx_region (as_a <gresx *> (stmt))); |
| break; |
| case GIMPLE_EH_DISPATCH: |
| bitmap_set_bit (r_reachable, |
| gimple_eh_dispatch_region ( |
| as_a <geh_dispatch *> (stmt))); |
| break; |
| case GIMPLE_CALL: |
| if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES)) |
| for (int i = 0; i < 2; ++i) |
| { |
| tree rt = gimple_call_arg (stmt, i); |
| HOST_WIDE_INT ri = tree_to_shwi (rt); |
| |
| gcc_assert (ri == (int)ri); |
| bitmap_set_bit (r_reachable, ri); |
| } |
| break; |
| default: |
| break; |
| } |
| } |
| } |
| } |
| |
| /* Remove unreachable handlers and unreachable landing pads. */ |
| |
| static void |
| remove_unreachable_handlers (void) |
| { |
| sbitmap r_reachable, lp_reachable; |
| eh_region region; |
| eh_landing_pad lp; |
| unsigned i; |
| |
| mark_reachable_handlers (&r_reachable, &lp_reachable); |
| |
| if (dump_file) |
| { |
| fprintf (dump_file, "Before removal of unreachable regions:\n"); |
| dump_eh_tree (dump_file, cfun); |
| fprintf (dump_file, "Reachable regions: "); |
| dump_bitmap_file (dump_file, r_reachable); |
| fprintf (dump_file, "Reachable landing pads: "); |
| dump_bitmap_file (dump_file, lp_reachable); |
| } |
| |
| if (dump_file) |
| { |
| FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region) |
| if (region && !bitmap_bit_p (r_reachable, region->index)) |
| fprintf (dump_file, |
| "Removing unreachable region %d\n", |
| region->index); |
| } |
| |
| remove_unreachable_eh_regions (r_reachable); |
| |
| FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp) |
| if (lp && !bitmap_bit_p (lp_reachable, lp->index)) |
| { |
| if (dump_file) |
| fprintf (dump_file, |
| "Removing unreachable landing pad %d\n", |
| lp->index); |
| remove_eh_landing_pad (lp); |
| } |
| |
| if (dump_file) |
| { |
| fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n"); |
| dump_eh_tree (dump_file, cfun); |
| fprintf (dump_file, "\n\n"); |
| } |
| |
| sbitmap_free (r_reachable); |
| sbitmap_free (lp_reachable); |
| |
| if (flag_checking) |
| verify_eh_tree (cfun); |
| } |
| |
| /* Remove unreachable handlers if any landing pads have been removed after |
| the last ehcleanup pass (due to gimple_purge_dead_eh_edges). */ |
| |
| void |
| maybe_remove_unreachable_handlers (void) |
| { |
| eh_landing_pad lp; |
| unsigned i; |
| |
| if (cfun->eh == NULL) |
| return; |
| |
| FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp) |
| if (lp |
| && (lp->post_landing_pad == NULL_TREE |
| || label_to_block (cfun, lp->post_landing_pad) == NULL)) |
| { |
| remove_unreachable_handlers (); |
| return; |
| } |
| } |
| |
| /* Remove regions that do not have landing pads. This assumes |
| that remove_unreachable_handlers has already been run, and |
| that we've just manipulated the landing pads since then. |
| |
| Preserve regions with landing pads and regions that prevent |
| exceptions from propagating further, even if these regions |
| are not reachable. */ |
| |
| static void |
| remove_unreachable_handlers_no_lp (void) |
| { |
| eh_region region; |
| sbitmap r_reachable; |
| unsigned i; |
| |
| mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL); |
| |
| FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region) |
| { |
| if (! region) |
| continue; |
| |
| if (region->landing_pads != NULL |
| || region->type == ERT_MUST_NOT_THROW) |
| bitmap_set_bit (r_reachable, region->index); |
| |
| if (dump_file |
| && !bitmap_bit_p (r_reachable, region->index)) |
| fprintf (dump_file, |
| "Removing unreachable region %d\n", |
| region->index); |
| } |
| |
| remove_unreachable_eh_regions (r_reachable); |
| |
| sbitmap_free (r_reachable); |
| } |
| |
| /* Undo critical edge splitting on an EH landing pad. Earlier, we |
| optimistically split all sorts of edges, including EH edges. The |
| optimization passes in between may not have needed them; if not, |
| we should undo the split. |
| |
| Recognize this case by having one EH edge incoming to the BB and |
| one normal edge outgoing; BB should be empty apart from the |
| post_landing_pad label. |
| |
| Note that this is slightly different from the empty handler case |
| handled by cleanup_empty_eh, in that the actual handler may yet |
| have actual code but the landing pad has been separated from the |
| handler. As such, cleanup_empty_eh relies on this transformation |
| having been done first. */ |
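| |
| /* Schematically, the intent is to turn |
| |
| <throwing stmt> |
| | EH |
| <post_landing_pad> (empty) |
| | |
| <handler> |
| |
| into |
| |
| <throwing stmt> |
| | EH |
| <handler> |
| |
| by redirecting the incoming EH edge straight to E_OUT's destination; |
| the forwarder block then goes unreachable and is deleted later. */ |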
| |
| static bool |
| unsplit_eh (eh_landing_pad lp) |
| { |
| basic_block bb = label_to_block (cfun, lp->post_landing_pad); |
| gimple_stmt_iterator gsi; |
| edge e_in, e_out; |
| |
| /* Quickly check the edge counts on BB for singularity. */ |
| if (!single_pred_p (bb) || !single_succ_p (bb)) |
| return false; |
| e_in = single_pred_edge (bb); |
| e_out = single_succ_edge (bb); |
| |
| /* Input edge must be EH and output edge must be normal. */ |
| if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0) |
| return false; |
| |
| /* The block must be empty except for the labels and debug insns. */ |
| gsi = gsi_after_labels (bb); |
| if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi))) |
| gsi_next_nondebug (&gsi); |
| if (!gsi_end_p (gsi)) |
| return false; |
| |
| /* The destination block must not already have a landing pad |
| for a different region. */ |
| for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi)); |
| tree lab; |
| int lp_nr; |
| |
| if (!label_stmt) |
| break; |
| lab = gimple_label_label (label_stmt); |
| lp_nr = EH_LANDING_PAD_NR (lab); |
| if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region) |
| return false; |
| } |
| |
| /* The new destination block must not already be a destination of |
| the source block, lest we merge fallthru and EH edges and get |
| thoroughly confused. */ |
| if (find_edge (e_in->src, e_out->dest)) |
| return false; |
| |
| /* ??? We can get degenerate phis due to cfg cleanups. I would have |
| thought this should have been cleaned up by a phicprop pass, but |
| that doesn't appear to handle virtuals. Propagate by hand. */ |
| if (!gimple_seq_empty_p (phi_nodes (bb))) |
| { |
| for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); ) |
| { |
| gimple *use_stmt; |
| gphi *phi = gpi.phi (); |
| tree lhs = gimple_phi_result (phi); |
| tree rhs = gimple_phi_arg_def (phi, 0); |
| use_operand_p use_p; |
| imm_use_iterator iter; |
| |
| FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs) |
| { |
| FOR_EACH_IMM_USE_ON_STMT (use_p, iter) |
| SET_USE (use_p, rhs); |
| } |
| |
| if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)) |
| SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1; |
| |
| remove_phi_node (&gpi, true); |
| } |
| } |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n", |
| lp->index, e_out->dest->index); |
| |
| /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving |
| a successor edge, humor it. But do the real CFG change with the |
| predecessor of E_OUT in order to preserve the ordering of arguments |
| to the PHI nodes in E_OUT->DEST. */ |
| redirect_eh_edge_1 (e_in, e_out->dest, false); |
| redirect_edge_pred (e_out, e_in->src); |
| e_out->flags = e_in->flags; |
| e_out->probability = e_in->probability; |
| remove_edge (e_in); |
| |
| return true; |
| } |
| |
| /* Examine each landing pad block and see if it matches unsplit_eh. */ |
| |
| static bool |
| unsplit_all_eh (void) |
| { |
| bool changed = false; |
| eh_landing_pad lp; |
| int i; |
| |
| for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i) |
| if (lp) |
| changed |= unsplit_eh (lp); |
| |
| return changed; |
| } |
| |
| /* Wrapper around unsplit_all_eh that makes it usable everywhere. */ |
| |
| void |
| unsplit_eh_edges (void) |
| { |
| bool changed; |
| |
| /* unsplit_all_eh can die looking up unreachable landing pads. */ |
| maybe_remove_unreachable_handlers (); |
| |
| changed = unsplit_all_eh (); |
| |
| /* If EH edges have been unsplit, delete unreachable forwarder blocks. */ |
| if (changed) |
| { |
| free_dominance_info (CDI_DOMINATORS); |
| free_dominance_info (CDI_POST_DOMINATORS); |
| delete_unreachable_blocks (); |
| } |
| } |
| |
| /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming |
| to OLD_BB to NEW_BB; return true on success, false on failure. |
| |
| OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any |
| PHI variables from OLD_BB we can pick them up from OLD_BB_OUT. |
| Virtual PHIs may be deleted and marked for renaming. */ |
| |
| static bool |
| cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb, |
| edge old_bb_out, bool change_region) |
| { |
| gphi_iterator ngsi, ogsi; |
| edge_iterator ei; |
| edge e; |
| bitmap ophi_handled; |
| |
| /* The destination block must not be a regular successor for any |
| of the preds of the landing pad. Thus, avoid turning |
| <..> |
| | \ EH |
| | <..> |
| | / |
| <..> |
| into |
| <..> |
| | | EH |
| <..> |
| which CFG verification would choke on. See PR45172 and PR51089. */ |
| if (!single_pred_p (new_bb)) |
| FOR_EACH_EDGE (e, ei, old_bb->preds) |
| if (find_edge (e->src, new_bb)) |
| return false; |
| |
| FOR_EACH_EDGE (e, ei, old_bb->preds) |
| redirect_edge_var_map_clear (e); |
| |
| ophi_handled = BITMAP_ALLOC (NULL); |
| |
| /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map |
| for the edges we're going to move. */ |
| for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi)) |
| { |
| gphi *ophi, *nphi = ngsi.phi (); |
| tree nresult, nop; |
| |
| nresult = gimple_phi_result (nphi); |
| nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx); |
| |
| /* Find the corresponding PHI in OLD_BB so we can forward-propagate |
| the source ssa_name. */ |
| ophi = NULL; |
| for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi)) |
| { |
| ophi = ogsi.phi (); |
| if (gimple_phi_result (ophi) == nop) |
| break; |
| ophi = NULL; |
| } |
| |
| /* If we did find the corresponding PHI, copy those inputs. */ |
| if (ophi) |
| { |
| /* If NOP is used somewhere else beyond phis in new_bb, give up. */ |
| if (!has_single_use (nop)) |
| { |
| imm_use_iterator imm_iter; |
| use_operand_p use_p; |
| |
| FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop) |
| { |
| if (!gimple_debug_bind_p (USE_STMT (use_p)) |
| && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI |
| || gimple_bb (USE_STMT (use_p)) != new_bb)) |
| goto fail; |
| } |
| } |
| bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop)); |
| FOR_EACH_EDGE (e, ei, old_bb->preds) |
| { |
| location_t oloc; |
| tree oop; |
| |
| if ((e->flags & EDGE_EH) == 0) |
| continue; |
| oop = gimple_phi_arg_def (ophi, e->dest_idx); |
| oloc = gimple_phi_arg_location (ophi, e->dest_idx); |
| redirect_edge_var_map_add (e, nresult, oop, oloc); |
| } |
| } |
| /* If we didn't find the PHI, then whether it's a real variable or |
| a VOP, we know from the fact that OLD_BB is tree_empty_eh_handler_p |
| that the variable is unchanged from input to the block and we can |
| simply re-use the input to NEW_BB from the OLD_BB_OUT edge. */ |
| else |
| { |
| location_t nloc |
| = gimple_phi_arg_location (nphi, old_bb_out->dest_idx); |
| FOR_EACH_EDGE (e, ei, old_bb->preds) |
| redirect_edge_var_map_add (e, nresult, nop, nloc); |
| } |
| } |
| |
| /* Second, verify that all PHIs from OLD_BB have been handled. If not, |
| we don't know what values from the other edges into NEW_BB to use. */ |
| for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi)) |
| { |
| gphi *ophi = ogsi.phi (); |
| tree oresult = gimple_phi_result (ophi); |
| if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult))) |
| goto fail; |
| } |
| |
| /* Finally, move the edges and update the PHIs. */ |
| for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); ) |
| if (e->flags & EDGE_EH) |
| { |
| /* ??? CFG manipulation routines do not try to update loop |
| form on edge redirection. Do so manually here for now. */ |
| /* Redirecting a loop entry or latch edge will either create |
| a multiple-entry loop or rotate the loop. If the loops merge |
| we may have created a loop with multiple latches. |
| None of this is easily fixed, thus cancel the affected loop |
| and mark the other loop as possibly having multiple latches. */ |
| if (e->dest == e->dest->loop_father->header) |
| { |
| mark_loop_for_removal (e->dest->loop_father); |
| new_bb->loop_father->latch = NULL; |
| loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES); |
| } |
| redirect_eh_edge_1 (e, new_bb, change_region); |
| redirect_edge_succ (e, new_bb); |
| flush_pending_stmts (e); |
| } |
| else |
| ei_next (&ei); |
| |
| BITMAP_FREE (ophi_handled); |
| return true; |
| |
| fail: |
| FOR_EACH_EDGE (e, ei, old_bb->preds) |
| redirect_edge_var_map_clear (e); |
| BITMAP_FREE (ophi_handled); |
| return false; |
| } |
| |
| /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its |
| old region to NEW_REGION at BB, making E_OUT the normal fallthru edge. */ |
| |
| static void |
| cleanup_empty_eh_move_lp (basic_block bb, edge e_out, |
| eh_landing_pad lp, eh_region new_region) |
| { |
| gimple_stmt_iterator gsi; |
| eh_landing_pad *pp; |
| |
| for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp) |
| continue; |
| *pp = lp->next_lp; |
| |
| lp->region = new_region; |
| lp->next_lp = new_region->landing_pads; |
| new_region->landing_pads = lp; |
| |
| /* Delete the RESX that was matched within the empty handler block. */ |
| gsi = gsi_last_bb (bb); |
| unlink_stmt_vdef (gsi_stmt (gsi)); |
| gsi_remove (&gsi, true); |
| |
| /* Clean up E_OUT for the fallthru. */ |
| e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU; |
| e_out->probability = profile_probability::always (); |
| } |
| |
| /* A subroutine of cleanup_empty_eh. Handle more complex cases of |
| unsplitting than unsplit_eh was prepared to handle, e.g. when |
| multiple incoming edges and phis are involved. */ |
| |
| static bool |
| cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp) |
| { |
| gimple_stmt_iterator gsi; |
| tree lab; |
| |
| /* We really ought not have totally lost everything following |
| a landing pad label. Given that BB is empty, there had better |
| be a successor. */ |
| gcc_assert (e_out != NULL); |
| |
| /* The destination block must not already have a landing pad |
| for a different region. */ |
| lab = NULL; |
| for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi)); |
| int lp_nr; |
| |
| if (!stmt) |
| break; |
| lab = gimple_label_label (stmt); |
| lp_nr = EH_LANDING_PAD_NR (lab); |
| if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region) |
| return false; |
| } |
| |
| /* Attempt to move the PHIs into the successor block. */ |
| if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false)) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, |
| "Unsplit EH landing pad %d to block %i " |
| "(via cleanup_empty_eh).\n", |
| lp->index, e_out->dest->index); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Return true if edge E_FIRST is part of an empty infinite loop |
| or leads to such a loop through a series of single successor |
| empty bbs. */ |
| |
| static bool |
| infinite_empty_loop_p (edge e_first) |
| { |
| bool inf_loop = false; |
| edge e; |
| |
| if (e_first->dest == e_first->src) |
| return true; |
| |
| e_first->src->aux = (void *) 1; |
| for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest)) |
| { |
| gimple_stmt_iterator gsi; |
| if (e->dest->aux) |
| { |
| inf_loop = true; |
| break; |
| } |
| e->dest->aux = (void *) 1; |
| gsi = gsi_after_labels (e->dest); |
| if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi))) |
| gsi_next_nondebug (&gsi); |
| if (!gsi_end_p (gsi)) |
| break; |
| } |
| e_first->src->aux = NULL; |
| for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest)) |
| e->dest->aux = NULL; |
| |
| return inf_loop; |
| } |
| |
| /* Examine the block associated with LP to determine if it's an empty |
| handler for its EH region. If so, attempt to redirect EH edges to |
| an outer region. Return true if the CFG was updated in any way. This |
| is similar to jump forwarding, just across EH edges. */ |
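| |
| /* The canonical candidate is a landing pad block containing nothing but |
| a RESX (clobbers, if any, are removed or sunk first): exceptions that |
| arrive here are immediately re-raised to an outer region, so the |
| incoming EH edges might as well target that outer region directly. */ |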
| |
| static bool |
| cleanup_empty_eh (eh_landing_pad lp) |
| { |
| basic_block bb = label_to_block (cfun, lp->post_landing_pad); |
| gimple_stmt_iterator gsi; |
| gimple *resx; |
| eh_region new_region; |
| edge_iterator ei; |
| edge e, e_out; |
| bool has_non_eh_pred; |
| bool ret = false; |
| int new_lp_nr; |
| |
| /* There can be zero or one edges out of BB. This is the quickest test. */ |
| switch (EDGE_COUNT (bb->succs)) |
| { |
| case 0: |
| e_out = NULL; |
| break; |
| case 1: |
| e_out = single_succ_edge (bb); |
| break; |
| default: |
| return false; |
| } |
| |
| gsi = gsi_last_nondebug_bb (bb); |
| resx = gsi_stmt (gsi); |
| if (resx && is_gimple_resx (resx)) |
| { |
| if (stmt_can_throw_external (cfun, resx)) |
| optimize_clobbers (bb); |
| else if (sink_clobbers (bb)) |
| ret = true; |
| } |
| |
| gsi = gsi_after_labels (bb); |
| |
| /* Make sure to skip debug statements. */ |
| if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi))) |
| gsi_next_nondebug (&gsi); |
| |
| /* If the block is totally empty, look for more unsplitting cases. */ |
| if (gsi_end_p (gsi)) |
| { |
| /* For the degenerate case of an infinite loop, bail out. |
| If bb has no successors and is totally empty, which can happen e.g. |
| because of an incorrect noreturn attribute, bail out too. */ |
| if (e_out == NULL |
| || infinite_empty_loop_p (e_out)) |
| return ret; |
| |
| return ret | cleanup_empty_eh_unsplit (bb, e_out, lp); |
| } |
| |
| /* The block should consist only of a single RESX statement, modulo a |
| preceding call to __builtin_stack_restore if there is no outgoing |
| edge, since the call can be eliminated in this case. */ |
| resx = gsi_stmt (gsi); |
| if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE)) |
| { |
| gsi_next_nondebug (&gsi); |
| resx = gsi_stmt (gsi); |
| } |
| if (!is_gimple_resx (resx)) |
| return ret; |
| gcc_assert (gsi_one_nondebug_before_end_p (gsi)); |
| |
| /* Determine if there are non-EH edges, or resx edges into the handler. */ |
| has_non_eh_pred = false; |
| FOR_EACH_EDGE (e, ei, bb->preds) |
| if (!(e->flags & EDGE_EH)) |
| has_non_eh_pred = true; |
| |
| /* Find the handler that's outer of the empty handler by looking at |
| where the RESX instruction was vectored. */ |
| new_lp_nr = lookup_stmt_eh_lp (resx); |
| new_region = get_eh_region_from_lp_number (new_lp_nr); |
| |
| /* If there's no destination region within the current function, |
| redirection is trivial via removing the throwing statements from |
| the EH region, removing the EH edges, and allowing the block |
| to go unreachable. */ |
| if (new_region == NULL) |
| { |
| gcc_assert (e_out == NULL); |
| for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); ) |
| if (e->flags & EDGE_EH) |
| { |
| gimple *stmt = *gsi_last_bb (e->src); |
| remove_stmt_from_eh_lp (stmt); |
| remove_edge (e); |
| } |
| else |
| ei_next (&ei); |
| goto succeed; |
| } |
| |
| /* If the destination region is a MUST_NOT_THROW, allow the runtime |
| to handle the abort and allow the blocks to go unreachable. */ |
| if (new_region->type == ERT_MUST_NOT_THROW) |
| { |
| for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); ) |
| if (e->flags & EDGE_EH) |
| { |
| gimple *stmt = *gsi_last_bb (e->src); |
| remove_stmt_from_eh_lp (stmt); |
| add_stmt_to_eh_lp (stmt, new_lp_nr); |
| remove_edge (e); |
| } |
| else |
| ei_next (&ei); |
| goto succeed; |
| } |
| |
| /* Try to redirect the EH edges and merge the PHIs into the destination |
| landing pad block. If the merge succeeds, we'll already have redirected |
| all the EH edges. The handler itself will go unreachable if there were |
| no normal edges. */ |
| if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true)) |
| goto succeed; |
| |
| /* Finally, if all input edges are EH edges, then we can (potentially) |
| reduce the number of transfers from the runtime by moving the landing |
| pad from the original region to the new region. This is a win when |
| we remove the last CLEANUP region along a particular exception |
| propagation path. Since nothing changes except for the region with |
| which the landing pad is associated, the PHI nodes do not need to be |
| adjusted at all. */ |
| if (!has_non_eh_pred) |
| { |
| cleanup_empty_eh_move_lp (bb, e_out, lp, new_region); |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n", |
| lp->index, new_region->index); |
| |
| /* ??? The CFG didn't change, but we may have rendered the |
| old EH region unreachable. Trigger a cleanup there. */ |
| return true; |
| } |
| |
| return ret; |
| |
| succeed: |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index); |
| remove_eh_landing_pad (lp); |
| return true; |
| } |
| |
| /* Do a post-order traversal of the EH region tree. Examine each |
| post_landing_pad block and see if we can eliminate it as empty. */ |
| |
| static bool |
| cleanup_all_empty_eh (void) |
| { |
| bool changed = false; |
| eh_landing_pad lp; |
| int i; |
| |
| /* The post-order traversal may lead to quadratic behavior in the redirection |
| of incoming EH edges from inner LPs, so first try to walk the region |
| tree from inner to outer LPs in order to eliminate these edges. */ |
| for (i = vec_safe_length (cfun->eh->lp_array) - 1; i >= 1; --i) |
| { |
| lp = (*cfun->eh->lp_array)[i]; |
| if (lp) |
| changed |= cleanup_empty_eh (lp); |
| } |
| |
| /* Now do the post-order traversal to eliminate outer empty LPs. */ |
| for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i) |
| if (lp) |
| changed |= cleanup_empty_eh (lp); |
| |
| return changed; |
| } |
| |
| /* Perform cleanups and lowering of exception handling: |
| 1) Cleanup regions with handlers doing nothing are optimized out. |
| 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out. |
| 3) Info about regions that contain instructions, and regions |
| reachable via local EH edges, is collected. |
| 4) The EH tree is pruned of regions that are no longer necessary. |
| |
| TODO: Push MUST_NOT_THROW regions to the root of the EH tree. |
| Unify those that have the same failure decl and locus. |
| */ |
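| |
| /* As an illustration (the typical case, not an exhaustive one): after |
| inlining an empty destructor, a cleanup region whose handler only |
| re-raises falls under 1); a MUST_NOT_THROW region that existed solely |
| for that handler then becomes dead and falls under 2). */ |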
| |
| static unsigned int |
| execute_cleanup_eh_1 (void) |
| { |
| /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die |
| looking up unreachable landing pads. */ |
| remove_unreachable_handlers (); |
| |
| /* Watch out for the region tree vanishing due to all regions being unreachable. */ |
| if (cfun->eh->region_tree) |
| { |
| bool changed = false; |
| |
| if (optimize) |
| changed |= unsplit_all_eh (); |
| changed |= cleanup_all_empty_eh (); |
| |
| if (changed) |
| { |
| free_dominance_info (CDI_DOMINATORS); |
| free_dominance_info (CDI_POST_DOMINATORS); |
| |
| /* We delayed all basic block deletion, as we may have performed |
| cleanups on EH edges while non-EH edges were still present. */ |
| delete_unreachable_blocks (); |
| |
| /* We manipulated the landing pads. Remove any region that no |
| longer has a landing pad. */ |
| remove_unreachable_handlers_no_lp (); |
| |
| return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals; |
| } |
| } |
| |
| return 0; |
| } |
| |
| namespace { |
| |
| const pass_data pass_data_cleanup_eh = |
| { |
| GIMPLE_PASS, /* type */ |
| "ehcleanup", /* name */ |
| OPTGROUP_NONE, /* optinfo_flags */ |
| TV_TREE_EH, /* tv_id */ |
| PROP_gimple_lcf, /* properties_required */ |
| 0, /* properties_provided */ |
| 0, /* properties_destroyed */ |
| 0, /* todo_flags_start */ |
| 0, /* todo_flags_finish */ |
| }; |
| |
| class pass_cleanup_eh : public gimple_opt_pass |
| { |
| public: |
| pass_cleanup_eh (gcc::context *ctxt) |
| : gimple_opt_pass (pass_data_cleanup_eh, ctxt) |
| {} |
| |
| /* opt_pass methods: */ |
| opt_pass * clone () final override { return new pass_cleanup_eh (m_ctxt); } |
| bool gate (function *fun) final override |
| { |
| return fun->eh != NULL && fun->eh->region_tree != NULL; |
| } |
| |
| unsigned int execute (function *) final override; |
| |
| }; // class pass_cleanup_eh |
| |
| unsigned int |
| pass_cleanup_eh::execute (function *fun) |
| { |
| int ret = execute_cleanup_eh_1 (); |
| |
| /* If the function no longer needs an EH personality routine, |
| clear it. This exposes cross-language inlining opportunities |
| and avoids references to a never defined personality routine. */ |
| if (DECL_FUNCTION_PERSONALITY (current_function_decl) |
| && function_needs_eh_personality (fun) != eh_personality_lang) |
| DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE; |
| |
| return ret; |
| } |
| |
| } // anon namespace |
| |
| gimple_opt_pass * |
| make_pass_cleanup_eh (gcc::context *ctxt) |
| { |
| return new pass_cleanup_eh (ctxt); |
| } |
| |
| /* Disable warnings about missing quoting in GCC diagnostics for |
| the verification errors. Their format strings don't follow GCC |
| diagnostic conventions but are only used for debugging. */ |
| #if __GNUC__ >= 10 |
| # pragma GCC diagnostic push |
| # pragma GCC diagnostic ignored "-Wformat-diag" |
| #endif |
| |
| /* Verify that BB, containing STMT as its last statement, has precisely the |
| edge that make_eh_edge would create. */ |
| |
| DEBUG_FUNCTION bool |
| verify_eh_edges (gimple *stmt) |
| { |
| basic_block bb = gimple_bb (stmt); |
| eh_landing_pad lp = NULL; |
| int lp_nr; |
| edge_iterator ei; |
| edge e, eh_edge; |
| |
| lp_nr = lookup_stmt_eh_lp (stmt); |
| if (lp_nr > 0) |
| lp = get_eh_landing_pad_from_number (lp_nr); |
| |
| eh_edge = NULL; |
| FOR_EACH_EDGE (e, ei, bb->succs) |
| { |
| if (e->flags & EDGE_EH) |
| { |
| if (eh_edge) |
| { |
| error ("BB %i has multiple EH edges", bb->index); |
| return true; |
| } |
| else |
| eh_edge = e; |
| } |
| } |
| |
| if (lp == NULL) |
| { |
| if (eh_edge) |
| { |
| error ("BB %i cannot throw but has an EH edge", bb->index); |
| return true; |
| } |
| return false; |
| } |
| |
| if (!stmt_could_throw_p (cfun, stmt)) |
| { |
| error ("BB %i last statement has incorrectly set lp", bb->index); |
| return true; |
| } |
| |
| if (eh_edge == NULL) |
| { |
| error ("BB %i is missing an EH edge", bb->index); |
| return true; |
| } |
| |
| if (eh_edge->dest != label_to_block (cfun, lp->post_landing_pad)) |
| { |
| error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */ |
| |
| DEBUG_FUNCTION bool |
| verify_eh_dispatch_edge (geh_dispatch *stmt) |
| { |
| eh_region r; |
| eh_catch c; |
| basic_block src, dst; |
| bool want_fallthru = true; |
| edge_iterator ei; |
| edge e, fall_edge; |
| |
| r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt)); |
| src = gimple_bb (stmt); |
| |
| FOR_EACH_EDGE (e, ei, src->succs) |
| gcc_assert (e->aux == NULL); |
| |
| switch (r->type) |
| { |
| case ERT_TRY: |
| for (c = r->u.eh_try.first_catch; c ; c = c->next_catch) |
| { |
| dst = label_to_block (cfun, c->label); |
| e = find_edge (src, dst); |
| if (e == NULL) |
| { |
| error ("BB %i is missing an edge", src->index); |
| return true; |
| } |
| e->aux = (void *)e; |
| |
| /* A catch-all handler doesn't have a fallthru. */ |
| if (c->type_list == NULL) |
| { |
| want_fallthru = false; |
| break; |
| } |
| } |
| break; |
| |
| case ERT_ALLOWED_EXCEPTIONS: |
| dst = label_to_block (cfun, r->u.allowed.label); |
| e = find_edge (src, dst); |
| if (e == NULL) |
| { |
| error ("BB %i is missing an edge", src->index); |
| return true; |
| } |
| e->aux = (void *)e; |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| |
| fall_edge = NULL; |
| FOR_EACH_EDGE (e, ei, src->succs) |
| { |
| if (e->flags & EDGE_FALLTHRU) |
| { |
| if (fall_edge != NULL) |
| { |
| error ("BB %i too many fallthru edges", src->index); |
| return true; |
| } |
| fall_edge = e; |
| } |
| else if (e->aux) |
| e->aux = NULL; |
| else |
| { |
| error ("BB %i has incorrect edge", src->index); |
| return true; |
| } |
| } |
| if ((fall_edge != NULL) ^ want_fallthru) |
| { |
| error ("BB %i has incorrect fallthru edge", src->index); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| #if __GNUC__ >= 10 |
| # pragma GCC diagnostic pop |
| #endif |