| /* Instruction scheduling pass. Selective scheduler and pipeliner. |
| Copyright (C) 2006-2022 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "cfghooks.h" |
| #include "tree.h" |
| #include "rtl.h" |
| #include "df.h" |
| #include "memmodel.h" |
| #include "tm_p.h" |
| #include "cfgrtl.h" |
| #include "cfganal.h" |
| #include "cfgbuild.h" |
| #include "insn-config.h" |
| #include "insn-attr.h" |
| #include "recog.h" |
| #include "target.h" |
| #include "sched-int.h" |
| #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */ |
| |
| #ifdef INSN_SCHEDULING |
| #include "regset.h" |
| #include "cfgloop.h" |
| #include "sel-sched-ir.h" |
/* We need this only for sel_print_insn. */
| #include "sel-sched-dump.h" |
| |
/* A vector holding bb info for the whole scheduling pass. */
| vec<sel_global_bb_info_def> sel_global_bb_info; |
| |
/* A vector holding bb info for the current region. */
| vec<sel_region_bb_info_def> sel_region_bb_info; |
| |
| /* A pool for allocating all lists. */ |
| object_allocator<_list_node> sched_lists_pool ("sel-sched-lists"); |
| |
| /* This contains information about successors for compute_av_set. */ |
| struct succs_info current_succs; |
| |
| /* Data structure to describe interaction with the generic scheduler utils. */ |
| static struct common_sched_info_def sel_common_sched_info; |
| |
| /* The loop nest being pipelined. */ |
| class loop *current_loop_nest; |
| |
| /* LOOP_NESTS is a vector containing the corresponding loop nest for |
| each region. */ |
| static vec<loop_p> loop_nests; |
| |
| /* Saves blocks already in loop regions, indexed by bb->index. */ |
| static sbitmap bbs_in_loop_rgns = NULL; |
| |
/* CFG hooks that are saved before changing the create_basic_block hook. */
| static struct cfg_hooks orig_cfg_hooks; |
| |
| |
| /* Array containing reverse topological index of function basic blocks, |
| indexed by BB->INDEX. */ |
| static int *rev_top_order_index = NULL; |
| |
| /* Length of the above array. */ |
| static int rev_top_order_index_len = -1; |
| |
| /* A regset pool structure. */ |
| static struct |
| { |
| /* The stack to which regsets are returned. */ |
| regset *v; |
| |
  /* The number of regsets currently in the stack (its stack pointer). */
  int n;

  /* Its allocated size. */
  int s;
| |
| /* In VV we save all generated regsets so that, when destructing the |
| pool, we can compare it with V and check that every regset was returned |
| back to pool. */ |
| regset *vv; |
| |
  /* The number of regsets in the VV stack (its stack pointer). */
  int nn;

  /* Its allocated size. */
  int ss;
| |
| /* The difference between allocated and returned regsets. */ |
| int diff; |
| } regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 }; |
| |
| /* This represents the nop pool. */ |
| static struct |
| { |
| /* The vector which holds previously emitted nops. */ |
| insn_t *v; |
| |
  /* The number of nops in the vector (its stack pointer). */
  int n;

  /* Its allocated size. */
  int s;
| } nop_pool = { NULL, 0, 0 }; |
| |
| /* The pool for basic block notes. */ |
| static vec<rtx_note *> bb_note_pool; |
| |
| /* A NOP pattern used to emit placeholder insns. */ |
| rtx nop_pattern = NULL_RTX; |
| /* A special instruction that resides in EXIT_BLOCK. |
   EXIT_INSN is the successor of the insns that lead to EXIT_BLOCK. */
| rtx_insn *exit_insn = NULL; |
| |
/* TRUE if, while scheduling the current region (which is a loop), its
   preheader was removed. */
| bool preheader_removed = false; |
| |
| |
| /* Forward static declarations. */ |
| static void fence_clear (fence_t); |
| |
| static void deps_init_id (idata_t, insn_t, bool); |
| static void init_id_from_df (idata_t, insn_t, bool); |
| static expr_t set_insn_init (expr_t, vinsn_t, int); |
| |
| static void cfg_preds (basic_block, insn_t **, int *); |
| static void prepare_insn_expr (insn_t, int); |
| static void free_history_vect (vec<expr_history_def> &); |
| |
| static void move_bb_info (basic_block, basic_block); |
| static void remove_empty_bb (basic_block, bool); |
| static void sel_merge_blocks (basic_block, basic_block); |
| static void sel_remove_loop_preheader (void); |
| static bool bb_has_removable_jump_to_p (basic_block, basic_block); |
| |
| static bool insn_is_the_only_one_in_bb_p (insn_t); |
| static void create_initial_data_sets (basic_block); |
| |
| static void free_av_set (basic_block); |
| static void invalidate_av_set (basic_block); |
| static void extend_insn_data (void); |
| static void sel_init_new_insn (insn_t, int, int = -1); |
| static void finish_insns (void); |
| |
| /* Various list functions. */ |
| |
| /* Copy an instruction list L. */ |
| ilist_t |
| ilist_copy (ilist_t l) |
| { |
| ilist_t head = NULL, *tailp = &head; |
| |
| while (l) |
| { |
| ilist_add (tailp, ILIST_INSN (l)); |
| tailp = &ILIST_NEXT (*tailp); |
| l = ILIST_NEXT (l); |
| } |
| |
| return head; |
| } |
| |
| /* Invert an instruction list L. */ |
| ilist_t |
| ilist_invert (ilist_t l) |
| { |
| ilist_t res = NULL; |
| |
| while (l) |
| { |
| ilist_add (&res, ILIST_INSN (l)); |
| l = ILIST_NEXT (l); |
| } |
| |
| return res; |
| } |
| |
| /* Add a new boundary to the LP list with parameters TO, PTR, and DC. */ |
| void |
| blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc) |
| { |
| bnd_t bnd; |
| |
| _list_add (lp); |
| bnd = BLIST_BND (*lp); |
| |
| BND_TO (bnd) = to; |
| BND_PTR (bnd) = ptr; |
| BND_AV (bnd) = NULL; |
| BND_AV1 (bnd) = NULL; |
| BND_DC (bnd) = dc; |
| } |
| |
/* Remove the list node pointed to by LP. */
| void |
| blist_remove (blist_t *lp) |
| { |
| bnd_t b = BLIST_BND (*lp); |
| |
| av_set_clear (&BND_AV (b)); |
| av_set_clear (&BND_AV1 (b)); |
| ilist_clear (&BND_PTR (b)); |
| |
| _list_remove (lp); |
| } |
| |
| /* Init a fence tail L. */ |
| void |
| flist_tail_init (flist_tail_t l) |
| { |
| FLIST_TAIL_HEAD (l) = NULL; |
| FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l); |
| } |
| |
/* Try to find the fence corresponding to INSN in L. */
| fence_t |
| flist_lookup (flist_t l, insn_t insn) |
| { |
| while (l) |
| { |
| if (FENCE_INSN (FLIST_FENCE (l)) == insn) |
| return FLIST_FENCE (l); |
| |
| l = FLIST_NEXT (l); |
| } |
| |
| return NULL; |
| } |
| |
| /* Init the fields of F before running fill_insns. */ |
| static void |
| init_fence_for_scheduling (fence_t f) |
| { |
| FENCE_BNDS (f) = NULL; |
| FENCE_PROCESSED_P (f) = false; |
| FENCE_SCHEDULED_P (f) = false; |
| } |
| |
| /* Add new fence consisting of INSN and STATE to the list pointed to by LP. */ |
| static void |
| flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc, |
| insn_t last_scheduled_insn, vec<rtx_insn *, va_gc> *executing_insns, |
| int *ready_ticks, int ready_ticks_size, insn_t sched_next, |
| int cycle, int cycle_issued_insns, int issue_more, |
| bool starts_cycle_p, bool after_stall_p) |
| { |
| fence_t f; |
| |
| _list_add (lp); |
| f = FLIST_FENCE (*lp); |
| |
| FENCE_INSN (f) = insn; |
| |
| gcc_assert (state != NULL); |
| FENCE_STATE (f) = state; |
| |
| FENCE_CYCLE (f) = cycle; |
| FENCE_ISSUED_INSNS (f) = cycle_issued_insns; |
| FENCE_STARTS_CYCLE_P (f) = starts_cycle_p; |
| FENCE_AFTER_STALL_P (f) = after_stall_p; |
| |
| gcc_assert (dc != NULL); |
| FENCE_DC (f) = dc; |
| |
| gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL); |
| FENCE_TC (f) = tc; |
| |
| FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn; |
| FENCE_ISSUE_MORE (f) = issue_more; |
| FENCE_EXECUTING_INSNS (f) = executing_insns; |
| FENCE_READY_TICKS (f) = ready_ticks; |
| FENCE_READY_TICKS_SIZE (f) = ready_ticks_size; |
| FENCE_SCHED_NEXT (f) = sched_next; |
| |
| init_fence_for_scheduling (f); |
| } |
| |
| /* Remove the head node of the list pointed to by LP. */ |
| static void |
| flist_remove (flist_t *lp) |
| { |
| if (FENCE_INSN (FLIST_FENCE (*lp))) |
| fence_clear (FLIST_FENCE (*lp)); |
| _list_remove (lp); |
| } |
| |
| /* Clear the fence list pointed to by LP. */ |
| void |
| flist_clear (flist_t *lp) |
| { |
| while (*lp) |
| flist_remove (lp); |
| } |
| |
/* Add ORIGINAL_INSN to the def list DL, honoring CROSSED_CALL_ABIS. */
| void |
| def_list_add (def_list_t *dl, insn_t original_insn, |
| unsigned int crossed_call_abis) |
| { |
| def_t d; |
| |
| _list_add (dl); |
| d = DEF_LIST_DEF (*dl); |
| |
| d->orig_insn = original_insn; |
| d->crossed_call_abis = crossed_call_abis; |
| } |
| |
| |
| /* Functions to work with target contexts. */ |
| |
| /* Bulk target context. It is convenient for debugging purposes to ensure |
| that there are no uninitialized (null) target contexts. */ |
| static tc_t bulk_tc = (tc_t) 1; |
| |
| /* Target hooks wrappers. In the future we can provide some default |
| implementations for them. */ |
| |
| /* Allocate a store for the target context. */ |
| static tc_t |
| alloc_target_context (void) |
| { |
| return (targetm.sched.alloc_sched_context |
| ? targetm.sched.alloc_sched_context () : bulk_tc); |
| } |
| |
/* Init target context TC.
   If CLEAN_P is true, initialize TC as if at the very beginning of the
   scheduler. Otherwise, copy the current backend context to TC. */
| static void |
| init_target_context (tc_t tc, bool clean_p) |
| { |
| if (targetm.sched.init_sched_context) |
| targetm.sched.init_sched_context (tc, clean_p); |
| } |
| |
/* Allocate and initialize a target context. The meaning of CLEAN_P is the
   same as in init_target_context (). */
| tc_t |
| create_target_context (bool clean_p) |
| { |
| tc_t tc = alloc_target_context (); |
| |
| init_target_context (tc, clean_p); |
| return tc; |
| } |
| |
| /* Copy TC to the current backend context. */ |
| void |
| set_target_context (tc_t tc) |
| { |
| if (targetm.sched.set_sched_context) |
| targetm.sched.set_sched_context (tc); |
| } |
| |
| /* TC is about to be destroyed. Free any internal data. */ |
| static void |
| clear_target_context (tc_t tc) |
| { |
| if (targetm.sched.clear_sched_context) |
| targetm.sched.clear_sched_context (tc); |
| } |
| |
/* Clear and free TC. */
| static void |
| delete_target_context (tc_t tc) |
| { |
| clear_target_context (tc); |
| |
| if (targetm.sched.free_sched_context) |
| targetm.sched.free_sched_context (tc); |
| } |
| |
/* Make a copy of FROM in TO.
   NB: Maybe this should be a hook. */
| static void |
| copy_target_context (tc_t to, tc_t from) |
| { |
| tc_t tmp = create_target_context (false); |
| |
| set_target_context (from); |
| init_target_context (to, false); |
| |
| set_target_context (tmp); |
| delete_target_context (tmp); |
| } |
| |
| /* Create a copy of TC. */ |
| static tc_t |
| create_copy_of_target_context (tc_t tc) |
| { |
| tc_t copy = alloc_target_context (); |
| |
| copy_target_context (copy, tc); |
| |
| return copy; |
| } |
| |
| /* Clear TC and initialize it according to CLEAN_P. The meaning of CLEAN_P |
| is the same as in init_target_context (). */ |
| void |
| reset_target_context (tc_t tc, bool clean_p) |
| { |
| clear_target_context (tc); |
| init_target_context (tc, clean_p); |
| } |
| |
| /* Functions to work with dependence contexts. |
   DC (aka deps context, aka deps_t, aka class deps_desc *) is short for
   dependence context. It accumulates information about processed insns
   so that we can decide whether the current insn depends on them. */
| |
| /* Make a copy of FROM in TO. */ |
| static void |
| copy_deps_context (deps_t to, deps_t from) |
| { |
| init_deps (to, false); |
| deps_join (to, from); |
| } |
| |
| /* Allocate store for dep context. */ |
| static deps_t |
| alloc_deps_context (void) |
| { |
| return XNEW (class deps_desc); |
| } |
| |
| /* Allocate and initialize dep context. */ |
| static deps_t |
| create_deps_context (void) |
| { |
| deps_t dc = alloc_deps_context (); |
| |
| init_deps (dc, false); |
| return dc; |
| } |
| |
| /* Create a copy of FROM. */ |
| static deps_t |
| create_copy_of_deps_context (deps_t from) |
| { |
| deps_t to = alloc_deps_context (); |
| |
| copy_deps_context (to, from); |
| return to; |
| } |
| |
| /* Clean up internal data of DC. */ |
| static void |
| clear_deps_context (deps_t dc) |
| { |
| free_deps (dc); |
| } |
| |
| /* Clear and free DC. */ |
| static void |
| delete_deps_context (deps_t dc) |
| { |
| clear_deps_context (dc); |
| free (dc); |
| } |
| |
| /* Clear and init DC. */ |
| static void |
| reset_deps_context (deps_t dc) |
| { |
| clear_deps_context (dc); |
| init_deps (dc, false); |
| } |
| |
| /* This structure describes the dependence analysis hooks for advancing |
| dependence context. */ |
| static struct sched_deps_info_def advance_deps_context_sched_deps_info = |
| { |
| NULL, |
| |
| NULL, /* start_insn */ |
| NULL, /* finish_insn */ |
| NULL, /* start_lhs */ |
| NULL, /* finish_lhs */ |
| NULL, /* start_rhs */ |
| NULL, /* finish_rhs */ |
| haifa_note_reg_set, |
| haifa_note_reg_clobber, |
| haifa_note_reg_use, |
| NULL, /* note_mem_dep */ |
| NULL, /* note_dep */ |
| |
| 0, 0, 0 |
| }; |
| |
/* Process INSN and record its impact in DC. */
| void |
| advance_deps_context (deps_t dc, insn_t insn) |
| { |
| sched_deps_info = &advance_deps_context_sched_deps_info; |
| deps_analyze_insn (dc, insn); |
| } |
| |
| |
| /* Functions to work with DFA states. */ |
| |
| /* Allocate store for a DFA state. */ |
| static state_t |
| state_alloc (void) |
| { |
| return xmalloc (dfa_state_size); |
| } |
| |
| /* Allocate and initialize DFA state. */ |
| static state_t |
| state_create (void) |
| { |
| state_t state = state_alloc (); |
| |
| state_reset (state); |
| advance_state (state); |
| return state; |
| } |
| |
| /* Free DFA state. */ |
| static void |
| state_free (state_t state) |
| { |
| free (state); |
| } |
| |
| /* Make a copy of FROM in TO. */ |
| static void |
| state_copy (state_t to, state_t from) |
| { |
| memcpy (to, from, dfa_state_size); |
| } |
| |
| /* Create a copy of FROM. */ |
| static state_t |
| state_create_copy (state_t from) |
| { |
| state_t to = state_alloc (); |
| |
| state_copy (to, from); |
| return to; |
| } |
| |
| |
| /* Functions to work with fences. */ |
| |
| /* Clear the fence. */ |
| static void |
| fence_clear (fence_t f) |
| { |
| state_t s = FENCE_STATE (f); |
| deps_t dc = FENCE_DC (f); |
| void *tc = FENCE_TC (f); |
| |
| ilist_clear (&FENCE_BNDS (f)); |
| |
| gcc_assert ((s != NULL && dc != NULL && tc != NULL) |
| || (s == NULL && dc == NULL && tc == NULL)); |
| |
| free (s); |
| |
| if (dc != NULL) |
| delete_deps_context (dc); |
| |
| if (tc != NULL) |
| delete_target_context (tc); |
| vec_free (FENCE_EXECUTING_INSNS (f)); |
| free (FENCE_READY_TICKS (f)); |
| FENCE_READY_TICKS (f) = NULL; |
| } |
| |
| /* Init a list of fences with successors of OLD_FENCE. */ |
| void |
| init_fences (insn_t old_fence) |
| { |
| insn_t succ; |
| succ_iterator si; |
| bool first = true; |
| int ready_ticks_size = get_max_uid () + 1; |
| |
| FOR_EACH_SUCC_1 (succ, si, old_fence, |
| SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
| { |
| |
| if (first) |
| first = false; |
| else |
| gcc_assert (flag_sel_sched_pipelining_outer_loops); |
| |
| flist_add (&fences, succ, |
| state_create (), |
| create_deps_context () /* dc */, |
| create_target_context (true) /* tc */, |
| NULL /* last_scheduled_insn */, |
| NULL, /* executing_insns */ |
| XCNEWVEC (int, ready_ticks_size), /* ready_ticks */ |
| ready_ticks_size, |
| NULL /* sched_next */, |
| 1 /* cycle */, 0 /* cycle_issued_insns */, |
| issue_rate, /* issue_more */ |
| 1 /* starts_cycle_p */, 0 /* after_stall_p */); |
| } |
| } |
| |
/* Merge two fences (filling the fields of fence F with the resulting
   values) by the following rules: 1) the state, target context and last
   scheduled insn are propagated from the fallthrough edge if it is
   available; 2) the deps context and cycle are propagated from the more
   probable edge; 3) all other fields are set to the corresponding
   constant values.
| |
| INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS, |
| READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE |
| and AFTER_STALL_P are the corresponding fields of the second fence. */ |
| static void |
| merge_fences (fence_t f, insn_t insn, |
| state_t state, deps_t dc, void *tc, |
| rtx_insn *last_scheduled_insn, |
| vec<rtx_insn *, va_gc> *executing_insns, |
| int *ready_ticks, int ready_ticks_size, |
| rtx sched_next, int cycle, int issue_more, bool after_stall_p) |
| { |
| insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f); |
| |
| gcc_assert (sel_bb_head_p (FENCE_INSN (f)) |
| && !sched_next && !FENCE_SCHED_NEXT (f)); |
| |
  /* Check if we can decide from which path the fences came.
     If we can't (or don't want to), reset everything. */
| if (last_scheduled_insn == NULL |
| || last_scheduled_insn_old == NULL |
      /* This is the case when INSN is reachable on several paths from
         one insn (this can happen when pipelining of outer loops is on and
         there are two edges: one going around the inner loop and the other
         right through it; in such a case just reset everything). */
| || last_scheduled_insn == last_scheduled_insn_old) |
| { |
| state_reset (FENCE_STATE (f)); |
| state_free (state); |
| |
| reset_deps_context (FENCE_DC (f)); |
| delete_deps_context (dc); |
| |
| reset_target_context (FENCE_TC (f), true); |
| delete_target_context (tc); |
| |
| if (cycle > FENCE_CYCLE (f)) |
| FENCE_CYCLE (f) = cycle; |
| |
| FENCE_LAST_SCHEDULED_INSN (f) = NULL; |
| FENCE_ISSUE_MORE (f) = issue_rate; |
| vec_free (executing_insns); |
| free (ready_ticks); |
| if (FENCE_EXECUTING_INSNS (f)) |
| FENCE_EXECUTING_INSNS (f)->block_remove (0, |
| FENCE_EXECUTING_INSNS (f)->length ()); |
| if (FENCE_READY_TICKS (f)) |
| memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f)); |
| } |
| else |
| { |
| edge edge_old = NULL, edge_new = NULL; |
| edge candidate; |
| succ_iterator si; |
| insn_t succ; |
| |
| /* Find fallthrough edge. */ |
| gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb); |
| candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb); |
| |
| if (!candidate |
| || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn) |
| && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old))) |
| { |
| /* No fallthrough edge leading to basic block of INSN. */ |
| state_reset (FENCE_STATE (f)); |
| state_free (state); |
| |
| reset_target_context (FENCE_TC (f), true); |
| delete_target_context (tc); |
| |
| FENCE_LAST_SCHEDULED_INSN (f) = NULL; |
| FENCE_ISSUE_MORE (f) = issue_rate; |
| } |
| else |
| if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn)) |
| { |
| state_free (FENCE_STATE (f)); |
| FENCE_STATE (f) = state; |
| |
| delete_target_context (FENCE_TC (f)); |
| FENCE_TC (f) = tc; |
| |
| FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn; |
| FENCE_ISSUE_MORE (f) = issue_more; |
| } |
| else |
| { |
| /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched. */ |
| state_free (state); |
| delete_target_context (tc); |
| |
| gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb |
| != BLOCK_FOR_INSN (last_scheduled_insn)); |
| } |
| |
| /* Find edge of first predecessor (last_scheduled_insn_old->insn). */ |
| FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old, |
| SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
| { |
| if (succ == insn) |
| { |
| /* No same successor allowed from several edges. */ |
| gcc_assert (!edge_old); |
| edge_old = si.e1; |
| } |
| } |
| /* Find edge of second predecessor (last_scheduled_insn->insn). */ |
| FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn, |
| SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
| { |
| if (succ == insn) |
| { |
| /* No same successor allowed from several edges. */ |
| gcc_assert (!edge_new); |
| edge_new = si.e1; |
| } |
| } |
| |
      /* Check if we can choose the most probable predecessor. */
| if (edge_old == NULL || edge_new == NULL) |
| { |
| reset_deps_context (FENCE_DC (f)); |
| delete_deps_context (dc); |
| vec_free (executing_insns); |
| free (ready_ticks); |
| |
| FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle); |
| if (FENCE_EXECUTING_INSNS (f)) |
| FENCE_EXECUTING_INSNS (f)->block_remove (0, |
| FENCE_EXECUTING_INSNS (f)->length ()); |
| if (FENCE_READY_TICKS (f)) |
| memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f)); |
| } |
| else |
| if (edge_new->probability > edge_old->probability) |
| { |
| delete_deps_context (FENCE_DC (f)); |
| FENCE_DC (f) = dc; |
| vec_free (FENCE_EXECUTING_INSNS (f)); |
| FENCE_EXECUTING_INSNS (f) = executing_insns; |
| free (FENCE_READY_TICKS (f)); |
| FENCE_READY_TICKS (f) = ready_ticks; |
| FENCE_READY_TICKS_SIZE (f) = ready_ticks_size; |
| FENCE_CYCLE (f) = cycle; |
| } |
| else |
| { |
| /* Leave DC and CYCLE untouched. */ |
| delete_deps_context (dc); |
| vec_free (executing_insns); |
| free (ready_ticks); |
| } |
| } |
| |
| /* Fill remaining invariant fields. */ |
| if (after_stall_p) |
| FENCE_AFTER_STALL_P (f) = 1; |
| |
| FENCE_ISSUED_INSNS (f) = 0; |
| FENCE_STARTS_CYCLE_P (f) = 1; |
| FENCE_SCHED_NEXT (f) = NULL; |
| } |
| |
| /* Add a new fence to NEW_FENCES list, initializing it from all |
| other parameters. */ |
| static void |
| add_to_fences (flist_tail_t new_fences, insn_t insn, |
| state_t state, deps_t dc, void *tc, |
| rtx_insn *last_scheduled_insn, |
| vec<rtx_insn *, va_gc> *executing_insns, int *ready_ticks, |
| int ready_ticks_size, rtx_insn *sched_next, int cycle, |
| int cycle_issued_insns, int issue_rate, |
| bool starts_cycle_p, bool after_stall_p) |
| { |
| fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn); |
| |
| if (! f) |
| { |
| flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc, |
| last_scheduled_insn, executing_insns, ready_ticks, |
| ready_ticks_size, sched_next, cycle, cycle_issued_insns, |
| issue_rate, starts_cycle_p, after_stall_p); |
| |
| FLIST_TAIL_TAILP (new_fences) |
| = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences)); |
| } |
| else |
| { |
| merge_fences (f, insn, state, dc, tc, last_scheduled_insn, |
| executing_insns, ready_ticks, ready_ticks_size, |
| sched_next, cycle, issue_rate, after_stall_p); |
| } |
| } |
| |
| /* Move the first fence in the OLD_FENCES list to NEW_FENCES. */ |
| void |
| move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences) |
| { |
| fence_t f, old; |
| flist_t *tailp = FLIST_TAIL_TAILP (new_fences); |
| |
| old = FLIST_FENCE (old_fences); |
| f = flist_lookup (FLIST_TAIL_HEAD (new_fences), |
| FENCE_INSN (FLIST_FENCE (old_fences))); |
| if (f) |
| { |
| merge_fences (f, old->insn, old->state, old->dc, old->tc, |
| old->last_scheduled_insn, old->executing_insns, |
| old->ready_ticks, old->ready_ticks_size, |
| old->sched_next, old->cycle, old->issue_more, |
| old->after_stall_p); |
| } |
| else |
| { |
| _list_add (tailp); |
| FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp); |
| *FLIST_FENCE (*tailp) = *old; |
| init_fence_for_scheduling (FLIST_FENCE (*tailp)); |
| } |
| FENCE_INSN (old) = NULL; |
| } |
| |
| /* Add a new fence to NEW_FENCES list and initialize most of its data |
| as a clean one. */ |
| void |
| add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence) |
| { |
| int ready_ticks_size = get_max_uid () + 1; |
| |
| add_to_fences (new_fences, |
| succ, state_create (), create_deps_context (), |
| create_target_context (true), |
| NULL, NULL, |
| XCNEWVEC (int, ready_ticks_size), ready_ticks_size, |
| NULL, FENCE_CYCLE (fence) + 1, |
| 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence)); |
| } |
| |
| /* Add a new fence to NEW_FENCES list and initialize all of its data |
| from FENCE and SUCC. */ |
| void |
| add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence) |
| { |
| int * new_ready_ticks |
| = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence)); |
| |
| memcpy (new_ready_ticks, FENCE_READY_TICKS (fence), |
| FENCE_READY_TICKS_SIZE (fence) * sizeof (int)); |
| add_to_fences (new_fences, |
| succ, state_create_copy (FENCE_STATE (fence)), |
| create_copy_of_deps_context (FENCE_DC (fence)), |
| create_copy_of_target_context (FENCE_TC (fence)), |
| FENCE_LAST_SCHEDULED_INSN (fence), |
| vec_safe_copy (FENCE_EXECUTING_INSNS (fence)), |
| new_ready_ticks, |
| FENCE_READY_TICKS_SIZE (fence), |
| FENCE_SCHED_NEXT (fence), |
| FENCE_CYCLE (fence), |
| FENCE_ISSUED_INSNS (fence), |
| FENCE_ISSUE_MORE (fence), |
| FENCE_STARTS_CYCLE_P (fence), |
| FENCE_AFTER_STALL_P (fence)); |
| } |
| |
| |
| /* Functions to work with regset and nop pools. */ |
| |
/* Return a new regset from the pool. It might have some of the bits set
   from previous usage. */
| regset |
| get_regset_from_pool (void) |
| { |
| regset rs; |
| |
| if (regset_pool.n != 0) |
| rs = regset_pool.v[--regset_pool.n]; |
| else |
| /* We need to create the regset. */ |
| { |
| rs = ALLOC_REG_SET (®_obstack); |
| |
| if (regset_pool.nn == regset_pool.ss) |
| regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv, |
| (regset_pool.ss = 2 * regset_pool.ss + 1)); |
| regset_pool.vv[regset_pool.nn++] = rs; |
| } |
| |
| regset_pool.diff++; |
| |
| return rs; |
| } |
| |
/* Same as above, but return an empty regset. */
| regset |
| get_clear_regset_from_pool (void) |
| { |
| regset rs = get_regset_from_pool (); |
| |
| CLEAR_REG_SET (rs); |
| return rs; |
| } |
| |
| /* Return regset RS to the pool for future use. */ |
| void |
| return_regset_to_pool (regset rs) |
| { |
| gcc_assert (rs); |
| regset_pool.diff--; |
| |
| if (regset_pool.n == regset_pool.s) |
| regset_pool.v = XRESIZEVEC (regset, regset_pool.v, |
| (regset_pool.s = 2 * regset_pool.s + 1)); |
| regset_pool.v[regset_pool.n++] = rs; |
| } |
| |
| /* This is used as a qsort callback for sorting regset pool stacks. |
| X and XX are addresses of two regsets. They are never equal. */ |
| static int |
| cmp_v_in_regset_pool (const void *x, const void *xx) |
| { |
| uintptr_t r1 = (uintptr_t) *((const regset *) x); |
| uintptr_t r2 = (uintptr_t) *((const regset *) xx); |
| if (r1 > r2) |
| return 1; |
| else if (r1 < r2) |
| return -1; |
| gcc_unreachable (); |
| } |
| |
/* Free the regset pool, possibly checking for memory leaks. */
| void |
| free_regset_pool (void) |
| { |
| if (flag_checking) |
| { |
| regset *v = regset_pool.v; |
| int i = 0; |
| int n = regset_pool.n; |
| |
| regset *vv = regset_pool.vv; |
| int ii = 0; |
| int nn = regset_pool.nn; |
| |
| int diff = 0; |
| |
| gcc_assert (n <= nn); |
| |
| /* Sort both vectors so it will be possible to compare them. */ |
| qsort (v, n, sizeof (*v), cmp_v_in_regset_pool); |
| qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool); |
| |
| while (ii < nn) |
| { |
| if (v[i] == vv[ii]) |
| i++; |
| else |
| /* VV[II] was lost. */ |
| diff++; |
| |
| ii++; |
| } |
| |
| gcc_assert (diff == regset_pool.diff); |
| } |
| |
  /* If this is not true, we have a memory leak. */
| gcc_assert (regset_pool.diff == 0); |
| |
| while (regset_pool.n) |
| { |
| --regset_pool.n; |
| FREE_REG_SET (regset_pool.v[regset_pool.n]); |
| } |
| |
| free (regset_pool.v); |
| regset_pool.v = NULL; |
| regset_pool.s = 0; |
| |
| free (regset_pool.vv); |
| regset_pool.vv = NULL; |
| regset_pool.nn = 0; |
| regset_pool.ss = 0; |
| |
| regset_pool.diff = 0; |
| } |
| |
| |
/* Functions to work with nop pools. NOP insns are used as temporary
   placeholders for the insns being scheduled, to allow correct update of
   the data sets. When the update is finished, NOPs are deleted. */
| |
| /* A vinsn that is used to represent a nop. This vinsn is shared among all |
| nops sel-sched generates. */ |
| static vinsn_t nop_vinsn = NULL; |
| |
/* Emit a nop before INSN, taking it from the pool. */
| insn_t |
| get_nop_from_pool (insn_t insn) |
| { |
| rtx nop_pat; |
| insn_t nop; |
| bool old_p = nop_pool.n != 0; |
| int flags; |
| |
| if (old_p) |
| nop_pat = nop_pool.v[--nop_pool.n]; |
| else |
| nop_pat = nop_pattern; |
| |
| nop = emit_insn_before (nop_pat, insn); |
| |
| if (old_p) |
| flags = INSN_INIT_TODO_SSID; |
| else |
| flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID; |
| |
| set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn)); |
| sel_init_new_insn (nop, flags); |
| |
| return nop; |
| } |
| |
| /* Remove NOP from the instruction stream and return it to the pool. */ |
| void |
| return_nop_to_pool (insn_t nop, bool full_tidying) |
| { |
| gcc_assert (INSN_IN_STREAM_P (nop)); |
| sel_remove_insn (nop, false, full_tidying); |
| |
| /* We'll recycle this nop. */ |
| nop->set_undeleted (); |
| |
| if (nop_pool.n == nop_pool.s) |
| nop_pool.v = XRESIZEVEC (rtx_insn *, nop_pool.v, |
| (nop_pool.s = 2 * nop_pool.s + 1)); |
| nop_pool.v[nop_pool.n++] = nop; |
| } |
| |
| /* Free the nop pool. */ |
| void |
| free_nop_pool (void) |
| { |
| nop_pool.n = 0; |
| nop_pool.s = 0; |
| free (nop_pool.v); |
| nop_pool.v = NULL; |
| } |
| |
| |
/* Skip unspecs to support ia64 speculation. Called from rtx_equal_p_cb.
   The callback is given two rtxes XX and YY and writes the new rtxes
   to NX and NY in case something needs to be skipped. */
| static int |
| skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny) |
| { |
| const_rtx x = *xx; |
| const_rtx y = *yy; |
| |
| if (GET_CODE (x) == UNSPEC |
| && (targetm.sched.skip_rtx_p == NULL |
| || targetm.sched.skip_rtx_p (x))) |
| { |
| *nx = XVECEXP (x, 0, 0); |
| *ny = CONST_CAST_RTX (y); |
| return 1; |
| } |
| |
| if (GET_CODE (y) == UNSPEC |
| && (targetm.sched.skip_rtx_p == NULL |
| || targetm.sched.skip_rtx_p (y))) |
| { |
| *nx = CONST_CAST_RTX (x); |
| *ny = XVECEXP (y, 0, 0); |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
/* Callback, called from hash_rtx_cb. Helps to hash UNSPEC rtx X correctly
   to support ia64 speculation. When changes are needed, the new rtx is
   written to NX and the new mode to NMODE, and the callback returns true. */
| static int |
| hash_with_unspec_callback (const_rtx x, machine_mode mode ATTRIBUTE_UNUSED, |
| rtx *nx, machine_mode* nmode) |
| { |
| if (GET_CODE (x) == UNSPEC |
| && targetm.sched.skip_rtx_p |
| && targetm.sched.skip_rtx_p (x)) |
| { |
      *nx = XVECEXP (x, 0, 0);
| *nmode = VOIDmode; |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
/* Return true if LHS and RHS are OK to be scheduled separately. */
| static bool |
| lhs_and_rhs_separable_p (rtx lhs, rtx rhs) |
| { |
| if (lhs == NULL || rhs == NULL) |
| return false; |
| |
  /* Do not schedule constants as rhs: there is no point in using a reg
     if a const can be used. Moreover, scheduling a const as rhs may lead
     to a mode mismatch, because consts don't have modes but they could be
     merged from branches where the same const is used in different
     modes. */
| if (CONSTANT_P (rhs)) |
| return false; |
| |
| /* ??? Do not rename predicate registers to avoid ICEs in bundling. */ |
| if (COMPARISON_P (rhs)) |
| return false; |
| |
  /* Do not allow a single REG to be an rhs. */
| if (REG_P (rhs)) |
| return false; |
| |
| /* See comment at find_used_regs_1 (*1) for explanation of this |
| restriction. */ |
| /* FIXME: remove this later. */ |
| if (MEM_P (lhs)) |
| return false; |
| |
  /* This will filter out all tricky things like ZERO_EXTRACT etc.
     For now we don't handle them. */
| if (!REG_P (lhs) && !MEM_P (lhs)) |
| return false; |
| |
| return true; |
| } |
| |
| /* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When |
| FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is |
| used e.g. for insns from recovery blocks. */ |
| static void |
| vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p) |
| { |
| hash_rtx_callback_function hrcf; |
| int insn_class; |
| |
| VINSN_INSN_RTX (vi) = insn; |
| VINSN_COUNT (vi) = 0; |
| vi->cost = -1; |
| |
| if (INSN_NOP_P (insn)) |
| return; |
| |
| if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL) |
| init_id_from_df (VINSN_ID (vi), insn, force_unique_p); |
| else |
| deps_init_id (VINSN_ID (vi), insn, force_unique_p); |
| |
| /* Hash vinsn depending on whether it is separable or not. */ |
| hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL; |
| if (VINSN_SEPARABLE_P (vi)) |
| { |
| rtx rhs = VINSN_RHS (vi); |
| |
| VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs), |
| NULL, NULL, false, hrcf); |
| VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi), |
| VOIDmode, NULL, NULL, |
| false, hrcf); |
| } |
| else |
| { |
| VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode, |
| NULL, NULL, false, hrcf); |
| VINSN_HASH_RTX (vi) = VINSN_HASH (vi); |
| } |
| |
| insn_class = haifa_classify_insn (insn); |
| if (insn_class >= 2 |
| && (!targetm.sched.get_insn_spec_ds |
| || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL) |
| == 0))) |
| VINSN_MAY_TRAP_P (vi) = true; |
| else |
| VINSN_MAY_TRAP_P (vi) = false; |
| } |
| |
/* Indicate that VI has become a part of an rtx object. */
| void |
| vinsn_attach (vinsn_t vi) |
| { |
| /* Assert that VI is not pending for deletion. */ |
| gcc_assert (VINSN_INSN_RTX (vi)); |
| |
| VINSN_COUNT (vi)++; |
| } |
| |
/* Create and init VI from INSN. Use FORCE_UNIQUE_P for determining the
   correct VINSN_TYPE (VI). */
| static vinsn_t |
| vinsn_create (insn_t insn, bool force_unique_p) |
| { |
| vinsn_t vi = XCNEW (struct vinsn_def); |
| |
| vinsn_init (vi, insn, force_unique_p); |
| return vi; |
| } |
| |
| /* Return a copy of VI. When REATTACH_P is true, detach VI and attach |
| the copy. */ |
| vinsn_t |
| vinsn_copy (vinsn_t vi, bool reattach_p) |
| { |
| rtx_insn *copy; |
| bool unique = VINSN_UNIQUE_P (vi); |
| vinsn_t new_vi; |
| |
| copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi)); |
| new_vi = create_vinsn_from_insn_rtx (copy, unique); |
| if (reattach_p) |
| { |
| vinsn_detach (vi); |
| vinsn_attach (new_vi); |
| } |
| |
| return new_vi; |
| } |
| |
| /* Delete the VI vinsn and free its data. */ |
| static void |
| vinsn_delete (vinsn_t vi) |
| { |
| gcc_assert (VINSN_COUNT (vi) == 0); |
| |
| if (!INSN_NOP_P (VINSN_INSN_RTX (vi))) |
| { |
| return_regset_to_pool (VINSN_REG_SETS (vi)); |
| return_regset_to_pool (VINSN_REG_USES (vi)); |
| return_regset_to_pool (VINSN_REG_CLOBBERS (vi)); |
| } |
| |
| free (vi); |
| } |
| |
| /* Indicate that VI is no longer a part of some rtx object. |
| Remove VI if it is no longer needed. */ |
| void |
| vinsn_detach (vinsn_t vi) |
| { |
| gcc_assert (VINSN_COUNT (vi) > 0); |
| |
| if (--VINSN_COUNT (vi) == 0) |
| vinsn_delete (vi); |
| } |
| |
| /* Returns TRUE if VI is a branch. */ |
| bool |
| vinsn_cond_branch_p (vinsn_t vi) |
| { |
| insn_t insn; |
| |
| if (!VINSN_UNIQUE_P (vi)) |
| return false; |
| |
| insn = VINSN_INSN_RTX (vi); |
| if (BB_END (BLOCK_FOR_INSN (insn)) != insn) |
| return false; |
| |
| return control_flow_insn_p (insn); |
| } |
| |
| /* Return latency of INSN. */ |
| static int |
| sel_insn_rtx_cost (rtx_insn *insn) |
| { |
| int cost; |
| |
| /* A USE insn, or something else we don't need to |
| understand. We can't pass these directly to |
| result_ready_cost or insn_default_latency because it will |
| trigger a fatal error for unrecognizable insns. */ |
| if (recog_memoized (insn) < 0) |
| cost = 0; |
| else |
| { |
| cost = insn_default_latency (insn); |
| |
| if (cost < 0) |
| cost = 0; |
| } |
| |
| return cost; |
| } |
| |
| /* Return the cost of the VI. |
| !!! FIXME: Unify with haifa-sched.cc: insn_sched_cost (). */ |
| int |
| sel_vinsn_cost (vinsn_t vi) |
| { |
| int cost = vi->cost; |
| |
| if (cost < 0) |
| { |
| cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi)); |
| vi->cost = cost; |
| } |
| |
| return cost; |
| } |
| |
| |
| /* Functions for insn emitting. */ |
| |
| /* Emit new insn after AFTER based on PATTERN and initialize its data from |
| EXPR and SEQNO. */ |
| insn_t |
| sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after) |
| { |
| insn_t new_insn; |
| |
| gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true); |
| |
| new_insn = emit_insn_after (pattern, after); |
| set_insn_init (expr, NULL, seqno); |
| sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID); |
| |
| return new_insn; |
| } |
| |
| /* Force newly generated vinsns to be unique. */ |
| static bool init_insn_force_unique_p = false; |
| |
| /* Emit new speculation recovery insn after AFTER based on PATTERN and |
| initialize its data from EXPR and SEQNO. */ |
| insn_t |
| sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, |
| insn_t after) |
| { |
| insn_t insn; |
| |
| gcc_assert (!init_insn_force_unique_p); |
| |
| init_insn_force_unique_p = true; |
| insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after); |
| CANT_MOVE (insn) = 1; |
| init_insn_force_unique_p = false; |
| |
| return insn; |
| } |
| |
| /* Emit new insn after AFTER based on EXPR and SEQNO. If VINSN is not NULL, |
| take it as a new vinsn instead of EXPR's vinsn. |
| We simplify insns later, after scheduling region in |
| simplify_changed_insns. */ |
| insn_t |
| sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno, |
| insn_t after) |
| { |
| expr_t emit_expr; |
| insn_t insn; |
| int flags; |
| |
| emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr), |
| seqno); |
| insn = EXPR_INSN_RTX (emit_expr); |
| |
| /* The insn may come from the transformation cache, which may hold already |
| deleted insns, so mark it as not deleted. */ |
| insn->set_undeleted (); |
| |
| add_insn_after (insn, after, BLOCK_FOR_INSN (insn)); |
| |
| flags = INSN_INIT_TODO_SSID; |
| if (INSN_LUID (insn) == 0) |
| flags |= INSN_INIT_TODO_LUID; |
| sel_init_new_insn (insn, flags); |
| |
| return insn; |
| } |
| |
/* Move the insn from EXPR to right after AFTER. */
| insn_t |
| sel_move_insn (expr_t expr, int seqno, insn_t after) |
| { |
| insn_t insn = EXPR_INSN_RTX (expr); |
| basic_block bb = BLOCK_FOR_INSN (after); |
| insn_t next = NEXT_INSN (after); |
| |
| /* Assert that in move_op we disconnected this insn properly. */ |
| gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL); |
| SET_PREV_INSN (insn) = after; |
| SET_NEXT_INSN (insn) = next; |
| |
| SET_NEXT_INSN (after) = insn; |
| SET_PREV_INSN (next) = insn; |
| |
| /* Update links from insn to bb and vice versa. */ |
| df_insn_change_bb (insn, bb); |
| if (BB_END (bb) == after) |
| BB_END (bb) = insn; |
| |
| prepare_insn_expr (insn, seqno); |
| return insn; |
| } |
| |
| |
| /* Functions to work with right-hand sides. */ |
| |
| /* Search for a hash value determined by UID/NEW_VINSN in a sorted vector |
| VECT and return true when found. Use NEW_VINSN for comparison only when |
| COMPARE_VINSNS is true. Write to INDP the index on which |
| the search has stopped, such that inserting the new element at INDP will |
| retain VECT's sort order. */ |
| static bool |
| find_in_history_vect_1 (vec<expr_history_def> vect, |
| unsigned uid, vinsn_t new_vinsn, |
| bool compare_vinsns, int *indp) |
| { |
| expr_history_def *arr; |
| int i, j, len = vect.length (); |
| |
| if (len == 0) |
| { |
| *indp = 0; |
| return false; |
| } |
| |
| arr = vect.address (); |
| i = 0, j = len - 1; |
| |
| while (i <= j) |
| { |
| unsigned auid = arr[i].uid; |
| vinsn_t avinsn = arr[i].new_expr_vinsn; |
| |
| if (auid == uid |
| /* When undoing transformation on a bookkeeping copy, the new vinsn |
| may not be exactly equal to the one that is saved in the vector. |
| This is because the insn whose copy we're checking was possibly |
| substituted itself. */ |
| && (! compare_vinsns |
| || vinsn_equal_p (avinsn, new_vinsn))) |
| { |
| *indp = i; |
| return true; |
| } |
| else if (auid > uid) |
| break; |
| i++; |
| } |
| |
| *indp = i; |
| return false; |
| } |
| |
/* Search for the uid of INSN and NEW_VINSN in a sorted vector VECT. Return
   the position if found, or -1 if no such value is in the vector.
   Also search for UIDs of INSN's originators, if ORIGINATORS_P is true. */
| int |
| find_in_history_vect (vec<expr_history_def> vect, rtx insn, |
| vinsn_t new_vinsn, bool originators_p) |
| { |
| int ind; |
| |
| if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn, |
| false, &ind)) |
| return ind; |
| |
| if (INSN_ORIGINATORS (insn) && originators_p) |
| { |
| unsigned uid; |
| bitmap_iterator bi; |
| |
| EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi) |
| if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind)) |
| return ind; |
| } |
| |
| return -1; |
| } |
| |
| /* Insert new element in a sorted history vector pointed to by PVECT, |
| if it is not there already. The element is searched using |
| UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save |
| the history of a transformation. */ |
| void |
| insert_in_history_vect (vec<expr_history_def> *pvect, |
| unsigned uid, enum local_trans_type type, |
| vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn, |
| ds_t spec_ds) |
| { |
| vec<expr_history_def> vect = *pvect; |
| expr_history_def temp; |
| bool res; |
| int ind; |
| |
| res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind); |
| |
| if (res) |
| { |
| expr_history_def *phist = &vect[ind]; |
| |
| /* It is possible that speculation types of expressions that were |
| propagated through different paths will be different here. In this |
| case, merge the status to get the correct check later. */ |
| if (phist->spec_ds != spec_ds) |
| phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds); |
| return; |
| } |
| |
| temp.uid = uid; |
| temp.old_expr_vinsn = old_expr_vinsn; |
| temp.new_expr_vinsn = new_expr_vinsn; |
| temp.spec_ds = spec_ds; |
| temp.type = type; |
| |
| vinsn_attach (old_expr_vinsn); |
| vinsn_attach (new_expr_vinsn); |
| vect.safe_insert (ind, temp); |
| *pvect = vect; |
| } |
| |
| /* Free history vector PVECT. */ |
| static void |
| free_history_vect (vec<expr_history_def> &pvect) |
| { |
| unsigned i; |
| expr_history_def *phist; |
| |
| if (! pvect.exists ()) |
| return; |
| |
| for (i = 0; pvect.iterate (i, &phist); i++) |
| { |
| vinsn_detach (phist->old_expr_vinsn); |
| vinsn_detach (phist->new_expr_vinsn); |
| } |
| |
| pvect.release (); |
| } |
| |
/* Merge vector FROM into PVECT. */
| static void |
| merge_history_vect (vec<expr_history_def> *pvect, |
| vec<expr_history_def> from) |
| { |
| expr_history_def *phist; |
| int i; |
| |
| /* We keep this vector sorted. */ |
| for (i = 0; from.iterate (i, &phist); i++) |
| insert_in_history_vect (pvect, phist->uid, phist->type, |
| phist->old_expr_vinsn, phist->new_expr_vinsn, |
| phist->spec_ds); |
| } |
| |
| /* Compare two vinsns as rhses if possible and as vinsns otherwise. */ |
| bool |
| vinsn_equal_p (vinsn_t x, vinsn_t y) |
| { |
| rtx_equal_p_callback_function repcf; |
| |
| if (x == y) |
| return true; |
| |
| if (VINSN_TYPE (x) != VINSN_TYPE (y)) |
| return false; |
| |
| if (VINSN_HASH (x) != VINSN_HASH (y)) |
| return false; |
| |
| repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL; |
| if (VINSN_SEPARABLE_P (x)) |
| { |
| /* Compare RHSes of VINSNs. */ |
| gcc_assert (VINSN_RHS (x)); |
| gcc_assert (VINSN_RHS (y)); |
| |
| return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf); |
| } |
| |
| return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf); |
| } |
| |
| |
| /* Functions for working with expressions. */ |
| |
| /* Initialize EXPR. */ |
| static void |
| init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority, |
| int sched_times, int orig_bb_index, ds_t spec_done_ds, |
| ds_t spec_to_check_ds, int orig_sched_cycle, |
| vec<expr_history_def> history, |
| signed char target_available, |
| bool was_substituted, bool was_renamed, bool needs_spec_check_p, |
| bool cant_move) |
| { |
| vinsn_attach (vi); |
| |
| EXPR_VINSN (expr) = vi; |
| EXPR_SPEC (expr) = spec; |
| EXPR_USEFULNESS (expr) = use; |
| EXPR_PRIORITY (expr) = priority; |
| EXPR_PRIORITY_ADJ (expr) = 0; |
| EXPR_SCHED_TIMES (expr) = sched_times; |
| EXPR_ORIG_BB_INDEX (expr) = orig_bb_index; |
| EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle; |
| EXPR_SPEC_DONE_DS (expr) = spec_done_ds; |
| EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds; |
| |
| if (history.exists ()) |
| EXPR_HISTORY_OF_CHANGES (expr) = history; |
| else |
| EXPR_HISTORY_OF_CHANGES (expr).create (0); |
| |
| EXPR_TARGET_AVAILABLE (expr) = target_available; |
| EXPR_WAS_SUBSTITUTED (expr) = was_substituted; |
| EXPR_WAS_RENAMED (expr) = was_renamed; |
| EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p; |
| EXPR_CANT_MOVE (expr) = cant_move; |
| } |
| |
| /* Make a copy of the expr FROM into the expr TO. */ |
| void |
| copy_expr (expr_t to, expr_t from) |
| { |
| vec<expr_history_def> temp = vNULL; |
| |
| if (EXPR_HISTORY_OF_CHANGES (from).exists ()) |
| { |
| unsigned i; |
| expr_history_def *phist; |
| |
| temp = EXPR_HISTORY_OF_CHANGES (from).copy (); |
| for (i = 0; |
| temp.iterate (i, &phist); |
| i++) |
| { |
| vinsn_attach (phist->old_expr_vinsn); |
| vinsn_attach (phist->new_expr_vinsn); |
| } |
| } |
| |
| init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), |
| EXPR_USEFULNESS (from), EXPR_PRIORITY (from), |
| EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from), |
| EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), |
| EXPR_ORIG_SCHED_CYCLE (from), temp, |
| EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from), |
| EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from), |
| EXPR_CANT_MOVE (from)); |
| } |
| |
/* Same, but the final expr will never be in av sets, so don't copy
   "uninteresting" data such as the bitmap cache. */
| void |
| copy_expr_onside (expr_t to, expr_t from) |
| { |
| init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from), |
| EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0, |
| EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, |
| vNULL, |
| EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from), |
| EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from), |
| EXPR_CANT_MOVE (from)); |
| } |
| |
| /* Prepare the expr of INSN for scheduling. Used when moving insn and when |
| initializing new insns. */ |
| static void |
| prepare_insn_expr (insn_t insn, int seqno) |
| { |
| expr_t expr = INSN_EXPR (insn); |
| ds_t ds; |
| |
| INSN_SEQNO (insn) = seqno; |
| EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn); |
| EXPR_SPEC (expr) = 0; |
| EXPR_ORIG_SCHED_CYCLE (expr) = 0; |
| EXPR_WAS_SUBSTITUTED (expr) = 0; |
| EXPR_WAS_RENAMED (expr) = 0; |
| EXPR_TARGET_AVAILABLE (expr) = 1; |
| INSN_LIVE_VALID_P (insn) = false; |
| |
| /* ??? If this expression is speculative, make its dependence |
| as weak as possible. We can filter this expression later |
| in process_spec_exprs, because we do not distinguish |
| between the status we got during compute_av_set and the |
| existing status. To be fixed. */ |
| ds = EXPR_SPEC_DONE_DS (expr); |
| if (ds) |
| EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds); |
| |
| free_history_vect (EXPR_HISTORY_OF_CHANGES (expr)); |
| } |
| |
| /* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT |
| is non-null when expressions are merged from different successors at |
| a split point. */ |
| static void |
| update_target_availability (expr_t to, expr_t from, insn_t split_point) |
| { |
| if (EXPR_TARGET_AVAILABLE (to) < 0 |
| || EXPR_TARGET_AVAILABLE (from) < 0) |
| EXPR_TARGET_AVAILABLE (to) = -1; |
| else |
| { |
| /* We try to detect the case when one of the expressions |
| can only be reached through another one. In this case, |
| we can do better. */ |
| if (split_point == NULL) |
| { |
| int toind, fromind; |
| |
| toind = EXPR_ORIG_BB_INDEX (to); |
| fromind = EXPR_ORIG_BB_INDEX (from); |
| |
| if (toind && toind == fromind) |
| /* Do nothing -- everything is done in |
| merge_with_other_exprs. */ |
| ; |
| else |
| EXPR_TARGET_AVAILABLE (to) = -1; |
| } |
| else if (EXPR_TARGET_AVAILABLE (from) == 0 |
| && EXPR_LHS (from) |
| && REG_P (EXPR_LHS (from)) |
| && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from))) |
| EXPR_TARGET_AVAILABLE (to) = -1; |
| else |
| EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from); |
| } |
| } |
| |
| /* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT |
| is non-null when expressions are merged from different successors at |
| a split point. */ |
| static void |
| update_speculative_bits (expr_t to, expr_t from, insn_t split_point) |
| { |
| ds_t old_to_ds, old_from_ds; |
| |
| old_to_ds = EXPR_SPEC_DONE_DS (to); |
| old_from_ds = EXPR_SPEC_DONE_DS (from); |
| |
| EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds); |
| EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from); |
| EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from); |
| |
| /* When merging e.g. control & data speculative exprs, or a control |
| speculative with a control&data speculative one, we really have |
| to change vinsn too. Also, when speculative status is changed, |
| we also need to record this as a transformation in expr's history. */ |
| if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE)) |
| { |
| old_to_ds = ds_get_speculation_types (old_to_ds); |
| old_from_ds = ds_get_speculation_types (old_from_ds); |
| |
| if (old_to_ds != old_from_ds) |
| { |
| ds_t record_ds; |
| |
| /* When both expressions are speculative, we need to change |
| the vinsn first. */ |
| if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE)) |
| { |
| int res; |
| |
| res = speculate_expr (to, EXPR_SPEC_DONE_DS (to)); |
| gcc_assert (res >= 0); |
| } |
| |
| if (split_point != NULL) |
| { |
| /* Record the change with proper status. */ |
| record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE; |
| record_ds &= ~(old_to_ds & SPECULATIVE); |
| record_ds &= ~(old_from_ds & SPECULATIVE); |
| |
| insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to), |
| INSN_UID (split_point), TRANS_SPECULATION, |
| EXPR_VINSN (from), EXPR_VINSN (to), |
| record_ds); |
| } |
| } |
| } |
| } |
| |
| |
| /* Merge bits of FROM expr to TO expr. When SPLIT_POINT is not NULL, |
| this is done along different paths. */ |
| void |
| merge_expr_data (expr_t to, expr_t from, insn_t split_point) |
| { |
| /* Choose the maximum of the specs of merged exprs. This is required |
| for correctness of bookkeeping. */ |
| if (EXPR_SPEC (to) < EXPR_SPEC (from)) |
| EXPR_SPEC (to) = EXPR_SPEC (from); |
| |
| if (split_point) |
| EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from); |
| else |
| EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to), |
| EXPR_USEFULNESS (from)); |
| |
| if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from)) |
| EXPR_PRIORITY (to) = EXPR_PRIORITY (from); |
| |
  /* We merge sched-times halfway to the larger value, to avoid endless
     pipelining of unneeded insns. The average seems to be a good
     compromise between pipelining opportunities and avoiding extra work. */
| if (EXPR_SCHED_TIMES (to) != EXPR_SCHED_TIMES (from)) |
| EXPR_SCHED_TIMES (to) = ((EXPR_SCHED_TIMES (from) + EXPR_SCHED_TIMES (to) |
| + 1) / 2); |
| |
| if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from)) |
| EXPR_ORIG_BB_INDEX (to) = 0; |
| |
| EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to), |
| EXPR_ORIG_SCHED_CYCLE (from)); |
| |
| EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from); |
| EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from); |
| EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from); |
| |
| merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to), |
| EXPR_HISTORY_OF_CHANGES (from)); |
| update_target_availability (to, from, split_point); |
| update_speculative_bits (to, from, split_point); |
| } |
| |
| /* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal |
| in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions |
| are merged from different successors at a split point. */ |
| void |
| merge_expr (expr_t to, expr_t from, insn_t split_point) |
| { |
| vinsn_t to_vi = EXPR_VINSN (to); |
| vinsn_t from_vi = EXPR_VINSN (from); |
| |
| gcc_assert (vinsn_equal_p (to_vi, from_vi)); |
| |
| /* Make sure that speculative pattern is propagated into exprs that |
| have non-speculative one. This will provide us with consistent |
| speculative bits and speculative patterns inside expr. */ |
| if (EXPR_SPEC_DONE_DS (to) == 0 |
| && (EXPR_SPEC_DONE_DS (from) != 0 |
| /* Do likewise for volatile insns, so that we always retain |
| the may_trap_p bit on the resulting expression. However, |
| avoid propagating the trapping bit into the instructions |
| already speculated. This would result in replacing the |
| speculative pattern with the non-speculative one and breaking |
| the speculation support. */ |
| || (!VINSN_MAY_TRAP_P (EXPR_VINSN (to)) |
| && VINSN_MAY_TRAP_P (EXPR_VINSN (from))))) |
| change_vinsn_in_expr (to, EXPR_VINSN (from)); |
| |
| merge_expr_data (to, from, split_point); |
| gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE); |
| } |
| |
| /* Clear the information of this EXPR. */ |
| void |
| clear_expr (expr_t expr) |
| { |
| |
| vinsn_detach (EXPR_VINSN (expr)); |
| EXPR_VINSN (expr) = NULL; |
| |
| free_history_vect (EXPR_HISTORY_OF_CHANGES (expr)); |
| } |
| |
/* For a given LV_SET, mark EXPR as having an unavailable target register. */
| static void |
| set_unavailable_target_for_expr (expr_t expr, regset lv_set) |
| { |
| if (EXPR_SEPARABLE_P (expr)) |
| { |
| if (REG_P (EXPR_LHS (expr)) |
| && register_unavailable_p (lv_set, EXPR_LHS (expr))) |
| { |
          /* If it's an insn like r1 = use (r1, ...), and it exists in
             different forms in each of the av_sets being merged, we can't
             say whether the original destination register is available or
             not. However, this still works if the destination register is
             not used in the original expression: if the branch at whose
             LV_SET we're looking here is not actually the 'other branch'
             in the sense that the same expression is available through it
             (but that can't be determined at the computation stage because
             of transformations on one of the branches), it still won't
             affect the availability.
             Liveness of a register somewhere on a code motion path means
             it's either read somewhere on that path, live on the 'other'
             branch, live at the point immediately following the original
             operation, or read by the original operation itself. The
             latter case is filtered out in the condition below.
             It still doesn't cover the case when the register is defined
             and used somewhere within the code motion path, and in this
             case we could miss a unifying code motion along both branches
             using a renamed register, but it won't affect code correctness
             since upon actual code motion bookkeeping code would be
             generated. */
| if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), |
| EXPR_LHS (expr))) |
| EXPR_TARGET_AVAILABLE (expr) = -1; |
| else |
| EXPR_TARGET_AVAILABLE (expr) = false; |
| } |
| } |
| else |
| { |
| unsigned regno; |
| reg_set_iterator rsi; |
| |
| EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)), |
| 0, regno, rsi) |
| if (bitmap_bit_p (lv_set, regno)) |
| { |
| EXPR_TARGET_AVAILABLE (expr) = false; |
| break; |
| } |
| |
| EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)), |
| 0, regno, rsi) |
| if (bitmap_bit_p (lv_set, regno)) |
| { |
| EXPR_TARGET_AVAILABLE (expr) = false; |
| break; |
| } |
| } |
| } |
| |
/* Try to make EXPR speculative. Return 1 when EXPR's pattern
   or dependence status have changed, 2 when also the target register
   became unavailable, 0 if nothing had to be changed, and -1 when
   speculation is not possible. */
| int |
| speculate_expr (expr_t expr, ds_t ds) |
| { |
| int res; |
| rtx_insn *orig_insn_rtx; |
| rtx spec_pat; |
| ds_t target_ds, current_ds; |
| |
| /* Obtain the status we need to put on EXPR. */ |
| target_ds = (ds & SPECULATIVE); |
| current_ds = EXPR_SPEC_DONE_DS (expr); |
| ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX); |
| |
| orig_insn_rtx = EXPR_INSN_RTX (expr); |
| |
| res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat); |
| |
| switch (res) |
| { |
| case 0: |
| EXPR_SPEC_DONE_DS (expr) = ds; |
| return current_ds != ds ? 1 : 0; |
| |
| case 1: |
| { |
| rtx_insn *spec_insn_rtx = |
| create_insn_rtx_from_pattern (spec_pat, NULL_RTX); |
| vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false); |
| |
| change_vinsn_in_expr (expr, spec_vinsn); |
| EXPR_SPEC_DONE_DS (expr) = ds; |
| EXPR_NEEDS_SPEC_CHECK_P (expr) = true; |
| |
| /* Do not allow clobbering the address register of speculative |
| insns. */ |
| if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), |
| expr_dest_reg (expr))) |
| { |
| EXPR_TARGET_AVAILABLE (expr) = false; |
| return 2; |
| } |
| |
| return 1; |
| } |
| |
| case -1: |
| return -1; |
| |
| default: |
| gcc_unreachable (); |
| return -1; |
| } |
| } |
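| |
| /* A sketch of how a caller of speculate_expr might act on the result |
| (illustrative pseudo-code, not part of this file): |
| |
| switch (speculate_expr (expr, ds)) |
| { |
| case -1: remove the expr from the av set; break; |
| case 0: |
| case 1: keep the (possibly transformed) expr; break; |
| case 2: keep it, but its target register is unavailable; break; |
| } */ |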
| |
| /* Return a destination register, if any, of EXPR. */ |
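| /* E.g. for a pattern (set (reg:SI 1) (plus:SI ...)) this is |
| (reg:SI 1); for a store through memory the lhs is not a REG and |
| NULL_RTX is returned. */ |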
| rtx |
| expr_dest_reg (expr_t expr) |
| { |
| rtx dest = VINSN_LHS (EXPR_VINSN (expr)); |
| |
| if (dest != NULL_RTX && REG_P (dest)) |
| return dest; |
| |
| return NULL_RTX; |
| } |
| |
| /* Return the REGNO of EXPR's destination register. */ |
| unsigned |
| expr_dest_regno (expr_t expr) |
| { |
| rtx dest = expr_dest_reg (expr); |
| |
| gcc_assert (dest != NULL_RTX); |
| return REGNO (dest); |
| } |
| |
| /* For a given LV_SET, mark all expressions in JOIN_SET that are not |
| present in AV_SET as having an unavailable target register. */ |
| void |
| mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set) |
| { |
| expr_t expr; |
| av_set_iterator avi; |
| |
| FOR_EACH_EXPR (expr, avi, join_set) |
| if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL) |
| set_unavailable_target_for_expr (expr, lv_set); |
| } |
| |
| |
| /* Return true if REG (at least partially) is present in REGS. */ |
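| /* E.g. for a multi-word hard register (reg:DI 2) occupying regnos 2 |
| and 3, true is returned when either regno is in REGS. */ |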
| bool |
| register_unavailable_p (regset regs, rtx reg) |
| { |
| unsigned regno, end_regno; |
| |
| regno = REGNO (reg); |
| if (bitmap_bit_p (regs, regno)) |
| return true; |
| |
| end_regno = END_REGNO (reg); |
| |
| while (++regno < end_regno) |
| if (bitmap_bit_p (regs, regno)) |
| return true; |
| |
| return false; |
| } |
| |
| /* Av set functions. */ |
| |
| /* Add a new element to av set SETP. |
| Return the element added. */ |
| static av_set_t |
| av_set_add_element (av_set_t *setp) |
| { |
| /* Insert at the beginning of the list. */ |
| _list_add (setp); |
| return *setp; |
| } |
| |
| /* Add EXPR to SETP. */ |
| void |
| av_set_add (av_set_t *setp, expr_t expr) |
| { |
| av_set_t elem; |
| |
| gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr))); |
| elem = av_set_add_element (setp); |
| copy_expr (_AV_SET_EXPR (elem), expr); |
| } |
| |
| /* Same, but do not copy EXPR. */ |
| static void |
| av_set_add_nocopy (av_set_t *setp, expr_t expr) |
| { |
| av_set_t elem; |
| |
| elem = av_set_add_element (setp); |
| *_AV_SET_EXPR (elem) = *expr; |
| } |
| |
| /* Remove expr pointed to by IP from the av_set. */ |
| void |
| av_set_iter_remove (av_set_iterator *ip) |
| { |
| clear_expr (_AV_SET_EXPR (*ip->lp)); |
| _list_iter_remove (ip); |
| } |
| |
| /* Search SET for an expr equivalent to SOUGHT_VINSN in the sense of |
| vinsn_equal_p. Return NULL if no such expr was found. */ |
| expr_t |
| av_set_lookup (av_set_t set, vinsn_t sought_vinsn) |
| { |
| expr_t expr; |
| av_set_iterator i; |
| |
| FOR_EACH_EXPR (expr, i, set) |
| if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) |
| return expr; |
| return NULL; |
| } |
| |
| /* Same, but also remove the EXPR found. */ |
| static expr_t |
| av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn) |
| { |
| expr_t expr; |
| av_set_iterator i; |
| |
| FOR_EACH_EXPR_1 (expr, i, setp) |
| if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) |
| { |
| _list_iter_remove_nofree (&i); |
| return expr; |
| } |
| return NULL; |
| } |
| |
| /* Search SET for an expr whose vinsn is equivalent to EXPR's vinsn in |
| the sense of vinsn_equal_p, but which is not EXPR itself. Return |
| NULL if no such expr was found. */ |
| static expr_t |
| av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr) |
| { |
| expr_t cur_expr; |
| av_set_iterator i; |
| |
| FOR_EACH_EXPR (cur_expr, i, set) |
| { |
| if (cur_expr == expr) |
| continue; |
| if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr))) |
| return cur_expr; |
| } |
| |
| return NULL; |
| } |
| |
| /* If an expression equivalent to EXPR is already in AVP, merge EXPR |
| into it, remove EXPR via IP, and return the merged expression; |
| otherwise return EXPR itself. */ |
| expr_t |
| merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr) |
| { |
| expr_t expr2; |
| |
| expr2 = av_set_lookup_other_equiv_expr (*avp, expr); |
| if (expr2 != NULL) |
| { |
| /* Reset target availability on merge, since taking it from only one |
| of the exprs would be misleading when they come from different |
| code. */ |
| EXPR_TARGET_AVAILABLE (expr2) = -1; |
| EXPR_USEFULNESS (expr2) = 0; |
| |
| merge_expr (expr2, expr, NULL); |
| |
| /* Fix usefulness as it should now be REG_BR_PROB_BASE. */ |
| EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE; |
| |
| av_set_iter_remove (ip); |
| return expr2; |
| } |
| |
| return expr; |
| } |
| |
| /* Return true if there is an expr that correlates to VI in SET. */ |
| bool |
| av_set_is_in_p (av_set_t set, vinsn_t vi) |
| { |
| return av_set_lookup (set, vi) != NULL; |
| } |
| |
| /* Return a copy of SET. */ |
| av_set_t |
| av_set_copy (av_set_t set) |
| { |
| expr_t expr; |
| av_set_iterator i; |
| av_set_t res = NULL; |
| |
| FOR_EACH_EXPR (expr, i, set) |
| av_set_add (&res, expr); |
| |
| return res; |
| } |
| |
| /* Join two av sets that do not have common elements by attaching the |
| second set (pointed to by FROMP) to the end of the first set |
| (TO_TAILP must point to the _AV_SET_NEXT field of the first set's |
| last element). */ |
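| /* E.g. if the first set is {e1, e2}, TO_TAILP points to e2's |
| _AV_SET_NEXT (which is NULL), and *FROMP is {e3, e4}, the first set |
| becomes {e1, e2, e3, e4} and *FROMP becomes NULL; the nodes are |
| spliced, not copied. */ |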
| static void |
| join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp) |
| { |
| gcc_assert (*to_tailp == NULL); |
| *to_tailp = *fromp; |
| *fromp = NULL; |
| } |
| |
| /* Make the set pointed to by TOP the union of TOP and FROMP. Clear |
| the av_set pointed to by FROMP afterwards. */ |
| void |
| av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn) |
| { |
| expr_t expr1; |
| av_set_iterator i; |
| |
| /* Delete from TOP all exprs that are present in FROMP. */ |
| FOR_EACH_EXPR_1 (expr1, i, top) |
| { |
| expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1)); |
| |
| if (expr2) |
| { |
| merge_expr (expr2, expr1, insn); |
| av_set_iter_remove (&i); |
| } |
| } |
| |
| join_distinct_sets (i.lp, fromp); |
| } |
| |
| /* Same as above, but also update availability of target register in |
| TOP judging by TO_LV_SET and FROM_LV_SET. */ |
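| /* The resulting *TOP lists the exprs that were only in TOP first, |
| then those that were only in FROMP, and finally the merged exprs |
| that were present in both sets. */ |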
| void |
| av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set, |
| regset from_lv_set, insn_t insn) |
| { |
| expr_t expr1; |
| av_set_iterator i; |
| av_set_t *to_tailp, in_both_set = NULL; |
| |
| /* Delete from TOP all exprs that are present in FROMP. */ |
| FOR_EACH_EXPR_1 (expr1, i, top) |
| { |
| expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1)); |
| |
| if (expr2) |
| { |
| /* It may be that the expressions have different destination |
| registers, in which case we need to check liveness here. */ |
| if (EXPR_SEPARABLE_P (expr1)) |
| { |
| int regno1 = (REG_P (EXPR_LHS (expr1)) |
| ? (int) expr_dest_regno (expr1) : -1); |
| int regno2 = (REG_P (EXPR_LHS (expr2)) |
| ? (int) expr_dest_regno (expr2) : -1); |
| |
| /* ??? We don't have a way to check restrictions for |
| *other* register on the current path, we did it only |
| for the current target register. Give up. */ |
| if (regno1 != regno2) |
| EXPR_TARGET_AVAILABLE (expr2) = -1; |
| } |
| else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2)) |
| EXPR_TARGET_AVAILABLE (expr2) = -1; |
| |
| merge_expr (expr2, expr1, insn); |
| av_set_add_nocopy (&in_both_set, expr2); |
| av_set_iter_remove (&i); |
| } |
| else |
| /* EXPR1 is present in TOP, but not in FROMP. Check it on |
| FROM_LV_SET. */ |
| set_unavailable_target_for_expr (expr1, from_lv_set); |
| } |
| to_tailp = i.lp; |
| |
| /* These expressions are not present in TOP. Check liveness |
| restrictions on TO_LV_SET. */ |
| FOR_EACH_EXPR (expr1, i, *fromp) |
| set_unavailable_target_for_expr (expr1, to_lv_set); |
| |
| join_distinct_sets (i.lp, &in_both_set); |
| join_distinct_sets (to_tailp, fromp); |
| } |
| |
| /* Clear av_set pointed to by SETP. */ |
| void |
| av_set_clear (av_set_t *setp) |
| { |
| expr_t expr; |
| av_set_iterator i; |
| |
| FOR_EACH_EXPR_1 (expr, i, setp) |
| av_set_iter_remove (&i); |
| |
| gcc_assert (*setp == NULL); |
| } |
| |
| /* Leave only one non-speculative element in SETP. */ |
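| /* E.g. {s1, n1, s2, n2} becomes {s1, n1, s2}, where the sN are |
| speculative exprs and the nN are non-speculative ones. */ |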
| void |
| av_set_leave_one_nonspec (av_set_t *setp) |
| { |
| expr_t expr; |
| av_set_iterator i; |
| bool has_one_nonspec = false; |
| |
| /* Keep all speculative exprs, and leave one non-speculative |
| (the first one). */ |
| FOR_EACH_EXPR_1 (expr, i, setp) |
| { |
| if (!EXPR_SPEC_DONE_DS (expr)) |
| { |
| if (has_one_nonspec) |
| av_set_iter_remove (&i); |
| else |
| has_one_nonspec = true; |
| } |
| } |
| } |
| |
| /* Return the N'th element of the SET. */ |
| expr_t |
| av_set_element (av_set_t set, int n) |
| { |
| expr_t expr; |
| av_set_iterator i; |
| |
| FOR_EACH_EXPR (expr, i, set) |
| if (n-- == 0) |
| return expr; |
| |
| gcc_unreachable (); |
| return NULL; |
| } |
| |
| /* Delete all expressions from AVP that are conditional branches |
| (IFs). */ |
| void |
| av_set_substract_cond_branches (av_set_t *avp) |
| { |
| av_set_iterator i; |
| expr_t expr; |
| |
| FOR_EACH_EXPR_1 (expr, i, avp) |
| if (vinsn_cond_branch_p (EXPR_VINSN (expr))) |
| av_set_iter_remove (&i); |
| } |
| |
| /* Multiply the usefulness attribute of each member of the av set AV |
| by PROB / ALL_PROB. */ |
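| /* E.g. a usefulness of 100 with PROB 30 and ALL_PROB 120 becomes |
| (100 * 30) / 120 = 25; when ALL_PROB is 0 it is simply zeroed. */ |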
| void |
| av_set_split_usefulness (av_set_t av, int prob, int all_prob) |
| { |
| av_set_iterator i; |
| expr_t expr; |
| |
| FOR_EACH_EXPR (expr, i, av) |
| EXPR_USEFULNESS (expr) = (all_prob |
| ? (EXPR_USEFULNESS (expr) * prob) / all_prob |
| : 0); |
| } |
| |
| /* Leave in AVP only those expressions that are present in AV, merging |
| the history of changes of the kept expressions. */ |
| void |
| av_set_code_motion_filter (av_set_t *avp, av_set_t av) |
| { |
| av_set_iterator i; |
| expr_t expr, expr2; |
| |
| FOR_EACH_EXPR_1 (expr, i, avp) |
| if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL) |
| av_set_iter_remove (&i); |
| else |
| /* When updating av sets in bookkeeping blocks, we can add more insns |
| there which will be transformed but the upper av sets will not |
| reflect those transformations. We then fail to undo those |
| when searching for such insns. So merge the history saved |
| in the av set of the block we are processing. */ |
| merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr), |
| EXPR_HISTORY_OF_CHANGES (expr2)); |
| } |
| |
| |
| |
| /* Dependence hooks to initialize insn data. */ |
| |
| /* This is used by the hooks called from dependence analysis when |
| initializing an instruction's data. */ |
| static struct |
| { |
| /* Where the dependence was found (lhs/rhs). */ |
| deps_where_t where; |
| |
| /* The actual data object to initialize. */ |
| idata_t id; |
| |
| /* True when the insn should not be made clonable. */ |
| bool force_unique_p; |
| |
| /* True when insn should be treated as of type USE, i.e. never renamed. */ |
| bool force_use_p; |
| } deps_init_id_data; |
| |
| |
| /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be |
| clonable. */ |
| static void |
| setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p) |
| { |
| int type; |
| |
| /* Determine whether INSN could be cloned and return the appropriate |
| vinsn type. Clonable insns that can be separated into lhs and rhs |
| have type SET; other clonable insns have type USE. */ |
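| /* E.g. an ordinary arithmetic insn that is not forced unique gets |
| type SET, a forced-unique insn keeps INSN, a simple jump becomes |
| PC, and a debug insn becomes USE (or INSN when forced unique). */ |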
| type = GET_CODE (insn); |
| |
| /* Only regular insns can be cloned. */ |
| if (type == INSN && !force_unique_p) |
| type = SET; |
| else if (type == JUMP_INSN && simplejump_p (insn)) |
| type = PC; |
| else if (type == DEBUG_INSN) |
| type = !force_unique_p ? USE : INSN; |
| |
| IDATA_TYPE (id) = type; |
| IDATA_REG_SETS (id) = get_clear_regset_from_pool (); |
| IDATA_REG_USES (id) = get_clear_regset_from_pool (); |
| IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool (); |
| } |
| |
| /* Start initializing insn data. */ |
| static void |
| deps_init_id_start_insn (insn_t insn) |
| { |
| gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE); |
| |
| setup_id_for_insn (deps_init_id_data.id, insn, |
| deps_init_id_data.force_unique_p); |
| deps_init_id_data.where = DEPS_IN_INSN; |
| } |
| |
| /* Start initializing lhs data. */ |
| static void |
| deps_init_id_start_lhs (rtx lhs) |
| { |
| gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); |
| gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL); |
| |
| if (IDATA_TYPE (deps_init_id_data.id) == SET) |
| { |
| IDATA_LHS (deps_init_id_data.id) = lhs; |
| deps_init_id_data.where = DEPS_IN_LHS; |
| } |
| } |
| |
| /* Finish initializing lhs data. */ |
| static void |
| deps_init_id_finish_lhs (void) |
| { |
| deps_init_id_data.where = DEPS_IN_INSN; |
| } |
| |
| /* Note a set of REGNO. */ |
| static void |
| deps_init_id_note_reg_set (int regno) |
| { |
| haifa_note_reg_set (regno); |
| |
| if (deps_init_id_data.where == DEPS_IN_RHS) |
| deps_init_id_data.force_use_p = true; |
| |
| if (IDATA_TYPE (deps_init_id_data.id) != PC) |
| SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno); |
| |
| #ifdef STACK_REGS |
| /* Make instructions that set stack registers to be ineligible for |
| renaming to avoid issues with find_used_regs. */ |
| if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) |
| deps_init_id_data.force_use_p = true; |
| #endif |
| } |
| |
| /* Note a clobber of REGNO. */ |
| static void |
| deps_init_id_note_reg_clobber (int regno) |
| { |
| haifa_note_reg_clobber (regno); |
| |
| if (deps_init_id_data.where == DEPS_IN_RHS) |
| deps_init_id_data.force_use_p = true; |
| |
| if (IDATA_TYPE (deps_init_id_data.id) != PC) |
| SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno); |
| } |
| |
| /* Note a use of REGNO. */ |
| static void |
| deps_init_id_note_reg_use (int regno) |
| { |
| haifa_note_reg_use (regno); |
| |
| if (IDATA_TYPE (deps_init_id_data.id) != PC) |
| SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno); |
| } |
| |
| /* Start initializing rhs data. */ |
| static void |
| deps_init_id_start_rhs (rtx rhs) |
| { |
| gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); |
| |
| /* And there was no sel_deps_reset_to_insn (). */ |
| if (IDATA_LHS (deps_init_id_data.id) != NULL) |
| { |
| IDATA_RHS (deps_init_id_data.id) = rhs; |
| deps_init_id_data.where = DEPS_IN_RHS; |
| } |
| } |
| |
| /* Finish initializing rhs data. */ |
| static void |
| deps_init_id_finish_rhs (void) |
| { |
| gcc_assert (deps_init_id_data.where == DEPS_IN_RHS |
| || deps_init_id_data.where == DEPS_IN_INSN); |
| deps_init_id_data.where = DEPS_IN_INSN; |
| } |
| |
| /* Finish initializing insn data. */ |
| static void |
| deps_init_id_finish_insn (void) |
| { |
| gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); |
| |
| if (IDATA_TYPE (deps_init_id_data.id) == SET) |
| { |
| rtx lhs = IDATA_LHS (deps_init_id_data.id); |
| rtx rhs = IDATA_RHS (deps_init_id_data.id); |
| |
| if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs) |
| || deps_init_id_data.force_use_p) |
| { |
| /* This should be a USE, as we don't want to schedule its RHS |
| separately. However, we still want to have them recorded |
| for the purposes of substitution. That's why we don't |
| simply call downgrade_to_use () here. */ |
| gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET); |
| gcc_assert (!lhs == !rhs); |
| |
| IDATA_TYPE (deps_init_id_data.id) = USE; |
| } |
| } |
| |
| deps_init_id_data.where = DEPS_IN_NOWHERE; |
| } |
| |
| /* This is dependence info used for initializing insn's data. */ |
| static struct sched_deps_info_def deps_init_id_sched_deps_info; |
| |
| /* This initializes most of the static part of the above structure. */ |
| static const struct sched_deps_info_def const_deps_init_id_sched_deps_info = |
| { |
| NULL, |
| |
| deps_init_id_start_insn, |
| deps_init_id_finish_insn, |
| deps_init_id_start_lhs, |
| deps_init_id_finish_lhs, |
| deps_init_id_start_rhs, |
| deps_init_id_finish_rhs, |
| deps_init_id_note_reg_set, |
| deps_init_id_note_reg_clobber, |
| deps_init_id_note_reg_use, |
| NULL, /* note_mem_dep */ |
| NULL, /* note_dep */ |
| |
| 0, /* use_cselib */ |
| 0, /* use_deps_list */ |
| 0 /* generate_spec_deps */ |
| }; |
| |
| /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true, |
| we don't actually need information about lhs and rhs. */ |
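| /* E.g. for (set (reg:SI 1) (plus:SI (reg:SI 2) (reg:SI 3))) the lhs |
| is (reg:SI 1) and the rhs is the PLUS rtx; for a PARALLEL, an asm, |
| or a forced-unique insn both stay NULL. */ |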
| static void |
| setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p) |
| { |
| rtx pat = PATTERN (insn); |
| |
| if (NONJUMP_INSN_P (insn) |
| && GET_CODE (pat) == SET |
| && !force_unique_p) |
| { |
| IDATA_RHS (id) = SET_SRC (pat); |
| IDATA_LHS (id) = SET_DEST (pat); |
| } |
| else |
| IDATA_LHS (id) = IDATA_RHS (id) = NULL; |
| } |
| |
| /* Possibly downgrade INSN to USE. */ |
| static void |
| maybe_downgrade_id_to_use (idata_t id, insn_t insn) |
| { |
| bool must_be_use = false; |
| df_ref def; |
| rtx lhs = IDATA_LHS (id); |
| rtx rhs = IDATA_RHS (id); |
| |
| /* We downgrade only SETs. */ |
| if (IDATA_TYPE (id) != SET) |
| return; |
| |
| if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs)) |
| { |
| IDATA_TYPE (id) = USE; |
| return; |
| } |
| |
| FOR_EACH_INSN_DEF (def, insn) |
| { |
| if (DF_REF_INSN (def) |
| && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY) |
| && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id))) |
| { |
| must_be_use = true; |
| break; |
| } |
| |
| #ifdef STACK_REGS |
| /* Make instructions that set stack registers to be ineligible for |
| renaming to avoid issues with find_used_regs. */ |
| if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG)) |
| { |
| must_be_use = true; |
| break; |
| } |
| #endif |
| } |
| |
| if (must_be_use) |
| IDATA_TYPE (id) = USE; |
| } |
| |
| /* Setup implicit register clobbers calculated by sched-deps for INSN |
| before reload and save them in ID. */ |
| static void |
| setup_id_implicit_regs (idata_t id, insn_t insn) |
| { |
| if (reload_completed) |
| return; |
| |
| HARD_REG_SET temp; |
| |
| get_implicit_reg_pending_clobbers (&temp, insn); |
| IOR_REG_SET_HRS (IDATA_REG_SETS (id), temp); |
| } |
| |
| /* Setup register sets describing INSN in ID. */ |
| static void |
| setup_id_reg_sets (idata_t id, insn_t insn) |
| { |
| struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn); |
| df_ref def, use; |
| regset tmp = get_clear_regset_from_pool (); |
| |
| FOR_EACH_INSN_INFO_DEF (def, insn_info) |
| { |
| unsigned int regno = DF_REF_REGNO (def); |
| |
| /* Post modifies are treated like clobbers by sched-deps.cc. */ |
| if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER |
| | DF_REF_PRE_POST_MODIFY))) |
| SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno); |
| else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER)) |
| { |
| SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno); |
| |
| #ifdef STACK_REGS |
| /* For stack registers, treat writes to them as writes |
| to the first one to be consistent with sched-deps.cc. */ |
| if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) |
| SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG); |
| #endif |
| } |
| /* Mark special refs that generate a read/write def pair. */ |
| if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL) |
| || regno == STACK_POINTER_REGNUM) |
| bitmap_set_bit (tmp, regno); |
| } |
| |
| FOR_EACH_INSN_INFO_USE (use, insn_info) |
| { |
| unsigned int regno = DF_REF_REGNO (use); |
| |
| /* When these refs are met for the first time, skip them, as |
| these uses are just counterparts of some defs. */ |
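| /* E.g. a conditional set of r1 yields both a def and a use of r1 in |
| DF; that use is skipped here so r1 does not spuriously end up in |
| IDATA_REG_USES. */ |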
| if (bitmap_bit_p (tmp, regno)) |
| bitmap_clear_bit (tmp, regno); |
| else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE)) |
| { |
| SET_REGNO_REG_SET (IDATA_REG_USES (id), regno); |
| |
| #ifdef STACK_REGS |
| /* For stack registers, treat reads from them as reads from |
| the first one to be consistent with sched-deps.cc. */ |
| if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) |
| SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG); |
| #endif |
| } |
| } |
| |
| /* Also get implicit reg clobbers from sched-deps. */ |
| setup_id_implicit_regs (id, insn); |
| |
| return_regset_to_pool (tmp); |
| } |
| |
| /* Initialize instruction data for INSN in ID using DF's data. */ |
| static void |
| init_id_from_df (idata_t id, insn_t insn, bool force_unique_p) |
| { |
| gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL); |
| |
| setup_id_for_insn (id, insn, force_unique_p); |
| setup_id_lhs_rhs (id, insn, force_unique_p); |
| |
| if (INSN_NOP_P (insn)) |
| return; |
| |
| maybe_downgrade_id_to_use (id, insn); |
| setup_id_reg_sets (id, insn); |
| } |
| |
| /* Initialize instruction data for INSN in ID. */ |
| static void |
| deps_init_id (idata_t id, insn_t insn, bool force_unique_p) |
| { |
| class deps_desc _dc, *dc = &_dc; |
| |
| deps_init_id_data.where = DEPS_IN_NOWHERE; |
| deps_init_id_data.id = id; |
| deps_init_id_data.force_unique_p = force_unique_p; |
| deps_init_id_data.force_use_p = false; |
| |
| init_deps (dc, false); |
| memcpy (&deps_init_id_sched_deps_info, |
| &const_deps_init_id_sched_deps_info, |
| sizeof (deps_init_id_sched_deps_info)); |
| if (spec_info != NULL) |
| deps_init_id_sched_deps_info.generate_spec_deps = 1; |
| sched_deps_info = &deps_init_id_sched_deps_info; |
| |
| deps_analyze_insn (dc, insn); |
| /* Implicit reg clobbers are not reported through the hooks above, so |
| take them from sched-deps separately. */ |
| setup_id_implicit_regs (id, insn); |
| |
| free_deps (dc); |
| deps_init_id_data.id = NULL; |
| } |
| |
| |
| struct sched_scan_info_def |
| { |
| /* This hook notifies the scheduler frontend to extend its internal |
| per-basic-block data structures. This hook should be called once |
| before a series of calls to bb_init (). */ |
| void (*extend_bb) (void); |
| |
| /* This hook makes the scheduler frontend initialize its internal data |
| structures for the passed basic block. */ |
| void (*init_bb) (basic_block); |
| |
| /* This hook notifies the scheduler frontend to extend its internal |
| per-insn data structures. This hook should be called once before a |
| series of calls to insn_init (). */ |
| void (*extend_insn) (void); |
| |
| /* This hook makes the scheduler frontend initialize its internal data |
| structures for the passed insn. */ |
| void (*init_insn) (insn_t); |
| }; |
| |
| /* A driver function to add a set of basic blocks (BBS) to the |
| scheduling region. */ |
| static void |
| sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs) |
| { |
| unsigned i; |
| basic_block bb; |
| |
| if (ssi->extend_bb) |
| ssi->extend_bb (); |
| |
| if (ssi->init_bb) |
| FOR_EACH_VEC_ELT (bbs, i, bb) |
| ssi->init_bb (bb); |
| |
| if (ssi->extend_insn) |
| ssi->extend_insn (); |
| |
| if (ssi->init_insn) |
| FOR_EACH_VEC_ELT (bbs, i, bb) |
| { |
| rtx_insn *insn; |
| |
| FOR_BB_INSNS (bb, insn) |
| ssi->init_insn (insn); |
| } |
| } |
| |
| /* Implement hooks for collecting fundamental insn properties, such as |
| whether the insn is an ASM or is within a SCHED_GROUP. */ |
| |
| /* True when the "one-time init" data for INSN has not been |
| initialized yet. */ |
| static bool |
| first_time_insn_init (insn_t insn) |
| { |
| return INSN_LIVE (insn) == NULL; |
| } |
| |
| /* Hash an entry in a transformed_insns hashtable. */ |
| static hashval_t |
| hash_transformed_insns (const void *p) |
| { |
| return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old); |
| } |
| |
| /* Compare the entries in a transformed_insns hashtable. */ |
| static int |
| eq_transformed_insns (const void *p, const void *q) |
| { |
| rtx_insn *i1 = |
| VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old); |
| rtx_insn *i2 = |
| VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old); |
| |
| if (INSN_UID (i1) == INSN_UID (i2)) |
| return 1; |
| return rtx_equal_p (PATTERN (i1), PATTERN (i2)); |
| } |
| |
| /* Free an entry in a transformed_insns hashtable. */ |
| static void |
| free_transformed_insns (void *p) |
| { |
| struct transformed_insns *pti = (struct transformed_insns *) p; |
| |
| vinsn_detach (pti->vinsn_old); |
| vinsn_detach (pti->vinsn_new); |
| free (pti); |
| } |
| |
| /* Init the s_i_d data for INSN which should be inited just once, when |
| we first see the insn. */ |
| static void |
| init_first_time_insn_data (insn_t insn) |
| { |
| /* The data must not have been initialized before, as this is the |
| first time we init data for the insn. */ |
| gcc_assert (first_time_insn_init (insn)); |
| |
| /* These are needed for nops too. */ |
| INSN_LIVE (insn) = get_regset_from_pool (); |
| INSN_LIVE_VALID_P (insn) = false; |
| |
| if (!INSN_NOP_P (insn)) |
| { |
| INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL); |
| INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL); |
| INSN_TRANSFORMED_INSNS (insn) |
| = htab_create (16, hash_transformed_insns, |
| eq_transformed_insns, free_transformed_insns); |
| init_deps (&INSN_DEPS_CONTEXT (insn), true); |
| } |
| } |
| |
| /* Free almost all above data for INSN that is scheduled already. |
| Used for extra-large basic blocks. */ |
| void |
| free_data_for_scheduled_insn (insn_t insn) |
| { |
| gcc_assert (! first_time_insn_init (insn)); |
| |
| if (! INSN_ANALYZED_DEPS (insn)) |
| return; |
| |
| BITMAP_FREE (INSN_ANALYZED_DEPS (insn)); |
| BITMAP_FREE (INSN_FOUND_DEPS (insn)); |
| htab_delete (INSN_TRANSFORMED_INSNS (insn)); |
| |
| /* This is allocated only for bookkeeping insns. */ |
| if (INSN_ORIGINATORS (insn)) |
| BITMAP_FREE (INSN_ORIGINATORS (insn)); |
| free_deps (&INSN_DEPS_CONTEXT (insn)); |
| |
| INSN_ANALYZED_DEPS (insn) = NULL; |
| |
| /* Clear the readonly flag so that we will ICE when trying to |
| recalculate the deps context (as we believe that should not |
| happen). */ |
| (&INSN_DEPS_CONTEXT (insn))->readonly = 0; |
| } |
| |
| /* Free the same data as above for INSN. */ |
| static void |
| free_first_time_insn_data (insn_t insn) |
| { |
| gcc_assert (! first_time_insn_init (insn)); |
| |
| free_data_for_scheduled_insn (insn); |
| return_regset_to_pool (INSN_LIVE (insn)); |
| INSN_LIVE (insn) = NULL; |
| INSN_LIVE_VALID_P (insn) = false; |
| } |
| |
| /* Initialize region-scope data structures for basic blocks. */ |
| static void |
| init_global_and_expr_for_bb (basic_block bb) |
| { |
| if (sel_bb_empty_p (bb)) |
| return; |
| |
| invalidate_av_set (bb); |
| } |
| |
| /* Data for global dependency analysis (to initialize CANT_MOVE and |
| SCHED_GROUP_P). */ |
| static struct |
| { |
| /* Previous insn. */ |
| insn_t prev_insn; |
| } init_global_data; |
| |
| /* Determine if INSN is in a sched_group, is an asm, or should not be |
| cloned. After that, initialize its expr. */ |
| static void |
| init_global_and_expr_for_insn (insn_t insn) |
| { |
| if (LABEL_P (insn)) |
| return; |
| |
| if (NOTE_INSN_BASIC_BLOCK_P (insn)) |
| { |
| init_global_data.prev_insn = NULL; |
| return; |
| } |
| |
| gcc_assert (INSN_P (insn)); |
| |
| if (SCHED_GROUP_P (insn)) |
| /* Setup a sched_group. */ |
| { |
| insn_t prev_insn = init_global_data.prev_insn; |
| |
| if (prev_insn) |
| INSN_SCHED_NEXT (prev_insn) = insn; |
| |
| init_global_data.prev_insn = insn; |
| } |
| else |
| init_global_data.prev_insn = NULL; |
| |
| if (GET_CODE (PATTERN (insn)) == ASM_INPUT |
| || asm_noperands (PATTERN (insn)) >= 0) |
| /* Mark INSN as an asm. */ |
| INSN_ASM_P (insn) = true; |
| |
| { |
| bool force_unique_p; |
| ds_t spec_done_ds; |
| |
| /* Certain instructions cannot be cloned, and frame related insns and |
| the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of |
| their block. */ |
| if (prologue_epilogue_contains (insn)) |
| { |
| if (RTX_FRAME_RELATED_P (insn)) |
| CANT_MOVE (insn) = 1; |
| else |
| { |
| rtx note; |
| for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) |
| if (REG_NOTE_KIND (note) == REG_SAVE_NOTE |
| && ((enum insn_note) INTVAL (XEXP (note, 0)) |
| == NOTE_INSN_EPILOGUE_BEG)) |
| { |
| CANT_MOVE (insn) = 1; |
| break; |
| } |
| } |
| force_unique_p = true; |
| } |
| else |
| if (CANT_MOVE (insn) |
| || INSN_ASM_P (insn) |
| || SCHED_GROUP_P (insn) |
| || CALL_P (insn) |
| /* Exception handling insns are always unique. */ |
| || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn)) |
| /* TRAP_IF, though it has an INSN code, is control_flow_insn_p (). */ |
| || control_flow_insn_p (insn) |
| || volatile_insn_p (PATTERN (insn)) |
| || (targetm.cannot_copy_insn_p |
| && targetm.cannot_copy_insn_p (insn))) |
| force_unique_p = true; |
| else |
| force_unique_p = false; |
| |
| if (targetm.sched.get_insn_spec_ds) |
| { |
| spec_done_ds = targetm.sched.get_insn_spec_ds (insn); |
| spec_done_ds = ds_get_max_dep_weak (spec_done_ds); |
| } |
| else |
| spec_done_ds = 0; |
| |
| /* Initialize INSN's expr. */ |
| init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0, |
| REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn), |
| spec_done_ds, 0, 0, vNULL, true, |
| false, false, false, CANT_MOVE (insn)); |
| } |
| |
| init_first_time_insn_data (insn); |
| } |
| |
| /* Scan the region and initialize instruction data for basic blocks BBS. */ |
| void |
| sel_init_global_and_expr (bb_vec_t bbs) |
| { |
| /* ??? It would be nice to implement push / pop scheme for sched_infos. */ |
| const struct sched_scan_info_def ssi = |
| { |
| NULL, /* extend_bb */ |
| init_global_and_expr_for_bb, /* init_bb */ |
| extend_insn_data, /* extend_insn */ |
| init_global_and_expr_for_insn /* init_insn */ |
| }; |
| |
| sched_scan (&ssi, bbs); |
| } |
| |
| /* Finalize region-scope data structures for basic blocks. */ |
| static void |
| finish_global_and_expr_for_bb (basic_block bb) |
| { |
| av_set_clear (&BB_AV_SET (bb)); |
| BB_AV_LEVEL (bb) = 0; |
| } |
| |
| /* Finalize INSN's data. */ |
| static void |
| finish_global_and_expr_insn (insn_t insn) |
| { |
| if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn)) |
| return; |
| |
| gcc_assert (INSN_P (insn)); |
| |
| if (INSN_LUID (insn) > 0) |
| { |
| free_first_time_insn_data (insn); |
| INSN_WS_LEVEL (insn) = 0; |
| CANT_MOVE (insn) = 0; |
| |
| /* We can no longer assert this, as vinsns of this insn could be |
| easily live in other insns' caches. This should be changed to |
| a counter-like approach among all vinsns. */ |
| gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1); |
| clear_expr (INSN_EXPR (insn)); |
| } |
| } |
| |
| /* Finalize per instruction data for the whole region. */ |
| void |
| sel_finish_global_and_expr (void) |
| { |
| { |
| bb_vec_t bbs; |
| int i; |
| |
| bbs.create (current_nr_blocks); |
| |
| for (i = 0; i < current_nr_blocks; i++) |
| bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))); |
| |
| /* Clear AV_SETs and INSN_EXPRs. */ |
| { |
| const struct sched_scan_info_def ssi = |
| { |
| NULL, /* extend_bb */ |
| finish_global_and_expr_for_bb, /* init_bb */ |
| NULL, /* extend_insn */ |
| finish_global_and_expr_insn /* init_insn */ |
| }; |
| |
| sched_scan (&ssi, bbs); |
| } |
| |
| bbs.release (); |
| } |
| |
| finish_insns (); |
| } |
| |
| |
| /* In the below hooks, we merely calculate whether or not a dependence |
| exists, and in what part of the insn. However, we will need more |
| data when we start caching dependence requests. */ |
| |
| /* Container to hold information for dependency analysis. */ |
| static struct |
| { |
| deps_t dc; |
| |
| /* A variable to track which part of rtx we are scanning in |
| sched-deps.cc: sched_analyze_insn (). */ |
| deps_where_t where; |
| |
| /* Current producer. */ |
| insn_t pro; |
| |
| /* Current consumer. */ |
| vinsn_t con; |
| |
| /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence, |
| where X is one of { INSN, LHS, RHS }. */ |
| ds_t has_dep_p[DEPS_IN_NOWHERE]; |
| } has_dependence_data; |
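| |
| /* E.g. a nonzero has_dep_p[DEPS_IN_LHS] records a dependence found |
| while scanning the consumer's lhs, while has_dep_p[DEPS_IN_INSN] |
| covers dependencies found outside of both lhs and rhs. */ |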
| |
| /* Start analyzing dependencies of INSN. */ |
| static void |
| has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED) |
| { |
| gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE); |
| |
| has_dependence_data.where = DEPS_IN_INSN; |
| } |
| |
| /* Finish analyzing dependencies of an insn. */ |
| static void |
| has_dependence_finish_insn (void) |
| { |
| gcc_assert (has_dependence_data.where == DEPS_IN_INSN); |
| |
| has_dependence_data.where = DEPS_IN_NOWHERE; |
| } |
| |
| /* Start analyzing dependencies of LHS. */ |
| static void |
| has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED) |
| { |
| gcc_assert (has_dependence_data.where == DEPS_IN_INSN); |
| |
| if (VINSN_LHS (has_dependence_data.con) != NULL) |
| has_dependence_data.where = DEPS_IN_LHS; |
| } |
| |
| /* Finish analyzing dependencies of an lhs. */ |
| static void |
| has_dependence_finish_lhs (void) |
| { |
| has_dependence_data.where = DEPS_IN_INSN; |
| } |
| |
| /* Start analyzing dependencies of RHS. */ |
| static void |
| has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED) |
| { |
| gcc_assert (has_dependence_data.where == DEPS_IN_INSN); |
| |
| if (VINSN_RHS (has_dependence_data.con) != NULL) |
| has_dependence_data.where = DEPS_IN_RHS; |
| } |
| |
| /* Finish analyzing dependencies of an rhs. */ |
| static void |
| has_dependence_finish_rhs (void) |
| { |
| gcc_assert (has_dependence_data.where == DEPS_IN_RHS |
| || has_dependence_data.where == DEPS_IN_INSN); |
| |
| has_dependence_data.where = DEPS_IN_INSN; |
| } |
| |
| /* Note a set of REGNO. */ |
| static void |
| has_dependence_note_reg_set (int regno) |
| { |
| struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; |
| |
| if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, |
| VINSN_INSN_RTX |
| (has_dependence_data.con))) |
| { |
| ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; |
| |
| if (reg_last->sets != NULL |
| || reg_last->clobbers != NULL) |
| *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT; |
| |
| if (reg_last->uses || reg_last->implicit_sets) |
| *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
| } |
| } |
| |
| /* Note a clobber of REGNO. */ |
| static void |
| has_dependence_note_reg_clobber (int regno) |
| { |
| struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; |
| |
| if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, |
| VINSN_INSN_RTX |
| (has_dependence_data.con))) |
| { |
| ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; |
| |
| if (reg_last->sets) |
| *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT; |
| |
| if (reg_last->uses || reg_last->implicit_sets) |
| *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
| } |
| } |
| |
| /* Note a use of REGNO. */ |
| static void |
| has_dependence_note_reg_use (int regno) |
| { |
| struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; |
| |
| if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, |
| VINSN_INSN_RTX |
| (has_dependence_data.con))) |
| { |
| ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; |
| |
| if (reg_last->sets) |
| *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE; |
| |
| if (reg_last->clobbers || reg_last->implicit_sets) |
| *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
| |
| /* Merge BE_IN_SPEC bits into *DSP when the dependency producer |
| is actually a check insn. We need to do this for any register |
| read-read dependency with the check unless we track properly |
| all registers written by BE_IN_SPEC-speculated insns, as |
| we don't have explicit dependence lists. See PR 53975. */ |
| if (reg_last->uses) |
| { |
| ds_t pro_spec_checked_ds; |
| |
| pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro); |
| pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds); |
| |
| if (pro_spec_checked_ds != 0) |
| *dsp = ds_full_merge (*dsp, pro_spec_checked_ds, |
| NULL_RTX, NULL_RTX); |
| } |
| } |
| } |
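| |
| /* For instance, when the producer has set r1 (reg_last->sets nonzero) |
| and the consumer reads r1, has_dependence_note_reg_use above |
| accumulates DEP_TRUE into the ds for the current part of the |
| consumer. */ |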
| |
| /* Note a memory dependence. */ |
| static void |
| has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED, |
| rtx pending_mem ATTRIBUTE_UNUSED, |
| insn_t pending_insn ATTRIBUTE_UNUSED, |
| ds_t ds ATTRIBUTE_UNUSED) |
| { |
| if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, |
| VINSN_INSN_RTX (has_dependence_data.con))) |
| { |
| ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; |
| |
| *dsp = ds_full_merge (ds, *dsp, pending_mem, mem); |
| } |
| } |
| |
| /* Note a dependence. */ |
| static void |
| has_dependence_note_dep (insn_t pro, ds_t ds ATTRIBUTE_UNUSED) |
| { |
| insn_t real_pro = has_dependence_data.pro; |
| insn_t real_con = VINSN_INSN_RTX (has_dependence_data.con); |
| |
| /* We do not allow for debug insns to move through others unless they |
| are at the start of bb. This movement may create bookkeeping copies |
| that later would not be able to move up, violating the invariant |
| that a bookkeeping copy should be movable as the original insn. |
| Detect that here and allow that movement if we allowed it before |
| in the first place. */ |
| if (DEBUG_INSN_P (real_con) && !DEBUG_INSN_P (real_pro) |
| && INSN_UID (NEXT_INSN (pro)) == INSN_UID (real_con)) |
| return; |
| |
| if (!sched_insns_conditions_mutex_p (real_pro, real_con)) |
| { |
| ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; |
| |
| *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX); |
| } |
| } |
| |
| /* Mark the insn as having a hard dependence that prevents speculation. */ |
| void |
| sel_mark_hard_insn (rtx insn) |
| { |
| int i; |
| |
| /* Only work when we're in has_dependence_p mode. |
| ??? This is a hack, this should actually be a hook. */ |
| if (!has_dependence_data.dc || !has_dependence_data.pro) |
| return; |
| |
| gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con)); |
| gcc_assert (has_dependence_data.where == DEPS_IN_INSN); |
| |
| for (i = 0; i <
|