| /* Instruction scheduling pass. Selective scheduler and pipeliner. |
| Copyright (C) 2006-2015 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "tm.h" |
| #include "rtl-error.h" |
| #include "tm_p.h" |
| #include "hard-reg-set.h" |
| #include "regs.h" |
| #include "hashtab.h" |
| #include "hash-set.h" |
| #include "vec.h" |
| #include "machmode.h" |
| #include "input.h" |
| #include "function.h" |
| #include "predict.h" |
| #include "dominance.h" |
| #include "cfg.h" |
| #include "cfgbuild.h" |
| #include "basic-block.h" |
| #include "flags.h" |
| #include "insn-config.h" |
| #include "insn-attr.h" |
| #include "except.h" |
| #include "recog.h" |
| #include "params.h" |
| #include "target.h" |
| #include "output.h" |
| #include "sched-int.h" |
| #include "ggc.h" |
| #include "symtab.h" |
| #include "wide-int.h" |
| #include "inchash.h" |
| #include "tree.h" |
| #include "langhooks.h" |
| #include "rtlhooks-def.h" |
| #include "emit-rtl.h" |
| #include "ira.h" |
| #include "ira-int.h" |
| #include "rtl-iter.h" |
| |
| #ifdef INSN_SCHEDULING |
| #include "sel-sched-ir.h" |
| #include "sel-sched-dump.h" |
| #include "sel-sched.h" |
| #include "dbgcnt.h" |
| |
/* Implementation of the selective scheduling approach.
   The implementation below follows the original approach with the following
   changes:
| |
   o the scheduler works after register allocation (but can also be tuned
   to work before RA);
| o some instructions are not copied or register renamed; |
| o conditional jumps are not moved with code duplication; |
| o several jumps in one parallel group are not supported; |
| o when pipelining outer loops, code motion through inner loops |
| is not supported; |
| o control and data speculation are supported; |
| o some improvements for better compile time/performance were made. |
| |
| Terminology |
| =========== |
| |
   A vinsn, or virtual insn, is an insn with additional data characterizing
   its pattern, such as LHS, RHS, register sets used/set/clobbered, etc.
   Vinsns also act as smart pointers to save memory by being reused in
   different expressions.  A vinsn is described by the vinsn_t type.

   An expression is a vinsn with additional data characterizing its properties
   at some point in the control flow graph.  The data may be its usefulness,
   priority, speculative status, whether it was renamed/substituted, etc.
   An expression is described by the expr_t type.

   An availability set (av_set) is a set of expressions at a given control
   flow point.  It is represented as av_set_t.  The expressions in av sets
   are kept sorted in terms of the expr_greater_p function, which makes it
   possible to truncate the set while keeping the best expressions.
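
   For illustration, consider a hypothetical two-insn stream (the register
   names are not from any real target):

       insn1:  ax = bx + cx
       insn2:  dx = [sp + 4]

   At the point before insn1, the av_set contains expressions for both
   insns: insn2 does not depend on insn1 and therefore can be moved up
   through it.  The expressions are kept sorted by expr_greater_p.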
| |
| A fence is a point through which code motion is prohibited. On each step, |
| we gather a parallel group of insns at a fence. It is possible to have |
| multiple fences. A fence is represented via fence_t. |
| |
| A boundary is the border between the fence group and the rest of the code. |
| Currently, we never have more than one boundary per fence, as we finalize |
| the fence group when a jump is scheduled. A boundary is represented |
| via bnd_t. |
| |
| High-level overview |
| =================== |
| |
   The scheduler finds regions to schedule, schedules each one, and finalizes.
   The regions are formed starting from innermost loops, so that when an inner
   loop is pipelined, its prologue can be scheduled together with the not yet
   processed outer loop.  The remaining acyclic regions are found using
   extend_rgns: the blocks that are not yet allocated to any region are
   traversed in top-down order, and a block is added to the region to which
   all its predecessors belong; otherwise, the block starts its own region.
| |
   The main scheduling loop (sel_sched_region_2) consists of just
   scheduling on each fence and updating fences.  For each fence, we fill
   a parallel group of insns (fill_insns) until no more insns can be added.
   First, we compute the available exprs (av-set) at the boundary of the
   current group.  Second, we choose the best expression from it.  If a stall
   is required to schedule any of the expressions, we advance the current
   cycle appropriately.  Thus, the final group does not exactly correspond to
   a VLIW word.  Third, we move the chosen expression to the boundary
   (move_op) and update the intermediate av sets and liveness sets.  We quit
   fill_insns either when no insns are left for scheduling or when we have
   scheduled enough insns to feel like advancing the scheduling point.
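
   In rough pseudocode, the driver looks as follows (a simplified sketch,
   not the actual control flow):

       while there are fences
         for each fence
           while insns can still be added to its group     <- fill_insns
             compute the av set at the group's boundary
             choose the best expr from it, advancing the
               cycle if a stall is needed
             move the chosen expr to the boundary (move_op)
         update fences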
| |
| Computing available expressions |
| =============================== |
| |
   The computation (compute_av_set) is a bottom-up traversal.  At each insn,
   we move the union of its successors' sets up through it via
   moveup_expr_set.  The dependent expressions are removed.  Local
   transformations (substitution, speculation) are applied to move more
   exprs.  Then the expr corresponding to the current insn is added.
   The result is saved on each basic block header.
| |
   When traversing the CFG, we move down for no more than max_ws insns.
   Also, we do not move down to ineligible successors (is_ineligible_successor),
   which includes moving along a back-edge, moving to already scheduled code,
   and moving to another fence.  The first two restrictions are lifted during
   pipelining, which allows us to move insns along a back-edge.  We always have
   an acyclic region for scheduling because we forbid motion through fences.
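
   For example, consider moving the successors' av set up through the copy
   "ax = bx" in a hypothetical stream (the register names are illustrative
   only):

       ax = bx
       -------          <- the union of the successors' sets is moved here
       cx = ax + 1      <- true dependence; substitution gives "cx = bx + 1"
       dx = bx * 2      <- independent; moved up unchanged
       bx = [sp]        <- anti-dependence on "ax = bx"; removed from the set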
| |
| Choosing the best expression |
| ============================ |
| |
   We sort the final availability set via sel_rank_for_schedule, then we remove
   expressions which are not yet ready (tick_check_p) or whose destination
   registers cannot be used.  For some of them, we choose another register via
   find_best_reg.  To do this, we run find_used_regs to calculate the set of
   registers which cannot be used.  The find_used_regs function performs
   a traversal of the code motion paths for an expr.  We consider for renaming
   only registers which are from the same register class as the original one
   and whose use does not interfere with any live ranges.  Finally, we convert
   the resulting set to the ready list format and use the max_issue and
   reorder* hooks similarly to the Haifa scheduler.
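
   A purely illustrative renaming example: if the best expression is
   "ax = bx + cx" but ax cannot be used (say, it is live on another code
   motion path), find_best_reg may choose a free register of the same
   class, say dx; then "dx = bx + cx" is scheduled, and a reg-reg copy
   "ax = dx" is left at the original place (see the next section).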
| |
| Scheduling the best expression |
| ============================== |
| |
   We run the move_op routine to perform the same type of code motion path
   traversal as in find_used_regs.  (These work via the same driver,
   code_motion_path_driver.)  When moving down the CFG, we look for the
   original instruction that gave birth to the chosen expression.  We undo
   the transformations performed on an expression via the history saved in it.
   When found, we remove the instruction or leave a reg-reg copy/speculation
   check if needed.  On the way up, we insert bookkeeping copies at each join
   point.  If a copy is not needed, it will be removed later during this
   traversal.  We update the saved av sets and liveness sets on the way up, too.
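
   A hypothetical bookkeeping example: let bb3 have two predecessors bb1
   and bb2, and let the insn "ax = bx + cx" from bb3 be moved up only
   along the path through bb1.  On the way up, move_op inserts a
   bookkeeping copy of the insn at the join point on the bb2 path, so
   that the value is still computed on every path reaching bb3.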
| |
| Finalizing the schedule |
| ======================= |
| |
| When pipelining, we reschedule the blocks from which insns were pipelined |
| to get a tighter schedule. On Itanium, we also perform bundling via |
| the same routine from ia64.c. |
| |
| Dependence analysis changes |
| =========================== |
| |
   We augmented sched-deps.c with hooks that get called when a particular
   dependence is found in a particular part of an insn.  Using these hooks, we
   can do several things, such as: determine whether an insn can be moved
   through another (has_dependence_p, moveup_expr); find out whether an insn
   can be scheduled on the current cycle (tick_check_p); find out the registers
   that are set/used/clobbered by an insn and all the strange stuff that
   restricts its movement, like SCHED_GROUP_P or CANT_MOVE (done in
   init_global_and_expr_for_insn).
| |
| Initialization changes |
| ====================== |
| |
   There are parts of haifa-sched.c, sched-deps.c, and sched-rgn.c that are
   reused in all of the schedulers.  We have split the initialization of data
   for such parts into different functions prefixed with the scheduler type and
   postfixed with the type of data initialized: {,sel_,haifa_}sched_{init,finish},
   sched_rgn_init/finish, sched_deps_init/finish, sched_init_{luids/bbs}, etc.
   The same splitting is done with the current_sched_info structure:
   dependence-related parts are in sched_deps_info, the common part is in
   common_sched_info, and the haifa/sel/etc. part is in current_sched_info.
| |
| Target contexts |
| =============== |
| |
   As we now have multiple-point scheduling, this does not work with backends
   that save some of the scheduler state to use it in the target hooks.
   For this purpose, we introduce the concept of target contexts, which
   encapsulate such information.  The backend should implement simple routines
   for allocating/freeing/setting such a context.  The scheduler calls these
   as target hooks and handles the target context as an opaque pointer (similar
   to the DFA state type, state_t).
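
   (In the target interface these are the targetm.sched.alloc_sched_context,
   init_sched_context, set_sched_context, clear_sched_context and
   free_sched_context hooks.)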
| |
| Various speedups |
| ================ |
| |
   As the correct data dependence graph is not supported during scheduling
   (which is to be changed in the mid-term), we cache as much of the dependence
   analysis results as possible to avoid reanalyzing.  This includes: bitmap
   caches on each insn in the region's stream saying yes/no for a query with
   a pair of UIDs; hashtables with the previously done transformations on each
   insn in the stream; and a vector keeping a history of transformations on
   each expr.

   Also, we try to minimize the dependence context used on each fence to check
   whether the given expression is ready for scheduling by removing from it
   insns that have definitely completed execution.  The results of
   tick_check_p checks are also cached in a vector on each fence.
| |
| We keep a valid liveness set on each insn in a region to avoid the high |
| cost of recomputation on large basic blocks. |
| |
   Finally, we try to minimize the number of needed updates to the availability
   sets.  The updates happen in two cases: when fill_insns terminates,
   we advance all fences and increase the stage number to show that the region
   has changed and the sets are to be recomputed; and when the next iteration
   of the loop in fill_insns happens (but this one reuses the saved av sets
   on bb headers).  Thus, we try to break out of the fill_insns loop only when
   a "significant" number of insns from the current scheduling window has been
   scheduled.  This should be made a target param.
| |
| |
   TODO: correctly support the data dependence graph at all stages and get rid
   of all the caches.  This should speed up the scheduler.
   TODO: implement moving cond jumps with bookkeeping copies on both targets.
   TODO: tune the scheduler before RA so that it does not create too many
   pseudos.
| |
| |
| References: |
| S.-M. Moon and K. Ebcioglu. Parallelizing nonnumerical code with |
| selective scheduling and software pipelining. |
| ACM TOPLAS, Vol 19, No. 6, pages 853--898, Nov. 1997. |
| |
| Andrey Belevantsev, Maxim Kuvyrkov, Vladimir Makarov, Dmitry Melnik, |
| and Dmitry Zhurikhin. An interblock VLIW-targeted instruction scheduler |
| for GCC. In Proceedings of GCC Developers' Summit 2006. |
| |
| Arutyun Avetisyan, Andrey Belevantsev, and Dmitry Melnik. GCC Instruction |
| Scheduler and Software Pipeliner on the Itanium Platform. EPIC-7 Workshop. |
| http://rogue.colorado.edu/EPIC7/. |
| |
| */ |
| |
| /* True when pipelining is enabled. */ |
| bool pipelining_p; |
| |
| /* True if bookkeeping is enabled. */ |
| bool bookkeeping_p; |
| |
| /* Maximum number of insns that are eligible for renaming. */ |
| int max_insns_to_rename; |
| |
| |
| /* Definitions of local types and macros. */ |
| |
| /* Represents possible outcomes of moving an expression through an insn. */ |
| enum MOVEUP_EXPR_CODE |
| { |
| /* The expression is not changed. */ |
| MOVEUP_EXPR_SAME, |
| |
| /* Not changed, but requires a new destination register. */ |
| MOVEUP_EXPR_AS_RHS, |
| |
| /* Cannot be moved. */ |
| MOVEUP_EXPR_NULL, |
| |
| /* Changed (substituted or speculated). */ |
| MOVEUP_EXPR_CHANGED |
| }; |
| |
| /* The container to be passed into rtx search & replace functions. */ |
| struct rtx_search_arg |
| { |
| /* What we are searching for. */ |
| rtx x; |
| |
| /* The occurrence counter. */ |
| int n; |
| }; |
| |
| typedef struct rtx_search_arg *rtx_search_arg_p; |
| |
| /* This struct contains precomputed hard reg sets that are needed when |
| computing registers available for renaming. */ |
| struct hard_regs_data |
| { |
| /* For every mode, this stores registers available for use with |
| that mode. */ |
| HARD_REG_SET regs_for_mode[NUM_MACHINE_MODES]; |
| |
| /* True when regs_for_mode[mode] is initialized. */ |
| bool regs_for_mode_ok[NUM_MACHINE_MODES]; |
| |
  /* For every register, the set of regs it is ok to rename into.
     The register in question is always a member; if it is not, the
     whole set has not been computed yet.  */
| HARD_REG_SET regs_for_rename[FIRST_PSEUDO_REGISTER]; |
| |
| /* For every mode, this stores registers not available due to |
| call clobbering. */ |
| HARD_REG_SET regs_for_call_clobbered[NUM_MACHINE_MODES]; |
| |
| /* All registers that are used or call used. */ |
| HARD_REG_SET regs_ever_used; |
| |
| #ifdef STACK_REGS |
| /* Stack registers. */ |
| HARD_REG_SET stack_regs; |
| #endif |
| }; |
| |
| /* Holds the results of computation of available for renaming and |
| unavailable hard registers. */ |
| struct reg_rename |
| { |
| /* These are unavailable due to calls crossing, globalness, etc. */ |
| HARD_REG_SET unavailable_hard_regs; |
| |
| /* These are *available* for renaming. */ |
| HARD_REG_SET available_for_renaming; |
| |
| /* Whether this code motion path crosses a call. */ |
| bool crosses_call; |
| }; |
| |
/* A global structure that contains the needed information about hard
   regs.  */
| static struct hard_regs_data sel_hrd; |
| |
| |
| /* This structure holds local data used in code_motion_path_driver hooks on |
| the same or adjacent levels of recursion. Here we keep those parameters |
| that are not used in code_motion_path_driver routine itself, but only in |
| its hooks. Moreover, all parameters that can be modified in hooks are |
| in this structure, so all other parameters passed explicitly to hooks are |
| read-only. */ |
| struct cmpd_local_params |
| { |
| /* Local params used in move_op_* functions. */ |
| |
| /* Edges for bookkeeping generation. */ |
| edge e1, e2; |
| |
| /* C_EXPR merged from all successors and locally allocated temporary C_EXPR. */ |
| expr_t c_expr_merged, c_expr_local; |
| |
| /* Local params used in fur_* functions. */ |
| /* Copy of the ORIGINAL_INSN list, stores the original insns already |
| found before entering the current level of code_motion_path_driver. */ |
| def_list_t old_original_insns; |
| |
| /* Local params used in move_op_* functions. */ |
  /* True when we have removed the last insn in the block which was
     also a boundary.  Do not update anything or create bookkeeping copies.  */
| BOOL_BITFIELD removed_last_insn : 1; |
| }; |
| |
| /* Stores the static parameters for move_op_* calls. */ |
| struct moveop_static_params |
| { |
| /* Destination register. */ |
| rtx dest; |
| |
| /* Current C_EXPR. */ |
| expr_t c_expr; |
| |
  /* The UID of the expr_vliw which is to be moved up.  If we find other exprs,
     they are to be removed.  */
| int uid; |
| |
| #ifdef ENABLE_CHECKING |
| /* This is initialized to the insn on which the driver stopped its traversal. */ |
| insn_t failed_insn; |
| #endif |
| |
  /* True if we scheduled an insn with a different register.  */
| bool was_renamed; |
| }; |
| |
| /* Stores the static parameters for fur_* calls. */ |
| struct fur_static_params |
| { |
| /* Set of registers unavailable on the code motion path. */ |
| regset used_regs; |
| |
| /* Pointer to the list of original insns definitions. */ |
| def_list_t *original_insns; |
| |
| /* True if a code motion path contains a CALL insn. */ |
| bool crosses_call; |
| }; |
| |
| typedef struct fur_static_params *fur_static_params_p; |
| typedef struct cmpd_local_params *cmpd_local_params_p; |
| typedef struct moveop_static_params *moveop_static_params_p; |
| |
| /* Set of hooks and parameters that determine behaviour specific to |
| move_op or find_used_regs functions. */ |
| struct code_motion_path_driver_info_def |
| { |
  /* Called on entry to the basic block.  */
| int (*on_enter) (insn_t, cmpd_local_params_p, void *, bool); |
| |
| /* Called when original expr is found. */ |
| void (*orig_expr_found) (insn_t, expr_t, cmpd_local_params_p, void *); |
| |
  /* Called while descending the current basic block if the current insn is
     not the original EXPR we're searching for.  */
| bool (*orig_expr_not_found) (insn_t, av_set_t, void *); |
| |
  /* Function to merge C_EXPRs from different successors.  */
| void (*merge_succs) (insn_t, insn_t, int, cmpd_local_params_p, void *); |
| |
| /* Function to finalize merge from different successors and possibly |
| deallocate temporary data structures used for merging. */ |
| void (*after_merge_succs) (cmpd_local_params_p, void *); |
| |
| /* Called on the backward stage of recursion to do moveup_expr. |
| Used only with move_op_*. */ |
| void (*ascend) (insn_t, void *); |
| |
| /* Called on the ascending pass, before returning from the current basic |
| block or from the whole traversal. */ |
| void (*at_first_insn) (insn_t, cmpd_local_params_p, void *); |
| |
| /* When processing successors in move_op we need only descend into |
| SUCCS_NORMAL successors, while in find_used_regs we need SUCCS_ALL. */ |
| int succ_flags; |
| |
  /* The routine name to print in dumps ("move_op" or "find_used_regs").  */
| const char *routine_name; |
| }; |
| |
| /* Global pointer to current hooks, either points to MOVE_OP_HOOKS or |
| FUR_HOOKS. */ |
| struct code_motion_path_driver_info_def *code_motion_path_driver_info; |
| |
| /* Set of hooks for performing move_op and find_used_regs routines with |
| code_motion_path_driver. */ |
| extern struct code_motion_path_driver_info_def move_op_hooks, fur_hooks; |
| |
| /* True if/when we want to emulate Haifa scheduler in the common code. |
| This is used in sched_rgn_local_init and in various places in |
| sched-deps.c. */ |
| int sched_emulate_haifa_p; |
| |
/* GLOBAL_LEVEL is used to discard information stored in basic block headers'
   av_sets.  The av_set of a bb header is valid if the bb header's level is
   equal to GLOBAL_LEVEL, and invalid if it is less.  This is primarily used
   to advance the scheduling window.  */
| int global_level; |
| |
| /* Current fences. */ |
| flist_t fences; |
| |
| /* True when separable insns should be scheduled as RHSes. */ |
| static bool enable_schedule_as_rhs_p; |
| |
/* Used in verify_target_availability to assert that a target reg is reported
   unavailable by both TARGET_UNAVAILABLE and find_used_regs only if
   we haven't scheduled anything on the previous fence.
   If scheduled_something_on_previous_fence is true, TARGET_UNAVAILABLE can
   have a more conservative value than the one returned by find_used_regs,
   so we shouldn't assert that these values are equal.  */
| static bool scheduled_something_on_previous_fence; |
| |
| /* All newly emitted insns will have their uids greater than this value. */ |
| static int first_emitted_uid; |
| |
| /* Set of basic blocks that are forced to start new ebbs. This is a subset |
| of all the ebb heads. */ |
| static bitmap_head _forced_ebb_heads; |
| bitmap_head *forced_ebb_heads = &_forced_ebb_heads; |
| |
| /* Blocks that need to be rescheduled after pipelining. */ |
| bitmap blocks_to_reschedule = NULL; |
| |
| /* True when the first lv set should be ignored when updating liveness. */ |
| static bool ignore_first = false; |
| |
| /* Number of insns max_issue has initialized data structures for. */ |
| static int max_issue_size = 0; |
| |
| /* Whether we can issue more instructions. */ |
| static int can_issue_more; |
| |
| /* Maximum software lookahead window size, reduced when rescheduling after |
| pipelining. */ |
| static int max_ws; |
| |
| /* Number of insns scheduled in current region. */ |
| static int num_insns_scheduled; |
| |
/* A vector of expressions, used so that they can be sorted.  */
| static vec<expr_t> vec_av_set = vNULL; |
| |
| /* A vector of vinsns is used to hold temporary lists of vinsns. */ |
| typedef vec<vinsn_t> vinsn_vec_t; |
| |
/* This vector has the exprs which may still be present in av_sets, but
   actually can't be moved up due to bookkeeping created during code motion
   to another fence.  See the comment near the call to
   update_and_record_unavailable_insns for a detailed explanation.  */
| static vinsn_vec_t vec_bookkeeping_blocked_vinsns = vinsn_vec_t (); |
| |
| /* This vector has vinsns which are scheduled with renaming on the first fence |
| and then seen on the second. For expressions with such vinsns, target |
| availability information may be wrong. */ |
| static vinsn_vec_t vec_target_unavailable_vinsns = vinsn_vec_t (); |
| |
| /* Vector to store temporary nops inserted in move_op to prevent removal |
| of empty bbs. */ |
| static vec<insn_t> vec_temp_moveop_nops = vNULL; |
| |
| /* These bitmaps record original instructions scheduled on the current |
| iteration and bookkeeping copies created by them. */ |
| static bitmap current_originators = NULL; |
| static bitmap current_copies = NULL; |
| |
| /* This bitmap marks the blocks visited by code_motion_path_driver so we don't |
| visit them afterwards. */ |
| static bitmap code_motion_visited_blocks = NULL; |
| |
| /* Variables to accumulate different statistics. */ |
| |
| /* The number of bookkeeping copies created. */ |
| static int stat_bookkeeping_copies; |
| |
/* The number of insns that required bookkeeping for their scheduling.  */
| static int stat_insns_needed_bookkeeping; |
| |
| /* The number of insns that got renamed. */ |
| static int stat_renamed_scheduled; |
| |
| /* The number of substitutions made during scheduling. */ |
| static int stat_substitutions_total; |
| |
| |
| /* Forward declarations of static functions. */ |
| static bool rtx_ok_for_substitution_p (rtx, rtx); |
| static int sel_rank_for_schedule (const void *, const void *); |
| static av_set_t find_sequential_best_exprs (bnd_t, expr_t, bool); |
| static basic_block find_block_for_bookkeeping (edge e1, edge e2, bool lax); |
| |
| static rtx get_dest_from_orig_ops (av_set_t); |
| static basic_block generate_bookkeeping_insn (expr_t, edge, edge); |
| static bool find_used_regs (insn_t, av_set_t, regset, struct reg_rename *, |
| def_list_t *); |
| static bool move_op (insn_t, av_set_t, expr_t, rtx, expr_t, bool*); |
| static int code_motion_path_driver (insn_t, av_set_t, ilist_t, |
| cmpd_local_params_p, void *); |
| static void sel_sched_region_1 (void); |
| static void sel_sched_region_2 (int); |
| static av_set_t compute_av_set_inside_bb (insn_t, ilist_t, int, bool); |
| |
| static void debug_state (state_t); |
| |
| |
| /* Functions that work with fences. */ |
| |
| /* Advance one cycle on FENCE. */ |
| static void |
| advance_one_cycle (fence_t fence) |
| { |
| unsigned i; |
| int cycle; |
| rtx_insn *insn; |
| |
| advance_state (FENCE_STATE (fence)); |
| cycle = ++FENCE_CYCLE (fence); |
| FENCE_ISSUED_INSNS (fence) = 0; |
| FENCE_STARTS_CYCLE_P (fence) = 1; |
| can_issue_more = issue_rate; |
| FENCE_ISSUE_MORE (fence) = can_issue_more; |
| |
| for (i = 0; vec_safe_iterate (FENCE_EXECUTING_INSNS (fence), i, &insn); ) |
| { |
| if (INSN_READY_CYCLE (insn) < cycle) |
| { |
| remove_from_deps (FENCE_DC (fence), insn); |
| FENCE_EXECUTING_INSNS (fence)->unordered_remove (i); |
| continue; |
| } |
| i++; |
| } |
| if (sched_verbose >= 2) |
| { |
| sel_print ("Finished a cycle. Current cycle = %d\n", FENCE_CYCLE (fence)); |
| debug_state (FENCE_STATE (fence)); |
| } |
| } |
| |
/* Returns true when SUCC is in a fallthru bb of INSN, possibly
   skipping empty basic blocks.  */
| static bool |
| in_fallthru_bb_p (rtx insn, rtx succ) |
| { |
| basic_block bb = BLOCK_FOR_INSN (insn); |
| edge e; |
| |
| if (bb == BLOCK_FOR_INSN (succ)) |
| return true; |
| |
| e = find_fallthru_edge_from (bb); |
| if (e) |
| bb = e->dest; |
| else |
| return false; |
| |
| while (sel_bb_empty_p (bb)) |
| bb = bb->next_bb; |
| |
| return bb == BLOCK_FOR_INSN (succ); |
| } |
| |
/* Construct successor fences from OLD_FENCES and put them in NEW_FENCES.
   When a successor will continue an ebb, transfer all parameters of a fence
   to the new fence.  ORIG_MAX_SEQNO is the maximal seqno before this round
   of scheduling, helping to distinguish between the old and the new code.  */
| static void |
| extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences, |
| int orig_max_seqno) |
| { |
| bool was_here_p = false; |
| insn_t insn = NULL; |
| insn_t succ; |
| succ_iterator si; |
| ilist_iterator ii; |
| fence_t fence = FLIST_FENCE (old_fences); |
| basic_block bb; |
| |
| /* Get the only element of FENCE_BNDS (fence). */ |
| FOR_EACH_INSN (insn, ii, FENCE_BNDS (fence)) |
| { |
| gcc_assert (!was_here_p); |
| was_here_p = true; |
| } |
| gcc_assert (was_here_p && insn != NULL_RTX); |
| |
| /* When in the "middle" of the block, just move this fence |
| to the new list. */ |
| bb = BLOCK_FOR_INSN (insn); |
| if (! sel_bb_end_p (insn) |
| || (single_succ_p (bb) |
| && single_pred_p (single_succ (bb)))) |
| { |
| insn_t succ; |
| |
| succ = (sel_bb_end_p (insn) |
| ? sel_bb_head (single_succ (bb)) |
| : NEXT_INSN (insn)); |
| |
| if (INSN_SEQNO (succ) > 0 |
| && INSN_SEQNO (succ) <= orig_max_seqno |
| && INSN_SCHED_TIMES (succ) <= 0) |
| { |
| FENCE_INSN (fence) = succ; |
| move_fence_to_fences (old_fences, new_fences); |
| |
| if (sched_verbose >= 1) |
| sel_print ("Fence %d continues as %d[%d] (state continue)\n", |
| INSN_UID (insn), INSN_UID (succ), BLOCK_NUM (succ)); |
| } |
| return; |
| } |
| |
| /* Otherwise copy fence's structures to (possibly) multiple successors. */ |
| FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
| { |
| int seqno = INSN_SEQNO (succ); |
| |
| if (0 < seqno && seqno <= orig_max_seqno |
| && (pipelining_p || INSN_SCHED_TIMES (succ) <= 0)) |
| { |
| bool b = (in_same_ebb_p (insn, succ) |
| || in_fallthru_bb_p (insn, succ)); |
| |
| if (sched_verbose >= 1) |
| sel_print ("Fence %d continues as %d[%d] (state %s)\n", |
| INSN_UID (insn), INSN_UID (succ), |
| BLOCK_NUM (succ), b ? "continue" : "reset"); |
| |
| if (b) |
| add_dirty_fence_to_fences (new_fences, succ, fence); |
| else |
| { |
| /* Mark block of the SUCC as head of the new ebb. */ |
| bitmap_set_bit (forced_ebb_heads, BLOCK_NUM (succ)); |
| add_clean_fence_to_fences (new_fences, succ, fence); |
| } |
| } |
| } |
| } |
| |
| |
| /* Functions to support substitution. */ |
| |
/* Returns whether INSN with dependence status DS is eligible for
   substitution, i.e. it's a copy operation x := y, and the RHS that is
   moved up through this insn should be substituted.
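
   For example (with purely illustrative register names): if INSN is the
   copy "ax = dx" and the expression "bx = ax + 1" is moved up through it,
   substitution rewrites its rhs to "dx + 1", so the resulting
   "bx = dx + 1" no longer depends on INSN.  */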
| static bool |
| can_substitute_through_p (insn_t insn, ds_t ds) |
| { |
| /* We can substitute only true dependencies. */ |
| if ((ds & DEP_OUTPUT) |
| || (ds & DEP_ANTI) |
| || ! INSN_RHS (insn) |
| || ! INSN_LHS (insn)) |
| return false; |
| |
  /* Now we just need to make sure that the INSN_LHS and INSN_RHS each
     consist of a single simple REG rtx.  */
| if (REG_P (INSN_LHS (insn)) |
| && REG_P (INSN_RHS (insn))) |
| return true; |
| return false; |
| } |
| |
/* Substitute all occurrences of INSN's destination in EXPR's vinsn with INSN's
   source (if INSN is eligible for substitution).  Returns TRUE if
   substitution was actually performed, FALSE otherwise.  Substitution might
   not be performed because either EXPR's vinsn doesn't contain INSN's
   destination or the resulting insn is invalid for the target machine.
   When UNDO is true, perform unsubstitution instead (the difference is in
   the part of rtx on which validate_replace_rtx is called).  */
| static bool |
| substitute_reg_in_expr (expr_t expr, insn_t insn, bool undo) |
| { |
| rtx *where; |
| bool new_insn_valid; |
| vinsn_t *vi = &EXPR_VINSN (expr); |
| bool has_rhs = VINSN_RHS (*vi) != NULL; |
| rtx old, new_rtx; |
| |
  /* Do not try to replace in SET_DEST.  Although we'll choose a new
     register for the RHS, we don't want to change the RHS' original reg.
     If the insn is not a SET, we may still be able to substitute something
     in it, and if we're here (we don't have deps), it doesn't write INSN's
     dest.  */
| where = (has_rhs |
| ? &VINSN_RHS (*vi) |
| : &PATTERN (VINSN_INSN_RTX (*vi))); |
| old = undo ? INSN_RHS (insn) : INSN_LHS (insn); |
| |
| /* Substitute if INSN has a form of x:=y and LHS(INSN) occurs in *VI. */ |
| if (rtx_ok_for_substitution_p (old, *where)) |
| { |
| rtx_insn *new_insn; |
| rtx *where_replace; |
| |
| /* We should copy these rtxes before substitution. */ |
| new_rtx = copy_rtx (undo ? INSN_LHS (insn) : INSN_RHS (insn)); |
| new_insn = create_copy_of_insn_rtx (VINSN_INSN_RTX (*vi)); |
| |
| /* Where we'll replace. |
| WHERE_REPLACE should point inside NEW_INSN, so INSN_RHS couldn't be |
| used instead of SET_SRC. */ |
| where_replace = (has_rhs |
| ? &SET_SRC (PATTERN (new_insn)) |
| : &PATTERN (new_insn)); |
| |
| new_insn_valid |
| = validate_replace_rtx_part_nosimplify (old, new_rtx, where_replace, |
| new_insn); |
| |
      /* ??? Actually, the constrain_operands result depends upon the choice
         of destination register.  E.g. if we allow a single register to be
         an rhs, and if we try to move dx=ax (as an rhs) through ax=dx, we'll
         end up with the invalid insn dx=dx, so we'll lose this rhs here.
         We just can't come up with a significant testcase for this, so we're
         leaving it for now.  */
| if (new_insn_valid) |
| { |
| change_vinsn_in_expr (expr, |
| create_vinsn_from_insn_rtx (new_insn, false)); |
| |
| /* Do not allow clobbering the address register of speculative |
| insns. */ |
| if ((EXPR_SPEC_DONE_DS (expr) & SPECULATIVE) |
| && register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), |
| expr_dest_reg (expr))) |
| EXPR_TARGET_AVAILABLE (expr) = false; |
| |
| return true; |
| } |
| else |
| return false; |
| } |
| else |
| return false; |
| } |
| |
/* Return the number of places WHAT appears within WHERE.
   Bail out when we find a reference occupying several hard registers.  */
| static int |
| count_occurrences_equiv (const_rtx what, const_rtx where) |
| { |
| int count = 0; |
| subrtx_iterator::array_type array; |
| FOR_EACH_SUBRTX (iter, array, where, NONCONST) |
| { |
| const_rtx x = *iter; |
| if (REG_P (x) && REGNO (x) == REGNO (what)) |
| { |
| /* Bail out if mode is different or more than one register is |
| used. */ |
| if (GET_MODE (x) != GET_MODE (what) |
| || (HARD_REGISTER_P (x) |
| && hard_regno_nregs[REGNO (x)][GET_MODE (x)] > 1)) |
| return 0; |
| count += 1; |
| } |
| else if (GET_CODE (x) == SUBREG |
| && (!REG_P (SUBREG_REG (x)) |
| || REGNO (SUBREG_REG (x)) == REGNO (what))) |
| /* ??? Do not support substituting regs inside subregs. In that case, |
| simplify_subreg will be called by validate_replace_rtx, and |
| unsubstitution will fail later. */ |
| return 0; |
| } |
| return count; |
| } |
| |
| /* Returns TRUE if WHAT is found in WHERE rtx tree. */ |
| static bool |
| rtx_ok_for_substitution_p (rtx what, rtx where) |
| { |
| return (count_occurrences_equiv (what, where) > 0); |
| } |
| |
| |
| /* Functions to support register renaming. */ |
| |
/* Create an insn rtx with the lhs of VI and RHS_RTX as its rhs.  Returns
   the newly created insn rtx.  */
| static rtx_insn * |
| create_insn_rtx_with_rhs (vinsn_t vi, rtx rhs_rtx) |
| { |
| rtx lhs_rtx; |
| rtx pattern; |
| rtx_insn *insn_rtx; |
| |
| lhs_rtx = copy_rtx (VINSN_LHS (vi)); |
| |
| pattern = gen_rtx_SET (VOIDmode, lhs_rtx, rhs_rtx); |
| insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX); |
| |
| return insn_rtx; |
| } |
| |
/* Returns whether INSN's src can be replaced with register NEW_SRC_REG.
   E.g. the following insn is valid for i386:
| |
| (insn:HI 2205 6585 2207 727 ../../gcc/libiberty/regex.c:3337 |
| (set (mem/s:QI (plus:SI (plus:SI (reg/f:SI 7 sp) |
| (reg:SI 0 ax [orig:770 c1 ] [770])) |
| (const_int 288 [0x120])) [0 str S1 A8]) |
| (const_int 0 [0x0])) 43 {*movqi_1} (nil) |
| (nil)) |
| |
| But if we change (const_int 0 [0x0]) to (reg:QI 4 si), it will be invalid |
| because of operand constraints: |
| |
| (define_insn "*movqi_1" |
| [(set (match_operand:QI 0 "nonimmediate_operand" "=q,q ,q ,r,r ,?r,m") |
| (match_operand:QI 1 "general_operand" " q,qn,qm,q,rn,qm,qn") |
| )] |
| |
   So do constrain_operands here, before choosing NEW_SRC_REG as the best
   reg for the rhs.  */
| |
| static bool |
| replace_src_with_reg_ok_p (insn_t insn, rtx new_src_reg) |
| { |
| vinsn_t vi = INSN_VINSN (insn); |
| machine_mode mode; |
| rtx dst_loc; |
| bool res; |
| |
| gcc_assert (VINSN_SEPARABLE_P (vi)); |
| |
| get_dest_and_mode (insn, &dst_loc, &mode); |
| gcc_assert (mode == GET_MODE (new_src_reg)); |
| |
| if (REG_P (dst_loc) && REGNO (new_src_reg) == REGNO (dst_loc)) |
| return true; |
| |
| /* See whether SET_SRC can be replaced with this register. */ |
| validate_change (insn, &SET_SRC (PATTERN (insn)), new_src_reg, 1); |
| res = verify_changes (0); |
| cancel_changes (0); |
| |
| return res; |
| } |
| |
/* Returns whether INSN will still be valid after replacing its DEST with
   register NEW_REG.  */
| static bool |
| replace_dest_with_reg_ok_p (insn_t insn, rtx new_reg) |
| { |
| vinsn_t vi = INSN_VINSN (insn); |
| bool res; |
| |
| /* We should deal here only with separable insns. */ |
| gcc_assert (VINSN_SEPARABLE_P (vi)); |
| gcc_assert (GET_MODE (VINSN_LHS (vi)) == GET_MODE (new_reg)); |
| |
| /* See whether SET_DEST can be replaced with this register. */ |
| validate_change (insn, &SET_DEST (PATTERN (insn)), new_reg, 1); |
| res = verify_changes (0); |
| cancel_changes (0); |
| |
| return res; |
| } |
| |
/* Create an insn rtx with the rhs of VI and LHS_RTX as its lhs.  */
| static rtx_insn * |
| create_insn_rtx_with_lhs (vinsn_t vi, rtx lhs_rtx) |
| { |
| rtx rhs_rtx; |
| rtx pattern; |
| rtx_insn *insn_rtx; |
| |
| rhs_rtx = copy_rtx (VINSN_RHS (vi)); |
| |
| pattern = gen_rtx_SET (VOIDmode, lhs_rtx, rhs_rtx); |
| insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX); |
| |
| return insn_rtx; |
| } |
| |
/* Substitute the lhs in the given expression EXPR for register NEW_REG.
   The original SET_DEST may be an arbitrary rtx, not only a register.  */
| static void |
| replace_dest_with_reg_in_expr (expr_t expr, rtx new_reg) |
| { |
| rtx_insn *insn_rtx; |
| vinsn_t vinsn; |
| |
| insn_rtx = create_insn_rtx_with_lhs (EXPR_VINSN (expr), new_reg); |
| vinsn = create_vinsn_from_insn_rtx (insn_rtx, false); |
| |
| change_vinsn_in_expr (expr, vinsn); |
| EXPR_WAS_RENAMED (expr) = 1; |
| EXPR_TARGET_AVAILABLE (expr) = 1; |
| } |
| |
| /* Returns whether VI writes either one of the USED_REGS registers or, |
| if a register is a hard one, one of the UNAVAILABLE_HARD_REGS registers. */ |
| static bool |
| vinsn_writes_one_of_regs_p (vinsn_t vi, regset used_regs, |
| HARD_REG_SET unavailable_hard_regs) |
| { |
| unsigned regno; |
| reg_set_iterator rsi; |
| |
| EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (vi), 0, regno, rsi) |
| { |
| if (REGNO_REG_SET_P (used_regs, regno)) |
| return true; |
| if (HARD_REGISTER_NUM_P (regno) |
| && TEST_HARD_REG_BIT (unavailable_hard_regs, regno)) |
| return true; |
| } |
| |
| EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (vi), 0, regno, rsi) |
| { |
| if (REGNO_REG_SET_P (used_regs, regno)) |
| return true; |
| if (HARD_REGISTER_NUM_P (regno) |
| && TEST_HARD_REG_BIT (unavailable_hard_regs, regno)) |
| return true; |
| } |
| |
| return false; |
| } |
| |
/* Returns the register class of the output register in INSN.
   Returns NO_REGS for call insns because some targets have constraints on
   the destination register of a call insn.
| |
| Code adopted from regrename.c::build_def_use. */ |
| static enum reg_class |
| get_reg_class (rtx_insn *insn) |
| { |
| int i, n_ops; |
| |
| extract_constrain_insn (insn); |
| preprocess_constraints (insn); |
| n_ops = recog_data.n_operands; |
| |
| const operand_alternative *op_alt = which_op_alt (); |
| if (asm_noperands (PATTERN (insn)) > 0) |
| { |
| for (i = 0; i < n_ops; i++) |
| if (recog_data.operand_type[i] == OP_OUT) |
| { |
| rtx *loc = recog_data.operand_loc[i]; |
| rtx op = *loc; |
| enum reg_class cl = alternative_class (op_alt, i); |
| |
| if (REG_P (op) |
| && REGNO (op) == ORIGINAL_REGNO (op)) |
| continue; |
| |
| return cl; |
| } |
| } |
| else if (!CALL_P (insn)) |
| { |
| for (i = 0; i < n_ops + recog_data.n_dups; i++) |
| { |
| int opn = i < n_ops ? i : recog_data.dup_num[i - n_ops]; |
| enum reg_class cl = alternative_class (op_alt, opn); |
| |
	  if (recog_data.operand_type[opn] == OP_OUT
	      || recog_data.operand_type[opn] == OP_INOUT)
| return cl; |
| } |
| } |
| |
  /* Insns like
     (insn (set (reg:CCZ 17 flags) (compare:CCZ ...)))
     may result in returning NO_REGS, because flags is written implicitly
     through the CMP insn, which has no OP_OUT | OP_INOUT operands.  */
| return NO_REGS; |
| } |
| |
| #ifdef HARD_REGNO_RENAME_OK |
| /* Calculate HARD_REGNO_RENAME_OK data for REGNO. */ |
| static void |
| init_hard_regno_rename (int regno) |
| { |
| int cur_reg; |
| |
| SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], regno); |
| |
| for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++) |
| { |
      /* We are not interested in renaming into regs that are never used.  */
| if (!TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg)) |
| continue; |
| |
| if (HARD_REGNO_RENAME_OK (regno, cur_reg)) |
| SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], cur_reg); |
| } |
| } |
| #endif |
| |
| /* A wrapper around HARD_REGNO_RENAME_OK that will look into the hard regs |
| data first. */ |
| static inline bool |
| sel_hard_regno_rename_ok (int from ATTRIBUTE_UNUSED, int to ATTRIBUTE_UNUSED) |
| { |
| #ifdef HARD_REGNO_RENAME_OK |
| /* Check whether this is all calculated. */ |
| if (TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], from)) |
| return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to); |
| |
| init_hard_regno_rename (from); |
| |
| return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to); |
| #else |
| return true; |
| #endif |
| } |
| |
| /* Calculate set of registers that are capable of holding MODE. */ |
| static void |
| init_regs_for_mode (machine_mode mode) |
| { |
| int cur_reg; |
| |
| CLEAR_HARD_REG_SET (sel_hrd.regs_for_mode[mode]); |
| CLEAR_HARD_REG_SET (sel_hrd.regs_for_call_clobbered[mode]); |
| |
| for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++) |
| { |
| int nregs; |
| int i; |
| |
| /* See whether it accepts all modes that occur in |
| original insns. */ |
| if (! HARD_REGNO_MODE_OK (cur_reg, mode)) |
| continue; |
| |
| nregs = hard_regno_nregs[cur_reg][mode]; |
| |
| for (i = nregs - 1; i >= 0; --i) |
| if (fixed_regs[cur_reg + i] |
| || global_regs[cur_reg + i] |
| /* Can't use regs which aren't saved by |
| the prologue. */ |
| || !TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg + i) |
| /* Can't use regs with non-null REG_BASE_VALUE, because adjusting |
| it affects aliasing globally and invalidates all AV sets. */ |
| || get_reg_base_value (cur_reg + i) |
| #ifdef LEAF_REGISTERS |
| /* We can't use a non-leaf register if we're in a |
| leaf function. */ |
| || (crtl->is_leaf |
| && !LEAF_REGISTERS[cur_reg + i]) |
| #endif |
| ) |
| break; |
| |
| if (i >= 0) |
| continue; |
| |
| if (HARD_REGNO_CALL_PART_CLOBBERED (cur_reg, mode)) |
| SET_HARD_REG_BIT (sel_hrd.regs_for_call_clobbered[mode], |
| cur_reg); |
| |
| /* If the CUR_REG passed all the checks above, |
| then it's ok. */ |
| SET_HARD_REG_BIT (sel_hrd.regs_for_mode[mode], cur_reg); |
| } |
| |
| sel_hrd.regs_for_mode_ok[mode] = true; |
| } |
| |
/* Init all register sets gathered in SEL_HRD.  */
| static void |
| init_hard_regs_data (void) |
| { |
| int cur_reg = 0; |
| int cur_mode = 0; |
| |
| CLEAR_HARD_REG_SET (sel_hrd.regs_ever_used); |
| for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++) |
| if (df_regs_ever_live_p (cur_reg) || call_used_regs[cur_reg]) |
| SET_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg); |
| |
| /* Initialize registers that are valid based on mode when this is |
| really needed. */ |
| for (cur_mode = 0; cur_mode < NUM_MACHINE_MODES; cur_mode++) |
| sel_hrd.regs_for_mode_ok[cur_mode] = false; |
| |
  /* Mark that the HARD_REGNO_RENAME_OK data is not calculated yet.  */
| for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++) |
| CLEAR_HARD_REG_SET (sel_hrd.regs_for_rename[cur_reg]); |
| |
| #ifdef STACK_REGS |
| CLEAR_HARD_REG_SET (sel_hrd.stack_regs); |
| |
| for (cur_reg = FIRST_STACK_REG; cur_reg <= LAST_STACK_REG; cur_reg++) |
| SET_HARD_REG_BIT (sel_hrd.stack_regs, cur_reg); |
| #endif |
| } |
| |
/* Mark hardware regs in REG_RENAME_P that are not suitable for renaming
   the rhs of DEF->ORIG_INSN due to hardware restrictions (register class,
   mode compatibility, etc.).  This doesn't affect the original insn's dest
   reg, if it isn't in USED_REGS.  DEF is the definition insn of the rhs for
   which the destination register is sought.  The LHS of DEF->ORIG_INSN may
   be a REG or a MEM.  Registers that are in USED_REGS are always marked in
   UNAVAILABLE_HARD_REGS as well.  */
| |
| static void |
| mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p, |
| regset used_regs ATTRIBUTE_UNUSED) |
| { |
| machine_mode mode; |
| enum reg_class cl = NO_REGS; |
| rtx orig_dest; |
| unsigned cur_reg, regno; |
| hard_reg_set_iterator hrsi; |
| |
| gcc_assert (GET_CODE (PATTERN (def->orig_insn)) == SET); |
| gcc_assert (reg_rename_p); |
| |
| orig_dest = SET_DEST (PATTERN (def->orig_insn)); |
| |
| /* We have decided not to rename 'mem = something;' insns, as 'something' |
| is usually a register. */ |
| if (!REG_P (orig_dest)) |
| return; |
| |
| regno = REGNO (orig_dest); |
| |
| /* If before reload, don't try to work with pseudos. */ |
| if (!reload_completed && !HARD_REGISTER_NUM_P (regno)) |
| return; |
| |
| if (reload_completed) |
| cl = get_reg_class (def->orig_insn); |
| |
| /* Stop if the original register is one of the fixed_regs, global_regs or |
| frame pointer, or we could not discover its class. */ |
| if (fixed_regs[regno] |
| || global_regs[regno] |
| #if !HARD_FRAME_POINTER_IS_FRAME_POINTER |
| || (frame_pointer_needed && regno == HARD_FRAME_POINTER_REGNUM) |
| #else |
| || (frame_pointer_needed && regno == FRAME_POINTER_REGNUM) |
| #endif |
| || (reload_completed && cl == NO_REGS)) |
| { |
| SET_HARD_REG_SET (reg_rename_p->unavailable_hard_regs); |
| |
| /* Give a chance for original register, if it isn't in used_regs. */ |
| if (!def->crosses_call) |
| CLEAR_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno); |
| |
| return; |
| } |
| |
  /* If something is allocated on the stack in this function, mark the frame
     pointer register as unavailable, considering also modes.
     FIXME: it is enough to do this once for all original defs.  */
| if (frame_pointer_needed) |
| { |
| add_to_hard_reg_set (®_rename_p->unavailable_hard_regs, |
| Pmode, FRAME_POINTER_REGNUM); |
| |
| if (!HARD_FRAME_POINTER_IS_FRAME_POINTER) |
| add_to_hard_reg_set (®_rename_p->unavailable_hard_regs, |
| Pmode, HARD_FRAME_POINTER_REGNUM); |
| } |
| |
| #ifdef STACK_REGS |
  /* For the stack registers the presence of FIRST_STACK_REG in USED_REGS
     is equivalent to all stack regs being in this set.
     I.e. no stack register can be renamed, and even if it's an original
     register here we make sure it won't be lifted over its previous def
     (its previous def will appear as if it were a FIRST_STACK_REG def).
     The HARD_REGNO_RENAME_OK covers other cases in the condition below.  */
| if (IN_RANGE (REGNO (orig_dest), FIRST_STACK_REG, LAST_STACK_REG) |
| && REGNO_REG_SET_P (used_regs, FIRST_STACK_REG)) |
| IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs, |
| sel_hrd.stack_regs); |
| #endif |
| |
| /* If there's a call on this path, make regs from call_used_reg_set |
| unavailable. */ |
| if (def->crosses_call) |
| IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs, |
| call_used_reg_set); |
| |
| /* Stop here before reload: we need FRAME_REGS, STACK_REGS, and crosses_call, |
| but not register classes. */ |
| if (!reload_completed) |
| return; |
| |
| /* Leave regs as 'available' only from the current |
| register class. */ |
| COPY_HARD_REG_SET (reg_rename_p->available_for_renaming, |
| reg_class_contents[cl]); |
| |
| mode = GET_MODE (orig_dest); |
| |
| /* Leave only registers available for this mode. */ |
| if (!sel_hrd.regs_for_mode_ok[mode]) |
| init_regs_for_mode (mode); |
| AND_HARD_REG_SET (reg_rename_p->available_for_renaming, |
| sel_hrd.regs_for_mode[mode]); |
| |
  /* Exclude registers that are partially call clobbered, unless the
     original register is itself partially call clobbered.  */
| if (def->crosses_call |
| && ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode)) |
| AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming, |
| sel_hrd.regs_for_call_clobbered[mode]); |
| |
| /* Leave only those that are ok to rename. */ |
| EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming, |
| 0, cur_reg, hrsi) |
| { |
| int nregs; |
| int i; |
| |
| nregs = hard_regno_nregs[cur_reg][mode]; |
| gcc_assert (nregs > 0); |
| |
| for (i = nregs - 1; i >= 0; --i) |
| if (! sel_hard_regno_rename_ok (regno + i, cur_reg + i)) |
| break; |
| |
| if (i >= 0) |
| CLEAR_HARD_REG_BIT (reg_rename_p->available_for_renaming, |
| cur_reg); |
| } |
| |
| AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming, |
| reg_rename_p->unavailable_hard_regs); |
| |
  /* REGNO is always ok from the renaming point of view, but it really
     could already be in *unavailable_hard_regs, so set it here instead
     of there.  */
| SET_HARD_REG_BIT (reg_rename_p->available_for_renaming, regno); |
| } |
| |
| /* reg_rename_tick[REG1] > reg_rename_tick[REG2] if REG1 was chosen as the |
| best register more recently than REG2. */ |
| static int reg_rename_tick[FIRST_PSEUDO_REGISTER]; |
| |
| /* Indicates the number of times renaming happened before the current one. */ |
| static int reg_rename_this_tick; |
| |
/* Choose, among the free registers, one that is suitable for storing
   the rhs value.

   ORIGINAL_INSNS is the list of insns where the operation (rhs)
   originally appears.  There could be multiple original operations
   for a single rhs since we move it up and merge along different
   paths.

   Some code is adapted from regrename.c (regrename_optimize).
   If the original register is available, the function returns it.
   Otherwise it performs the checks, so the new register should
   comply with the following:
    - it should not violate any live ranges (such registers are in
      REG_RENAME_P->available_for_renaming set);
    - it should not be in the HARD_REGS_USED regset;
    - it should be in the class compatible with the original uses;
    - it should not be clobbered through a reference with a different mode;
    - if we're in a leaf function, then the new register should
      not be in LEAF_REGISTERS;
    - etc.

   If several registers meet the conditions, the register with the smallest
   tick is returned to achieve more even register allocation.

   If the original register seems to be ok, we set *IS_ORIG_REG_P_PTR to true.

   If no register satisfies the above conditions, NULL_RTX is returned.  */
| static rtx |
| choose_best_reg_1 (HARD_REG_SET hard_regs_used, |
| struct reg_rename *reg_rename_p, |
| def_list_t original_insns, bool *is_orig_reg_p_ptr) |
| { |
| int best_new_reg; |
| unsigned cur_reg; |
| machine_mode mode = VOIDmode; |
| unsigned regno, i, n; |
| hard_reg_set_iterator hrsi; |
| def_list_iterator di; |
| def_t def; |
| |
| /* If original register is available, return it. */ |
| *is_orig_reg_p_ptr = true; |
| |
| FOR_EACH_DEF (def, di, original_insns) |
| { |
| rtx orig_dest = SET_DEST (PATTERN (def->orig_insn)); |
| |
| gcc_assert (REG_P (orig_dest)); |
| |
| /* Check that all original operations have the same mode. |
| This is done for the next loop; if we'd return from this |
| loop, we'd check only part of them, but in this case |
| it doesn't matter. */ |
| if (mode == VOIDmode) |
| mode = GET_MODE (orig_dest); |
| gcc_assert (mode == GET_MODE (orig_dest)); |
| |
| regno = REGNO (orig_dest); |
| for (i = 0, n = hard_regno_nregs[regno][mode]; i < n; i++) |
| if (TEST_HARD_REG_BIT (hard_regs_used, regno + i)) |
| break; |
| |
| /* All hard registers are available. */ |
| if (i == n) |
| { |
| gcc_assert (mode != VOIDmode); |
| |
| /* Hard registers should not be shared. */ |
| return gen_rtx_REG (mode, regno); |
| } |
| } |
| |
| *is_orig_reg_p_ptr = false; |
| best_new_reg = -1; |
| |
| /* Among all available regs choose the register that was |
| allocated earliest. */ |
| EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming, |
| 0, cur_reg, hrsi) |
| if (! TEST_HARD_REG_BIT (hard_regs_used, cur_reg)) |
| { |
| /* Check that all hard regs for mode are available. */ |
| for (i = 1, n = hard_regno_nregs[cur_reg][mode]; i < n; i++) |
| if (TEST_HARD_REG_BIT (hard_regs_used, cur_reg + i) |
| || !TEST_HARD_REG_BIT (reg_rename_p->available_for_renaming, |
| cur_reg + i)) |
| break; |
| |
| if (i < n) |
| continue; |
| |
| /* All hard registers are available. */ |
| if (best_new_reg < 0 |
| || reg_rename_tick[cur_reg] < reg_rename_tick[best_new_reg]) |
| { |
| best_new_reg = cur_reg; |
| |
| /* Return immediately when we know there's no better reg. */ |
| if (! reg_rename_tick[best_new_reg]) |
| break; |
| } |
| } |
| |
| if (best_new_reg >= 0) |
| { |
| /* Use the check from the above loop. */ |
| gcc_assert (mode != VOIDmode); |
| return gen_rtx_REG (mode, best_new_reg); |
| } |
| |
| return NULL_RTX; |
| } |
| |
| /* A wrapper around choose_best_reg_1 () to verify that we make correct |
| assumptions about available registers in the function. */ |
| static rtx |
| choose_best_reg (HARD_REG_SET hard_regs_used, struct reg_rename *reg_rename_p, |
| def_list_t original_insns, bool *is_orig_reg_p_ptr) |
| { |
| rtx best_reg = choose_best_reg_1 (hard_regs_used, reg_rename_p, |
| original_insns, is_orig_reg_p_ptr); |
| |
| /* FIXME loop over hard_regno_nregs here. */ |
| gcc_assert (best_reg == NULL_RTX |
| || TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, REGNO (best_reg))); |
| |
| return best_reg; |
| } |
| |
/* Choose the pseudo register for storing the rhs value.  As this is supposed
   to work before reload, we return either the original register or create
   a new one.  The parameters are the same as in the choose_best_reg_1
   function, except that USED_REGS may contain pseudos.
   If we work with hard regs, check also REG_RENAME_P->UNAVAILABLE_HARD_REGS.

   TODO: take register pressure into account while doing this.  Up to this
   moment, this function has never returned NULL for pseudos, but we should
   not rely on this.  */
| static rtx |
| choose_best_pseudo_reg (regset used_regs, |
| struct reg_rename *reg_rename_p, |
| def_list_t original_insns, bool *is_orig_reg_p_ptr) |
| { |
| def_list_iterator i; |
| def_t def; |
| machine_mode mode = VOIDmode; |
| bool bad_hard_regs = false; |
| |
| /* We should not use this after reload. */ |
| gcc_assert (!reload_completed); |
| |
| /* If original register is available, return it. */ |
| *is_orig_reg_p_ptr = true; |
| |
| FOR_EACH_DEF (def, i, original_insns) |
| { |
| rtx dest = SET_DEST (PATTERN (def->orig_insn)); |
| int orig_regno; |
| |
| gcc_assert (REG_P (dest)); |
| |
| /* Check that all original operations have the same mode. */ |
| if (mode == VOIDmode) |
| mode = GET_MODE (dest); |
| else |
| gcc_assert (mode == GET_MODE (dest)); |
| orig_regno = REGNO (dest); |
| |
| /* Check that nothing in used_regs intersects with orig_regno. When |
| we have a hard reg here, still loop over hard_regno_nregs. */ |
| if (HARD_REGISTER_NUM_P (orig_regno)) |
| { |
| int j, n; |
| for (j = 0, n = hard_regno_nregs[orig_regno][mode]; j < n; j++) |
| if (REGNO_REG_SET_P (used_regs, orig_regno + j)) |
| break; |
| if (j < n) |
| continue; |
| } |
| else |
| { |
| if (REGNO_REG_SET_P (used_regs, orig_regno)) |
| continue; |
| } |
| if (HARD_REGISTER_NUM_P (orig_regno)) |
| { |
| gcc_assert (df_regs_ever_live_p (orig_regno)); |
| |
| /* For hard registers, we have to check hardware imposed |
| limitations (frame/stack registers, calls crossed). */ |
| if (!TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, |
| orig_regno)) |
| { |
	      /* Don't let the register cross a call if it doesn't already
		 cross one.  This condition is written in accordance with
		 the one in sched-deps.c, sched_analyze_reg ().  */
| if (!reg_rename_p->crosses_call |
| || REG_N_CALLS_CROSSED (orig_regno) > 0) |
| return gen_rtx_REG (mode, orig_regno); |
| } |
| |
| bad_hard_regs = true; |
| } |
| else |
| return dest; |
| } |
| |
| *is_orig_reg_p_ptr = false; |
| |
| /* We had some original hard registers that couldn't be used. |
| Those were likely special. Don't try to create a pseudo. */ |
| if (bad_hard_regs) |
| return NULL_RTX; |
| |
| /* We haven't found a register from original operations. Get a new one. |
| FIXME: control register pressure somehow. */ |
| { |
| rtx new_reg = gen_reg_rtx (mode); |
| |
| gcc_assert (mode != VOIDmode); |
| |
| max_regno = max_reg_num (); |
| maybe_extend_reg_info_p (); |
| REG_N_CALLS_CROSSED (REGNO (new_reg)) = reg_rename_p->crosses_call ? 1 : 0; |
| |
| return new_reg; |
| } |
| } |
| |
/* Verify that the target availability of EXPR (EXPR_TARGET_AVAILABLE) is
   consistent with USED_REGS and REG_RENAME_P->UNAVAILABLE_HARD_REGS.  */
| static void |
| verify_target_availability (expr_t expr, regset used_regs, |
| struct reg_rename *reg_rename_p) |
| { |
| unsigned n, i, regno; |
| machine_mode mode; |
| bool target_available, live_available, hard_available; |
| |
| if (!REG_P (EXPR_LHS (expr)) || EXPR_TARGET_AVAILABLE (expr) < 0) |
| return; |
| |
| regno = expr_dest_regno (expr); |
| mode = GET_MODE (EXPR_LHS (expr)); |
| target_available = EXPR_TARGET_AVAILABLE (expr) == 1; |
| n = HARD_REGISTER_NUM_P (regno) ? hard_regno_nregs[regno][mode] : 1; |
| |
| live_available = hard_available = true; |
| for (i = 0; i < n; i++) |
| { |
| if (bitmap_bit_p (used_regs, regno + i)) |
| live_available = false; |
| if (TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno + i)) |
| hard_available = false; |
| } |
| |
| /* When target is not available, it may be due to hard register |
| restrictions, e.g. crosses calls, so we check hard_available too. */ |
| if (target_available) |
| gcc_assert (live_available); |
| else |
    /* Check only if we haven't scheduled something on the previous fence,
       because due to MAX_SOFTWARE_LOOKAHEAD_WINDOW_SIZE issues
       and having more than one fence, we may end up having targ_un in a block
       in whose successors the target register is actually available.

       The last condition handles the case when a dependence from a call insn
       was created in sched-deps.c for insns with destination registers that
       never crossed a call before, but do cross one after our code motion.

       FIXME: in the latter case, we just uselessly called find_used_regs,
       because we can't move this expression with any other register
       either.  */
| gcc_assert (scheduled_something_on_previous_fence || !live_available |
| || !hard_available |
| || (!reload_completed && reg_rename_p->crosses_call |
| && REG_N_CALLS_CROSSED (regno) == 0)); |
| } |
| |
/* Collect registers that are unavailable due to liveness for EXPR from BNDS
   into USED_REGS.  Save additional information about registers available
   for renaming and those unavailable due to hardware restrictions into
   the REG_RENAME_P structure.  Save the original insns into the
   ORIGINAL_INSNS list.  */
| static void |
| collect_unavailable_regs_from_bnds (expr_t expr, blist_t bnds, regset used_regs, |
| struct reg_rename *reg_rename_p, |
| def_list_t *original_insns) |
| { |
| for (; bnds; bnds = BLIST_NEXT (bnds)) |
| { |
| bool res; |
| av_set_t orig_ops = NULL; |
| bnd_t bnd = BLIST_BND (bnds); |
| |
| /* If the chosen best expr doesn't belong to the current boundary, |
| skip it. */ |
| if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr))) |
| continue; |
| |
| /* Put in ORIG_OPS all exprs from this boundary that become |
| EXPR when moved to its top. */ |
| orig_ops = find_sequential_best_exprs (bnd, expr, false); |
| |
| /* Compute the used regs and OR them into USED_REGS. */ |
| res = find_used_regs (BND_TO (bnd), orig_ops, used_regs, |
| reg_rename_p, original_insns); |
| |
| /* FIXME: the assert holds until we have several boundaries. */ |
| gcc_assert (res); |
| av_set_clear (&orig_ops); |
| } |
| } |
| |
| /* Return TRUE if it is possible to replace LHSes of ORIG_INSNS with BEST_REG. |
| If BEST_REG is valid, replace LHS of EXPR with it. */ |
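| /* For illustration (a hypothetical separable operation): if an original |
| insn is "r1 = r2 + r3" and BEST_REG is r7, we verify that both the |
| renamed operation "r7 = r2 + r3" and the copy "r1 = r7" are valid at |
| the original place, and only then rewrite EXPR's LHS to r7. */ |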
| static bool |
| try_replace_dest_reg (ilist_t orig_insns, rtx best_reg, expr_t expr) |
| { |
| /* Check whether we'll be able to generate the insn |
| 'dest := best_reg' at the place of each original operation. */ |
| for (; orig_insns; orig_insns = ILIST_NEXT (orig_insns)) |
| { |
| insn_t orig_insn = DEF_LIST_DEF (orig_insns)->orig_insn; |
| |
| gcc_assert (EXPR_SEPARABLE_P (INSN_EXPR (orig_insn))); |
| |
| if (REGNO (best_reg) != REGNO (INSN_LHS (orig_insn)) |
| && (! replace_src_with_reg_ok_p (orig_insn, best_reg) |
| || ! replace_dest_with_reg_ok_p (orig_insn, best_reg))) |
| return false; |
| } |
| |
| /* Make sure that EXPR has the right destination |
| register. */ |
| if (expr_dest_regno (expr) != REGNO (best_reg)) |
| replace_dest_with_reg_in_expr (expr, best_reg); |
| else |
| EXPR_TARGET_AVAILABLE (expr) = 1; |
| |
| return true; |
| } |
| |
| /* Select and assign best register to EXPR searching from BNDS. |
| Set *IS_ORIG_REG_P to TRUE if original register was selected. |
| Return FALSE if no register can be chosen, which could happen when: |
| * EXPR_SEPARABLE_P is true but we were unable to find a suitable register; |
| * EXPR_SEPARABLE_P is false but the insn sets/clobbers one of the registers |
| that are used on the moving path. */ |
| static bool |
| find_best_reg_for_expr (expr_t expr, blist_t bnds, bool *is_orig_reg_p) |
| { |
| static struct reg_rename reg_rename_data; |
| |
| regset used_regs; |
| def_list_t original_insns = NULL; |
| bool reg_ok; |
| |
| *is_orig_reg_p = false; |
| |
| /* Don't bother to do anything if this insn doesn't set any registers. */ |
| if (bitmap_empty_p (VINSN_REG_SETS (EXPR_VINSN (expr))) |
| && bitmap_empty_p (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)))) |
| return true; |
| |
| used_regs = get_clear_regset_from_pool (); |
| CLEAR_HARD_REG_SET (reg_rename_data.unavailable_hard_regs); |
| |
| collect_unavailable_regs_from_bnds (expr, bnds, used_regs, ®_rename_data, |
| &original_insns); |
| |
| #ifdef ENABLE_CHECKING |
| /* If after reload, make sure we're working with hard regs here. */ |
| if (reload_completed) |
| { |
| reg_set_iterator rsi; |
| unsigned i; |
| |
| EXECUTE_IF_SET_IN_REG_SET (used_regs, FIRST_PSEUDO_REGISTER, i, rsi) |
| gcc_unreachable (); |
| } |
| #endif |
| |
| if (EXPR_SEPARABLE_P (expr)) |
| { |
| rtx best_reg = NULL_RTX; |
| /* Check that we have computed availability of a target register |
| correctly. */ |
| verify_target_availability (expr, used_regs, ®_rename_data); |
| |
| /* Turn everything in hard regs after reload. */ |
| if (reload_completed) |
| { |
| HARD_REG_SET hard_regs_used; |
| REG_SET_TO_HARD_REG_SET (hard_regs_used, used_regs); |
| |
| /* Join hard registers unavailable due to register class |
| restrictions and live range intersection. */ |
| IOR_HARD_REG_SET (hard_regs_used, |
| reg_rename_data.unavailable_hard_regs); |
| |
| best_reg = choose_best_reg (hard_regs_used, ®_rename_data, |
| original_insns, is_orig_reg_p); |
| } |
| else |
| best_reg = choose_best_pseudo_reg (used_regs, ®_rename_data, |
| original_insns, is_orig_reg_p); |
| |
| if (!best_reg) |
| reg_ok = false; |
| else if (*is_orig_reg_p) |
| { |
| /* In case of unification BEST_REG may be different from EXPR's LHS |
| when EXPR's LHS is unavailable, and there is another LHS among |
| ORIGINAL_INSNS. */ |
| reg_ok = try_replace_dest_reg (original_insns, best_reg, expr); |
| } |
| else |
| { |
| /* Forbid renaming of low-cost insns. */ |
| if (sel_vinsn_cost (EXPR_VINSN (expr)) < 2) |
| reg_ok = false; |
| else |
| reg_ok = try_replace_dest_reg (original_insns, best_reg, expr); |
| } |
| } |
| else |
| { |
| /* EXPR is not separable here, so just make sure the insn doesn't set |
| or clobber any of the registers in USED_REGS or any unavailable |
| hard regs. */ |
| if (vinsn_writes_one_of_regs_p (EXPR_VINSN (expr), used_regs, |
| reg_rename_data.unavailable_hard_regs)) |
| { |
| reg_ok = false; |
| gcc_assert (EXPR_TARGET_AVAILABLE (expr) <= 0); |
| } |
| else |
| { |
| reg_ok = true; |
| gcc_assert (EXPR_TARGET_AVAILABLE (expr) != 0); |
| } |
| } |
| |
| ilist_clear (&original_insns); |
| return_regset_to_pool (used_regs); |
| |
| return reg_ok; |
| } |
| |
| |
| /* Return true if the dependence described by DS can be overcome. */ |
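| /* For example (using the ds_t encoding from sched-int.h): a dependence |
| with only BEGIN_DATA set can be overcome by turning the load into a |
| data-speculative one, provided BEGIN_DATA is in spec_info->mask and |
| the dependence weakness is at least spec_info->data_weakness_cutoff. */ |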
| static bool |
| can_speculate_dep_p (ds_t ds) |
| { |
| if (spec_info == NULL) |
| return false; |
| |
| /* Leave only speculative data. */ |
| ds &= SPECULATIVE; |
| |
| if (ds == 0) |
| return false; |
| |
| { |
| /* FIXME: make sched-deps.c produce only those non-hard dependencies |
| that we can overcome. */ |
| ds_t spec_mask = spec_info->mask; |
| |
| if ((ds & spec_mask) != ds) |
| return false; |
| } |
| |
| if (ds_weak (ds) < spec_info->data_weakness_cutoff) |
| return false; |
| |
| return true; |
| } |
| |
| /* Get a speculation check instruction. |
| C_EXPR is a speculative expression, |
| CHECK_DS describes speculations that should be checked, |
| ORIG_INSN is the original non-speculative insn in the stream. */ |
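| /* For illustration, on IA-64 (the primary user of this machinery) |
| a data-speculative load such as "ld.a r1 = [r2]" is paired with a |
| "chk.a" check emitted at the original location; when the target asks |
| for a branchy check, the check branches to a recovery block holding |
| a copy of the load. */ |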
| static insn_t |
| create_speculation_check (expr_t c_expr, ds_t check_ds, insn_t orig_insn) |
| { |
| rtx check_pattern; |
| rtx_insn *insn_rtx; |
| insn_t insn; |
| basic_block recovery_block; |
| rtx_insn *label; |
| |
| /* Create a recovery block if target is going to emit branchy check, or if |
| ORIG_INSN was speculative already. */ |
| if (targetm.sched.needs_block_p (check_ds) |
| || EXPR_SPEC_DONE_DS (INSN_EXPR (orig_insn)) != 0) |
| { |
| recovery_block = sel_create_recovery_block (orig_insn); |
| label = BB_HEAD (recovery_block); |
| } |
| else |
| { |
| recovery_block = NULL; |
| label = NULL; |
| } |
| |
| /* Get pattern of the check. */ |
| check_pattern = targetm.sched.gen_spec_check (EXPR_INSN_RTX (c_expr), label, |
| check_ds); |
| |
| gcc_assert (check_pattern != NULL); |
| |
| /* Emit check. */ |
| insn_rtx = create_insn_rtx_from_pattern (check_pattern, label); |
| |
| insn = sel_gen_insn_from_rtx_after (insn_rtx, INSN_EXPR (orig_insn), |
| INSN_SEQNO (orig_insn), orig_insn); |
| |
| /* Make check to be non-speculative. */ |
| EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0; |
| INSN_SPEC_CHECKED_DS (insn) = check_ds; |
| |
| /* Decrease priority of check by difference of load/check instruction |
| latencies. */ |
| EXPR_PRIORITY (INSN_EXPR (insn)) -= (sel_vinsn_cost (INSN_VINSN (orig_insn)) |
| - sel_vinsn_cost (INSN_VINSN (insn))); |
| |
| /* Emit copy of original insn (though with replaced target register, |
| if needed) to the recovery block. */ |
| if (recovery_block != NULL) |
| { |
| rtx twin_rtx; |
| |
| twin_rtx = copy_rtx (PATTERN (EXPR_INSN_RTX (c_expr))); |
| twin_rtx = create_insn_rtx_from_pattern (twin_rtx, NULL_RTX); |
| sel_gen_recovery_insn_from_rtx_after (twin_rtx, |
| INSN_EXPR (orig_insn), |
| INSN_SEQNO (insn), |
| bb_note (recovery_block)); |
| } |
| |
| /* If we've generated a data speculation check, make sure |
| that all the bookkeeping instructions we'll create during |
| this move_op () will allocate an ALAT entry so that the |
| check won't fail. |
| In case of control speculation we must convert C_EXPR to control |
| speculative mode, because failing to do so will result in an exception |
| thrown by the non-control-speculative load. */ |
| check_ds = ds_get_max_dep_weak (check_ds); |
| speculate_expr (c_expr, check_ds); |
| |
| return insn; |
| } |
| |
| /* True when INSN is a "regN = regN" copy. */ |
| static bool |
| identical_copy_p (rtx insn) |
| { |
| rtx lhs, rhs, pat; |
| |
| pat = PATTERN (insn); |
| |
| if (GET_CODE (pat) != SET) |
| return false; |
| |
| lhs = SET_DEST (pat); |
| if (!REG_P (lhs)) |
| return false; |
| |
| rhs = SET_SRC (pat); |
| if (!REG_P (rhs)) |
| return false; |
| |
| return REGNO (lhs) == REGNO (rhs); |
| } |
| |
| /* Undo all transformations on *AV_PTR that were done when |
| moving through INSN. */ |
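| /* For illustration (hypothetical): if moving through INSN "y = x" once |
| substituted the expr "z = y*2" into "z = x*2", the history vector |
| records this, and undoing restores "z = y*2"; if the history doesn't |
| match (e.g. on a substituted bookkeeping copy), the substitution is |
| redone the hard way instead. */ |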
| static void |
| undo_transformations (av_set_t *av_ptr, rtx_insn *insn) |
| { |
| av_set_iterator av_iter; |
| expr_t expr; |
| av_set_t new_set = NULL; |
| |
| /* First, kill any EXPR that uses registers set by the insn. This is |
| required for correctness. */ |
| FOR_EACH_EXPR_1 (expr, av_iter, av_ptr) |
| if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (expr)) |
| && bitmap_intersect_p (INSN_REG_SETS (insn), |
| VINSN_REG_USES (EXPR_VINSN (expr))) |
| /* When an insn looks like 'r1 = r1', we could substitute through |
| it, but the above condition will still hold. This happened with |
| gcc.c-torture/execute/961125-1.c. */ |
| && !identical_copy_p (insn)) |
| { |
| if (sched_verbose >= 6) |
| sel_print ("Expr %d removed due to use/set conflict\n", |
| INSN_UID (EXPR_INSN_RTX (expr))); |
| av_set_iter_remove (&av_iter); |
| } |
| |
| /* Undo transformations looking at the history vector. */ |
| FOR_EACH_EXPR (expr, av_iter, *av_ptr) |
| { |
| int index = find_in_history_vect (EXPR_HISTORY_OF_CHANGES (expr), |
| insn, EXPR_VINSN (expr), true); |
| |
| if (index >= 0) |
| { |
| expr_history_def *phist; |
| |
| phist = &EXPR_HISTORY_OF_CHANGES (expr)[index]; |
| |
| switch (phist->type) |
| { |
| case TRANS_SPECULATION: |
| { |
| ds_t old_ds, new_ds; |
| |
| /* Compute the difference between old and new speculative |
| statuses: that's what we need to check. |
| Earlier we used to assert that the status will really |
| change. This no longer works because only the probability |
| bits in the status may have changed during compute_av_set, |
| and in the case of merging different probabilities of the |
| same speculative status along different paths we do not |
| record this in the history vector. */ |
| old_ds = phist->spec_ds; |
| new_ds = EXPR_SPEC_DONE_DS (expr); |
| |
| old_ds &= SPECULATIVE; |
| new_ds &= SPECULATIVE; |
| new_ds &= ~old_ds; |
| |
| EXPR_SPEC_TO_CHECK_DS (expr) |= new_ds; |
| break; |
| } |
| case TRANS_SUBSTITUTION: |
| { |
| expr_def _tmp_expr, *tmp_expr = &_tmp_expr; |
| vinsn_t new_vi; |
| bool add = true; |
| |
| new_vi = phist->old_expr_vinsn; |
| |
| gcc_assert (VINSN_SEPARABLE_P (new_vi) |
| == EXPR_SEPARABLE_P (expr)); |
| copy_expr (tmp_expr, expr); |
| |
| if (vinsn_equal_p (phist->new_expr_vinsn, |
| EXPR_VINSN (tmp_expr))) |
| change_vinsn_in_expr (tmp_expr, new_vi); |
| else |
| /* This happens when we're unsubstituting on a bookkeeping |
| copy, which was in turn substituted. The history is wrong |
| in this case. Do it the hard way. */ |
| add = substitute_reg_in_expr (tmp_expr, insn, true); |
| if (add) |
| av_set_add (&new_set, tmp_expr); |
| clear_expr (tmp_expr); |
| break; |
| } |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| } |
| |
| av_set_union_and_clear (av_ptr, &new_set, NULL); |
| } |
| |
| |
| /* Moveup_* helpers for code motion and computing av sets. */ |
| |
| /* Propagates EXPR inside an insn group through THROUGH_INSN. |
| The difference from the below function is that only substitution is |
| performed. */ |
| static enum MOVEUP_EXPR_CODE |
| moveup_expr_inside_insn_group (expr_t expr, insn_t through_insn) |
| { |
| vinsn_t vi = EXPR_VINSN (expr); |
| ds_t *has_dep_p; |
| ds_t full_ds; |
| |
| /* Do this only inside insn group. */ |
| gcc_assert (INSN_SCHED_CYCLE (through_insn) > 0); |
| |
| full_ds = has_dependence_p (expr, through_insn, &has_dep_p); |
| if (full_ds == 0) |
| return MOVEUP_EXPR_SAME; |
| |
| /* Substitution is the only possible transformation in this case. */ |
| if (has_dep_p[DEPS_IN_RHS]) |
| { |
| /* Can't substitute UNIQUE VINSNs. */ |
| gcc_assert (!VINSN_UNIQUE_P (vi)); |
| |
| if (can_substitute_through_p (through_insn, |
| has_dep_p[DEPS_IN_RHS]) |
| && substitute_reg_in_expr (expr, through_insn, false)) |
| { |
| EXPR_WAS_SUBSTITUTED (expr) = true; |
| return MOVEUP_EXPR_CHANGED; |
| } |
| |
| /* Don't care about this, as even true dependencies may be allowed |
| in an insn group. */ |
| return MOVEUP_EXPR_SAME; |
| } |
| |
| /* This can catch output dependencies in COND_EXECs. */ |
| if (has_dep_p[DEPS_IN_INSN]) |
| return MOVEUP_EXPR_NULL; |
| |
| /* This is either an output or an anti dependence, which usually has |
| zero latency. Allow it here; if we're wrong, tick_check_p |
| will fix it. */ |
| gcc_assert (has_dep_p[DEPS_IN_LHS]); |
| return MOVEUP_EXPR_AS_RHS; |
| } |
| |
| /* True when a trapping EXPR cannot be moved through THROUGH_INSN. */ |
| #define CANT_MOVE_TRAPPING(expr, through_insn) \ |
| (VINSN_MAY_TRAP_P (EXPR_VINSN (expr)) \ |
| && !sel_insn_has_single_succ_p ((through_insn), SUCCS_ALL) \ |
| && !sel_insn_is_speculation_check (through_insn)) |
| |
| /* True when a conflict on a target register was found during moveup_expr. */ |
| static bool was_target_conflict = false; |
| |
| /* Return true when moving a debug INSN across THROUGH_INSN will |
| create a bookkeeping block. We don't want to create such blocks, |
| for they would cause codegen differences between compilations with |
| and without debug info. */ |
| |
| static bool |
| moving_insn_creates_bookkeeping_block_p (insn_t insn, |
| insn_t through_insn) |
| { |
| basic_block bbi, bbt; |
| edge e1, e2; |
| edge_iterator ei1, ei2; |
| |
| if (!bookkeeping_can_be_created_if_moved_through_p (through_insn)) |
| { |
| if (sched_verbose >= 9) |
| sel_print ("no bookkeeping required: "); |
| return FALSE; |
| } |
| |
| bbi = BLOCK_FOR_INSN (insn); |
| |
| if (EDGE_COUNT (bbi->preds) == 1) |
| { |
| if (sched_verbose >= 9) |
| sel_print ("only one pred edge: "); |
| return TRUE; |
| } |
| |
| bbt = BLOCK_FOR_INSN (through_insn); |
| |
| FOR_EACH_EDGE (e1, ei1, bbt->succs) |
| { |
| FOR_EACH_EDGE (e2, ei2, bbi->preds) |
| { |
| if (find_block_for_bookkeeping (e1, e2, TRUE)) |
| { |
| if (sched_verbose >= 9) |
| sel_print ("found existing block: "); |
| return FALSE; |
| } |
| } |
| } |
| |
| if (sched_verbose >= 9) |
| sel_print ("would create bookkeeping block: "); |
| |
| return TRUE; |
| } |
| |
| /* Return true when renaming EXPR to a fresh pseudo would create |
| implicit clobbers that conflict with THROUGH_INSN. */ |
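| /* For illustration (hypothetical): renaming the hard-register LHS of |
| EXPR to a fresh pseudo may change which insn alternatives are usable, |
| and those alternatives may implicitly clobber extra hard registers; |
| if THROUGH_INSN sets, clobbers or uses one of them, moving EXPR up |
| would be unsafe, so we report a conflict. */ |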
| static bool |
| implicit_clobber_conflict_p (insn_t through_insn, expr_t expr) |
| { |
| HARD_REG_SET temp; |
| rtx_insn *insn; |
| rtx reg, rhs, pat; |
| hard_reg_set_iterator hrsi; |
| unsigned regno; |
| bool valid; |
| |
| /* Make a new pseudo register. */ |
| reg = gen_reg_rtx (GET_MODE (EXPR_LHS (expr))); |
| max_regno = max_reg_num (); |
| maybe_extend_reg_info_p (); |
| |
| /* Validate a change and bail out early. */ |
| insn = EXPR_INSN_RTX (expr); |
| validate_change (insn, &SET_DEST (PATTERN (insn)), reg, true); |
| valid = verify_changes (0); |
| cancel_changes (0); |
| if (!valid) |
| { |
| if (sched_verbose >= 6) |
| sel_print ("implicit clobbers failed validation, "); |
| return true; |
| } |
| |
| /* Make a new insn with it. */ |
| rhs = copy_rtx (VINSN_RHS (EXPR_VINSN (expr))); |
| pat = gen_rtx_SET (VOIDmode, reg, rhs); |
| start_sequence (); |
| insn = emit_insn (pat); |
| end_sequence (); |
| |
| /* Calculate implicit clobbers. */ |
| extract_insn (insn); |
| preprocess_constraints (insn); |
| alternative_mask preferred = get_preferred_alternatives (insn); |
| ira_implicitly_set_insn_hard_regs (&temp, preferred); |
| AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs); |
| |
| /* If any implicit clobber registers intersect with regular ones in |
| through_insn, we have a dependency and thus bail out. */ |
| EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi) |
| { |
| vinsn_t vi = INSN_VINSN (through_insn); |
| if (bitmap_bit_p (VINSN_REG_SETS (vi), regno) |
| || bitmap_bit_p (VINSN_REG_CLOBBERS (vi), regno) |
| || bitmap_bit_p (VINSN_REG_USES (vi), regno)) |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Modifies EXPR so it can be moved through THROUGH_INSN, |
| performing necessary transformations. Record the type of transformation |
| made in PTRANS_TYPE, when it is not NULL. When INSIDE_INSN_GROUP, |
| permit all dependencies except true ones, and try to remove those |
| too via forward substitution. All cases when a non-eliminable |
| non-zero cost dependency exists inside an insn group will be fixed |
| in tick_check_p instead. */ |
| static enum MOVEUP_EXPR_CODE |
| moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group, |
| enum local_trans_type *ptrans_type) |
| { |
| vinsn_t vi = EXPR_VINSN (expr); |
| insn_t insn = VINSN_INSN_RTX (vi); |
| bool was_changed = false; |
| bool as_rhs = false; |
| ds_t *has_dep_p; |
| ds_t full_ds; |
| |
| /* ??? We use dependencies of non-debug insns on debug insns to |
| indicate that the debug insns need to be reset if the non-debug |
| insn is pulled ahead of it. It's hard to figure out how to |
| introduce such a notion in sel-sched, but it already fails to |
| support debug insns in other ways, so we just go ahead and |
| let the debug insns go corrupt for now. */ |
| if (DEBUG_INSN_P (through_insn) && !DEBUG_INSN_P (insn)) |
| return MOVEUP_EXPR_SAME; |
| |
| /* When inside_insn_group, delegate to the helper. */ |
| if (inside_insn_group) |
| return moveup_expr_inside_insn_group (expr, through_insn); |
| |
| /* Deal with unique insns and control dependencies. */ |
| if (VINSN_UNIQUE_P (vi)) |
| { |
| /* We can move jumps without side effects, or jumps that are |
| mutually exclusive with instruction THROUGH_INSN (in all cases |
| where dependencies allow us to do so and the jump is not speculative). */ |
| if (control_flow_insn_p (insn)) |
| { |
| basic_block fallthru_bb; |
| |
| /* Do not move checks and do not move jumps through other |
| jumps. */ |
| if (control_flow_insn_p (through_insn) |
| || sel_insn_is_speculation_check (insn)) |
| return MOVEUP_EXPR_NULL; |
| |
| /* Don't move jumps through CFG joins. */ |
| if (bookkeeping_can_be_created_if_moved_through_p (through_insn)) |
| return MOVEUP_EXPR_NULL; |
| |
| /* The jump should have a clear fallthru block, and |
| this block should be in the current region. */ |
| if ((fallthru_bb = fallthru_bb_of_jump (insn)) == NULL |
| || ! in_current_region_p (fallthru_bb)) |
| return MOVEUP_EXPR_NULL; |
| |
| /* And it should be mutually exclusive with through_insn. */ |
| if (! sched_insns_conditions_mutex_p (insn, through_insn) |
| && ! DEBUG_INSN_P (through_insn)) |
| return MOVEUP_EXPR_NULL; |
| } |
| |
| /* Don't move what we can't move. */ |
| if (EXPR_CANT_MOVE (expr) |
| && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn)) |
| return MOVEUP_EXPR_NULL; |
| |
| /* Don't move SCHED_GROUP instruction through anything. |
| If we don't force this, then it will be possible to start |
| scheduling a sched_group before all its dependencies are |
| resolved. |
| ??? Haifa deals with this issue by delaying the SCHED_GROUP |
| as late as possible through rank_for_schedule. */ |
| if (SCHED_GROUP_P (insn)) |
| return MOVEUP_EXPR_NULL; |
| } |
| else |
| gcc_assert (!control_flow_insn_p (insn)); |
| |
| /* Don't move debug insns if this would require bookkeeping. */ |
| if (DEBUG_INSN_P (insn) |
| && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn) |
| && moving_insn_creates_bookkeeping_block_p (insn, through_insn)) |
| return MOVEUP_EXPR_NULL; |
| |
| /* Deal with data dependencies. */ |
| was_target_conflict = false; |
| full_ds = has_dependence_p (expr, through_insn, &has_dep_p); |
| if (full_ds == 0) |
| { |
| if (!CANT_MOVE_TRAPPING (expr, through_insn)) |
| return MOVEUP_EXPR_SAME; |
| } |
| else |
| { |
| /* We can move UNIQUE insn up only as a whole and unchanged, |
| so it shouldn't have any dependencies. */ |
| if (VINSN_UNIQUE_P (vi)) |
| return MOVEUP_EXPR_NULL; |
| } |
| |
| if (full_ds != 0 && can_speculate_dep_p (full_ds)) |
| { |
| int res; |
| |
| res = speculate_expr (expr, full_ds); |
| if (res >= 0) |
| { |
| /* Speculation was successful. */ |
| full_ds = 0; |
| was_changed = (res > 0); |
| if (res == 2) |
| was_target_conflict = true; |
| if (ptrans_type) |
| *ptrans_type = TRANS_SPECULATION; |
| sel_clear_has_dependence (); |
| } |
| } |
| |
| if (has_dep_p[DEPS_IN_INSN]) |
| /* We have some dependency that cannot be discarded. */ |
| return MOVEUP_EXPR_NULL; |
| |
| if (has_dep_p[DEPS_IN_LHS]) |
| { |
| /* Only separable insns can be moved up with the new register. |
| Anyways, we should mark that the original register is |
| unavailable. */ |
| if (!enable_schedule_as_rhs_p || !EXPR_SEPARABLE_P (expr)) |
| return MOVEUP_EXPR_NULL; |
| |
| /* When renaming a hard register to a pseudo before reload, extra |
| dependencies can occur from the implicit clobbers of the insn. |
| Filter out such cases here. */ |
| if (!reload_completed && REG_P (EXPR_LHS (expr)) |
| && HARD_REGISTER_P (EXPR_LHS (expr)) |
| && implicit_clobber_conflict_p (through_insn, expr)) |
| { |
| if (sched_verbose >= 6) |
| sel_print ("implicit clobbers conflict detected, "); |
| return MOVEUP_EXPR_NULL; |
| } |
| EXPR_TARGET_AVAILABLE (expr) = false; |
| was_target_conflict = true; |
| as_rhs = true; |
| } |
| |
| /* At this point we have either separable insns, that will be lifted |
| up only as RHSes, or non-separable insns with no dependency in lhs. |
| If dependency is in RHS, then try to perform substitution and move up |
| substituted RHS: |
| |
| Ex. 1: Ex.2 |
| y = x; y = x; |
| z = y*2; y = y*2; |
| |
| In Ex.1 y*2 can be substituted for x*2 and the whole operation can be |
| moved above y=x assignment as z=x*2. |
| |
| In Ex.2 y*2 also can be substituted for x*2, but only the right hand |
| side can be moved because of the output dependency. The operation was |
| cropped to its rhs above. */ |
| if (has_dep_p[DEPS_IN_RHS]) |
| { |
| ds_t *rhs_dsp = &has_dep_p[DEPS_IN_RHS]; |
| |
| /* Can't substitute UNIQUE VINSNs. */ |
| gcc_assert (!VINSN_UNIQUE_P (vi)); |
| |
| if (can_speculate_dep_p (*rhs_dsp)) |
| { |
| int res; |
| |
| res = speculate_expr (expr, *rhs_dsp); |
| if (res >= 0) |
| { |
| /* Speculation was successful. */ |
| *rhs_dsp = 0; |
| was_changed = (res > 0); |
| if (res == 2) |
| was_target_conflict = true; |
| if (ptrans_type) |
| *ptrans_type = TRANS_SPECULATION; |
| } |
| else |
| return MOVEUP_EXPR_NULL; |
| } |
| else if (can_substitute_through_p (through_insn, |
| *rhs_dsp) |
| && substitute_reg_in_expr (expr, through_insn, false)) |
| { |
| /* ??? We cannot perform substitution AND speculation on the same |
| insn. */ |
| gcc_assert (!was_changed); |
| was_changed = true; |
| if (ptrans_type) |
| *ptrans_type = TRANS_SUBSTITUTION; |
| EXPR_WAS_SUBSTITUTED (expr) = true; |
| } |
| else |
| return MOVEUP_EXPR_NULL; |
| } |
| |
| /* Don't move trapping insns through jumps. |
| This check should be at the end to give a chance to control speculation |
| to perform its duties. */ |
| if (CANT_MOVE_TRAPPING (expr, through_insn)) |
| return MOVEUP_EXPR_NULL; |
| |
| return (was_changed |
| ? MOVEUP_EXPR_CHANGED |
| : (as_rhs |
| ? MOVEUP_EXPR_AS_RHS |
| : MOVEUP_EXPR_SAME)); |
| } |
| |
| /* Try to look at bitmap caches for EXPR and INSN pair, return true |
| if successful. When INSIDE_INSN_GROUP, also try to ignore dependencies |
| that can exist within a parallel group. Write to RES the resulting |
| code for moveup_expr. */ |
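| /* The caches encode three results in two bitmaps (this summary follows |
| update_bitmap_cache below): ANALYZED_DEPS and FOUND_DEPS both set mean |
| MOVEUP_EXPR_NULL; ANALYZED_DEPS set alone means MOVEUP_EXPR_SAME; |
| FOUND_DEPS set alone means MOVEUP_EXPR_AS_RHS; neither set means the |
| pair was not cached. */ |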
| static bool |
| try_bitmap_cache (expr_t expr, insn_t insn, |
| bool inside_insn_group, |
| enum MOVEUP_EXPR_CODE *res) |
| { |
| int expr_uid = INSN_UID (EXPR_INSN_RTX (expr)); |
| |
| /* First check whether we've analyzed this situation already. */ |
| if (bitmap_bit_p (INSN_ANALYZED_DEPS (insn), expr_uid)) |
| { |
| if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid)) |
| { |
| if (sched_verbose >= 6) |
| sel_print ("removed (cached)\n"); |
| *res = MOVEUP_EXPR_NULL; |
| return true; |
| } |
| else |
| { |
| if (sched_verbose >= 6) |
| sel_print ("unchanged (cached)\n"); |
| *res = MOVEUP_EXPR_SAME; |
| return true; |
| } |
| } |
| else if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid)) |
| { |
| if (inside_insn_group) |
| { |
| if (sched_verbose >= 6) |
| sel_print ("unchanged (as RHS, cached, inside insn group)\n"); |
| *res = MOVEUP_EXPR_SAME; |
| return true; |
| } |
| else |
| EXPR_TARGET_AVAILABLE (expr) = false; |
| |
| /* This is the only case when propagation result can change over time, |
| as we can dynamically switch off scheduling as RHS. In this case, |
| just check the flag to reach the correct decision. */ |
| if (enable_schedule_as_rhs_p) |
| { |
| if (sched_verbose >= 6) |
| sel_print ("unchanged (as RHS, cached)\n"); |
| *res = MOVEUP_EXPR_AS_RHS; |
| return true; |
| } |
| else |
| { |
| if (sched_verbose >= 6) |
| sel_print ("removed (cached as RHS, but renaming" |
| " is now disabled)\n"); |
| *res = MOVEUP_EXPR_NULL; |
| return true; |
| } |
| } |
| |
| return false; |
| } |
| |
| /* Try to look at bitmap caches for EXPR and INSN pair, return true |
| if successful. Write to RES the resulting code for moveup_expr. */ |
| static bool |
| try_transformation_cache (expr_t expr, insn_t insn, |
| enum MOVEUP_EXPR_CODE *res) |
| { |
| struct transformed_insns *pti |
| = (struct transformed_insns *) |
| htab_find_with_hash (INSN_TRANSFORMED_INSNS (insn), |
| &EXPR_VINSN (expr), |
| VINSN_HASH_RTX (EXPR_VINSN (expr))); |
| if (pti) |
| { |
| /* This EXPR was already moved through this insn and was |
| changed as a result. Fetch the proper data from |
| the hashtable. */ |
| insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr), |
| INSN_UID (insn), pti->type, |
| pti->vinsn_old, pti->vinsn_new, |
| EXPR_SPEC_DONE_DS (expr)); |
| |
| if (INSN_IN_STREAM_P (VINSN_INSN_RTX (pti->vinsn_new))) |
| pti->vinsn_new = vinsn_copy (pti->vinsn_new, true); |
| change_vinsn_in_expr (expr, pti->vinsn_new); |
| if (pti->was_target_conflict) |
| EXPR_TARGET_AVAILABLE (expr) = false; |
| if (pti->type == TRANS_SPECULATION) |
| { |
| EXPR_SPEC_DONE_DS (expr) = pti->ds; |
| EXPR_NEEDS_SPEC_CHECK_P (expr) |= pti->needs_check; |
| } |
| |
| if (sched_verbose >= 6) |
| { |
| sel_print ("changed (cached): "); |
| dump_expr (expr); |
| sel_print ("\n"); |
| } |
| |
| *res = MOVEUP_EXPR_CHANGED; |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Update bitmap caches on INSN with result RES of propagating EXPR. */ |
| static void |
| update_bitmap_cache (expr_t expr, insn_t insn, bool inside_insn_group, |
| enum MOVEUP_EXPR_CODE res) |
| { |
| int expr_uid = INSN_UID (EXPR_INSN_RTX (expr)); |
| |
| /* Do not cache result of propagating jumps through an insn group, |
| as it is always true, which is not useful outside the group. */ |
| if (inside_insn_group) |
| return; |
| |
| if (res == MOVEUP_EXPR_NULL) |
| { |
| bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid); |
| bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid); |
| } |
| else if (res == MOVEUP_EXPR_SAME) |
| { |
| bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid); |
| bitmap_clear_bit (INSN_FOUND_DEPS (insn), expr_uid); |
| } |
| else if (res == MOVEUP_EXPR_AS_RHS) |
| { |
| bitmap_clear_bit (INSN_ANALYZED_DEPS (insn), expr_uid); |
| bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid); |
| } |
| else |
| gcc_unreachable (); |
| } |
| |
| /* Update hashtable on INSN with changed EXPR, old EXPR_OLD_VINSN |
| and transformation type TRANS_TYPE. */ |
| static void |
| update_transformation_cache (expr_t expr, insn_t insn, |
| bool inside_insn_group, |
| enum local_trans_type trans_type, |
| vinsn_t expr_old_vinsn) |
| { |
| struct transformed_insns *pti; |
| |
| if (inside_insn_group) |
| return; |
| |
| pti = XNEW (struct transformed_insns); |
| pti->vinsn_old = expr_old_vinsn; |
| pti->vinsn_new = EXPR_VINSN (expr); |
| pti->type = trans_type; |
| pti->was_target_conflict = was_target_conflict; |
| pti->ds = EXPR_SPEC_DONE_DS (expr); |
| pti->needs_check = EXPR_NEEDS_SPEC_CHECK_P (expr); |
| vinsn_attach (pti->vinsn_old); |
| vinsn_attach (pti->vinsn_new); |
| *((struct transformed_insns **) |
| htab_find_slot_with_hash (INSN_TRANSFORMED_INSNS (insn), |
| pti, VINSN_HASH_RTX (expr_old_vinsn), |
| INSERT)) = pti; |
| } |
| |
| /* Same as moveup_expr, but first looks up the result of |
| transformation in caches. */ |
| static enum MOVEUP_EXPR_CODE |
| moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group) |
| { |
| enum MOVEUP_EXPR_CODE res; |
| bool got_answer = false; |
| |
| if (sched_verbose >= 6) |
| { |
| sel_print ("Moving "); |
| dump_expr (expr); |
| sel_print (" through %d: ", INSN_UID (insn)); |
| } |
| |
| if (DEBUG_INSN_P (EXPR_INSN_RTX (expr)) |
| && BLOCK_FOR_INSN (EXPR_INSN_RTX (expr)) |
| && (sel_bb_head (BLOCK_FOR_INSN (EXPR_INSN_RTX (expr))) |
| == EXPR_INSN_RTX (expr))) |
| /* Don't use cached information for debug insns that are heads of |
| basic blocks. */; |
| else if (try_bitmap_cache (expr, insn, inside_insn_group, &res)) |
| /* When inside an insn group, we do not want to remove stores conflicting |
| with previously issued loads. */ |
| got_answer = ! inside_insn_group || res != MOVEUP_EXPR_NULL; |
| else if (try_transformation_cache (expr, insn, &res)) |
| got_answer = true; |
| |
| if (! got_answer) |
| { |
| /* Invoke moveup_expr and record the results. */ |
| vinsn_t expr_old_vinsn = EXPR_VINSN (expr); |
| ds_t expr_old_spec_ds = EXPR_SPEC_DONE_DS (expr); |
| int expr_uid = INSN_UID (VINSN_INSN_RTX (expr_old_vinsn)); |
| bool unique_p = VINSN_UNIQUE_P (expr_old_vinsn); |
| enum local_trans_type trans_type = TRANS_SUBSTITUTION; |
| |
| /* ??? Invent something better than this. We can't allow old_vinsn |
| to go, we need it for the history vector. */ |
| vinsn_attach (expr_old_vinsn); |
| |
| res = moveup_expr (expr, insn, inside_insn_group, |
| &trans_type); |
| switch (res) |
| { |
| case MOVEUP_EXPR_NULL: |
| update_bitmap_cache (expr, insn, inside_insn_group, res); |
| if (sched_verbose >= 6) |
| sel_print ("removed\n"); |
| break; |
| |
| case MOVEUP_EXPR_SAME: |
| update_bitmap_cache (expr, insn, inside_insn_group, res); |
| if (sched_verbose >= 6) |
| sel_print ("unchanged\n"); |
| break; |
| |
| case MOVEUP_EXPR_AS_RHS: |
| gcc_assert (!unique_p || inside_insn_group); |
| update_bitmap_cache (expr, insn, inside_insn_group, res); |
| if (sched_verbose >= 6) |
| sel_print ("unchanged (as RHS)\n"); |
| break; |
| |
| case MOVEUP_EXPR_CHANGED: |
| gcc_assert (INSN_UID (EXPR_INSN_RTX (expr)) != expr_uid |
| || EXPR_SPEC_DONE_DS (expr) != expr_old_spec_ds); |
| insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr), |
| INSN_UID (insn), trans_type, |
| expr_old_vinsn, EXPR_VINSN (expr), |
| expr_old_spec_ds); |
| update_transformation_cache (expr, insn, inside_insn_group, |
| trans_type, expr_old_vinsn); |
| if (sched_verbose >= 6) |
| { |
| sel_print ("changed: "); |
| dump_expr (expr); |
| sel_print ("\n"); |
| } |
| break; |
| default: |
| gcc_unreachable (); |
| } |
| |
| vinsn_detach (expr_old_vinsn); |
| } |
| |
| return res; |
| } |
| |
| /* Moves an av set AVP up through INSN, performing necessary |
| transformations. */ |
| static void |
| moveup_set_expr (av_set_t *avp, insn_t insn, bool inside_insn_group) |
| { |
| av_set_iterator i; |
| expr_t expr; |
| |
| FOR_EACH_EXPR_1 (expr, i, avp) |
| { |
| switch (moveup_expr_cached (expr, insn, inside_insn_group)) |
| { |
| case MOVEUP_EXPR_SAME: |
| case MOVEUP_EXPR_AS_RHS: |
| break; |
| |
| case MOVEUP_EXPR_NULL: |
| av_set_iter_remove (&i); |
| break; |
| |
| case MOVEUP_EXPR_CHANGED: |
| expr = merge_with_other_exprs (avp, &i, expr); |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| } |
| |
| /* Moves the av set AVP up along PATH, through the insns of the |
| current insn group (those scheduled on the same cycle as the |
| insn at the head of PATH). */ |
| static void |
| moveup_set_inside_insn_group (av_set_t *avp, ilist_t path) |
| { |
| int last_cycle; |
| |
| if (sched_verbose >= 6) |
| sel_print ("Moving expressions up in the insn group...\n"); |
| if (! path) |
| return; |
| last_cycle = INSN_SCHED_CYCLE (ILIST_INSN (path)); |
| while (path |
| && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle) |
| { |
| moveup_set_expr (avp, ILIST_INSN (path), true); |
| path = ILIST_NEXT (path); |
| } |
| } |
| |
| /* Returns true if after moving EXPR along PATH it equals EXPR_VLIW. */ |
| static bool |
| equal_after_moveup_path_p (expr_t expr, ilist_t path, expr_t expr_vliw) |
| { |
| expr_def _tmp, *tmp = &_tmp; |
| int last_cycle; |
| bool res = true; |
| |
| copy_expr_onside (tmp, expr); |
| last_cycle = path ? INSN_SCHED_CYCLE (ILIST_INSN (path)) : 0; |
| while (path |
| && res |
| && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle) |
| { |
| res = (moveup_expr_cached (tmp, ILIST_INSN (path), true) |
| != MOVEUP_EXPR_NULL); |
| path = ILIST_NEXT (path); |
| } |
| |
| if (res) |
| { |
| vinsn_t tmp_vinsn = EXPR_VINSN (tmp); |
| vinsn_t expr_vliw_vinsn = EXPR_VINSN (expr_vliw); |
| |
| if (tmp_vinsn != expr_vliw_vinsn) |
| res = vinsn_equal_p (tmp_vinsn, expr_vliw_vinsn); |
| } |
| |
| clear_expr (tmp); |
| return res; |
| } |
| |
| |
| /* Functions that compute av and lv sets. */ |
| |
| /* Returns true if INSN is not a downward continuation of the given path P in |
| the current stage. */ |
| static bool |
| is_ineligible_successor (insn_t insn, ilist_t p) |
| { |
| insn_t prev_insn; |
| |
| /* Check that INSN has not been deleted. */ |
| if (PREV_INSN (insn) && NEXT_INSN (PREV_INSN (insn)) != insn) |
| gcc_unreachable (); |
| else if (NEXT_INSN (insn) && PREV_INSN (NEXT_INSN (insn)) != insn) |
| gcc_unreachable (); |
| |
| /* If it's the first insn visited, then the successor is ok. */ |
| if (!p) |
| return false; |
| |
| prev_insn = ILIST_INSN (p); |
| |
| if (/* a backward edge. */ |
| INSN_SEQNO (insn) < INSN_SEQNO (prev_insn) |
| /* is already visited. */ |
| || (INSN_SEQNO (insn) == INSN_SEQNO (prev_insn) |
| && (ilist_is_in_p (p, insn) |
| /* We can reach another fence here and still seqno of insn |
| would be equal to seqno of prev_insn. This is possible |
| when prev_insn is a previously created bookkeeping copy. |
| In that case it'd get a seqno of insn. Thus, check here |
| whether insn is in current fence too. */ |
| || IN_CURRENT_FENCE_P (insn))) |
| /* Was already scheduled on this round. */ |
| || (INSN_SEQNO (insn) > INSN_SEQNO (prev_insn) |
| && IN_CURRENT_FENCE_P (insn)) |
| /* An insn from another fence could also be |
| scheduled earlier even if this insn is not in |
| a fence list right now. Check INSN_SCHED_CYCLE instead. */ |
| || (!pipelining_p |
| && INSN_SCHED_TIMES (insn) > 0)) |
| return true; |
| else |
| return false; |
| } |
| |
| /* Computes the av_set below the last bb insn INSN, doing all the 'dirty work' |
| of handling multiple successors and properly merging its av_sets. P is |
| the current path traversed. WS is the size of the lookahead window. |
| Return the av set computed. */ |
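| /* For illustration: if INSN ends a bb with two successors S1 and S2 and |
| an expr E is available only in S1's av_set, then after the union below |
| EXPR_SPEC (E) is incremented, recording that moving E above the branch |
| is speculative; exprs available in both branches keep their counts. */ |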
| static av_set_t |
| compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws) |
| { |
| struct succs_info *sinfo; |
| av_set_t expr_in_all_succ_branches = NULL; |
| int is; |
| insn_t succ, zero_succ = NULL; |
| av_set_t av1 = NULL; |
| |
| gcc_assert (sel_bb_end_p (insn)); |
| |
| /* Find the different kinds of successors needed for correct computation |
| of the SPEC and TARGET_AVAILABLE attributes. */ |
| sinfo = compute_succs_info (insn, SUCCS_NORMAL); |
| |
| /* Debug output. */ |
| if (sched_verbose >= 6) |
| { |
| sel_print ("successors of bb end (%d): ", INSN_UID (insn)); |
| dump_insn_vector (sinfo->succs_ok); |
| sel_print ("\n"); |
| if (sinfo->succs_ok_n != sinfo->all_succs_n) |
| sel_print ("real successors num: %d\n", sinfo->all_succs_n); |
| } |
| |
| /* Add insn to the tail of current path. */ |
| ilist_add (&p, insn); |
| |
| FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ) |
| { |
| av_set_t succ_set; |
| |
| /* We will edit SUCC_SET and EXPR_SPEC field of its elements. */ |
| succ_set = compute_av_set_inside_bb (succ, p, ws, true); |
| |
| av_set_split_usefulness (succ_set, |
| sinfo->probs_ok[is], |
| sinfo->all_prob); |
| |
| if (sinfo->all_succs_n > 1) |
| { |
| /* Find EXPR'es that came from *all* successors and save them |
| into expr_in_all_succ_branches. This set will be used later |
| for calculating speculation attributes of EXPR'es. */ |
| if (is == 0) |
| { |
| expr_in_all_succ_branches = av_set_copy (succ_set); |
| |
| /* Remember the first successor for later. */ |
| zero_succ = succ; |
| } |
| else |
| { |
| av_set_iterator i; |
| expr_t expr; |
| |
| FOR_EACH_EXPR_1 (expr, i, &expr_in_all_succ_branches) |
| if (!av_set_is_in_p (succ_set, EXPR_VINSN (expr))) |
| av_set_iter_remove (&i); |
| } |
| } |
| |
| /* Union the av_sets. Check liveness restrictions on target registers |
| in special case of two successors. */ |
| if (sinfo->succs_ok_n == 2 && is == 1) |
| { |
| basic_block bb0 = BLOCK_FOR_INSN (zero_succ); |
| basic_block bb1 = BLOCK_FOR_INSN (succ); |
| |
| gcc_assert (BB_LV_SET_VALID_P (bb0) && BB_LV_SET_VALID_P (bb1)); |
| av_set_union_and_live (&av1, &succ_set, |
| BB_LV_SET (bb0), |
| BB_LV_SET (bb1), |
| insn); |
| } |
| else |
| av_set_union_and_clear (&av1, &succ_set, insn); |
| } |
| |
| /* Check liveness restrictions the hard way when there are more than |
| two successors. */ |
| if (sinfo->succs_ok_n > 2) |
| FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ) |
| { |
| basic_block succ_bb = BLOCK_FOR_INSN (succ); |
| |
| gcc_assert (BB_LV_SET_VALID_P (succ_bb)); |
| mark_unavailable_targets (av1, BB_AV_SET (succ_bb), |
| BB_LV_SET (succ_bb)); |
| } |
| |
| /* Finally, check liveness restrictions on paths leaving the region. */ |
| if (sinfo->all_succs_n > sinfo->succs_ok_n) |
| FOR_EACH_VEC_ELT (sinfo->succs_other, is, succ) |
| mark_unavailable_targets |
| (av1, NULL, BB_LV_SET (BLOCK_FOR_INSN (succ))); |
| |
| if (sinfo->all_succs_n > 1) |
| { |
| av_set_iterator i; |
| expr_t expr; |
| |
| /* Increase the spec attribute of all EXPR'es that didn't come |
| from all successors. */ |
| FOR_EACH_EXPR (expr, i, av1) |
| if (!av_set_is_in_p (expr_in_all_succ_branches, EXPR_VINSN (expr))) |
| EXPR_SPEC (expr)++; |
| |
| av_set_clear (&expr_in_all_succ_branches); |
| |
| /* Do not move conditional branches through other |
| conditional branches. So, remove all conditional |
| branches from the av_set if the current insn is a conditional |
| branch. */ |
| av_set_substract_cond_branches (&av1); |
| } |
| |
| ilist_remove (&p); |
| free_succs_info (sinfo); |
| |
| if (sched_verbose >= 6) |
| { |
| sel_print ("av_succs (%d): ", INSN_UID (insn)); |
| dump_av_set (av1); |
| sel_print ("\n"); |
| } |
| |
| return av1; |
| } |
| |
| /* This function computes av_set for the FIRST_INSN by dragging valid |
| av_set through all basic block insns either from the end of basic block |
| (computed using compute_av_set_at_bb_end) or from the insn on which |
| MAX_WS was exceeded. It uses compute_av_set_at_bb_end to compute av_set |
| below the basic block and to handle conditional branches. |
| FIRST_INSN - the basic block head, P - path consisting of the insns |
| traversed on the way to the FIRST_INSN (the path is sparse, only bb heads |
| and bb ends are added to the path), WS - current window size, |
| NEED_COPY_P - true if we'll make a copy of av_set before returning it. */ |
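| /* For illustration, for a bb "i1; i2; i3" with FIRST_INSN == i1 and no |
| valid cached sets, the av_set below i3 is taken from |
| compute_av_set_at_bb_end and then moved up through i3, i2 and i1, |
| adding each insn's own expr along the way, yielding av (i1). */ |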
| static av_set_t |
| compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws, |
| bool need_copy_p) |
| { |
| insn_t cur_insn; |
| int end_ws = ws; |
| insn_t bb_end = sel_bb_end (BLOCK_FOR_INSN (first_insn)); |
| insn_t after_bb_end = NEXT_INSN (bb_end); |
| insn_t last_insn; |
| av_set_t av = NULL; |
| basic_block cur_bb = BLOCK_FOR_INSN (first_insn); |
| |
| /* Return NULL if insn is not on the legitimate downward path. */ |
| if (is_ineligible_successor (first_insn, p)) |
| { |
| if (sched_verbose >= 6) |
| sel_print ("Insn %d is ineligible_successor\n", INSN_UID (first_insn)); |
| |
| return NULL; |
| } |
| |
| /* If insn already has valid av(insn) computed, just return it. */ |
| if (AV_SET_VALID_P (first_insn)) |
| { |
| av_set_t av_set; |
| |
| if (sel_bb_head_p (first_insn)) |
| av_set = BB_AV_SET (BLOCK_FOR_INSN (first_insn)); |
| else |
| av_set = NULL; |
| |
| if (sched_verbose >= 6) |
| { |
| sel_print ("Insn %d has a valid av set: ", INSN_UID (first_insn)); |
| dump_av_set (av_set); |
| sel_print ("\n"); |
| } |
| |
| return need_copy_p ? av_set_copy (av_set) : av_set; |
| } |
| |
| ilist_add (&p, first_insn); |
| |
| /* After this loop has completed, LAST_INSN will hold the insn that has |
| a valid av_set to start the backward computation from: that set either |
| will be NULL because the window size was exceeded on that insn, or |
| a valid av_set as returned by compute_av_set_at_bb_end for the last |
| insn of the basic block. */ |
| for (last_insn = first_insn; last_insn != after_bb_end; |
| last_insn = NEXT_INSN (last_insn)) |
| { |
| /* We may encounter valid av_set not only on bb_head, but also on |
| those insns on which previously MAX_WS was exceeded. */ |
| if (AV_SET_VALID_P (last_insn)) |
| { |
| if (sched_verbose >= 6) |
| sel_print ("Insn %d has a valid empty av set\n", INSN_UID (last_insn)); |
| break; |
| } |
| |
| /* The special case: the last insn of the BB may be an |
| ineligible_successor due to its seqno, which was set on it |
| when it was created as a bookkeeping copy. */ |
| if (last_insn != first_insn |
| && is_ineligible_successor (last_insn, p)) |
| { |
| if (sched_verbose >= 6) |
| sel_print ("Insn %d is ineligible_successor\n", INSN_UID (last_insn)); |
| break; |
| } |
| |
| if (DEBUG_INSN_P (last_insn)) |
| continue; |
| |
| if (end_ws > max_ws) |
| { |
| /* We can reach max lookahead size at bb_header, so clean av_set |
| first. */ |
| INSN_WS_LEVEL (last_insn) = global_level; |
| |
| if (sched_verbose >= 6) |
| sel_print ("Insn %d is beyond the software lookahead window size\n", |
| INSN_UID (last_insn)); |
| break; |
| } |
| |
| end_ws++; |
| } |
| |
| /* Get the valid av_set into AV above LAST_INSN to start the backward |
| computation from. It will either be an empty av_set or the av_set |
| computed from the successors of the last insn of the current bb. */ |
| if (last_insn != after_bb_end) |
| { |
| av = NULL; |
| |
| /* This is needed only to obtain av_sets that are identical to |
| those computed by the old compute_av_set version. */ |
| if (last_insn == first_insn && !INSN_NOP_P (last_insn)) |
| av_set_add (&av, INSN_EXPR (last_insn)); |
| } |
| else |
| /* END_WS is always already increased by 1 if LAST_INSN == AFTER_BB_END. */ |
| av = compute_av_set_at_bb_end (bb_end, p, end_ws); |
| |
| /* Compute av_set in AV starting from below the LAST_INSN up to |
| location above the FIRST_INSN. */ |
| for (cur_insn = PREV_INSN (last_insn); cur_insn != PREV_INSN (first_insn); |
| cur_insn = PREV_INSN (cur_insn)) |
| if (!INSN_NOP_P (cur_insn)) |
| { |
| expr_t expr; |
| |
| moveup_set_expr (&av, cur_insn, false); |
| |
| /* If the expression for CUR_INSN is already in the set, |
| replace it by the new one. */ |
| expr = av_set_lookup (av, INSN_VINSN (cur_insn)); |
| if (expr != NULL) |
| { |
| clear_expr (expr); |
| copy_expr (expr, INSN_EXPR (cur_insn)); |
| } |
| else |
| av_set_add (&av, INSN_EXPR (cur_insn)); |
| } |
| |
| /* Clear stale bb_av_set. */ |
| if (sel_bb_head_p (first_insn)) |
| { |
| av_set_clear (&BB_AV_SET (cur_bb)); |
| BB_AV_SET (cur_bb) = need_copy_p ? av_set_copy (av) : av; |
| BB_AV_LEVEL (cur_bb) = global_level; |
| } |
| |
| if (sched_verbose >= 6) |
| { |
| sel_print ("Computed av set for insn %d: ", INSN_UID (first_insn)); |
| dump_av_set (av); |
| sel_print ("\n"); |
| } |
| |
| ilist_remove (&p); |
| return av; |
| } |
| |
| /* Compute av set before INSN. |
| INSN - the current operation (actual rtx INSN) |
| P - the current path, which is list of insns visited so far |
| WS - software lookahead window size. |
| UNIQUE_P - TRUE if the returned av_set will be changed, hence |
| if we want to save the computed av_set in s_i_d, we should make a copy of it. |
| |
| In the resulting set we will have only expressions that don't have delay |
| stalls and nonsubstitutable dependences. */ |
| static av_set_t |
| compute_av_set (insn_t insn, ilist_t p, int ws, bool unique_p) |
| { |
| return compute_av_set_inside_bb (insn, p, ws, unique_p); |
| } |
| |
| /* Propagate a liveness set LV through INSN. */ |
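| /* For illustration: simulating backwards through "r1 = r2 + r3" removes |
| r1 from LV (it is defined here) and adds r2 and r3 (they are used), |
| which is what df_simulate_one_insn_backwards does for simple sets. */ |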
| static void |
| propagate_lv_set (regset lv, insn_t insn) |
| { |
| gcc_assert (INSN_P (insn)); |
| |
| if (INSN_NOP_P (insn)) |
| return; |
| |
| df_simulate_one_insn_backwards (BLOCK_FOR_INSN (insn), insn, lv); |
| } |
| |
| /* Return the liveness set at the end of BB. */ |
| static regset |
| compute_live_after_bb (basic_block bb) |
| { |
| edge e; |
| edge_iterator ei; |
| regset lv = get_clear_regset_from_pool (); |
| |
| gcc_assert (!ignore_first); |
| |
| FOR_EACH_EDGE (e, ei, bb->succs) |
| if (sel_bb_empty_p (e->dest)) |
| { |
| if (! BB_LV_SET_VALID_P (e->dest)) |
| { |
| gcc_unreachable (); |
| gcc_assert (BB_LV_SET (e->dest) == NULL); |
| BB_LV_SET (e->dest) = compute_live_after_bb (e->dest); |
| BB_LV_SET_VALID_P (e->dest) = true; |
| } |
| IOR_REG_SET (lv, BB_LV_SET (e->dest)); |
| } |
| else |
| IOR_REG_SET (lv, compute_live (sel_bb_head (e->dest))); |
| |
| return lv; |
| } |
| |
| /* Compute the set of all live registers at the point before INSN and save |
| it at INSN if INSN is the bb head. */ |
| regset |
| compute_live (insn_t insn) |
| { |
| basic_block bb = BLOCK_FOR_INSN (insn); |
| insn_t final, temp; |
| regset lv; |
| |
| /* Return the valid set if we're already on it. */ |
| if (!ignore_first) |
| { |
| regset src = NULL; |
| |
| if (sel_bb_head_p (insn) && BB_LV_SET_VALID_P (bb)) |
| src = BB_LV_SET (bb); |
| else |
| { |
| gcc_assert (in_current_region_p (bb)); |
| if (INSN_LIVE_VALID_P (insn)) |
| src = INSN_LIVE (insn); |
| } |
| |
| if (src) |
| { |
| lv = get_regset_from_pool (); |
| COPY_REG_SET (lv, src); |
| |
| if (sel_bb_head_p (insn) && ! BB_LV_SET_VALID_P (bb)) |
| { |
| COPY_REG_SET (BB_LV_SET (bb), lv); |
| BB_LV_SET_VALID_P (bb) = true; |
| } |
| |
| return_regset_to_pool (lv); |
| return lv; |
| } |
| } |
| |
| /* We've skipped the wrong lv_set. Don't skip the right one. */ |
| ignore_first = false; |
| gcc_assert (in_current_region_p (bb)); |
| |
| /* Find a valid LV set in this block or below, if needed. |
| Start searching from the next insn: either ignore_first is true, or |
| INSN doesn't have a correct live set. */ |
| temp = NEXT_INSN (insn); |
| final = NEXT_INSN (BB_END (bb)); |
| while (temp != final && ! INSN_LIVE_VALID_P (temp)) |
| temp = NEXT_INSN (temp); |
| if (temp == final) |
| { |
| lv = compute_live_after_bb (bb); |
| temp = PREV_INSN (temp); |
| } |
| else |
| { |
| lv = get_regset_from_pool (); |
| COPY_REG_SET (lv, INSN_LIVE (temp)); |
| } |
| |
| /* Put correct lv sets on the insns which have bad sets. */ |
| final = PREV_INSN (insn); |
| while (temp != final) |
| { |
| propagate_lv_set (lv, temp); |
| COPY_REG_SET (INSN_LIVE (temp), lv); |
| INSN_LIVE_VALID_P (temp) = true; |
| temp = PREV_INSN (temp); |
| } |
| |
| /* Also put it in a BB. */ |
| if (sel_bb_head_p (insn)) |
| { |
| basic_block bb = BLOCK_FOR_INSN (insn); |
| |
| COPY_REG_SET (BB_LV_SET (bb), lv); |
| BB_LV_SET_VALID_P (bb) = true; |
| } |
| |
| /* We return LV to the pool, but will not clear it there. Thus we can |
| legitimately use LV till the next use of regset_pool_get (). */ |
| return_regset_to_pool (lv); |
| return lv; |
| } |
| |
| /* Update liveness sets for INSN. */ |
| static inline void |
| update_liveness_on_insn (rtx_insn *insn) |
| { |
| ignore_first = true; |
| compute_live (insn); |
| } |
| |
| /* Compute liveness below INSN and write it into REGS. */ |
| static inline void |
| compute_live_below_insn (rtx_insn *insn, regset regs) |
| { |
| rtx_insn *succ; |
| succ_iterator si; |
| |
| FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL) |
| IOR_REG_SET (regs, compute_live (succ)); |
| } |
| |
| /* Update the data gathered in av and lv sets starting from INSN. */ |
| static void |
| update_data_sets (rtx_insn *insn) |
| { |
| update_liveness_on_insn (insn); |
| if (sel_bb_head_p (insn)) |
| { |
| gcc_assert (AV_LEVEL (insn) != 0); |
| BB_AV_LEVEL (BLOCK_FOR_INSN (insn)) = -1; |
| compute_av_set (insn, NULL, 0, 0); |
| } |
| } |
| |
| |
| /* Helper for move_op () and find_used_regs (). |
| Return speculation type for which a check should be created on the place |
| of INSN. EXPR is one of the original ops we are searching for. */ |
| static ds_t |
| get_spec_check_type_for_insn (insn_t insn, expr_t expr) |
| { |
| ds_t to_check_ds; |
| ds_t already_checked_ds = EXPR_SPEC_DONE_DS (INSN_EXPR (insn)); |
| |
| to_check_ds = EXPR_SPEC_TO_CHECK_DS (expr); |
| |
| if (targetm.sched.get_insn_checked_ds) |
| already_checked_ds |= targetm.sched.get_insn_checked_ds (insn); |
| |
| if (spec_info != NULL |
| && (spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)) |
| already_checked_ds |= BEGIN_CONTROL; |
| |
| already_checked_ds = ds_get_speculation_types (already_checked_ds); |
| |
| to_check_ds &= ~already_checked_ds; |
| |
| return to_check_ds; |
| } |
| |
| /* Find the set of registers that are unavailable for storing exprs |
| while moving ORIG_OPS up on the path starting from INSN due to |
| liveness (USED_REGS) or hardware restrictions (REG_RENAME_P). |
| |
| All the original operations found during the traversal are saved in the |
| ORIGINAL_INSNS list. |
| |
| REG_RENAME_P denotes the set of hardware registers that |
| cannot be used for renaming due to register class restrictions, |
| mode restrictions and others (the register we choose should be in |
| a class compatible with the original uses, shouldn't be in |
| call_used_regs, should satisfy HARD_REGNO_RENAME_OK etc). |
| |
| Returns TRUE if we've found all original insns, FALSE otherwise. |
| |
| This function utilizes code_motion_path_driver (formerly find_used_regs_1) |
| to traverse the code motion paths. This helper function finds registers |
| that are not available for storing exprs while moving ORIG_OPS up on the |
| path starting from INSN. A register is considered used on the moving |
| path if one of the following conditions is not satisfied: |
| |
| (1) it is not set or read on any path from INSN to an instance of |
| the original operation, |
| (2) it is not among the live registers at the point immediately |
| following the first original operation on a given downward path, |
| except for the original target register of the operation, |
| (3) it is not live on the other path of any conditional branch that is |
| passed by the operation, in case the original operations are not |
| present on both paths of the conditional branch. |
| |
| REG_RENAME_P->CROSSES_CALL is true if there is a call insn on the path |
| from INSN to the original insn. In this case CALL_USED_REG_SET will be |
| added to the unavailable hard regs at the point the original operation |
| is found. */ |
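| /* For illustration of condition (3) (hypothetical CFG): if an original |
| operation "r1 = ..." lies on only one arm of a conditional branch that |
| the motion passes, any register live on the other arm at the branch |
| point must be avoided for renaming, since the moved-up copy would |
| clobber it along that path. */ |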
| |
| static bool |
| find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs, |
| struct reg_rename *reg_rename_p, def_list_t *original_insns) |
| { |
| def_list_iterator i; |
| def_t def; |
| int res; |
| bool needs_spec_check_p = false; |
| expr_t expr; |
| av_set_iterator expr_iter; |
| struct fur_static_params sparams; |
| struct cmpd_local_params lparams; |
| |
| /* We haven't visited any blocks yet. */ |
| bitmap_clear (code_motion_visited_blocks); |
| |
| /* Init parameters for code_motion_path_driver. */ |
| sparams.crosses_call = false; |
| sparams.original_insns = original_insns; |
| sparams.used_regs = used_regs; |
| |
| /* Set the appropriate hooks and data. */ |
| code_motion_path_driver_info = &fur_hooks; |
| |
| res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams); |
| |
| reg_rename_p->crosses_call |= sparams.crosses_call; |
| |
| gcc_assert (res == 1); |
| gcc_assert (original_insns && *original_insns); |
| |
| /* ??? We calculate whether an expression needs a check when computing |
| av sets. This information is not as precise as it could be due to |
| merging this bit in merge_expr. We can do better in find_used_regs, |
| but we want to avoid multiple traversals of the same code motion |
| paths. */ |
| FOR_EACH_EXPR (expr, expr_iter, orig_ops) |
| needs_spec_check_p |= EXPR_NEEDS_SPEC_CHECK_P (expr); |
| |
| /* Mark hardware regs in REG_RENAME_P that are not suitable |
| for renaming expr in INSN due to hardware restrictions (register class, |
| modes compatibility etc). */ |
| FOR_EACH_DEF (def, i, *original_insns) |
| { |
| vinsn_t vinsn = INSN_VINSN (def->orig_insn); |
| |
| if (VINSN_SEPARABLE_P (vinsn)) |
| mark_unavailable_hard_regs (def, reg_rename_p, used_regs); |
| |
| /* Do not allow clobbering of ld.[sa] address in case some of the |
| original operations need a check. */ |
| if (needs_spec_check_p) |
| IOR_REG_SET (used_regs, VINSN_REG_USES (vinsn)); |
| } |
| |
| return true; |
| } |
| |
| |
| /* Functions to choose the best insn from available ones. */ |
| |
| /* Adjusts the priority for EXPR using the backend *_adjust_priority hook. */ |
| static int |
| sel_target_adjust_priority (expr_t expr) |
| { |
| int priority = EXPR_PRIORITY (expr); |
| int new_priority; |
| |
| if (targetm.sched.adjust_priority) |
| new_priority = targetm.sched.adjust_priority (EXPR_INSN_RTX (expr), priority); |
| else |
| new_priority = priority; |
| |
| /* Record the difference between the new and the old priority |
| in EXPR_PRIORITY_ADJ. */ |
| EXPR_PRIORITY_ADJ (expr) = new_priority - EXPR_PRIORITY (expr); |
| |
| gcc_assert (EXPR_PRIORITY_ADJ (expr) >= 0); |
| |
| if (sched_verbose >= 4) |
| |