| /* Instruction scheduling pass. |
| Copyright (C) 1992-2019 Free Software Foundation, Inc. |
| Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by, |
| and currently maintained by, Jim Wilson (wilson@cygnus.com) |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| /* Instruction scheduling pass. This file, along with sched-deps.c, |
| contains the generic parts. The actual entry point for |
| the normal instruction scheduling pass is found in sched-rgn.c. |
| |
| We compute insn priorities based on data dependencies. Flow |
| analysis only creates a fraction of the data dependencies we must |
| observe: namely, only those dependencies which the combiner can be |
| expected to use. For this pass, we must therefore create the |
| remaining dependencies we need to observe. Register dependencies, |
| memory dependencies, dependencies to keep function calls in order, |
| and the dependence between a conditional branch and the setting of |
| condition codes are all dealt with here. |
| |
| The scheduler first traverses the data flow graph, starting with |
| the last instruction, and proceeding to the first, assigning values |
| to insn_priority as it goes. This sorts the instructions |
| topologically by data dependence. |
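| |
| Ignoring memoization in INSN_PRIORITY, recovery blocks and the |
| contributes_to_priority hook, the computation performed by the |
| priority () function later in this file amounts roughly to the |
| following sketch (longest_path_priority is an illustrative name |
| only, not a function in this file): |
| |
| int |
| longest_path_priority (rtx_insn *insn) |
| { |
| sd_iterator_def sd_it; |
| dep_t dep; |
| int best = 0; |
| |
| if (sd_lists_empty_p (insn, SD_LIST_FORW)) |
| return insn_sched_cost (insn); |
| FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep) |
| best = MAX (best, dep_cost (dep) |
| + longest_path_priority (DEP_CON (dep))); |
| return best; |
| } |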
| |
| Once priorities have been established, we order the insns using |
| list scheduling. This works as follows: starting with a list of |
| all the ready insns, sorted according to priority number, we |
| schedule the insn from the end of the list by placing its |
| predecessors in the list according to their priority order. We |
| consider this insn scheduled by setting the pointer to the "end" of |
| the list to point to the previous insn. When an insn has no |
| predecessors, we either queue it until sufficient time has elapsed |
| or add it to the ready list. As the instructions are scheduled or |
| when stalls are introduced, the queue advances and dumps insns into |
| the ready list. When all insns down to the lowest priority have |
| been scheduled, the critical path of the basic block has been made |
| as short as possible. The remaining insns are then scheduled in |
| remaining slots. |
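| |
| Stripped of backtracking, speculation, debug-insn handling and the |
| many target hooks, the driver loop of schedule_block amounts |
| roughly to the sketch below; block_done_p stands in for the real |
| termination test and the per-cycle issue limit is omitted: |
| |
| clock_var = 0; |
| while (!block_done_p ()) |
| { |
| queue_to_ready (&ready); |
| ready_sort (&ready); |
| while (ready.n_ready > 0) |
| schedule_insn (ready_remove_first (&ready)); |
| advance_one_cycle (); |
| clock_var++; |
| } |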
| |
| The following list shows the order in which we want to break ties |
| among insns in the ready list (a much-simplified comparator sketch |
| follows the list): |
| |
| 1. choose insn with the longest path to end of bb, ties |
| broken by |
| 2. choose insn with least contribution to register pressure, |
| ties broken by |
| 3. prefer in-block over interblock motion, ties broken by |
| 4. prefer useful over speculative motion, ties broken by |
| 5. choose insn with largest control flow probability, ties |
| broken by |
| 6. choose insn with the least dependences upon the previously |
| scheduled insn, ties broken by |
| 7. choose the insn which has the most insns dependent on it, |
| or finally |
| 8. choose insn with lowest UID. |
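| |
| A much-simplified comparator applying only heuristics 1 and 8 above |
| (the real comparator is rank_for_schedule, later in this file) |
| might look as follows, returning a negative value when insn A |
| should be preferred: |
| |
| static int |
| simplified_rank (rtx_insn *a, rtx_insn *b) |
| { |
| if (INSN_PRIORITY (a) != INSN_PRIORITY (b)) |
| return INSN_PRIORITY (b) - INSN_PRIORITY (a); |
| return INSN_UID (a) - INSN_UID (b); |
| } |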
| |
| Memory references complicate matters. Only if we can be certain |
| that memory references are not part of the data dependency graph |
| (via true, anti, or output dependence), can we move operations past |
| memory references. To a first approximation, reads can be done |
| independently, while writes introduce dependencies. Better |
| approximations will yield fewer dependencies. |
| |
| Before reload, an extended analysis of interblock data dependences |
| is required for interblock scheduling. This is performed in |
| compute_block_dependences (). |
| |
| Dependencies set up by memory references are treated in exactly the |
| same way as other dependencies, by using insn backward dependences |
| INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences |
| INSN_FORW_DEPS for the purpose of forward list scheduling. |
| |
| Having optimized the critical path, we may have also unduly |
| extended the lifetimes of some registers. If an operation requires |
| that constants be loaded into registers, it is certainly desirable |
| to load those constants as early as necessary, but no earlier. |
| I.e., it will not do to load up a bunch of registers at the |
| beginning of a basic block only to use them at the end, if they |
| could be loaded later, since this may result in excessive register |
| utilization. |
| |
| Note that since branches are never in basic blocks, but only end |
| basic blocks, this pass will not move branches. But that is ok, |
| since we can use GNU's delayed branch scheduling pass to take care |
| of this case. |
| |
| Also note that no further optimizations based on algebraic |
| identities are performed, so this pass would be a good one to |
| perform instruction splitting, such as breaking up a multiply |
| instruction into shifts and adds where that is profitable. |
| |
| Given the memory aliasing analysis that this pass should perform, |
| it should be possible to remove redundant stores to memory, and to |
| load values from registers instead of hitting memory. |
| |
| Before reload, speculative insns are moved only if a 'proof' exists |
| that no exception will be caused by this, and if no live registers |
| exist that inhibit the motion (live register constraints are not |
| represented by data dependence edges). |
| |
| This pass must update information that subsequent passes expect to |
| be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths, |
| reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END. |
| |
| The information in the line number notes is carefully retained by |
| this pass. Notes that refer to the starting and ending of |
| exception regions are also carefully retained by this pass. All |
| other NOTE insns are grouped in their same relative order at the |
| beginning of basic blocks and regions that have been scheduled. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "cfghooks.h" |
| #include "df.h" |
| #include "memmodel.h" |
| #include "tm_p.h" |
| #include "insn-config.h" |
| #include "regs.h" |
| #include "ira.h" |
| #include "recog.h" |
| #include "insn-attr.h" |
| #include "cfgrtl.h" |
| #include "cfgbuild.h" |
| #include "sched-int.h" |
| #include "common/common-target.h" |
| #include "params.h" |
| #include "dbgcnt.h" |
| #include "cfgloop.h" |
| #include "dumpfile.h" |
| #include "print-rtl.h" |
| |
| #ifdef INSN_SCHEDULING |
| |
| /* True if we do register pressure relief through live-range |
| shrinkage. */ |
| static bool live_range_shrinkage_p; |
| |
| /* Switch on live range shrinkage. */ |
| void |
| initialize_live_range_shrinkage (void) |
| { |
| live_range_shrinkage_p = true; |
| } |
| |
| /* Switch off live range shrinkage. */ |
| void |
| finish_live_range_shrinkage (void) |
| { |
| live_range_shrinkage_p = false; |
| } |
| |
| /* issue_rate is the number of insns that can be scheduled in the same |
| machine cycle. It can be defined in the config/mach/mach.h file; |
| otherwise we set it to 1. */ |
| |
| int issue_rate; |
| |
| /* This can be set to true by a backend if the scheduler should not |
| enable a DCE pass. */ |
| bool sched_no_dce; |
| |
| /* The current initiation interval used when modulo scheduling. */ |
| static int modulo_ii; |
| |
| /* The maximum number of stages we are prepared to handle. */ |
| static int modulo_max_stages; |
| |
| /* The number of insns that exist in each iteration of the loop. We use this |
| to detect when we've scheduled all insns from the first iteration. */ |
| static int modulo_n_insns; |
| |
| /* The current count of insns in the first iteration of the loop that have |
| already been scheduled. */ |
| static int modulo_insns_scheduled; |
| |
| /* The maximum uid of insns from the first iteration of the loop. */ |
| static int modulo_iter0_max_uid; |
| |
| /* The number of times we should attempt to backtrack when modulo scheduling. |
| Decreased each time we have to backtrack. */ |
| static int modulo_backtracks_left; |
| |
| /* The stage in which the last insn from the original loop was |
| scheduled. */ |
| static int modulo_last_stage; |
| |
| /* sched_verbose controls the amount of debugging output the |
| scheduler prints. It is set by -fsched-verbose=N: |
| N=0: no debugging output. |
| N=1: default value. |
| N=2: bb's probabilities, detailed ready list info, unit/insn info. |
| N=3: rtl at abort point, control-flow, regions info. |
| N=5: dependences info. */ |
| int sched_verbose = 0; |
| |
| /* Debugging file. All printouts are sent to dump. */ |
| FILE *sched_dump = 0; |
| |
| /* This is a placeholder for the scheduler parameters common |
| to all schedulers. */ |
| struct common_sched_info_def *common_sched_info; |
| |
| #define INSN_TICK(INSN) (HID (INSN)->tick) |
| #define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick) |
| #define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate) |
| #define INTER_TICK(INSN) (HID (INSN)->inter_tick) |
| #define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn) |
| #define SHADOW_P(INSN) (HID (INSN)->shadow_p) |
| #define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec) |
| /* Cached cost of the instruction. Use insn_sched_cost to get cost of the |
| insn. -1 here means that the field is not initialized. */ |
| #define INSN_COST(INSN) (HID (INSN)->cost) |
| |
| /* If INSN_TICK of an instruction is equal to INVALID_TICK, |
| then it should be recalculated from scratch. */ |
| #define INVALID_TICK (-(max_insn_queue_index + 1)) |
| /* The minimal value of the INSN_TICK of an instruction. */ |
| #define MIN_TICK (-max_insn_queue_index) |
| |
| /* Original order of insns in the ready list. |
| Used to keep order of normal insns while separating DEBUG_INSNs. */ |
| #define INSN_RFS_DEBUG_ORIG_ORDER(INSN) (HID (INSN)->rfs_debug_orig_order) |
| |
| /* The deciding reason for INSN's place in the ready list. */ |
| #define INSN_LAST_RFS_WIN(INSN) (HID (INSN)->last_rfs_win) |
| |
| /* List of important notes we must keep around. This is a pointer to the |
| last element in the list. */ |
| rtx_insn *note_list; |
| |
| static struct spec_info_def spec_info_var; |
| /* Description of the speculative part of the scheduling. |
| If NULL - no speculation. */ |
| spec_info_t spec_info = NULL; |
| |
| /* True if a recovery block was added during scheduling of the current |
| block. Used to determine if we need to fix INSN_TICKs. */ |
| static bool haifa_recovery_bb_recently_added_p; |
| |
| /* True if a recovery block was added during this scheduling pass. |
| Used to determine whether we should have empty memory pools of |
| dependencies after finishing the current region. */ |
| bool haifa_recovery_bb_ever_added_p; |
| |
| /* Counters of different types of speculative instructions. */ |
| static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control; |
| |
| /* Array used in {unlink, restore}_bb_notes. */ |
| static rtx_insn **bb_header = 0; |
| |
| /* Basic block after which recovery blocks will be created. */ |
| static basic_block before_recovery; |
| |
| /* Basic block just before the EXIT_BLOCK and after recovery, if we have |
| created it. */ |
| basic_block after_recovery; |
| |
| /* FALSE if we add bb to another region, so we don't need to initialize it. */ |
| bool adding_bb_to_current_region_p = true; |
| |
| /* Queues, etc. */ |
| |
| /* An instruction is ready to be scheduled when all insns preceding it |
| have already been scheduled. It is important to ensure that all |
| insns which use its result will not be executed until its result |
| has been computed. An insn is maintained in one of four structures: |
| |
| (P) the "Pending" set of insns which cannot be scheduled until |
| their dependencies have been satisfied. |
| (Q) the "Queued" set of insns that can be scheduled when sufficient |
| time has passed. |
| (R) the "Ready" list of unscheduled, uncommitted insns. |
| (S) the "Scheduled" list of insns. |
| |
| Initially, all insns are either "Pending" or "Ready" depending on |
| whether their dependencies are satisfied. |
| |
| Insns move from the "Ready" list to the "Scheduled" list as they |
| are committed to the schedule. As this occurs, the insns in the |
| "Pending" list have their dependencies satisfied and move to either |
| the "Ready" list or the "Queued" set depending on whether |
| sufficient time has passed to make them ready. As time passes, |
| insns move from the "Queued" set to the "Ready" list. |
| |
| The "Pending" set (P) consists of the insns in the INSN_FORW_DEPS of |
| the unscheduled insns, i.e., those that are ready, queued, and pending. |
| The "Queued" set (Q) is implemented by the variable `insn_queue'. |
| The "Ready" list (R) is implemented by the variables `ready' and |
| `n_ready'. |
| The "Scheduled" list (S) is the new insn chain built by this pass. |
| |
| The transition (R->S) is implemented in the scheduling loop in |
| `schedule_block' when the best insn to schedule is chosen. |
| The transitions (P->R and P->Q) are implemented in `schedule_insn' as |
| insns move from the ready list to the scheduled list. |
| The transition (Q->R) is implemented in `queue_to_ready' as time |
| passes or stalls are introduced. */ |
| |
| /* Implement a circular buffer to delay instructions until sufficient |
| time has passed. For the new pipeline description interface, |
| MAX_INSN_QUEUE_INDEX is a power of two minus one that is not less |
| than the maximal instruction execution time computed by genattr.c, |
| based on the maximal time of functional unit reservations and of |
| getting a result. This is the longest time an insn may be queued. */ |
| |
| static rtx_insn_list **insn_queue; |
| static int q_ptr = 0; |
| static int q_size = 0; |
| #define NEXT_Q(X) (((X)+1) & max_insn_queue_index) |
| #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index) |
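| |
| /* For example, if max_insn_queue_index were 31 (a power of two minus |
| one), then NEXT_Q (31) == (32 & 31) == 0 and NEXT_Q_AFTER (30, 3) |
| == (33 & 31) == 1; the slot indices simply wrap around the circular |
| buffer. */ |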
| |
| #define QUEUE_SCHEDULED (-3) |
| #define QUEUE_NOWHERE (-2) |
| #define QUEUE_READY (-1) |
| /* QUEUE_SCHEDULED - INSN is scheduled. |
| QUEUE_NOWHERE - INSN isn't scheduled yet and is in neither the |
| queue nor the ready list. |
| QUEUE_READY - INSN is in ready list. |
| N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */ |
| |
| #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index) |
| |
| /* The following variable value describes all current and future |
| reservations of the processor units. */ |
| state_t curr_state; |
| |
| /* The following variable value is the size of memory representing all |
| current and future reservations of the processor units. */ |
| size_t dfa_state_size; |
| |
| /* The following array is used to find the best insn from ready when |
| the automaton pipeline interface is used. */ |
| signed char *ready_try = NULL; |
| |
| /* The ready list. */ |
| struct ready_list ready = {NULL, 0, 0, 0, 0}; |
| |
| /* The pointer to the ready list (to be removed). */ |
| static struct ready_list *readyp = &ready; |
| |
| /* Scheduling clock. */ |
| static int clock_var; |
| |
| /* Clock at which the previous instruction was issued. */ |
| static int last_clock_var; |
| |
| /* Set to true if, when queuing a shadow insn, we discover that it would be |
| scheduled too late. */ |
| static bool must_backtrack; |
| |
| /* The following variable value is the number of essential insns issued |
| on the current cycle. An insn is essential if it changes the |
| processor's state. */ |
| int cycle_issued_insns; |
| |
| /* This records the actual schedule. It is built up during the main phase |
| of schedule_block, and afterwards used to reorder the insns in the RTL. */ |
| static vec<rtx_insn *> scheduled_insns; |
| |
| static int may_trap_exp (const_rtx, int); |
| |
| /* Nonzero iff the address is composed of at most 1 register. */ |
| #define CONST_BASED_ADDRESS_P(x) \ |
| (REG_P (x) \ |
| || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \ |
| || (GET_CODE (x) == LO_SUM)) \ |
| && (CONSTANT_P (XEXP (x, 0)) \ |
| || CONSTANT_P (XEXP (x, 1))))) |
| |
| /* Returns a class that an insn with GET_DEST(insn)=x may belong to, |
| as found by analyzing the insn's expression. */ |
| |
| |
| static int haifa_luid_for_non_insn (rtx x); |
| |
| /* Haifa version of sched_info hooks common to all headers. */ |
| const struct common_sched_info_def haifa_common_sched_info = |
| { |
| NULL, /* fix_recovery_cfg */ |
| NULL, /* add_block */ |
| NULL, /* estimate_number_of_insns */ |
| haifa_luid_for_non_insn, /* luid_for_non_insn */ |
| SCHED_PASS_UNKNOWN /* sched_pass_id */ |
| }; |
| |
| /* Mapping from instruction UID to its Logical UID. */ |
| vec<int> sched_luids; |
| |
| /* Next LUID to assign to an instruction. */ |
| int sched_max_luid = 1; |
| |
| /* Haifa Instruction Data. */ |
| vec<haifa_insn_data_def> h_i_d; |
| |
| void (* sched_init_only_bb) (basic_block, basic_block); |
| |
| /* Split block function. Different schedulers might use different |
| functions to keep their internal data consistent. */ |
| basic_block (* sched_split_block) (basic_block, rtx); |
| |
| /* Create empty basic block after the specified block. */ |
| basic_block (* sched_create_empty_bb) (basic_block); |
| |
| /* Return the number of cycles until INSN is expected to be ready. |
| Return zero if it already is. */ |
| static int |
| insn_delay (rtx_insn *insn) |
| { |
| return MAX (INSN_TICK (insn) - clock_var, 0); |
| } |
| |
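| /* Helper for haifa_classify_rtx below. Classify expression X, with |
| IS_STORE nonzero when X appears in a store context, returning one of |
| the TRAP_FREE / TRAP_RISKY / IFREE / IRISKY / PFREE_CANDIDATE / |
| PRISKY_CANDIDATE values documented before haifa_classify_rtx. */ |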
| static int |
| may_trap_exp (const_rtx x, int is_store) |
| { |
| enum rtx_code code; |
| |
| if (x == 0) |
| return TRAP_FREE; |
| code = GET_CODE (x); |
| if (is_store) |
| { |
| if (code == MEM && may_trap_p (x)) |
| return TRAP_RISKY; |
| else |
| return TRAP_FREE; |
| } |
| if (code == MEM) |
| { |
| /* The insn uses memory: a volatile load. */ |
| if (MEM_VOLATILE_P (x)) |
| return IRISKY; |
| /* An exception-free load. */ |
| if (!may_trap_p (x)) |
| return IFREE; |
| /* A load with 1 base register, to be further checked. */ |
| if (CONST_BASED_ADDRESS_P (XEXP (x, 0))) |
| return PFREE_CANDIDATE; |
| /* No info on the load, to be further checked. */ |
| return PRISKY_CANDIDATE; |
| } |
| else |
| { |
| const char *fmt; |
| int i, insn_class = TRAP_FREE; |
| |
| /* Neither store nor load, check if it may cause a trap. */ |
| if (may_trap_p (x)) |
| return TRAP_RISKY; |
| /* Recursive step: walk the insn... */ |
| fmt = GET_RTX_FORMAT (code); |
| for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
| { |
| if (fmt[i] == 'e') |
| { |
| int tmp_class = may_trap_exp (XEXP (x, i), is_store); |
| insn_class = WORST_CLASS (insn_class, tmp_class); |
| } |
| else if (fmt[i] == 'E') |
| { |
| int j; |
| for (j = 0; j < XVECLEN (x, i); j++) |
| { |
| int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store); |
| insn_class = WORST_CLASS (insn_class, tmp_class); |
| if (insn_class == TRAP_RISKY || insn_class == IRISKY) |
| break; |
| } |
| } |
| if (insn_class == TRAP_RISKY || insn_class == IRISKY) |
| break; |
| } |
| return insn_class; |
| } |
| } |
| |
| /* Classifies rtx X of an insn for the purpose of verifying that X can be |
| executed speculatively (and consequently the insn can be moved |
| speculatively), by examining X, returning: |
| TRAP_RISKY: store, or risky non-load insn (e.g. division by variable). |
| TRAP_FREE: non-load insn. |
| IFREE: load from a globally safe location. |
| IRISKY: volatile load. |
| PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for |
| being either PFREE or PRISKY. */ |
| |
| static int |
| haifa_classify_rtx (const_rtx x) |
| { |
| int tmp_class = TRAP_FREE; |
| int insn_class = TRAP_FREE; |
| enum rtx_code code; |
| |
| if (GET_CODE (x) == PARALLEL) |
| { |
| int i, len = XVECLEN (x, 0); |
| |
| for (i = len - 1; i >= 0; i--) |
| { |
| tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i)); |
| insn_class = WORST_CLASS (insn_class, tmp_class); |
| if (insn_class == TRAP_RISKY || insn_class == IRISKY) |
| break; |
| } |
| } |
| else |
| { |
| code = GET_CODE (x); |
| switch (code) |
| { |
| case CLOBBER: |
| /* Test if it is a 'store'. */ |
| tmp_class = may_trap_exp (XEXP (x, 0), 1); |
| break; |
| case CLOBBER_HIGH: |
| gcc_assert (REG_P (XEXP (x, 0))); |
| break; |
| case SET: |
| /* Test if it is a store. */ |
| tmp_class = may_trap_exp (SET_DEST (x), 1); |
| if (tmp_class == TRAP_RISKY) |
| break; |
| /* Test if it is a load. */ |
| tmp_class = |
| WORST_CLASS (tmp_class, |
| may_trap_exp (SET_SRC (x), 0)); |
| break; |
| case COND_EXEC: |
| tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x)); |
| if (tmp_class == TRAP_RISKY) |
| break; |
| tmp_class = WORST_CLASS (tmp_class, |
| may_trap_exp (COND_EXEC_TEST (x), 0)); |
| break; |
| case TRAP_IF: |
| tmp_class = TRAP_RISKY; |
| break; |
| default:; |
| } |
| insn_class = tmp_class; |
| } |
| |
| return insn_class; |
| } |
| |
| int |
| haifa_classify_insn (const_rtx insn) |
| { |
| return haifa_classify_rtx (PATTERN (insn)); |
| } |
| |
| /* After the scheduler initialization function has been called, this function |
| can be called to enable modulo scheduling. II is the initiation interval |
| we should use; it affects the delays for delay_pairs that were recorded as |
| separated by a given number of stages. |
| |
| MAX_STAGES provides us with a limit |
| after which we give up scheduling; the caller must have unrolled at least |
| as many copies of the loop body and recorded delay_pairs for them. |
| |
| INSNS is the number of real (non-debug) insns in one iteration of |
| the loop. MAX_UID can be used to test whether an insn belongs to |
| the first iteration of the loop; all of them have a uid lower than |
| MAX_UID. */ |
| void |
| set_modulo_params (int ii, int max_stages, int insns, int max_uid) |
| { |
| modulo_ii = ii; |
| modulo_max_stages = max_stages; |
| modulo_n_insns = insns; |
| modulo_iter0_max_uid = max_uid; |
| modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS); |
| } |
| |
| /* A structure to record a pair of insns where the first one is a real |
| insn that has delay slots, and the second is its delayed shadow. |
| I1 is scheduled normally and will emit an assembly instruction, |
| while I2 describes the side effect that takes place at the |
| transition between cycles CYCLES and (CYCLES + 1) after I1. */ |
| struct delay_pair |
| { |
| struct delay_pair *next_same_i1; |
| rtx_insn *i1, *i2; |
| int cycles; |
| /* When doing modulo scheduling, a delay_pair can also be used to |
| show that I1 and I2 are the same insn in a different stage. If that |
| is the case, STAGES will be nonzero. */ |
| int stages; |
| }; |
| |
| /* Helpers for delay hashing. */ |
| |
| struct delay_i1_hasher : nofree_ptr_hash <delay_pair> |
| { |
| typedef void *compare_type; |
| static inline hashval_t hash (const delay_pair *); |
| static inline bool equal (const delay_pair *, const void *); |
| }; |
| |
| /* Returns a hash value for X, based on hashing just I1. */ |
| |
| inline hashval_t |
| delay_i1_hasher::hash (const delay_pair *x) |
| { |
| return htab_hash_pointer (x->i1); |
| } |
| |
| /* Return true if I1 of pair X is the same as that of pair Y. */ |
| |
| inline bool |
| delay_i1_hasher::equal (const delay_pair *x, const void *y) |
| { |
| return x->i1 == y; |
| } |
| |
| struct delay_i2_hasher : free_ptr_hash <delay_pair> |
| { |
| typedef void *compare_type; |
| static inline hashval_t hash (const delay_pair *); |
| static inline bool equal (const delay_pair *, const void *); |
| }; |
| |
| /* Returns a hash value for X, based on hashing just I2. */ |
| |
| inline hashval_t |
| delay_i2_hasher::hash (const delay_pair *x) |
| { |
| return htab_hash_pointer (x->i2); |
| } |
| |
| /* Return true if I2 of pair X is the same as that of pair Y. */ |
| |
| inline bool |
| delay_i2_hasher::equal (const delay_pair *x, const void *y) |
| { |
| return x->i2 == y; |
| } |
| |
| /* Two hash tables to record delay_pairs, one indexed by I1 and the other |
| indexed by I2. */ |
| static hash_table<delay_i1_hasher> *delay_htab; |
| static hash_table<delay_i2_hasher> *delay_htab_i2; |
| |
| /* Called through htab_traverse. Walk the hashtable using I2 as |
| index, and delete all elements involving an UID higher than |
| that pointed to by *DATA. */ |
| int |
| haifa_htab_i2_traverse (delay_pair **slot, int *data) |
| { |
| int maxuid = *data; |
| struct delay_pair *p = *slot; |
| if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid) |
| { |
| delay_htab_i2->clear_slot (slot); |
| } |
| return 1; |
| } |
| |
| /* Called through htab_traverse. Walk the hashtable using I1 as |
| index, and delete all elements involving an UID higher than |
| that pointed to by *DATA. */ |
| int |
| haifa_htab_i1_traverse (delay_pair **pslot, int *data) |
| { |
| int maxuid = *data; |
| struct delay_pair *p, *first, **pprev; |
| |
| if (INSN_UID ((*pslot)->i1) >= maxuid) |
| { |
| delay_htab->clear_slot (pslot); |
| return 1; |
| } |
| pprev = &first; |
| for (p = *pslot; p; p = p->next_same_i1) |
| { |
| if (INSN_UID (p->i2) < maxuid) |
| { |
| *pprev = p; |
| pprev = &p->next_same_i1; |
| } |
| } |
| *pprev = NULL; |
| if (first == NULL) |
| delay_htab->clear_slot (pslot); |
| else |
| *pslot = first; |
| return 1; |
| } |
| |
| /* Discard all delay pairs which involve an insn with an UID higher |
| than MAX_UID. */ |
| void |
| discard_delay_pairs_above (int max_uid) |
| { |
| delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid); |
| delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid); |
| } |
| |
| /* This function can be called by a port just before it starts the final |
| scheduling pass. It records the fact that an instruction with delay |
| slots has been split into two insns, I1 and I2. The first one will be |
| scheduled normally and initiates the operation. The second one is a |
| shadow which must follow a specific number of cycles after I1; its only |
| purpose is to show the side effect that occurs at that cycle in the RTL. |
| If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN, |
| while I2 retains the original insn type. |
| |
| There are two ways in which the number of cycles can be specified, |
| involving the CYCLES and STAGES arguments to this function. If STAGES |
| is zero, we just use the value of CYCLES. Otherwise, STAGES is a factor |
| which is multiplied by MODULO_II to give the number of cycles. This is |
| only useful if the caller also calls set_modulo_params to enable modulo |
| scheduling. */ |
| |
| void |
| record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages) |
| { |
| struct delay_pair *p = XNEW (struct delay_pair); |
| struct delay_pair **slot; |
| |
| p->i1 = i1; |
| p->i2 = i2; |
| p->cycles = cycles; |
| p->stages = stages; |
| |
| if (!delay_htab) |
| { |
| delay_htab = new hash_table<delay_i1_hasher> (10); |
| delay_htab_i2 = new hash_table<delay_i2_hasher> (10); |
| } |
| slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT); |
| p->next_same_i1 = *slot; |
| *slot = p; |
| slot = delay_htab_i2->find_slot (p, INSERT); |
| *slot = p; |
| } |
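| |
| /* As a purely hypothetical usage sketch (I1, I2 and the five-cycle |
| latency below are placeholders, not taken from any particular |
| port): a back end that splits a delayed-effect instruction in its |
| machine-dependent reorg could record the pair with |
| |
| record_delay_slot_pair (i1, i2, 5, 0); |
| |
| telling the scheduler that the side effect described by I2 takes |
| place five cycles after I1 issues. A nonzero STAGES argument would |
| instead be combined with set_modulo_params for modulo scheduling. */ |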
| |
| /* Examine the delay pair hashtable to see if INSN is a shadow for another, |
| and return the other insn if so. Return NULL otherwise. */ |
| rtx_insn * |
| real_insn_for_shadow (rtx_insn *insn) |
| { |
| struct delay_pair *pair; |
| |
| if (!delay_htab) |
| return NULL; |
| |
| pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn)); |
| if (!pair || pair->stages > 0) |
| return NULL; |
| return pair->i1; |
| } |
| |
| /* For a pair P of insns, return the fixed distance in cycles from the first |
| insn after which the second must be scheduled. */ |
| static int |
| pair_delay (struct delay_pair *p) |
| { |
| if (p->stages == 0) |
| return p->cycles; |
| else |
| return p->stages * modulo_ii; |
| } |
| |
| /* Given an insn INSN, add a dependence on its delayed shadow if it |
| has one. Also try to find situations where shadows depend on each other |
| and add dependencies to the real insns to limit the amount of backtracking |
| needed. */ |
| void |
| add_delay_dependencies (rtx_insn *insn) |
| { |
| struct delay_pair *pair; |
| sd_iterator_def sd_it; |
| dep_t dep; |
| |
| if (!delay_htab) |
| return; |
| |
| pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn)); |
| if (!pair) |
| return; |
| add_dependence (insn, pair->i1, REG_DEP_ANTI); |
| if (pair->stages) |
| return; |
| |
| FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep) |
| { |
| rtx_insn *pro = DEP_PRO (dep); |
| struct delay_pair *other_pair |
| = delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro)); |
| if (!other_pair || other_pair->stages) |
| continue; |
| if (pair_delay (other_pair) >= pair_delay (pair)) |
| { |
| if (sched_verbose >= 4) |
| { |
| fprintf (sched_dump, ";;\tadding dependence %d <- %d\n", |
| INSN_UID (other_pair->i1), |
| INSN_UID (pair->i1)); |
| fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n", |
| INSN_UID (pair->i1), |
| INSN_UID (pair->i2), |
| pair_delay (pair)); |
| fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n", |
| INSN_UID (other_pair->i1), |
| INSN_UID (other_pair->i2), |
| pair_delay (other_pair)); |
| } |
| add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI); |
| } |
| } |
| } |
| |
| /* Forward declarations. */ |
| |
| static int priority (rtx_insn *, bool force_recompute = false); |
| static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *); |
| static int rank_for_schedule (const void *, const void *); |
| static void swap_sort (rtx_insn **, int); |
| static void queue_insn (rtx_insn *, int, const char *); |
| static int schedule_insn (rtx_insn *); |
| static void adjust_priority (rtx_insn *); |
| static void advance_one_cycle (void); |
| static void extend_h_i_d (void); |
| |
| |
| /* Notes handling mechanism: |
| ========================= |
| Generally, NOTES are saved before scheduling and restored after scheduling. |
| The scheduler distinguishes between two types of notes: |
| |
| (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes: |
| Before scheduling a region, a pointer to the note is added to the insn |
| that follows or precedes it. (This happens as part of the data dependence |
| computation). After scheduling an insn, the pointer contained in it is |
| used for regenerating the corresponding note (in reemit_notes). |
| |
| (2) All other notes (e.g. INSN_DELETED): Before scheduling a block, |
| these notes are put in a list (in rm_other_notes() and |
| unlink_other_notes ()). After scheduling the block, these notes are |
| inserted at the beginning of the block (in schedule_block()). */ |
| |
| static void ready_add (struct ready_list *, rtx_insn *, bool); |
| static rtx_insn *ready_remove_first (struct ready_list *); |
| static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready); |
| |
| static void queue_to_ready (struct ready_list *); |
| static int early_queue_to_ready (state_t, struct ready_list *); |
| |
| /* The following functions are used to implement multi-pass scheduling |
| on the first cycle. */ |
| static rtx_insn *ready_remove (struct ready_list *, int); |
| static void ready_remove_insn (rtx_insn *); |
| |
| static void fix_inter_tick (rtx_insn *, rtx_insn *); |
| static int fix_tick_ready (rtx_insn *); |
| static void change_queue_index (rtx_insn *, int); |
| |
| /* The following functions are used to implement scheduling of data/control |
| speculative instructions. */ |
| |
| static void extend_h_i_d (void); |
| static void init_h_i_d (rtx_insn *); |
| static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *); |
| static void generate_recovery_code (rtx_insn *); |
| static void process_insn_forw_deps_be_in_spec (rtx_insn *, rtx_insn *, ds_t); |
| static void begin_speculative_block (rtx_insn *); |
| static void add_to_speculative_block (rtx_insn *); |
| static void init_before_recovery (basic_block *); |
| static void create_check_block_twin (rtx_insn *, bool); |
| static void fix_recovery_deps (basic_block); |
| static bool haifa_change_pattern (rtx_insn *, rtx); |
| static void dump_new_block_header (int, basic_block, rtx_insn *, rtx_insn *); |
| static void restore_bb_notes (basic_block); |
| static void fix_jump_move (rtx_insn *); |
| static void move_block_after_check (rtx_insn *); |
| static void move_succs (vec<edge, va_gc> **, basic_block); |
| static void sched_remove_insn (rtx_insn *); |
| static void clear_priorities (rtx_insn *, rtx_vec_t *); |
| static void calc_priorities (rtx_vec_t); |
| static void add_jump_dependencies (rtx_insn *, rtx_insn *); |
| |
| #endif /* INSN_SCHEDULING */ |
| |
| /* Point to state used for the current scheduling pass. */ |
| struct haifa_sched_info *current_sched_info; |
| |
| #ifndef INSN_SCHEDULING |
| void |
| schedule_insns (void) |
| { |
| } |
| #else |
| |
| /* Do register pressure sensitive insn scheduling if the flag is |
| set. */ |
| enum sched_pressure_algorithm sched_pressure; |
| |
| /* Map regno -> its pressure class. The map is defined only when |
| SCHED_PRESSURE != SCHED_PRESSURE_NONE. */ |
| enum reg_class *sched_regno_pressure_class; |
| |
| /* The current register pressure. Only the elements corresponding to |
| pressure classes are defined. */ |
| static int curr_reg_pressure[N_REG_CLASSES]; |
| |
| /* Saved value of the previous array. */ |
| static int saved_reg_pressure[N_REG_CLASSES]; |
| |
| /* Registers live at the given scheduling point. */ |
| static bitmap curr_reg_live; |
| |
| /* Saved value of the previous array. */ |
| static bitmap saved_reg_live; |
| |
| /* Registers mentioned in the current region. */ |
| static bitmap region_ref_regs; |
| |
| /* Temporary bitmap used for SCHED_PRESSURE_MODEL. */ |
| static bitmap tmp_bitmap; |
| |
| /* Effective number of available registers of a given class (see comment |
| in sched_pressure_start_bb). */ |
| static int sched_class_regs_num[N_REG_CLASSES]; |
| /* Number of call_saved_regs and fixed_regs. Helpers for calculating |
| sched_class_regs_num. */ |
| static int call_saved_regs_num[N_REG_CLASSES]; |
| static int fixed_regs_num[N_REG_CLASSES]; |
| |
| /* Initiate register pressure related info for scheduling the current |
| region. Currently it only clears the registers mentioned in the |
| current region. */ |
| void |
| sched_init_region_reg_pressure_info (void) |
| { |
| bitmap_clear (region_ref_regs); |
| } |
| |
| /* PRESSURE[CL] describes the pressure on register class CL. Update it |
| for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO. |
| LIVE tracks the set of live registers; if it is null, assume that |
| every birth or death is genuine. */ |
| static inline void |
| mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p) |
| { |
| enum reg_class pressure_class; |
| |
| pressure_class = sched_regno_pressure_class[regno]; |
| if (regno >= FIRST_PSEUDO_REGISTER) |
| { |
| if (pressure_class != NO_REGS) |
| { |
| if (birth_p) |
| { |
| if (!live || bitmap_set_bit (live, regno)) |
| pressure[pressure_class] |
| += (ira_reg_class_max_nregs |
| [pressure_class][PSEUDO_REGNO_MODE (regno)]); |
| } |
| else |
| { |
| if (!live || bitmap_clear_bit (live, regno)) |
| pressure[pressure_class] |
| -= (ira_reg_class_max_nregs |
| [pressure_class][PSEUDO_REGNO_MODE (regno)]); |
| } |
| } |
| } |
| else if (pressure_class != NO_REGS |
| && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)) |
| { |
| if (birth_p) |
| { |
| if (!live || bitmap_set_bit (live, regno)) |
| pressure[pressure_class]++; |
| } |
| else |
| { |
| if (!live || bitmap_clear_bit (live, regno)) |
| pressure[pressure_class]--; |
| } |
| } |
| } |
| |
| /* Initiate current register pressure related info from the live |
| registers given by LIVE. */ |
| static void |
| initiate_reg_pressure_info (bitmap live) |
| { |
| int i; |
| unsigned int j; |
| bitmap_iterator bi; |
| |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| curr_reg_pressure[ira_pressure_classes[i]] = 0; |
| bitmap_clear (curr_reg_live); |
| EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi) |
| if (sched_pressure == SCHED_PRESSURE_MODEL |
| || current_nr_blocks == 1 |
| || bitmap_bit_p (region_ref_regs, j)) |
| mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true); |
| } |
| |
| /* Mark registers in X as mentioned in the current region. */ |
| static void |
| setup_ref_regs (rtx x) |
| { |
| int i, j; |
| const RTX_CODE code = GET_CODE (x); |
| const char *fmt; |
| |
| if (REG_P (x)) |
| { |
| bitmap_set_range (region_ref_regs, REGNO (x), REG_NREGS (x)); |
| return; |
| } |
| fmt = GET_RTX_FORMAT (code); |
| for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
| if (fmt[i] == 'e') |
| setup_ref_regs (XEXP (x, i)); |
| else if (fmt[i] == 'E') |
| { |
| for (j = 0; j < XVECLEN (x, i); j++) |
| setup_ref_regs (XVECEXP (x, i, j)); |
| } |
| } |
| |
| /* Initiate current register pressure related info at the start of |
| basic block BB. */ |
| static void |
| initiate_bb_reg_pressure_info (basic_block bb) |
| { |
| unsigned int i ATTRIBUTE_UNUSED; |
| rtx_insn *insn; |
| |
| if (current_nr_blocks > 1) |
| FOR_BB_INSNS (bb, insn) |
| if (NONDEBUG_INSN_P (insn)) |
| setup_ref_regs (PATTERN (insn)); |
| initiate_reg_pressure_info (df_get_live_in (bb)); |
| if (bb_has_eh_pred (bb)) |
| for (i = 0; ; ++i) |
| { |
| unsigned int regno = EH_RETURN_DATA_REGNO (i); |
| |
| if (regno == INVALID_REGNUM) |
| break; |
| if (! bitmap_bit_p (df_get_live_in (bb), regno)) |
| mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, |
| regno, true); |
| } |
| } |
| |
| /* Save current register pressure related info. */ |
| static void |
| save_reg_pressure (void) |
| { |
| int i; |
| |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| saved_reg_pressure[ira_pressure_classes[i]] |
| = curr_reg_pressure[ira_pressure_classes[i]]; |
| bitmap_copy (saved_reg_live, curr_reg_live); |
| } |
| |
| /* Restore saved register pressure related info. */ |
| static void |
| restore_reg_pressure (void) |
| { |
| int i; |
| |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| curr_reg_pressure[ira_pressure_classes[i]] |
| = saved_reg_pressure[ira_pressure_classes[i]]; |
| bitmap_copy (curr_reg_live, saved_reg_live); |
| } |
| |
| /* Return TRUE if the register is dying after its USE. */ |
| static bool |
| dying_use_p (struct reg_use_data *use) |
| { |
| struct reg_use_data *next; |
| |
| for (next = use->next_regno_use; next != use; next = next->next_regno_use) |
| if (NONDEBUG_INSN_P (next->insn) |
| && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED) |
| return false; |
| return true; |
| } |
| |
| /* Print info about the current register pressure and its excess for |
| each pressure class. */ |
| static void |
| print_curr_reg_pressure (void) |
| { |
| int i; |
| enum reg_class cl; |
| |
| fprintf (sched_dump, ";;\t"); |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| { |
| cl = ira_pressure_classes[i]; |
| gcc_assert (curr_reg_pressure[cl] >= 0); |
| fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl], |
| curr_reg_pressure[cl], |
| curr_reg_pressure[cl] - sched_class_regs_num[cl]); |
| } |
| fprintf (sched_dump, "\n"); |
| } |
| |
| /* Determine if INSN has a condition that is clobbered if a register |
| in SET_REGS is modified. */ |
| static bool |
| cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs) |
| { |
| rtx pat = PATTERN (insn); |
| gcc_assert (GET_CODE (pat) == COND_EXEC); |
| if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0)))) |
| { |
| sd_iterator_def sd_it; |
| dep_t dep; |
| haifa_change_pattern (insn, ORIG_PAT (insn)); |
| FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep) |
| DEP_STATUS (dep) &= ~DEP_CANCELLED; |
| TODO_SPEC (insn) = HARD_DEP; |
| if (sched_verbose >= 2) |
| fprintf (sched_dump, |
| ";;\t\tdequeue insn %s because of clobbered condition\n", |
| (*current_sched_info->print_insn) (insn, 0)); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* This function should be called after modifying the pattern of INSN, |
| to update scheduler data structures as needed. */ |
| static void |
| update_insn_after_change (rtx_insn *insn) |
| { |
| sd_iterator_def sd_it; |
| dep_t dep; |
| |
| dfa_clear_single_insn_cache (insn); |
| |
| sd_it = sd_iterator_start (insn, |
| SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK); |
| while (sd_iterator_cond (&sd_it, &dep)) |
| { |
| DEP_COST (dep) = UNKNOWN_DEP_COST; |
| sd_iterator_next (&sd_it); |
| } |
| |
| /* Invalidate INSN_COST, so it'll be recalculated. */ |
| INSN_COST (insn) = -1; |
| /* Invalidate INSN_TICK, so it'll be recalculated. */ |
| INSN_TICK (insn) = INVALID_TICK; |
| |
| /* Invalidate autoprefetch data entry. */ |
| INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status |
| = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED; |
| INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status |
| = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED; |
| } |
| |
| |
| /* Two VECs, one to hold dependencies for which pattern replacements |
| need to be applied or restored at the start of the next cycle, and |
| another to hold an integer that is either one, to apply the |
| corresponding replacement, or zero to restore it. */ |
| static vec<dep_t> next_cycle_replace_deps; |
| static vec<int> next_cycle_apply; |
| |
| static void apply_replacement (dep_t, bool); |
| static void restore_pattern (dep_t, bool); |
| |
| /* Look at the remaining dependencies for insn NEXT, and compute and return |
| the TODO_SPEC value we should use for it. This is called after one of |
| NEXT's dependencies has been resolved. |
| We also perform pattern replacements for predication, and for broken |
| replacement dependencies. The latter is only done if FOR_BACKTRACK is |
| false. */ |
| |
| static ds_t |
| recompute_todo_spec (rtx_insn *next, bool for_backtrack) |
| { |
| ds_t new_ds; |
| sd_iterator_def sd_it; |
| dep_t dep, modify_dep = NULL; |
| int n_spec = 0; |
| int n_control = 0; |
| int n_replace = 0; |
| bool first_p = true; |
| |
| if (sd_lists_empty_p (next, SD_LIST_BACK)) |
| /* NEXT has all its dependencies resolved. */ |
| return 0; |
| |
| if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK)) |
| return HARD_DEP; |
| |
| /* If NEXT is intended to sit adjacent to this instruction, we don't |
| want to try to break any dependencies. Treat it as a HARD_DEP. */ |
| if (SCHED_GROUP_P (next)) |
| return HARD_DEP; |
| |
| /* Now we've got NEXT with speculative deps only. |
| 1. Look at the deps to see what we have to do. |
| 2. Check if we can do 'todo'. */ |
| new_ds = 0; |
| |
| FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep) |
| { |
| rtx_insn *pro = DEP_PRO (dep); |
| ds_t ds = DEP_STATUS (dep) & SPECULATIVE; |
| |
| if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next)) |
| continue; |
| |
| if (ds) |
| { |
| n_spec++; |
| if (first_p) |
| { |
| first_p = false; |
| |
| new_ds = ds; |
| } |
| else |
| new_ds = ds_merge (new_ds, ds); |
| } |
| else if (DEP_TYPE (dep) == REG_DEP_CONTROL) |
| { |
| if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED) |
| { |
| n_control++; |
| modify_dep = dep; |
| } |
| DEP_STATUS (dep) &= ~DEP_CANCELLED; |
| } |
| else if (DEP_REPLACE (dep) != NULL) |
| { |
| if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED) |
| { |
| n_replace++; |
| modify_dep = dep; |
| } |
| DEP_STATUS (dep) &= ~DEP_CANCELLED; |
| } |
| } |
| |
| if (n_replace > 0 && n_control == 0 && n_spec == 0) |
| { |
| if (!dbg_cnt (sched_breakdep)) |
| return HARD_DEP; |
| FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep) |
| { |
| struct dep_replacement *desc = DEP_REPLACE (dep); |
| if (desc != NULL) |
| { |
| if (desc->insn == next && !for_backtrack) |
| { |
| gcc_assert (n_replace == 1); |
| apply_replacement (dep, true); |
| } |
| DEP_STATUS (dep) |= DEP_CANCELLED; |
| } |
| } |
| return 0; |
| } |
| |
| else if (n_control == 1 && n_replace == 0 && n_spec == 0) |
| { |
| rtx_insn *pro, *other; |
| rtx new_pat; |
| rtx cond = NULL_RTX; |
| bool success; |
| rtx_insn *prev = NULL; |
| int i; |
| unsigned regno; |
| |
| if ((current_sched_info->flags & DO_PREDICATION) == 0 |
| || (ORIG_PAT (next) != NULL_RTX |
| && PREDICATED_PAT (next) == NULL_RTX)) |
| return HARD_DEP; |
| |
| pro = DEP_PRO (modify_dep); |
| other = real_insn_for_shadow (pro); |
| if (other != NULL_RTX) |
| pro = other; |
| |
| cond = sched_get_reverse_condition_uncached (pro); |
| regno = REGNO (XEXP (cond, 0)); |
| |
| /* Find the last scheduled insn that modifies the condition register. |
| We can stop looking once we find the insn we depend on through the |
| REG_DEP_CONTROL; if the condition register isn't modified after it, |
| we know that it still has the right value. */ |
| if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED) |
| FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev) |
| { |
| HARD_REG_SET t; |
| |
| find_all_hard_reg_sets (prev, &t, true); |
| if (TEST_HARD_REG_BIT (t, regno)) |
| return HARD_DEP; |
| if (prev == pro) |
| break; |
| } |
| if (ORIG_PAT (next) == NULL_RTX) |
| { |
| ORIG_PAT (next) = PATTERN (next); |
| |
| new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next)); |
| success = haifa_change_pattern (next, new_pat); |
| if (!success) |
| return HARD_DEP; |
| PREDICATED_PAT (next) = new_pat; |
| } |
| else if (PATTERN (next) != PREDICATED_PAT (next)) |
| { |
| bool success = haifa_change_pattern (next, |
| PREDICATED_PAT (next)); |
| gcc_assert (success); |
| } |
| DEP_STATUS (modify_dep) |= DEP_CANCELLED; |
| return DEP_CONTROL; |
| } |
| |
| if (PREDICATED_PAT (next) != NULL_RTX) |
| { |
| int tick = INSN_TICK (next); |
| bool success = haifa_change_pattern (next, |
| ORIG_PAT (next)); |
| INSN_TICK (next) = tick; |
| gcc_assert (success); |
| } |
| |
| /* We can't handle the case where there are both speculative and control |
| dependencies, so we return HARD_DEP in such a case. Also fail if |
| we have speculative dependencies with not enough points, or more than |
| one control dependency. */ |
| if ((n_spec > 0 && (n_control > 0 || n_replace > 0)) |
| || (n_spec > 0 |
| /* Too few points? */ |
| && ds_weak (new_ds) < spec_info->data_weakness_cutoff) |
| || n_control > 0 |
| || n_replace > 0) |
| return HARD_DEP; |
| |
| return new_ds; |
| } |
| |
| /* Pointer to the last instruction scheduled. */ |
| static rtx_insn *last_scheduled_insn; |
| |
| /* Pointer to the last nondebug instruction scheduled within the |
| block, or the prev_head of the scheduling block. Used by |
| rank_for_schedule, so that insns independent of the last scheduled |
| insn will be preferred over dependent instructions. */ |
| static rtx_insn *last_nondebug_scheduled_insn; |
| |
| /* Pointer that iterates through the list of unscheduled insns if we |
| have a dbg_cnt enabled. It always points at an insn prior to the |
| first unscheduled one. */ |
| static rtx_insn *nonscheduled_insns_begin; |
| |
| /* Compute cost of executing INSN. |
| This is the number of cycles between instruction issue and |
| instruction results. */ |
| int |
| insn_sched_cost (rtx_insn *insn) |
| { |
| int cost; |
| |
| if (sched_fusion) |
| return 0; |
| |
| if (sel_sched_p ()) |
| { |
| if (recog_memoized (insn) < 0) |
| return 0; |
| |
| cost = insn_default_latency (insn); |
| if (cost < 0) |
| cost = 0; |
| |
| return cost; |
| } |
| |
| cost = INSN_COST (insn); |
| |
| if (cost < 0) |
| { |
| /* A USE insn, or something else we don't need to |
| understand. We can't pass these directly to |
| result_ready_cost or insn_default_latency because it will |
| trigger a fatal error for unrecognizable insns. */ |
| if (recog_memoized (insn) < 0) |
| { |
| INSN_COST (insn) = 0; |
| return 0; |
| } |
| else |
| { |
| cost = insn_default_latency (insn); |
| if (cost < 0) |
| cost = 0; |
| |
| INSN_COST (insn) = cost; |
| } |
| } |
| |
| return cost; |
| } |
| |
| /* Compute cost of dependence LINK. |
| This is the number of cycles between instruction issue and |
| instruction results. |
| ??? We also use this function to call recog_memoized on all insns. */ |
| int |
| dep_cost_1 (dep_t link, dw_t dw) |
| { |
| rtx_insn *insn = DEP_PRO (link); |
| rtx_insn *used = DEP_CON (link); |
| int cost; |
| |
| if (DEP_COST (link) != UNKNOWN_DEP_COST) |
| return DEP_COST (link); |
| |
| if (delay_htab) |
| { |
| struct delay_pair *delay_entry; |
| delay_entry |
| = delay_htab_i2->find_with_hash (used, htab_hash_pointer (used)); |
| if (delay_entry) |
| { |
| if (delay_entry->i1 == insn) |
| { |
| DEP_COST (link) = pair_delay (delay_entry); |
| return DEP_COST (link); |
| } |
| } |
| } |
| |
| /* A USE insn should never require the value used to be computed. |
| This allows the computation of a function's result and parameter |
| values to overlap the return and call. We don't care about the |
| dependence cost when only decreasing register pressure. */ |
| if (recog_memoized (used) < 0) |
| { |
| cost = 0; |
| recog_memoized (insn); |
| } |
| else |
| { |
| enum reg_note dep_type = DEP_TYPE (link); |
| |
| cost = insn_sched_cost (insn); |
| |
| if (INSN_CODE (insn) >= 0) |
| { |
| if (dep_type == REG_DEP_ANTI) |
| cost = 0; |
| else if (dep_type == REG_DEP_OUTPUT) |
| { |
| cost = (insn_default_latency (insn) |
| - insn_default_latency (used)); |
| if (cost <= 0) |
| cost = 1; |
| } |
| else if (bypass_p (insn)) |
| cost = insn_latency (insn, used); |
| } |
| |
| |
| if (targetm.sched.adjust_cost) |
| cost = targetm.sched.adjust_cost (used, (int) dep_type, insn, cost, |
| dw); |
| |
| if (cost < 0) |
| cost = 0; |
| } |
| |
| DEP_COST (link) = cost; |
| return cost; |
| } |
| |
| /* Compute cost of dependence LINK. |
| This is the number of cycles between instruction issue and |
| instruction results. */ |
| int |
| dep_cost (dep_t link) |
| { |
| return dep_cost_1 (link, 0); |
| } |
| |
| /* Use this sel-sched.c friendly function in reorder2 instead of increasing |
| INSN_PRIORITY explicitly. */ |
| void |
| increase_insn_priority (rtx_insn *insn, int amount) |
| { |
| if (!sel_sched_p ()) |
| { |
| /* We're dealing with haifa-sched.c INSN_PRIORITY. */ |
| if (INSN_PRIORITY_KNOWN (insn)) |
| INSN_PRIORITY (insn) += amount; |
| } |
| else |
| { |
| /* In sel-sched.c INSN_PRIORITY is not kept up to date. |
| Use EXPR_PRIORITY instead. */ |
| sel_add_to_insn_priority (insn, amount); |
| } |
| } |
| |
| /* Return 'true' if DEP should be included in priority calculations. */ |
| static bool |
| contributes_to_priority_p (dep_t dep) |
| { |
| if (DEBUG_INSN_P (DEP_CON (dep)) |
| || DEBUG_INSN_P (DEP_PRO (dep))) |
| return false; |
| |
| /* Critical path is meaningful in block boundaries only. */ |
| if (!current_sched_info->contributes_to_priority (DEP_CON (dep), |
| DEP_PRO (dep))) |
| return false; |
| |
| if (DEP_REPLACE (dep) != NULL) |
| return false; |
| |
| /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set, |
| then speculative instructions will be less likely to be |
| scheduled. That is because the priority of |
| their producers will increase, and thus the |
| producers will be more likely to be scheduled, thus |
| resolving the dependence. */ |
| if (sched_deps_info->generate_spec_deps |
| && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH) |
| && (DEP_STATUS (dep) & SPECULATIVE)) |
| return false; |
| |
| return true; |
| } |
| |
| /* Compute the number of nondebug deps in list LIST for INSN. */ |
| |
| static int |
| dep_list_size (rtx_insn *insn, sd_list_types_def list) |
| { |
| sd_iterator_def sd_it; |
| dep_t dep; |
| int dbgcount = 0, nodbgcount = 0; |
| |
| if (!MAY_HAVE_DEBUG_INSNS) |
| return sd_lists_size (insn, list); |
| |
| FOR_EACH_DEP (insn, list, sd_it, dep) |
| { |
| if (DEBUG_INSN_P (DEP_CON (dep))) |
| dbgcount++; |
| else if (!DEBUG_INSN_P (DEP_PRO (dep))) |
| nodbgcount++; |
| } |
| |
| gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list)); |
| |
| return nodbgcount; |
| } |
| |
| bool sched_fusion; |
| |
| /* Compute the priority number for INSN. */ |
| static int |
| priority (rtx_insn *insn, bool force_recompute) |
| { |
| if (! INSN_P (insn)) |
| return 0; |
| |
| /* We should not be interested in priority of an already scheduled insn. */ |
| gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED); |
| |
| if (force_recompute || !INSN_PRIORITY_KNOWN (insn)) |
| { |
| int this_priority = -1; |
| |
| if (sched_fusion) |
| { |
| int this_fusion_priority; |
| |
| targetm.sched.fusion_priority (insn, FUSION_MAX_PRIORITY, |
| &this_fusion_priority, &this_priority); |
| INSN_FUSION_PRIORITY (insn) = this_fusion_priority; |
| } |
| else if (dep_list_size (insn, SD_LIST_FORW) == 0) |
| /* ??? We should set INSN_PRIORITY to insn_sched_cost when an insn |
| has some forward deps but all of them are ignored by the |
| contributes_to_priority hook. At the moment we set the priority of |
| such an insn to 0. */ |
| this_priority = insn_sched_cost (insn); |
| else |
| { |
| rtx_insn *prev_first, *twin; |
| basic_block rec; |
| |
| /* For recovery check instructions we calculate priority slightly |
| differently than for normal instructions. Instead of walking |
| through INSN_FORW_DEPS (check) list, we walk through |
| INSN_FORW_DEPS list of each instruction in the corresponding |
| recovery block. */ |
| |
| /* Selective scheduling does not define RECOVERY_BLOCK macro. */ |
| rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn); |
| if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
| { |
| prev_first = PREV_INSN (insn); |
| twin = insn; |
| } |
| else |
| { |
| prev_first = NEXT_INSN (BB_HEAD (rec)); |
| twin = PREV_INSN (BB_END (rec)); |
| } |
| |
| do |
| { |
| sd_iterator_def sd_it; |
| dep_t dep; |
| |
| FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep) |
| { |
| rtx_insn *next; |
| int next_priority; |
| |
| next = DEP_CON (dep); |
| |
| if (BLOCK_FOR_INSN (next) != rec) |
| { |
| int cost; |
| |
| if (!contributes_to_priority_p (dep)) |
| continue; |
| |
| if (twin == insn) |
| cost = dep_cost (dep); |
| else |
| { |
| struct _dep _dep1, *dep1 = &_dep1; |
| |
| init_dep (dep1, insn, next, REG_DEP_ANTI); |
| |
| cost = dep_cost (dep1); |
| } |
| |
| next_priority = cost + priority (next); |
| |
| if (next_priority > this_priority) |
| this_priority = next_priority; |
| } |
| } |
| |
| twin = PREV_INSN (twin); |
| } |
| while (twin != prev_first); |
| } |
| |
| if (this_priority < 0) |
| { |
| gcc_assert (this_priority == -1); |
| |
| this_priority = insn_sched_cost (insn); |
| } |
| |
| INSN_PRIORITY (insn) = this_priority; |
| INSN_PRIORITY_STATUS (insn) = 1; |
| } |
| |
| return INSN_PRIORITY (insn); |
| } |
| |
| /* Macros and functions for keeping the priority queue sorted, and |
| dealing with queuing and dequeuing of instructions. */ |
| |
| /* For each pressure class CL, set DEATH[CL] to the number of registers |
| in that class that die in INSN. */ |
| |
| static void |
| calculate_reg_deaths (rtx_insn *insn, int *death) |
| { |
| int i; |
| struct reg_use_data *use; |
| |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| death[ira_pressure_classes[i]] = 0; |
| for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use) |
| if (dying_use_p (use)) |
| mark_regno_birth_or_death (0, death, use->regno, true); |
| } |
| |
| /* Setup info about the current register pressure impact of scheduling |
| INSN at the current scheduling point. */ |
| static void |
| setup_insn_reg_pressure_info (rtx_insn *insn) |
| { |
| int i, change, before, after, hard_regno; |
| int excess_cost_change; |
| machine_mode mode; |
| enum reg_class cl; |
| struct reg_pressure_data *pressure_info; |
| int *max_reg_pressure; |
| static int death[N_REG_CLASSES]; |
| |
| gcc_checking_assert (!DEBUG_INSN_P (insn)); |
| |
| excess_cost_change = 0; |
| calculate_reg_deaths (insn, death); |
| pressure_info = INSN_REG_PRESSURE (insn); |
| max_reg_pressure = INSN_MAX_REG_PRESSURE (insn); |
| gcc_assert (pressure_info != NULL && max_reg_pressure != NULL); |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| { |
| cl = ira_pressure_classes[i]; |
| gcc_assert (curr_reg_pressure[cl] >= 0); |
| change = (int) pressure_info[i].set_increase - death[cl]; |
| before = MAX (0, max_reg_pressure[i] - sched_class_regs_num[cl]); |
| after = MAX (0, max_reg_pressure[i] + change |
| - sched_class_regs_num[cl]); |
| hard_regno = ira_class_hard_regs[cl][0]; |
| gcc_assert (hard_regno >= 0); |
| mode = reg_raw_mode[hard_regno]; |
| excess_cost_change += ((after - before) |
| * (ira_memory_move_cost[mode][cl][0] |
| + ira_memory_move_cost[mode][cl][1])); |
| } |
| INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change; |
| } |
| |
| /* This is the first page of code related to SCHED_PRESSURE_MODEL. |
| It tries to make the scheduler take register pressure into account |
| without introducing too many unnecessary stalls. It hooks into the |
| main scheduling algorithm at several points: |
| |
| - Before scheduling starts, model_start_schedule constructs a |
| "model schedule" for the current block. This model schedule is |
| chosen solely to keep register pressure down. It does not take the |
| target's pipeline or the original instruction order into account, |
| except as a tie-breaker. Nor does it aim for any particular |
| pressure limit. |
| |
| This model schedule gives us an idea of what pressure can be |
| achieved for the block and gives us an example of a schedule that |
| keeps to that pressure. It also makes the final schedule less |
| dependent on the original instruction order. This is important |
| because the original order can either be "wide" (many values live |
| at once, such as in user-scheduled code) or "narrow" (few values |
| live at once, such as after loop unrolling, where several |
| iterations are executed sequentially). |
| |
| We do not apply this model schedule to the rtx stream. We simply |
| record it in model_schedule. We also compute the maximum pressure, |
| MP, that was seen during this schedule. |
| |
| - Instructions are added to the ready queue even if they require |
| a stall. The length of the stall is instead computed as: |
| |
| MAX (INSN_TICK (INSN) - clock_var, 0) |
| |
| (= insn_delay). This allows rank_for_schedule to choose between |
| introducing a deliberate stall or increasing pressure. |
| |
| - Before sorting the ready queue, model_set_excess_costs assigns |
| a pressure-based cost to each ready instruction in the queue. |
| This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE |
| (ECC for short) and is effectively measured in cycles. |
| |
| - rank_for_schedule ranks instructions based on: |
| |
| ECC (insn) + insn_delay (insn) |
| |
| then as: |
| |
| insn_delay (insn) |
| |
| So, for example, an instruction X1 with an ECC of 1 that can issue |
| now will win over an instruction X0 with an ECC of zero that would |
| introduce a stall of one cycle. However, an instruction X2 with an |
| ECC of 2 that can issue now will lose to both X0 and X1. |
| |
| - When an instruction is scheduled, model_recompute updates the model |
| schedule with the new pressures (some of which might now exceed the |
| original maximum pressure MP). model_update_limit_points then searches |
| for the new point of maximum pressure, if not already known. */ |
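| |
| /* To make the X0/X1/X2 example above concrete (the numbers are |
| illustrative only): |
| |
| insn ECC insn_delay ECC + insn_delay |
| X0 0 1 1 |
| X1 1 0 1 |
| X2 2 0 2 |
| |
| X0 and X1 tie on the first key, so the second key (insn_delay alone) |
| prefers X1, which can issue now. X2 has the largest sum and loses to |
| both. */ |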
| |
| /* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL |
| from surrounding debug information. */ |
| #define MODEL_BAR \ |
| ";;\t\t+------------------------------------------------------\n" |
| |
| /* Information about the pressure on a particular register class at a |
| particular point of the model schedule. */ |
| struct model_pressure_data { |
| /* The pressure at this point of the model schedule, or -1 if the |
| point is associated with an instruction that has already been |
| scheduled. */ |
| int ref_pressure; |
| |
| /* The maximum pressure during or after this point of the model schedule. */ |
| int max_pressure; |
| }; |
| |
| /* Per-instruction information that is used while building the model |
| schedule. Here, "schedule" refers to the model schedule rather |
| than the main schedule. */ |
| struct model_insn_info { |
| /* The instruction itself. */ |
| rtx_insn *insn; |
| |
| /* If this instruction is in model_worklist, these fields link to the |
| previous (higher-priority) and next (lower-priority) instructions |
| in the list. */ |
| struct model_insn_info *prev; |
| struct model_insn_info *next; |
| |
| /* While constructing the schedule, QUEUE_INDEX describes whether an |
| instruction has already been added to the schedule (QUEUE_SCHEDULED), |
| is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE). |
| old_queue records the value that QUEUE_INDEX had before scheduling |
| started, so that we can restore it once the schedule is complete. */ |
| int old_queue; |
| |
| /* The relative importance of an unscheduled instruction. Higher |
| values indicate greater importance. */ |
| unsigned int model_priority; |
| |
| /* The length of the longest path of satisfied true dependencies |
| that leads to this instruction. */ |
| unsigned int depth; |
| |
| /* The length of the longest path of dependencies of any kind |
| that leads from this instruction. */ |
| unsigned int alap; |
| |
| /* The number of predecessor nodes that must still be scheduled. */ |
| int unscheduled_preds; |
| }; |
| |
| /* Information about the pressure limit for a particular register class. |
| This structure is used when applying a model schedule to the main |
| schedule. */ |
| struct model_pressure_limit { |
| /* The maximum register pressure seen in the original model schedule. */ |
| int orig_pressure; |
| |
| /* The maximum register pressure seen in the current model schedule |
| (which excludes instructions that have already been scheduled). */ |
| int pressure; |
| |
| /* The point of the current model schedule at which PRESSURE is first |
| reached. It is set to -1 if the value needs to be recomputed. */ |
| int point; |
| }; |
| |
| /* Describes a particular way of measuring register pressure. */ |
| struct model_pressure_group { |
| /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI]. */ |
| struct model_pressure_limit limits[N_REG_CLASSES]; |
| |
| /* Index (POINT * ira_pressure_classes_num + PCI) describes the pressure |
| on register class ira_pressure_classes[PCI] at point POINT of the |
| current model schedule. A POINT of model_num_insns describes the |
| pressure at the end of the schedule. */ |
| struct model_pressure_data *model; |
| }; |
| |
| /* Index POINT gives the instruction at point POINT of the model schedule. |
| This array doesn't change during main scheduling. */ |
| static vec<rtx_insn *> model_schedule; |
| |
| /* The list of instructions in the model worklist, sorted in order of |
| decreasing priority. */ |
| static struct model_insn_info *model_worklist; |
| |
| /* Index I describes the instruction with INSN_LUID I. */ |
| static struct model_insn_info *model_insns; |
| |
| /* The number of instructions in the model schedule. */ |
| static int model_num_insns; |
| |
| /* The index of the first instruction in model_schedule that hasn't yet been |
| added to the main schedule, or model_num_insns if all of them have. */ |
| static int model_curr_point; |
| |
| /* Describes the pressure before each instruction in the model schedule. */ |
| static struct model_pressure_group model_before_pressure; |
| |
| /* The first unused model_priority value (as used in model_insn_info). */ |
| static unsigned int model_next_priority; |
| |
| |
| /* The model_pressure_data for ira_pressure_classes[PCI] in GROUP |
| at point POINT of the model schedule. */ |
| #define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \ |
| (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)]) |
| |
| /* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or |
| after point POINT of the model schedule. */ |
| #define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \ |
| (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure) |
| |
| /* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT |
| of the model schedule. */ |
| #define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \ |
| (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure) |
| |
| /* Information about INSN that is used when creating the model schedule. */ |
| #define MODEL_INSN_INFO(INSN) \ |
| (&model_insns[INSN_LUID (INSN)]) |
| |
| /* The instruction at point POINT of the model schedule. */ |
| #define MODEL_INSN(POINT) \ |
| (model_schedule[POINT]) |
| |
| |
| /* Return INSN's index in the model schedule, or model_num_insns if it |
| doesn't belong to that schedule. */ |
| |
| static int |
| model_index (rtx_insn *insn) |
| { |
| if (INSN_MODEL_INDEX (insn) == 0) |
| return model_num_insns; |
| return INSN_MODEL_INDEX (insn) - 1; |
| } |
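| |
| /* The encoding implied here is that an INSN_MODEL_INDEX of zero means |
| "not in the model schedule" and any other value is the model point |
| plus one; for example, the instruction at point 0 of the model |
| schedule would have INSN_MODEL_INDEX == 1. */ |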
| |
| /* Make sure that GROUP->limits is up-to-date for the current point |
| of the model schedule. */ |
| |
| static void |
| model_update_limit_points_in_group (struct model_pressure_group *group) |
| { |
| int pci, max_pressure, point; |
| |
| for (pci = 0; pci < ira_pressure_classes_num; pci++) |
| { |
| /* We may have passed the final point at which the pressure in |
| group->limits[pci].pressure was reached. Update the limit if so. */ |
| max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci); |
| group->limits[pci].pressure = max_pressure; |
| |
| /* Find the point at which MAX_PRESSURE is first reached. We need |
| to search in three cases: |
| |
| - We've already moved past the previous pressure point. |
| In this case we search forward from model_curr_point. |
| |
| - We scheduled the previous point of maximum pressure ahead of |
| its position in the model schedule, but doing so didn't bring |
| the pressure point earlier. In this case we search forward |
| from that previous pressure point. |
| |
| - Scheduling an instruction early caused the maximum pressure |
| to decrease. In this case we will have set the pressure |
| point to -1, and we search forward from model_curr_point. */ |
| point = MAX (group->limits[pci].point, model_curr_point); |
| while (point < model_num_insns |
| && MODEL_REF_PRESSURE (group, point, pci) < max_pressure) |
| point++; |
| group->limits[pci].point = point; |
| |
| gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure); |
| gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure); |
| } |
| } |
| |
| /* Make sure that all register-pressure limits are up-to-date for the |
| current position in the model schedule. */ |
| |
| static void |
| model_update_limit_points (void) |
| { |
| model_update_limit_points_in_group (&model_before_pressure); |
| } |
| |
| /* Return the model_index of the last unscheduled use in chain USE |
| outside of USE's instruction. Return -1 if there are no other uses, |
| or model_num_insns if the register is live at the end of the block. */ |
| |
| static int |
| model_last_use_except (struct reg_use_data *use) |
| { |
| struct reg_use_data *next; |
| int last, index; |
| |
| last = -1; |
| for (next = use->next_regno_use; next != use; next = next->next_regno_use) |
| if (NONDEBUG_INSN_P (next->insn) |
| && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED) |
| { |
| index = model_index (next->insn); |
| if (index == model_num_insns) |
| return model_num_insns; |
| if (last < index) |
| last = index; |
| } |
| return last; |
| } |
| |
| /* An instruction with model_index POINT has just been scheduled, and it |
| adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1. |
| Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and |
| MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly. */ |
| |
| static void |
| model_start_update_pressure (struct model_pressure_group *group, |
| int point, int pci, int delta) |
| { |
| int next_max_pressure; |
| |
| if (point == model_num_insns) |
| { |
| /* The instruction wasn't part of the model schedule; it was moved |
| from a different block. Update the pressure for the end of |
| the model schedule. */ |
| MODEL_REF_PRESSURE (group, point, pci) += delta; |
| MODEL_MAX_PRESSURE (group, point, pci) += delta; |
| } |
| else |
| { |
| /* Record that this instruction has been scheduled. Nothing now |
| changes between POINT and POINT + 1, so get the maximum pressure |
| from the latter. If the maximum pressure decreases, the new |
| pressure point may be before POINT. */ |
| MODEL_REF_PRESSURE (group, point, pci) = -1; |
| next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci); |
| if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure) |
| { |
| MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure; |
| if (group->limits[pci].point == point) |
| group->limits[pci].point = -1; |
| } |
| } |
| } |
| |
| /* Record that scheduling a later instruction has changed the pressure |
| at point POINT of the model schedule by DELTA (which might be 0). |
| Update GROUP accordingly. Return nonzero if these changes might |
| trigger changes to previous points as well. */ |
| |
| static int |
| model_update_pressure (struct model_pressure_group *group, |
| int point, int pci, int delta) |
| { |
| int ref_pressure, max_pressure, next_max_pressure; |
| |
| /* If POINT hasn't yet been scheduled, update its pressure. */ |
| ref_pressure = MODEL_REF_PRESSURE (group, point, pci); |
| if (ref_pressure >= 0 && delta != 0) |
| { |
| ref_pressure += delta; |
| MODEL_REF_PRESSURE (group, point, pci) = ref_pressure; |
| |
| /* Check whether the maximum pressure in the overall schedule |
| has increased. (This means that the MODEL_MAX_PRESSURE of |
| every point <= POINT will need to increase too; see below.) */ |
| if (group->limits[pci].pressure < ref_pressure) |
| group->limits[pci].pressure = ref_pressure; |
| |
| /* If we are at maximum pressure, and the maximum pressure |
| point was previously unknown or later than POINT, |
| bring it forward. */ |
| if (group->limits[pci].pressure == ref_pressure |
| && !IN_RANGE (group->limits[pci].point, 0, point)) |
| group->limits[pci].point = point; |
| |
| /* If POINT used to be the point of maximum pressure, but isn't |
| any longer, we need to recalculate it using a forward walk. */ |
| if (group->limits[pci].pressure > ref_pressure |
| && group->limits[pci].point == point) |
| group->limits[pci].point = -1; |
| } |
| |
| /* Update the maximum pressure at POINT. Changes here might also |
| affect the maximum pressure at POINT - 1. */ |
| next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci); |
| max_pressure = MAX (ref_pressure, next_max_pressure); |
| if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure) |
| { |
| MODEL_MAX_PRESSURE (group, point, pci) = max_pressure; |
| return 1; |
| } |
| return 0; |
| } |
| |
| /* INSN has just been scheduled. Update the model schedule accordingly. */ |
| |
| static void |
| model_recompute (rtx_insn *insn) |
| { |
| struct { |
| int last_use; |
| int regno; |
| } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS]; |
| struct reg_use_data *use; |
| struct reg_pressure_data *reg_pressure; |
| int delta[N_REG_CLASSES]; |
| int pci, point, mix, new_last, cl, ref_pressure, queue; |
| unsigned int i, num_uses, num_pending_births; |
| bool print_p; |
| |
| /* The destinations of INSN were previously live from POINT onwards, but are |
| now live from model_curr_point onwards. Set up DELTA accordingly. */ |
| point = model_index (insn); |
| reg_pressure = INSN_REG_PRESSURE (insn); |
| for (pci = 0; pci < ira_pressure_classes_num; pci++) |
| { |
| cl = ira_pressure_classes[pci]; |
| delta[cl] = reg_pressure[pci].set_increase; |
| } |
| |
| /* Record which registers previously died at POINT, but which now die |
| before POINT. Adjust DELTA so that it represents the effect of |
| this change after POINT - 1. Set NUM_PENDING_BIRTHS to the number of |
| registers that will be born in the range [model_curr_point, POINT). */ |
| num_uses = 0; |
| num_pending_births = 0; |
| bitmap_clear (tmp_bitmap); |
| for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use) |
| { |
| new_last = model_last_use_except (use); |
| if (new_last < point && bitmap_set_bit (tmp_bitmap, use->regno)) |
| { |
| gcc_assert (num_uses < ARRAY_SIZE (uses)); |
| uses[num_uses].last_use = new_last; |
| uses[num_uses].regno = use->regno; |
| /* This register is no longer live after POINT - 1. */ |
| mark_regno_birth_or_death (NULL, delta, use->regno, false); |
| num_uses++; |
| if (new_last >= 0) |
| num_pending_births++; |
| } |
| } |
| |
| /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT. |
| Also set each group pressure limit for POINT. */ |
| for (pci = 0; pci < ira_pressure_classes_num; pci++) |
| { |
| cl = ira_pressure_classes[pci]; |
| model_start_update_pressure (&model_before_pressure, |
| point, pci, delta[cl]); |
| } |
| |
| /* Walk the model schedule backwards, starting immediately before POINT. */ |
| print_p = false; |
| if (point != model_curr_point) |
| do |
| { |
| point--; |
| insn = MODEL_INSN (point); |
| queue = QUEUE_INDEX (insn); |
| |
| if (queue != QUEUE_SCHEDULED) |
| { |
| /* DELTA describes the effect of the move on the register pressure |
| after POINT. Make it describe the effect on the pressure |
| before POINT. */ |
| i = 0; |
| while (i < num_uses) |
| { |
| if (uses[i].last_use == point) |
| { |
| /* This register is now live again. */ |
| mark_regno_birth_or_death (NULL, delta, |
| uses[i].regno, true); |
| |
| /* Remove this use from the array. */ |
| uses[i] = uses[num_uses - 1]; |
| num_uses--; |
| num_pending_births--; |
| } |
| else |
| i++; |
| } |
| |
| if (sched_verbose >= 5) |
| { |
| if (!print_p) |
| { |
| fprintf (sched_dump, MODEL_BAR); |
| fprintf (sched_dump, ";;\t\t| New pressure for model" |
| " schedule\n"); |
| fprintf (sched_dump, MODEL_BAR); |
| print_p = true; |
| } |
| |
| fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ", |
| point, INSN_UID (insn), |
| str_pattern_slim (PATTERN (insn))); |
| for (pci = 0; pci < ira_pressure_classes_num; pci++) |
| { |
| cl = ira_pressure_classes[pci]; |
| ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure, |
| point, pci); |
| fprintf (sched_dump, " %s:[%d->%d]", |
| reg_class_names[ira_pressure_classes[pci]], |
| ref_pressure, ref_pressure + delta[cl]); |
| } |
| fprintf (sched_dump, "\n"); |
| } |
| } |
| |
| /* Adjust the pressure at POINT. Set MIX to nonzero if POINT - 1 |
| might have changed as well. */ |
| mix = num_pending_births; |
| for (pci = 0; pci < ira_pressure_classes_num; pci++) |
| { |
| cl = ira_pressure_classes[pci]; |
| mix |= delta[cl]; |
| mix |= model_update_pressure (&model_before_pressure, |
| point, pci, delta[cl]); |
| } |
| } |
| while (mix && point > model_curr_point); |
| |
| if (print_p) |
| fprintf (sched_dump, MODEL_BAR); |
| } |
| |
| /* After DEP, which was cancelled, has been resolved for insn NEXT, |
| check whether the insn's pattern needs restoring. */ |
| static bool |
| must_restore_pattern_p (rtx_insn *next, dep_t dep) |
| { |
| if (QUEUE_INDEX (next) == QUEUE_SCHEDULED) |
| return false; |
| |
| if (DEP_TYPE (dep) == REG_DEP_CONTROL) |
| { |
| gcc_assert (ORIG_PAT (next) != NULL_RTX); |
| gcc_assert (next == DEP_CON (dep)); |
| } |
| else |
| { |
| struct dep_replacement *desc = DEP_REPLACE (dep); |
| if (desc->insn != next) |
| { |
| gcc_assert (*desc->loc == desc->orig); |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| /* model_spill_cost (CL, P, P') returns the cost of increasing the |
| pressure on CL from P to P'. We use this to calculate a "base ECC", |
| baseECC (CL, X), for each pressure class CL and each instruction X. |
| Supposing X changes the pressure on CL from P to P', and that the |
| maximum pressure on CL in the current model schedule is MP', then: |
| |
| * if X occurs before or at the next point of maximum pressure in |
| the model schedule and P' > MP', then: |
| |
| baseECC (CL, X) = model_spill_cost (CL, MP, P') |
| |
| The idea is that the pressure after scheduling a fixed set of |
| instructions -- in this case, the set up to and including the |
| next maximum pressure point -- is going to be the same regardless |
| of the order; we simply want to keep the intermediate pressure |
| under control. Thus X has a cost of zero unless scheduling it |
| now would exceed MP'. |
| |
| If all increases in the set are by the same amount, no zero-cost |
| instruction will ever cause the pressure to exceed MP'. However, |
| if X is instead moved past an instruction X' with pressure in the |
| range (MP' - (P' - P), MP'), the pressure at X' will increase |
| beyond MP'. Since baseECC is very much a heuristic anyway, |
| it doesn't seem worth the overhead of tracking cases like these. |
| |
| The cost of exceeding MP' is always based on the original maximum |
| pressure MP. This is so that going 2 registers over the original |
| limit has the same cost regardless of whether it comes from two |
| separate +1 deltas or from a single +2 delta. |
| |
| * if X occurs after the next point of maximum pressure in the model |
| schedule and P' > P, then: |
| |
| baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P)) |
| |
| That is, if we move X forward across a point of maximum pressure, |
| and if X increases the pressure by P' - P, then we conservatively |
| assume that scheduling X next would increase the maximum pressure |
| by P' - P. Again, the cost of doing this is based on the original |
| maximum pressure MP, for the same reason as above. |
| |
| * if P' < P, P > MP, and X occurs at or after the next point of |
| maximum pressure, then: |
| |
| baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P) |
| |
| That is, if we have already exceeded the original maximum pressure MP, |
| and if X might reduce the maximum pressure again -- or at least push |
| it further back, and thus allow more scheduling freedom -- it is given |
| a negative cost to reflect the improvement. |
| |
| * otherwise, |
| |
| baseECC (CL, X) = 0 |
| |
| In this case, X is not expected to affect the maximum pressure MP', |
| so it has zero cost. |
| |
| We then create a combined value baseECC (X) that is the sum of |
| baseECC (CL, X) for each pressure class CL. |
| |
| baseECC (X) could itself be used as the ECC value described above. |
| However, this is often too conservative, in the sense that it |
| tends to make high-priority instructions that increase pressure |
| wait too long in cases where introducing a spill would be better. |
| For this reason the final ECC is a priority-adjusted form of |
| baseECC (X). Specifically, we calculate: |
| |
| P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X) |
| baseP = MAX { P (X) | baseECC (X) <= 0 } |
| |
| Then: |
| |
| ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0) |
| |
| Thus an instruction's effect on pressure is ignored if it has a high |
| enough priority relative to the ones that don't increase pressure. |
| Negative values of baseECC (X) do not increase the priority of X |
| itself, but they do make it harder for other instructions to |
| increase the pressure further. |
| |
| This pressure cost is deliberately timid. The intention has been |
| to choose a heuristic that rarely interferes with the normal list |
| scheduler in cases where that scheduler would produce good code. |
| We simply want to curb some of its worst excesses. */ |
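| |
| /* A worked example of the priority adjustment (all numbers are |
| illustrative). Suppose the ready queue contains: |
| |
| insn INSN_PRIORITY insn_delay baseECC P (X) |
| A 12 0 0 12 |
| B 6 0 0 6 |
| Y 14 0 3 11 |
| Z 10 0 3 7 |
| |
| baseP is the maximum P (X) over the insns with baseECC (X) <= 0, |
| here baseP = 12. Then ECC (Y) = MAX (MIN (12 - 11, 3), 0) = 1 and |
| ECC (Z) = MAX (MIN (12 - 7, 3), 0) = 3: Y's pressure cost is largely |
| forgiven because its priority is close to baseP, while Z pays the |
| full baseECC. */ |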
| |
| /* Return the cost of increasing the pressure in class CL from FROM to TO. |
| |
| Here we use the very simplistic cost model that every register above |
| sched_class_regs_num[CL] has a spill cost of 1. We could use other |
| measures instead, such as one based on MEMORY_MOVE_COST. However: |
| |
| (1) In order for an instruction to be scheduled, the higher cost |
| would need to be justified in a single saving of that many stalls. |
| This is overly pessimistic, because the benefit of spilling is |
| often to avoid a sequence of several short stalls rather than |
| a single long one. |
| |
| (2) The cost is still arbitrary. Because we are not allocating |
| registers during scheduling, we have no way of knowing for |
| sure how many memory accesses will be required by each spill, |
| where the spills will be placed within the block, or even |
| which block(s) will contain the spills. |
| |
| So a higher cost than 1 is often too conservative in practice, |
| forcing blocks to contain unnecessary stalls instead of spill code. |
| The simple cost below seems to be the best compromise. It reduces |
| the interference with the normal list scheduler, which helps make |
| it more suitable for a default-on option. */ |
| |
| static int |
| model_spill_cost (int cl, int from, int to) |
| { |
| from = MAX (from, sched_class_regs_num[cl]); |
| return MAX (to, from) - from; |
| } |
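| |
| /* For example, with sched_class_regs_num[CL] == 4 (an arbitrary limit |
| chosen for illustration): |
| |
| model_spill_cost (CL, 3, 6) == 2 (only the 5th and 6th registers |
| exceed the class limit) |
| model_spill_cost (CL, 5, 6) == 1 (pressure was already 1 over) |
| model_spill_cost (CL, 6, 5) == 0 (decreases never have a cost) */ |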
| |
| /* Return baseECC (ira_pressure_classes[PCI], POINT), given that |
| P = curr_reg_pressure[ira_pressure_classes[PCI]] and that |
| P' = P + DELTA. */ |
| |
| static int |
| model_excess_group_cost (struct model_pressure_group *group, |
| int point, int pci, int delta) |
| { |
| int pressure, cl; |
| |
| cl = ira_pressure_classes[pci]; |
| if (delta < 0 && point >= group->limits[pci].point) |
| { |
| pressure = MAX (group->limits[pci].orig_pressure, |
| curr_reg_pressure[cl] + delta); |
| return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]); |
| } |
| |
| if (delta > 0) |
| { |
| if (point > group->limits[pci].point) |
| pressure = group->limits[pci].pressure + delta; |
| else |
| pressure = curr_reg_pressure[cl] + delta; |
| |
| if (pressure > group->limits[pci].pressure) |
| return model_spill_cost (cl, group->limits[pci].orig_pressure, |
| pressure); |
| } |
| |
| return 0; |
| } |
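| |
| /* For illustration, suppose sched_class_regs_num[CL] == 4, |
| group->limits[PCI] is { orig_pressure = 6, pressure = 6, point = 10 } |
| and curr_reg_pressure[CL] == 5 (all numbers made up): |
| |
| - POINT = 12, DELTA = +2: past the pressure point, so the projected |
| pressure is 6 + 2 = 8 and the cost is model_spill_cost (CL, 6, 8) = 2. |
| |
| - POINT = 8, DELTA = +2: before the pressure point, so the projected |
| pressure is 5 + 2 = 7 and the cost is model_spill_cost (CL, 6, 7) = 1. |
| |
| - POINT = 12, DELTA = -1, with curr_reg_pressure[CL] == 7 instead: the |
| insn reduces pressure at or after the pressure point, so the cost is |
| -model_spill_cost (CL, MAX (6, 6), 7) = -1. */ |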
| |
| /* Return baseECC (MODEL_INSN (INSN)). Dump the costs to sched_dump |
| if PRINT_P. */ |
| |
| static int |
| model_excess_cost (rtx_insn *insn, bool print_p) |
| { |
| int point, pci, cl, cost, this_cost, delta; |
| struct reg_pressure_data *insn_reg_pressure; |
| int insn_death[N_REG_CLASSES]; |
| |
| calculate_reg_deaths (insn, insn_death); |
| point = model_index (insn); |
| insn_reg_pressure = INSN_REG_PRESSURE (insn); |
| cost = 0; |
| |
| if (print_p) |
| fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point, |
| INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn)); |
| |
| /* Sum up the individual costs for each register class. */ |
| for (pci = 0; pci < ira_pressure_classes_num; pci++) |
| { |
| cl = ira_pressure_classes[pci]; |
| delta = insn_reg_pressure[pci].set_increase - insn_death[cl]; |
| this_cost = model_excess_group_cost (&model_before_pressure, |
| point, pci, delta); |
| cost += this_cost; |
| if (print_p) |
| fprintf (sched_dump, " %s:[%d base cost %d]", |
| reg_class_names[cl], delta, this_cost); |
| } |
| |
| if (print_p) |
| fprintf (sched_dump, "\n"); |
| |
| return cost; |
| } |
| |
| /* Dump the next points of maximum pressure for GROUP. */ |
| |
| static void |
| model_dump_pressure_points (struct model_pressure_group *group) |
| { |
| int pci, cl; |
| |
| fprintf (sched_dump, ";;\t\t| pressure points"); |
| for (pci = 0; pci < ira_pressure_classes_num; pci++) |
| { |
| cl = ira_pressure_classes[pci]; |
| fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl], |
| curr_reg_pressure[cl], group->limits[pci].pressure); |
| if (group->limits[pci].point < model_num_insns) |
| fprintf (sched_dump, "%d:%d]", group->limits[pci].point, |
| INSN_UID (MODEL_INSN (group->limits[pci].point))); |
| else |
| fprintf (sched_dump, "end]"); |
| } |
| fprintf (sched_dump, "\n"); |
| } |
| |
| /* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1]. */ |
| |
| static void |
| model_set_excess_costs (rtx_insn **insns, int count) |
| { |
| int i, cost, priority_base, priority; |
| bool print_p; |
| |
| /* Record the baseECC value for each instruction in the model schedule, |
| except that negative costs are converted to zero ones now rather than |
| later. Do not assign a cost to debug instructions, since they must |
| not change code-generation decisions. Experiments suggest we also |
| get better results by not assigning a cost to instructions from |
| a different block. |
| |
| Set PRIORITY_BASE to baseP in the block comment above. This is the |
| maximum priority of the "cheap" instructions, which should always |
| include the next model instruction. */ |
| priority_base = 0; |
| print_p = false; |
| for (i = 0; i < count; i++) |
| if (INSN_MODEL_INDEX (insns[i])) |
| { |
| if (sched_verbose >= 6 && !print_p) |
| { |
| fprintf (sched_dump, MODEL_BAR); |
| fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n"); |
| model_dump_pressure_points (&model_before_pressure); |
| fprintf (sched_dump, MODEL_BAR); |
| print_p = true; |
| } |
| cost = model_excess_cost (insns[i], print_p); |
| if (cost <= 0) |
| { |
| priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost; |
| priority_base = MAX (priority_base, priority); |
| cost = 0; |
| } |
| INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost; |
| } |
| if (print_p) |
| fprintf (sched_dump, MODEL_BAR); |
| |
| /* Use MAX (baseECC, 0) and baseP to calculate ECC for each |
| instruction. */ |
| for (i = 0; i < count; i++) |
| { |
| cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]); |
| priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]); |
| if (cost > 0 && priority > priority_base) |
| { |
| cost += priority_base - priority; |
| INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0); |
| } |
| } |
| } |
| |
| |
| /* Enum of rank_for_schedule heuristic decisions. */ |
| enum rfs_decision { |
| RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2, |
| RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK, |
| RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION, |
| RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX, |
| RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_COST, RFS_N }; |
| |
| /* Corresponding strings for print outs. */ |
| static const char *rfs_str[RFS_N] = { |
| "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2", |
| "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK", |
| "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION", |
| "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX", |
| "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION", "RFS_COST" }; |
| |
| /* Statistical breakdown of rank_for_schedule decisions. */ |
| struct rank_for_schedule_stats_t { unsigned stats[RFS_N]; }; |
| static rank_for_schedule_stats_t rank_for_schedule_stats; |
| |
| /* Return the result of comparing insns TMP and TMP2 and update the |
| rank_for_schedule statistics. */ |
| static int |
| rfs_result (enum rfs_decision decision, int result, rtx tmp, rtx tmp2) |
| { |
| ++rank_for_schedule_stats.stats[decision]; |
| if (result < 0) |
| INSN_LAST_RFS_WIN (tmp) = decision; |
| else if (result > 0) |
| INSN_LAST_RFS_WIN (tmp2) = decision; |
| else |
| gcc_unreachable (); |
| return result; |
| } |
| |
| /* Sorting predicate to move DEBUG_INSNs to the top of ready list, while |
| keeping normal insns in original order. */ |
| |
| static int |
| rank_for_schedule_debug (const void *x, const void *y) |
| { |
| rtx_insn *tmp = *(rtx_insn * const *) y; |
| rtx_insn *tmp2 = *(rtx_insn * const *) x; |
| |
| /* Schedule debug insns as early as possible. */ |
| if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2)) |
| return -1; |
| else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2)) |
| return 1; |
| else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2)) |
| return INSN_LUID (tmp) - INSN_LUID (tmp2); |
| else |
| return INSN_RFS_DEBUG_ORIG_ORDER (tmp2) - INSN_RFS_DEBUG_ORIG_ORDER (tmp); |
| } |
| |
| /* Returns a positive value if x is preferred; returns a negative value if |
| y is preferred. Should never return 0, since that will make the sort |
| unstable. */ |
| |
| static int |
| rank_for_schedule (const void *x, const void *y) |
| { |
| rtx_insn *tmp = *(rtx_insn * const *) y; |
| rtx_insn *tmp2 = *(rtx_insn * const *) x; |
| int tmp_class, tmp2_class; |
| int val, priority_val, info_val, diff; |
| |
| if (live_range_shrinkage_p) |
| { |
| /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse |
| code. */ |
| gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED); |
| if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0 |
| || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0) |
| && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) |
| - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0) |
| return rfs_result (RFS_LIVE_RANGE_SHRINK1, diff, tmp, tmp2); |
| /* Sort by INSN_LUID (original insn order), so that we make the |
| sort stable. This minimizes instruction movement, thus |
| minimizing sched's effect on debugging and cross-jumping. */ |
| return rfs_result (RFS_LIVE_RANGE_SHRINK2, |
| INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2); |
| } |
| |
| /* The insn in a schedule group should be issued first. */ |
| if (flag_sched_group_heuristic && |
| SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2)) |
| return rfs_result (RFS_SCHED_GROUP, SCHED_GROUP_P (tmp2) ? 1 : -1, |
| tmp, tmp2); |
| |
| /* Make sure that priority of TMP and TMP2 are initialized. */ |
| gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2)); |
| |
| if (sched_fusion) |
| { |
| /* An instruction that has the same fusion priority as the last |
| scheduled instruction is the one we want to pick next. Otherwise, |
| we sort the ready list first by fusion priority, then by priority, |
| and finally by INSN_LUID. */ |
| int a = INSN_FUSION_PRIORITY (tmp); |
| int b = INSN_FUSION_PRIORITY (tmp2); |
| int last = -1; |
| |
| if (last_nondebug_scheduled_insn |
| && !NOTE_P (last_nondebug_scheduled_insn) |
| && BLOCK_FOR_INSN (tmp) |
| == BLOCK_FOR_INSN (last_nondebug_scheduled_insn)) |
| last = INSN_FUSION_PRIORITY (last_nondebug_scheduled_insn); |
| |
| if (a != last && b != last) |
| { |
| if (a == b) |
| { |
| a = INSN_PRIORITY (tmp); |
| b = INSN_PRIORITY (tmp2); |
| } |
| if (a != b) |
| return rfs_result (RFS_FUSION, b - a, tmp, tmp2); |
| else |
| return rfs_result (RFS_FUSION, |
| INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2); |
| } |
| else if (a == b) |
| { |
| gcc_assert (last_nondebug_scheduled_insn |
| && !NOTE_P (last_nondebug_scheduled_insn)); |
| last = INSN_PRIORITY (last_nondebug_scheduled_insn); |
| |
| a = abs (INSN_PRIORITY (tmp) - last); |
| b = abs (INSN_PRIORITY (tmp2) - last); |
| if (a != b) |
| return rfs_result (RFS_FUSION, a - b, tmp, tmp2); |
| else |
| return rfs_result (RFS_FUSION, |
| INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2); |
| } |
| else if (a == last) |
| return rfs_result (RFS_FUSION, -1, tmp, tmp2); |
| else |
| return rfs_result (RFS_FUSION, 1, tmp, tmp2); |
| } |
| |
| if (sched_pressure != SCHED_PRESSURE_NONE) |
| { |
| /* Prefer insn whose scheduling results in the smallest register |
| pressure excess. */ |
| if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) |
| + insn_delay (tmp) |
| - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) |
| - insn_delay (tmp2)))) |
| return rfs_result (RFS_PRESSURE_DELAY, diff, tmp, tmp2); |
| } |
| |
| if (sched_pressure != SCHED_PRESSURE_NONE |
| && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var) |
| && INSN_TICK (tmp2) != INSN_TICK (tmp)) |
| { |
| diff = INSN_TICK (tmp) - INSN_TICK (tmp2); |
| return rfs_result (RFS_PRESSURE_TICK, diff, tmp, tmp2); |
| } |
| |
| /* If we are doing backtracking in this schedule, prefer insns that |
| have forward dependencies with negative cost against an insn that |
| was already scheduled. */ |
| if (current_sched_info->flags & DO_BACKTRACKING) |
| { |
| priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp); |
| if (priority_val) |
| return rfs_result (RFS_FEEDS_BACKTRACK_INSN, priority_val, tmp, tmp2); |
| } |
| |
| /* Prefer insn with higher priority. */ |
| priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp); |
| |
| if (flag_sched_critical_path_heuristic && priority_val) |
| return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2); |
| |
| if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) >= 0) |
| { |
| int autopref = autopref_rank_for_schedule (tmp, tmp2); |
| if (autopref != 0) |
| return autopref; |
| } |
| |
| /* Prefer speculative insn with greater dependencies weakness. */ |
| if (flag_sched_spec_insn_heuristic && spec_info) |
| { |
| ds_t ds1, ds2; |
| dw_t dw1, dw2; |
| int dw; |
| |
| ds1 = TODO_SPEC (tmp) & SPECULATIVE; |
| if (ds1) |
| dw1 = ds_weak (ds1); |
| else |
| dw1 = NO_DEP_WEAK; |
| |
| ds2 = TODO_SPEC (tmp2) & SPECULATIVE; |
| if (ds2) |
| dw2 = ds_weak (ds2); |
| else |
| dw2 = NO_DEP_WEAK; |
| |
| dw = dw2 - dw1; |
| if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8)) |
| return rfs_result (RFS_SPECULATION, dw, tmp, tmp2); |
| } |
| |
| info_val = (*current_sched_info->rank) (tmp, tmp2); |
| if (flag_sched_rank_heuristic && info_val) |
| return rfs_result (RFS_SCHED_RANK, info_val, tmp, tmp2); |
| |
| /* Compare insns based on their relation to the last scheduled |
| non-debug insn. */ |
| if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn) |
| { |
| dep_t dep1; |
| dep_t dep2; |
| rtx_insn *last = last_nondebug_scheduled_insn; |
| |
| /* Classify the instructions into three classes: |
| 1) Data dependent on last scheduled insn. |
| 2) Anti/Output dependent on last scheduled insn. |
| 3) Independent of last scheduled insn, or has latency of one. |
| Choose the insn from the highest numbered class if different. */ |
| dep1 = sd_find_dep_between (last, tmp, true); |
| |
| if (dep1 == NULL || dep_cost (dep1) == 1) |
| tmp_class = 3; |
| else if (/* Data dependence. */ |
| DEP_TYPE (dep1) == REG_DEP_TRUE) |
| tmp_class = 1; |
| else |
| tmp_class = 2; |
| |
| dep2 = sd_find_dep_between (last, tmp2, true); |
| |
| if (dep2 == NULL || dep_cost (dep2) == 1) |
| tmp2_class = 3; |
| else if (/* Data dependence. */ |
| DEP_TYPE (dep2) == REG_DEP_TRUE) |
| tmp2_class = 1; |
| else |
| tmp2_class = 2; |
| |
| if ((val = tmp2_class - tmp_class)) |
| return rfs_result (RFS_LAST_INSN, val, tmp, tmp2); |
| } |
| |
| /* Prefer instructions that occur earlier in the model schedule. */ |
| if (sched_pressure == SCHED_PRESSURE_MODEL) |
| { |
| diff = model_index (tmp) - model_index (tmp2); |
| if (diff != 0) |
| return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2); |
| } |
| |
| /* Prefer the insn which has more later insns that depend on it. |
| This gives the scheduler more freedom when scheduling later |
| instructions at the expense of added register pressure. */ |
| |
| val = (dep_list_size (tmp2, SD_LIST_FORW) |
| - dep_list_size (tmp, SD_LIST_FORW)); |
| |
| if (flag_sched_dep_count_heuristic && val != 0) |
| return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2); |
| |
| /* Sort by INSN_COST rather than INSN_LUID. This means that instructions |
| which take longer to execute are prioritised, which leads to more |
| dual-issue opportunities on in-order cores that have this feature. */ |
| |
| if (INSN_COST (tmp) != INSN_COST (tmp2)) |
| return rfs_result (RFS_COST, INSN_COST (tmp2) - INSN_COST (tmp), |
| tmp, tmp2); |
| |
| /* If insns are equally good, sort by INSN_LUID (original insn order), |
| so that we make the sort stable. This minimizes instruction movement, |
| thus minimizing sched's effect on debugging and cross-jumping. */ |
| return rfs_result (RFS_TIE, INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2); |
| } |
| |
| /* Re-sort the array A, in which only the last element (at index N - 1) |
| may be out of order. */ |
| |
| HAIFA_INLINE static void |
| swap_sort (rtx_insn **a, int n) |
| { |
| rtx_insn *insn = a[n - 1]; |
| int i = n - 2; |
| |
| while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0) |
| { |
| a[i + 1] = a[i]; |
| i -= 1; |
| } |
| a[i + 1] = insn; |
| } |
| |
| /* Add INSN to the insn queue so that it can be executed at least |
| N_CYCLES after the currently executing insn. Preserve the insn |
| chain for debugging purposes. REASON will be printed in the |
| debugging output. */ |
| |
| HAIFA_INLINE static void |
| queue_insn (rtx_insn *insn, int n_cycles, const char *reason) |
| { |
| int next_q = NEXT_Q_AFTER (q_ptr, n_cycles); |
| rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]); |
| int new_tick; |
| |
| gcc_assert (n_cycles <= max_insn_queue_index); |
| gcc_assert (!DEBUG_INSN_P (insn)); |
| |
| insn_queue[next_q] = link; |
| q_size += 1; |
| |
| if (sched_verbose >= 2) |
| { |
| fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ", |
| (*current_sched_info->print_insn) (insn, 0)); |
| |
| fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason); |
| } |
| |
| QUEUE_INDEX (insn) = next_q; |
| |
| if (current_sched_info->flags & DO_BACKTRACKING) |
| { |
| new_tick = clock_var + n_cycles; |
| if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick) |
| INSN_TICK (insn) = new_tick; |
| |
| if (INSN_EXACT_TICK (insn) != INVALID_TICK |
| && INSN_EXACT_TICK (insn) < clock_var + n_cycles) |
| { |
| must_backtrack = true; |
| if (sched_verbose >= 2) |
| fprintf (sched_dump, ";;\t\tcausing a backtrack.\n"); |
| } |
| } |
| } |
| |
| /* Remove INSN from queue. */ |
| static void |
| queue_remove (rtx_insn *insn) |
| { |
| gcc_assert (QUEUE_INDEX (insn) >= 0); |
| remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]); |
| q_size--; |
| QUEUE_INDEX (insn) = QUEUE_NOWHERE; |
| } |
| |
| /* Return a pointer to the bottom of the ready list, i.e. the insn |
| with the lowest priority. */ |
| |
| rtx_insn ** |
| ready_lastpos (struct ready_list *ready) |
| { |
| gcc_assert (ready->n_ready >= 1); |
| return ready->vec + ready->first - ready->n_ready + 1; |
| } |
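| |
| /* The ready list occupies the slice READY->vec[READY->first - |
| READY->n_ready + 1 .. READY->first], with the highest-priority insn at |
| READY->vec[READY->first]. For example (sizes chosen for illustration), |
| with veclen == 8, first == 5 and n_ready == 3, the insns occupy vec[5], |
| vec[4] and vec[3]; ready_lastpos returns &vec[3] and |
| ready_element (ready, 0) returns vec[5]. */ |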
| |
| /* Add an element INSN to the ready list so that it ends up with the |
| lowest/highest priority depending on FIRST_P. */ |
| |
| HAIFA_INLINE static void |
| ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p) |
| { |
| if (!first_p) |
| { |
| if (ready->first == ready->n_ready) |
| { |
| memmove (ready->vec + ready->veclen - ready->n_ready, |
| ready_lastpos (ready), |
| ready->n_ready * sizeof (rtx)); |
| ready->first = ready->veclen - 1; |
| } |
| ready->vec[ready->first - ready->n_ready] = insn; |
| } |
| else |
| { |
| if (ready->first == ready->veclen - 1) |
| { |
| if (ready->n_ready) |
| /* ready_lastpos() fails when called with (ready->n_ready == 0). */ |
| memmove (ready->vec + ready->veclen - ready->n_ready - 1, |
| ready_lastpos (ready), |
| ready->n_ready * sizeof (rtx)); |
| ready->first = ready->veclen - 2; |
| } |
| ready->vec[++(ready->first)] = insn; |
| } |
| |
| ready->n_ready++; |
| if (DEBUG_INSN_P (insn)) |
| ready->n_debug++; |
| |
| gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY); |
| QUEUE_INDEX (insn) = QUEUE_READY; |
| |
| if (INSN_EXACT_TICK (insn) != INVALID_TICK |
| && INSN_EXACT_TICK (insn) < clock_var) |
| { |
| must_backtrack = true; |
| } |
| } |
| |
| /* Remove the element with the highest priority from the ready list and |
| return it. */ |
| |
| HAIFA_INLINE static rtx_insn * |
| ready_remove_first (struct ready_list *ready) |
| { |
| rtx_insn *t; |
| |
| gcc_assert (ready->n_ready); |
| t = ready->vec[ready->first--]; |
| ready->n_ready--; |
| if (DEBUG_INSN_P (t)) |
| ready->n_debug--; |
| /* If the queue becomes empty, reset it. */ |
| if (ready->n_ready == 0) |
| ready->first = ready->veclen - 1; |
| |
| gcc_assert (QUEUE_INDEX (t) == QUEUE_READY); |
| QUEUE_INDEX (t) = QUEUE_NOWHERE; |
| |
| return t; |
| } |
| |
| /* The following code implements multi-pass scheduling for the first |
| cycle. In other words, we try to choose the ready insn that allows |
| the maximum number of insns to start on the same cycle. */ |
| |
| /* Return a pointer to element INDEX of the ready list. The insn with |
| the highest priority has INDEX 0, and the insn with the lowest |
| priority has INDEX N_READY - 1. */ |
| |
| rtx_insn * |
| ready_element (struct ready_list *ready, int index) |
| { |
| gcc_assert (ready->n_ready && index < ready->n_ready); |
| |
| return ready->vec[ready->first - index]; |
| } |
| |
| /* Remove the element at INDEX from the ready list and return it. The |
| insn with the highest priority has INDEX 0, and the insn with the |
| lowest priority has INDEX N_READY - 1. */ |
| |
| HAIFA_INLINE static rtx_insn * |
| ready_remove (struct ready_list *ready, int index) |
| { |
| rtx_insn *t; |
| int i; |
| |
| if (index == 0) |
| return ready_remove_first (ready); |
| gcc_assert (ready->n_ready && index < ready->n_ready); |
| t = ready->vec[ready->first - index]; |
| ready->n_ready--; |
| if (DEBUG_INSN_P (t)) |
| ready->n_debug--; |
| for (i = index; i < ready->n_ready; i++) |
| ready->vec[ready->first - i] = ready->vec[ready->first - i - 1]; |
| QUEUE_INDEX (t) = QUEUE_NOWHERE; |
| return t; |
| } |
| |
| /* Remove INSN from the ready list. */ |
| static void |
| ready_remove_insn (rtx_insn *insn) |
| { |
| int i; |
| |
| for (i = 0; i < readyp->n_ready; i++) |
| if (ready_element (readyp, i) == insn) |
| { |
| ready_remove (readyp, i); |
| return; |
| } |
| gcc_unreachable (); |
| } |
| |
| /* Calculate the difference of the two statistics sets WAS and NOW. |
| The result is returned in WAS. */ |
| static void |
| rank_for_schedule_stats_diff (rank_for_schedule_stats_t *was, |
| const rank_for_schedule_stats_t *now) |
| { |
| for (int i = 0; i < RFS_N; ++i) |
| was->stats[i] = now->stats[i] - was->stats[i]; |
| } |
| |
| /* Print rank_for_schedule statistics. */ |
| static void |
| print_rank_for_schedule_stats (const char *prefix, |
| const rank_for_schedule_stats_t *stats, |
| struct ready_list *ready) |
| { |
| for (int i = 0; i < RFS_N; ++i) |
| if (stats->stats[i]) |
| { |
| fprintf (sched_dump, "%s%20s: %u", prefix, rfs_str[i], stats->stats[i]); |
| |
| if (ready != NULL) |
| /* Print out insns that won due to RFS_<I>. */ |
| { |
| rtx_insn **p = ready_lastpos (ready); |
| |
| fprintf (sched_dump, ":"); |
| /* Start with 1 since least-priority insn didn't have any wins. */ |
| for (int j = 1; j < ready->n_ready; ++j) |
| if (INSN_LAST_RFS_WIN (p[j]) == i) |
| fprintf (sched_dump, " %s", |
| (*current_sched_info->print_insn) (p[j], 0)); |
| } |
| fprintf (sched_dump, "\n"); |
| } |
| } |
| |
| /* Separate DEBUG_INSNs from normal insns. DEBUG_INSNs go to the end |
| of the array. */ |
| static void |
| ready_sort_debug (struct ready_list *ready) |
| { |
| int i; |
| rtx_insn **first = ready_lastpos (ready); |
| |
| for (i = 0; i < ready->n_ready; ++i) |
| if (!DEBUG_INSN_P (first[i])) |
| INSN_RFS_DEBUG_ORIG_ORDER (first[i]) = i; |
| |
| qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule_debug); |
| } |
| |
| /* Sort non-debug insns in the ready list READY by ascending priority. |
| Assumes that all debug insns are separated from the real insns. */ |
| static void |
| ready_sort_real (struct ready_list *ready) |
| { |
| int i; |
| rtx_insn **first = ready_lastpos (ready); |
| int n_ready_real = ready->n_ready - ready->n_debug; |
| |
| if (sched_pressure == SCHED_PRESSURE_WEIGHTED) |
| for (i = 0; i < n_ready_real; ++i) |
| setup_insn_reg_pressure_info (first[i]); |
| else if (sched_pressure == SCHED_PRESSURE_MODEL |
| && model_curr_point < model_num_insns) |
| model_set_excess_costs (first, n_ready_real); |
| |
| rank_for_schedule_stats_t stats1; |
| if (sched_verbose >= 4) |
| stats1 = rank_for_schedule_stats; |
| |
| if (n_ready_real == 2) |
| swap_sort (first, n_ready_real); |
| else if (n_ready_real > 2) |
| qsort (first, n_ready_real, sizeof (rtx), rank_for_schedule); |
| |
| if (sched_verbose >= 4) |
| { |
| rank_for_schedule_stats_diff (&stats1, &rank_for_schedule_stats); |
| print_rank_for_schedule_stats (";;\t\t", &stats1, ready); |
| } |
| } |
| |
| /* Sort the ready list READY by ascending priority. */ |
| static void |
| ready_sort (struct ready_list *ready) |
| { |
| if (ready->n_debug > 0) |
| ready_sort_debug (ready); |
| else |
| ready_sort_real (ready); |
| } |
| |
| /* PREV is an insn that is ready to execute. Adjust its priority if that |
| will help shorten or lengthen register lifetimes as appropriate. Also |
| provide a hook for the target to tweak itself. */ |
| |
| HAIFA_INLINE static void |
| adjust_priority (rtx_insn *prev) |
| { |
| /* ??? There used to be code here to try and estimate how an insn |
| affected register lifetimes, but it did it by looking at REG_DEAD |
| notes, which we removed in schedule_region. Nor did it try to |
| take into account register pressure or anything useful like that. |
| |
| Revisit when we have a machine model to work with and not before. */ |
| |
| if (targetm.sched.adjust_priority) |
| INSN_PRIORITY (prev) = |
| targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev)); |
| } |
| |
| /* Advance DFA state STATE on one cycle. */ |
| void |
| advance_state (state_t state) |
| { |
| if (targetm.sched.dfa_pre_advance_cycle) |
| targetm.sched.dfa_pre_advance_cycle (); |
| |
| if (targetm.sched.dfa_pre_cycle_insn) |
| state_transition (state, |
| targetm.sched.dfa_pre_cycle_insn ()); |
| |
| state_transition (state, NULL); |
| |
| if (targetm.sched.dfa_post_cycle_insn) |
| state_transition (state, |
| targetm.sched.dfa_post_cycle_insn ()); |
| |
| if (targetm.sched.dfa_post_advance_cycle) |
| targetm.sched.dfa_post_advance_cycle (); |
| } |
| |
| /* Advance time on one cycle. */ |
| HAIFA_INLINE static void |
| advance_one_cycle (void) |
| { |
| advance_state (curr_state); |
| if (sched_verbose >= 4) |
| fprintf (sched_dump, ";;\tAdvance the current state.\n"); |
| } |
| |
| /* Update register pressure after scheduling INSN. */ |
| static void |
| update_register_pressure (rtx_insn *insn) |
| { |
| struct reg_use_data *use; |
| struct reg_set_data *set; |
| |
| gcc_checking_assert (!DEBUG_INSN_P (insn)); |
| |
| for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use) |
| if (dying_use_p (use)) |
| mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, |
| use->regno, false); |
| for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set) |
| mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, |
| set->regno, true); |
| } |
| |
| /* Set up or update (if UPDATE_P) max register pressure (see its |
| meaning in sched-int.h::_haifa_insn_data) for all current BB insns |
| after insn AFTER. */ |
| static void |
| setup_insn_max_reg_pressure (rtx_insn *after, bool update_p) |
| { |
| int i, p; |
| bool eq_p; |
| rtx_insn *insn; |
| static int max_reg_pressure[N_REG_CLASSES]; |
| |
| save_reg_pressure (); |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| max_reg_pressure[ira_pressure_classes[i]] |
| = curr_reg_pressure[ira_pressure_classes[i]]; |
| for (insn = NEXT_INSN (after); |
| insn != NULL_RTX && ! BARRIER_P (insn) |
| && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after); |
| insn = NEXT_INSN (insn)) |
| if (NONDEBUG_INSN_P (insn)) |
| { |
| eq_p = true; |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| { |
| p = max_reg_pressure[ira_pressure_classes[i]]; |
| if (INSN_MAX_REG_PRESSURE (insn)[i] != p) |
| { |
| eq_p = false; |
| INSN_MAX_REG_PRESSURE (insn)[i] |
| = max_reg_pressure[ira_pressure_classes[i]]; |
| } |
| } |
| if (update_p && eq_p) |
| break; |
| update_register_pressure (insn); |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| if (max_reg_pressure[ira_pressure_classes[i]] |
| < curr_reg_pressure[ira_pressure_classes[i]]) |
| max_reg_pressure[ira_pressure_classes[i]] |
| = curr_reg_pressure[ira_pressure_classes[i]]; |
| } |
| restore_reg_pressure (); |
| } |
| |
| /* Update the current register pressure after scheduling INSN. Also |
| update the max register pressure for unscheduled insns of the |
| current BB. */ |
| static void |
| update_reg_and_insn_max_reg_pressure (rtx_insn *insn) |
| { |
| int i; |
| int before[N_REG_CLASSES]; |
| |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| before[i] = curr_reg_pressure[ira_pressure_classes[i]]; |
| update_register_pressure (insn); |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| if (curr_reg_pressure[ira_pressure_classes[i]] != before[i]) |
| break; |
| if (i < ira_pressure_classes_num) |
| setup_insn_max_reg_pressure (insn, true); |
| } |
| |
| /* Set up register pressure at the beginning of basic block BB, whose |
| insns start after insn AFTER. Also set up the max register pressure |
| for all insns of the basic block. */ |
| void |
| sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after) |
| |