| /* Instruction scheduling pass. This file computes dependencies between |
| instructions. |
| Copyright (C) 1992-2021 Free Software Foundation, Inc. |
| Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by, |
| and currently maintained by, Jim Wilson (wilson@cygnus.com) |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3.  If not, see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "df.h" |
| #include "insn-config.h" |
| #include "regs.h" |
| #include "memmodel.h" |
| #include "ira.h" |
| #include "ira-int.h" |
| #include "insn-attr.h" |
| #include "cfgbuild.h" |
| #include "sched-int.h" |
| #include "cselib.h" |
| #include "function-abi.h" |
| |
| #ifdef INSN_SCHEDULING |
| |
| /* Holds current parameters for the dependency analyzer. */ |
| struct sched_deps_info_def *sched_deps_info; |
| |
| /* The data is specific to the Haifa scheduler. */ |
| vec<haifa_deps_insn_data_def> h_d_i_d = vNULL; |
| |
| /* Return the major dependence type present in status DS. */ |
| enum reg_note |
| ds_to_dk (ds_t ds) |
| { |
| if (ds & DEP_TRUE) |
| return REG_DEP_TRUE; |
| |
| if (ds & DEP_OUTPUT) |
| return REG_DEP_OUTPUT; |
| |
| if (ds & DEP_CONTROL) |
| return REG_DEP_CONTROL; |
| |
| gcc_assert (ds & DEP_ANTI); |
| |
| return REG_DEP_ANTI; |
| } |
| |
| /* Return the dep_status equivalent of dependence type DK. */ |
| ds_t |
| dk_to_ds (enum reg_note dk) |
| { |
| switch (dk) |
| { |
| case REG_DEP_TRUE: |
| return DEP_TRUE; |
| |
| case REG_DEP_OUTPUT: |
| return DEP_OUTPUT; |
| |
| case REG_DEP_CONTROL: |
| return DEP_CONTROL; |
| |
| default: |
| gcc_assert (dk == REG_DEP_ANTI); |
| return DEP_ANTI; |
| } |
| } |
| |
| /* Functions to operate with dependence information container - dep_t. */ |
| |
| /* Init DEP with the arguments. */ |
| void |
| init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds) |
| { |
| DEP_PRO (dep) = pro; |
| DEP_CON (dep) = con; |
| DEP_TYPE (dep) = type; |
| DEP_STATUS (dep) = ds; |
| DEP_COST (dep) = UNKNOWN_DEP_COST; |
| DEP_NONREG (dep) = 0; |
| DEP_MULTIPLE (dep) = 0; |
| DEP_REPLACE (dep) = NULL; |
| dep->unused = 0; |
| } |
| |
| /* Init DEP with the arguments. |
| While most of the scheduler (including targets) only needs the major type |
| of the dependency, it is convenient to hide the full dep_status from them. */ |
| void |
| init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind) |
| { |
| ds_t ds; |
| |
| if ((current_sched_info->flags & USE_DEPS_LIST)) |
| ds = dk_to_ds (kind); |
| else |
| ds = 0; |
| |
| init_dep_1 (dep, pro, con, kind, ds); |
| } |
| |
| /* Make a copy of FROM in TO. */ |
| static void |
| copy_dep (dep_t to, dep_t from) |
| { |
| memcpy (to, from, sizeof (*to)); |
| } |
| |
| static void dump_ds (FILE *, ds_t); |
| |
| /* Define flags for dump_dep (). */ |
| |
| /* Dump producer of the dependence. */ |
| #define DUMP_DEP_PRO (2) |
| |
| /* Dump consumer of the dependence. */ |
| #define DUMP_DEP_CON (4) |
| |
| /* Dump type of the dependence. */ |
| #define DUMP_DEP_TYPE (8) |
| |
| /* Dump status of the dependence. */ |
| #define DUMP_DEP_STATUS (16) |
| |
| /* Dump all information about the dependence. */ |
| #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \ |
| |DUMP_DEP_STATUS) |
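| |
| /* For illustration only (the insn UIDs are made up): calling dump_dep () |
| below with DUMP_DEP_PRO | DUMP_DEP_CON prints something like "<14; 17; >", |
| while DUMP_DEP_ALL also prints the type letter and, when USE_DEPS_LIST is |
| set, the dep_status bits. */ |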
| |
| /* Dump DEP to DUMP. |
| FLAGS is a bit mask specifying what information about DEP needs |
| to be printed. |
| If FLAGS has the very first bit set, then dump all information about DEP |
| and propagate this bit into the callee dump functions. */ |
| static void |
| dump_dep (FILE *dump, dep_t dep, int flags) |
| { |
| if (flags & 1) |
| flags |= DUMP_DEP_ALL; |
| |
| fprintf (dump, "<"); |
| |
| if (flags & DUMP_DEP_PRO) |
| fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep))); |
| |
| if (flags & DUMP_DEP_CON) |
| fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep))); |
| |
| if (flags & DUMP_DEP_TYPE) |
| { |
| char t; |
| enum reg_note type = DEP_TYPE (dep); |
| |
| switch (type) |
| { |
| case REG_DEP_TRUE: |
| t = 't'; |
| break; |
| |
| case REG_DEP_OUTPUT: |
| t = 'o'; |
| break; |
| |
| case REG_DEP_CONTROL: |
| t = 'c'; |
| break; |
| |
| case REG_DEP_ANTI: |
| t = 'a'; |
| break; |
| |
| default: |
| gcc_unreachable (); |
| break; |
| } |
| |
| fprintf (dump, "%c; ", t); |
| } |
| |
| if (flags & DUMP_DEP_STATUS) |
| { |
| if (current_sched_info->flags & USE_DEPS_LIST) |
| dump_ds (dump, DEP_STATUS (dep)); |
| } |
| |
| fprintf (dump, ">"); |
| } |
| |
| /* Default flags for dump_dep (). */ |
| static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON); |
| |
| /* Dump all fields of DEP to STDERR. */ |
| void |
| sd_debug_dep (dep_t dep) |
| { |
| dump_dep (stderr, dep, 1); |
| fprintf (stderr, "\n"); |
| } |
| |
| /* Determine whether DEP is a dependency link of a non-debug insn on a |
| debug insn. */ |
| |
| static inline bool |
| depl_on_debug_p (dep_link_t dep) |
| { |
| return (DEBUG_INSN_P (DEP_LINK_PRO (dep)) |
| && !DEBUG_INSN_P (DEP_LINK_CON (dep))); |
| } |
| |
| /* Functions to operate with a single link from the dependencies lists - |
| dep_link_t. */ |
| |
| /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by |
| PREV_NEXTP. */ |
| static void |
| attach_dep_link (dep_link_t l, dep_link_t *prev_nextp) |
| { |
| dep_link_t next = *prev_nextp; |
| |
| gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL |
| && DEP_LINK_NEXT (l) == NULL); |
| |
| /* Init node being inserted. */ |
| DEP_LINK_PREV_NEXTP (l) = prev_nextp; |
| DEP_LINK_NEXT (l) = next; |
| |
| /* Fix next node. */ |
| if (next != NULL) |
| { |
| gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp); |
| |
| DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l); |
| } |
| |
| /* Fix prev node. */ |
| *prev_nextp = l; |
| } |
| |
| /* Add dep_link LINK to deps_list L. */ |
| static void |
| add_to_deps_list (dep_link_t link, deps_list_t l) |
| { |
| attach_dep_link (link, &DEPS_LIST_FIRST (l)); |
| |
| /* Don't count debug deps. */ |
| if (!depl_on_debug_p (link)) |
| ++DEPS_LIST_N_LINKS (l); |
| } |
| |
| /* Detach dep_link L from the list. */ |
| static void |
| detach_dep_link (dep_link_t l) |
| { |
| dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l); |
| dep_link_t next = DEP_LINK_NEXT (l); |
| |
| *prev_nextp = next; |
| |
| if (next != NULL) |
| DEP_LINK_PREV_NEXTP (next) = prev_nextp; |
| |
| DEP_LINK_PREV_NEXTP (l) = NULL; |
| DEP_LINK_NEXT (l) = NULL; |
| } |
| |
| /* Remove link LINK from list LIST. */ |
| static void |
| remove_from_deps_list (dep_link_t link, deps_list_t list) |
| { |
| detach_dep_link (link); |
| |
| /* Don't count debug deps. */ |
| if (!depl_on_debug_p (link)) |
| --DEPS_LIST_N_LINKS (list); |
| } |
| |
| /* Move link LINK from list FROM to list TO. */ |
| static void |
| move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to) |
| { |
| remove_from_deps_list (link, from); |
| add_to_deps_list (link, to); |
| } |
| |
| /* Return true if LINK is not attached to any list. */ |
| static bool |
| dep_link_is_detached_p (dep_link_t link) |
| { |
| return DEP_LINK_PREV_NEXTP (link) == NULL; |
| } |
| |
| /* Pool to hold all dependency nodes (dep_node_t). */ |
| static object_allocator<_dep_node> *dn_pool; |
| |
| /* Number of dep_nodes currently in use (created minus deleted). */ |
| static int dn_pool_diff = 0; |
| |
| /* Create a dep_node. */ |
| static dep_node_t |
| create_dep_node (void) |
| { |
| dep_node_t n = dn_pool->allocate (); |
| dep_link_t back = DEP_NODE_BACK (n); |
| dep_link_t forw = DEP_NODE_FORW (n); |
| |
| DEP_LINK_NODE (back) = n; |
| DEP_LINK_NEXT (back) = NULL; |
| DEP_LINK_PREV_NEXTP (back) = NULL; |
| |
| DEP_LINK_NODE (forw) = n; |
| DEP_LINK_NEXT (forw) = NULL; |
| DEP_LINK_PREV_NEXTP (forw) = NULL; |
| |
| ++dn_pool_diff; |
| |
| return n; |
| } |
| |
| /* Delete dep_node N. N must not be connected to any deps_list. */ |
| static void |
| delete_dep_node (dep_node_t n) |
| { |
| gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n)) |
| && dep_link_is_detached_p (DEP_NODE_FORW (n))); |
| |
| XDELETE (DEP_REPLACE (DEP_NODE_DEP (n))); |
| |
| --dn_pool_diff; |
| |
| dn_pool->remove (n); |
| } |
| |
| /* Pool to hold dependencies lists (deps_list_t). */ |
| static object_allocator<_deps_list> *dl_pool; |
| |
| /* Number of deps_lists currently in use (created minus deleted). */ |
| static int dl_pool_diff = 0; |
| |
| /* Functions to operate with dependences lists - deps_list_t. */ |
| |
| /* Return true if list L is empty. */ |
| static bool |
| deps_list_empty_p (deps_list_t l) |
| { |
| return DEPS_LIST_N_LINKS (l) == 0; |
| } |
| |
| /* Create a new deps_list. */ |
| static deps_list_t |
| create_deps_list (void) |
| { |
| deps_list_t l = dl_pool->allocate (); |
| |
| DEPS_LIST_FIRST (l) = NULL; |
| DEPS_LIST_N_LINKS (l) = 0; |
| |
| ++dl_pool_diff; |
| return l; |
| } |
| |
| /* Free deps_list L. */ |
| static void |
| free_deps_list (deps_list_t l) |
| { |
| gcc_assert (deps_list_empty_p (l)); |
| |
| --dl_pool_diff; |
| |
| dl_pool->remove (l); |
| } |
| |
| /* Return true if there are no dep_nodes and deps_lists out there. |
| After the region is scheduled all the dependency nodes and lists |
| should [generally] be returned to the pool. */ |
| bool |
| deps_pools_are_empty_p (void) |
| { |
| return dn_pool_diff == 0 && dl_pool_diff == 0; |
| } |
| |
| /* Remove all elements from L. */ |
| static void |
| clear_deps_list (deps_list_t l) |
| { |
| do |
| { |
| dep_link_t link = DEPS_LIST_FIRST (l); |
| |
| if (link == NULL) |
| break; |
| |
| remove_from_deps_list (link, l); |
| } |
| while (1); |
| } |
| |
| /* Decide whether a dependency should be treated as a hard or a speculative |
| dependency. */ |
| static bool |
| dep_spec_p (dep_t dep) |
| { |
| if (current_sched_info->flags & DO_SPECULATION) |
| { |
| if (DEP_STATUS (dep) & SPECULATIVE) |
| return true; |
| } |
| if (current_sched_info->flags & DO_PREDICATION) |
| { |
| if (DEP_TYPE (dep) == REG_DEP_CONTROL) |
| return true; |
| } |
| if (DEP_REPLACE (dep) != NULL) |
| return true; |
| return false; |
| } |
| |
| static regset reg_pending_sets; |
| static regset reg_pending_clobbers; |
| static regset reg_pending_uses; |
| static regset reg_pending_control_uses; |
| static enum reg_pending_barrier_mode reg_pending_barrier; |
| |
| /* Hard registers implicitly clobbered or used (or that may be implicitly |
| clobbered or used) by the currently analyzed insn.  For example, an insn |
| may have a constraint that accepts only a single register class.  Even if |
| no hard register currently appears in the insn, that particular hard |
| register will appear in it after the reload pass because the constraint |
| requires it. */ |
| static HARD_REG_SET implicit_reg_pending_clobbers; |
| static HARD_REG_SET implicit_reg_pending_uses; |
| |
| /* To speed up the test for duplicate dependency links we keep a |
| record of dependencies created by add_dependence when the average |
| number of instructions in a basic block is very large. |
| |
| Studies have shown that there are typically about 5 instructions between |
| branches in typical C code.  So we can guess that the average basic block |
| is approximately 5 instructions long; we will choose 100X the average |
| size as a very large basic block. |
| |
| Each insn has an associated bitmap for each kind of dependency.  Each |
| bitmap has enough entries to represent a dependency on any other insn in |
| the insn chain.  The caches below are allocated together: when the true |
| dependency cache is allocated, the remaining ones are allocated too. */ |
| static bitmap true_dependency_cache = NULL; |
| static bitmap output_dependency_cache = NULL; |
| static bitmap anti_dependency_cache = NULL; |
| static bitmap control_dependency_cache = NULL; |
| static bitmap spec_dependency_cache = NULL; |
| static int cache_size; |
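| |
| /* As an illustration of how the caches are indexed (luids, not insn UIDs, |
| and the numbers are made up): if the insn with luid 23 has a true |
| dependence on the insn with luid 7, then bit 7 is set in |
| true_dependency_cache[23].  The other caches are indexed the same way. */ |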
| |
| /* True if we should mark added dependencies as non-register deps. */ |
| static bool mark_as_hard; |
| |
| static int deps_may_trap_p (const_rtx); |
| static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note); |
| static void add_dependence_list (rtx_insn *, rtx_insn_list *, int, |
| enum reg_note, bool); |
| static void add_dependence_list_and_free (class deps_desc *, rtx_insn *, |
| rtx_insn_list **, int, enum reg_note, |
| bool); |
| static void delete_all_dependences (rtx_insn *); |
| static void chain_to_prev_insn (rtx_insn *); |
| |
| static void flush_pending_lists (class deps_desc *, rtx_insn *, int, int); |
| static void sched_analyze_1 (class deps_desc *, rtx, rtx_insn *); |
| static void sched_analyze_2 (class deps_desc *, rtx, rtx_insn *); |
| static void sched_analyze_insn (class deps_desc *, rtx, rtx_insn *); |
| |
| static bool sched_has_condition_p (const rtx_insn *); |
| static int conditions_mutex_p (const_rtx, const_rtx, bool, bool); |
| |
| static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool, |
| rtx, rtx); |
| static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx); |
| |
| static void check_dep (dep_t, bool); |
| |
| |
| /* Return nonzero if a load of the memory reference MEM can cause a trap. */ |
| |
| static int |
| deps_may_trap_p (const_rtx mem) |
| { |
| const_rtx addr = XEXP (mem, 0); |
| |
| if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER) |
| { |
| const_rtx t = get_reg_known_value (REGNO (addr)); |
| if (t) |
| addr = t; |
| } |
| return rtx_addr_can_trap_p (addr); |
| } |
| |
| |
| /* Find the condition under which INSN is executed. If REV is not NULL, |
| it is set to TRUE when the returned comparison should be reversed |
| to get the actual condition. */ |
| static rtx |
| sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev) |
| { |
| rtx pat = PATTERN (insn); |
| rtx src; |
| |
| if (rev) |
| *rev = false; |
| |
| if (GET_CODE (pat) == COND_EXEC) |
| return COND_EXEC_TEST (pat); |
| |
| if (!any_condjump_p (insn) || !onlyjump_p (insn)) |
| return 0; |
| |
| src = SET_SRC (pc_set (insn)); |
| |
| if (XEXP (src, 2) == pc_rtx) |
| return XEXP (src, 0); |
| else if (XEXP (src, 1) == pc_rtx) |
| { |
| rtx cond = XEXP (src, 0); |
| enum rtx_code revcode = reversed_comparison_code (cond, insn); |
| |
| if (revcode == UNKNOWN) |
| return 0; |
| |
| if (rev) |
| *rev = true; |
| return cond; |
| } |
| |
| return 0; |
| } |
| |
| /* Return the condition under which INSN does not execute (i.e. the |
| not-taken condition for a conditional branch), or NULL if we cannot |
| find such a condition. The caller should make a copy of the condition |
| before using it. */ |
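| /* For example (illustrative only): for a conditional branch taken when |
| (eq (reg:CCZ 17) (const_int 0)) holds, this returns the not-taken |
| condition (ne (reg:CCZ 17) (const_int 0)). */ |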
| rtx |
| sched_get_reverse_condition_uncached (const rtx_insn *insn) |
| { |
| bool rev; |
| rtx cond = sched_get_condition_with_rev_uncached (insn, &rev); |
| if (cond == NULL_RTX) |
| return cond; |
| if (!rev) |
| { |
| enum rtx_code revcode = reversed_comparison_code (cond, insn); |
| cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond), |
| XEXP (cond, 0), |
| XEXP (cond, 1)); |
| } |
| return cond; |
| } |
| |
| /* Caching variant of sched_get_condition_with_rev_uncached. |
| We only do actual work the first time we come here for an insn; the |
| results are cached in INSN_CACHED_COND and INSN_REVERSE_COND. */ |
| static rtx |
| sched_get_condition_with_rev (const rtx_insn *insn, bool *rev) |
| { |
| bool tmp; |
| |
| if (INSN_LUID (insn) == 0) |
| return sched_get_condition_with_rev_uncached (insn, rev); |
| |
| if (INSN_CACHED_COND (insn) == const_true_rtx) |
| return NULL_RTX; |
| |
| if (INSN_CACHED_COND (insn) != NULL_RTX) |
| { |
| if (rev) |
| *rev = INSN_REVERSE_COND (insn); |
| return INSN_CACHED_COND (insn); |
| } |
| |
| INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp); |
| INSN_REVERSE_COND (insn) = tmp; |
| |
| if (INSN_CACHED_COND (insn) == NULL_RTX) |
| { |
| INSN_CACHED_COND (insn) = const_true_rtx; |
| return NULL_RTX; |
| } |
| |
| if (rev) |
| *rev = INSN_REVERSE_COND (insn); |
| return INSN_CACHED_COND (insn); |
| } |
| |
| /* True when we can find a condition under which INSN is executed. */ |
| static bool |
| sched_has_condition_p (const rtx_insn *insn) |
| { |
| return !! sched_get_condition_with_rev (insn, NULL); |
| } |
| |
| |
| |
| /* Return nonzero if conditions COND1 and COND2 can never be both true. */ |
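| /* For example (illustrative only): with REV1 == REV2, the conditions |
| (eq (reg:CCZ 17) (const_int 0)) and (ne (reg:CCZ 17) (const_int 0)) are |
| mutually exclusive, since NE is the reversed comparison code of EQ and |
| both compare the same operands. */ |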
| static int |
| conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2) |
| { |
| if (COMPARISON_P (cond1) |
| && COMPARISON_P (cond2) |
| && GET_CODE (cond1) == |
| (rev1==rev2 |
| ? reversed_comparison_code (cond2, NULL) |
| : GET_CODE (cond2)) |
| && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0)) |
| && XEXP (cond1, 1) == XEXP (cond2, 1)) |
| return 1; |
| return 0; |
| } |
| |
| /* Return true if INSN1 and INSN2 can never depend on one another because |
| the conditions under which they are executed are mutually exclusive. */ |
| bool |
| sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2) |
| { |
| rtx cond1, cond2; |
| bool rev1 = false, rev2 = false; |
| |
| /* df doesn't handle conditional lifetimes entirely correctly; |
| calls mess up the conditional lifetimes. */ |
| if (!CALL_P (insn1) && !CALL_P (insn2)) |
| { |
| cond1 = sched_get_condition_with_rev (insn1, &rev1); |
| cond2 = sched_get_condition_with_rev (insn2, &rev2); |
| if (cond1 && cond2 |
| && conditions_mutex_p (cond1, cond2, rev1, rev2) |
| /* Make sure first instruction doesn't affect condition of second |
| instruction if switched. */ |
| && !modified_in_p (cond1, insn2) |
| /* Make sure second instruction doesn't affect condition of first |
| instruction if switched. */ |
| && !modified_in_p (cond2, insn1)) |
| return true; |
| } |
| return false; |
| } |
| |
| |
| /* Return true if INSN can potentially be speculated with type DS. */ |
| bool |
| sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds) |
| { |
| if (HAS_INTERNAL_DEP (insn)) |
| return false; |
| |
| if (!NONJUMP_INSN_P (insn)) |
| return false; |
| |
| if (SCHED_GROUP_P (insn)) |
| return false; |
| |
| if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn))) |
| return false; |
| |
| if (side_effects_p (PATTERN (insn))) |
| return false; |
| |
| if (ds & BE_IN_SPEC) |
| /* The following instructions, which depend on a speculatively scheduled |
| instruction, cannot be speculatively scheduled along with it. */ |
| { |
| if (may_trap_or_fault_p (PATTERN (insn))) |
| /* If instruction might fault, it cannot be speculatively scheduled. |
| For control speculation it's obvious why and for data speculation |
| it's because the insn might get wrong input if speculation |
| wasn't successful. */ |
| return false; |
| |
| if ((ds & BE_IN_DATA) |
| && sched_has_condition_p (insn)) |
| /* If this is a predicated instruction, then it cannot be |
| speculatively scheduled. See PR35659. */ |
| return false; |
| } |
| |
| return true; |
| } |
| |
| /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR, |
| initialize RESOLVED_P_PTR with true if that list consists of resolved deps, |
| and remove the type of the list returned [through LIST_PTR] from TYPES_PTR. |
| This function is used to switch sd_iterator to the next list. |
| !!! For internal use only. Might consider moving it to sched-int.h. */ |
| void |
| sd_next_list (const_rtx insn, sd_list_types_def *types_ptr, |
| deps_list_t *list_ptr, bool *resolved_p_ptr) |
| { |
| sd_list_types_def types = *types_ptr; |
| |
| if (types & SD_LIST_HARD_BACK) |
| { |
| *list_ptr = INSN_HARD_BACK_DEPS (insn); |
| *resolved_p_ptr = false; |
| *types_ptr = types & ~SD_LIST_HARD_BACK; |
| } |
| else if (types & SD_LIST_SPEC_BACK) |
| { |
| *list_ptr = INSN_SPEC_BACK_DEPS (insn); |
| *resolved_p_ptr = false; |
| *types_ptr = types & ~SD_LIST_SPEC_BACK; |
| } |
| else if (types & SD_LIST_FORW) |
| { |
| *list_ptr = INSN_FORW_DEPS (insn); |
| *resolved_p_ptr = false; |
| *types_ptr = types & ~SD_LIST_FORW; |
| } |
| else if (types & SD_LIST_RES_BACK) |
| { |
| *list_ptr = INSN_RESOLVED_BACK_DEPS (insn); |
| *resolved_p_ptr = true; |
| *types_ptr = types & ~SD_LIST_RES_BACK; |
| } |
| else if (types & SD_LIST_RES_FORW) |
| { |
| *list_ptr = INSN_RESOLVED_FORW_DEPS (insn); |
| *resolved_p_ptr = true; |
| *types_ptr = types & ~SD_LIST_RES_FORW; |
| } |
| else |
| { |
| *list_ptr = NULL; |
| *resolved_p_ptr = false; |
| *types_ptr = SD_LIST_NONE; |
| } |
| } |
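| |
| /* Typical use of the dependence lists (an illustrative sketch; FOR_EACH_DEP |
| is the iteration macro defined in sched-int.h): |
| |
| sd_iterator_def sd_it; |
| dep_t dep; |
| FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep) |
| if (DEP_TYPE (dep) == REG_DEP_TRUE) |
| ...; */ |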
| |
| /* Return the total size of INSN's lists defined by LIST_TYPES. */ |
| int |
| sd_lists_size (const_rtx insn, sd_list_types_def list_types) |
| { |
| int size = 0; |
| |
| while (list_types != SD_LIST_NONE) |
| { |
| deps_list_t list; |
| bool resolved_p; |
| |
| sd_next_list (insn, &list_types, &list, &resolved_p); |
| if (list) |
| size += DEPS_LIST_N_LINKS (list); |
| } |
| |
| return size; |
| } |
| |
| /* Return true if INSN's lists defined by LIST_TYPES are all empty. */ |
| |
| bool |
| sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types) |
| { |
| while (list_types != SD_LIST_NONE) |
| { |
| deps_list_t list; |
| bool resolved_p; |
| |
| sd_next_list (insn, &list_types, &list, &resolved_p); |
| if (!deps_list_empty_p (list)) |
| return false; |
| } |
| |
| return true; |
| } |
| |
| /* Initialize data for INSN. */ |
| void |
| sd_init_insn (rtx_insn *insn) |
| { |
| INSN_HARD_BACK_DEPS (insn) = create_deps_list (); |
| INSN_SPEC_BACK_DEPS (insn) = create_deps_list (); |
| INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list (); |
| INSN_FORW_DEPS (insn) = create_deps_list (); |
| INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list (); |
| |
| /* ??? It would be nice to allocate dependency caches here. */ |
| } |
| |
| /* Free data for INSN. */ |
| void |
| sd_finish_insn (rtx_insn *insn) |
| { |
| /* ??? It would be nice to deallocate dependency caches here. */ |
| |
| free_deps_list (INSN_HARD_BACK_DEPS (insn)); |
| INSN_HARD_BACK_DEPS (insn) = NULL; |
| |
| free_deps_list (INSN_SPEC_BACK_DEPS (insn)); |
| INSN_SPEC_BACK_DEPS (insn) = NULL; |
| |
| free_deps_list (INSN_RESOLVED_BACK_DEPS (insn)); |
| INSN_RESOLVED_BACK_DEPS (insn) = NULL; |
| |
| free_deps_list (INSN_FORW_DEPS (insn)); |
| INSN_FORW_DEPS (insn) = NULL; |
| |
| free_deps_list (INSN_RESOLVED_FORW_DEPS (insn)); |
| INSN_RESOLVED_FORW_DEPS (insn) = NULL; |
| } |
| |
| /* Find a dependency between producer PRO and consumer CON. |
| Search through resolved dependency lists if RESOLVED_P is true. |
| If no such dependency is found return NULL, |
| otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull] |
| with an iterator pointing to it. */ |
| static dep_t |
| sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p, |
| sd_iterator_def *sd_it_ptr) |
| { |
| sd_list_types_def pro_list_type; |
| sd_list_types_def con_list_type; |
| sd_iterator_def sd_it; |
| dep_t dep; |
| bool found_p = false; |
| |
| if (resolved_p) |
| { |
| pro_list_type = SD_LIST_RES_FORW; |
| con_list_type = SD_LIST_RES_BACK; |
| } |
| else |
| { |
| pro_list_type = SD_LIST_FORW; |
| con_list_type = SD_LIST_BACK; |
| } |
| |
| /* Walk through either back list of INSN or forw list of ELEM |
| depending on which one is shorter. */ |
| if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type)) |
| { |
| /* Find the dep_link with producer PRO in consumer's back_deps. */ |
| FOR_EACH_DEP (con, con_list_type, sd_it, dep) |
| if (DEP_PRO (dep) == pro) |
| { |
| found_p = true; |
| break; |
| } |
| } |
| else |
| { |
| /* Find the dep_link with consumer CON in producer's forw_deps. */ |
| FOR_EACH_DEP (pro, pro_list_type, sd_it, dep) |
| if (DEP_CON (dep) == con) |
| { |
| found_p = true; |
| break; |
| } |
| } |
| |
| if (found_p) |
| { |
| if (sd_it_ptr != NULL) |
| *sd_it_ptr = sd_it; |
| |
| return dep; |
| } |
| |
| return NULL; |
| } |
| |
| /* Find a dependency between producer PRO and consumer CON. |
| Use the dependency caches, if available, to check whether the dependency |
| is present at all.  Search through resolved dependency lists if RESOLVED_P |
| is true.  Return the dependency, or NULL if none was found. */ |
| dep_t |
| sd_find_dep_between (rtx pro, rtx con, bool resolved_p) |
| { |
| if (true_dependency_cache != NULL) |
| /* Avoiding the list walk below can cut compile times dramatically |
| for some code. */ |
| { |
| int elem_luid = INSN_LUID (pro); |
| int insn_luid = INSN_LUID (con); |
| |
| if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid) |
| && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid) |
| && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid) |
| && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid)) |
| return NULL; |
| } |
| |
| return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL); |
| } |
| |
| /* Add or update a dependence described by DEP. |
| MEM1 and MEM2, if non-null, correspond to memory locations in case of |
| data speculation. |
| |
| The function returns a value indicating if an old entry has been changed |
| or a new entry has been added to insn's backward deps. |
| |
| This function merely checks whether producer and consumer are the same |
| insn and doesn't create a dep in that case.  Actual manipulation of |
| dependence data structures is performed in add_or_update_dep_1. */ |
| static enum DEPS_ADJUST_RESULT |
| maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2) |
| { |
| rtx_insn *elem = DEP_PRO (dep); |
| rtx_insn *insn = DEP_CON (dep); |
| |
| gcc_assert (INSN_P (insn) && INSN_P (elem)); |
| |
| /* Don't depend an insn on itself. */ |
| if (insn == elem) |
| { |
| if (sched_deps_info->generate_spec_deps) |
| /* INSN has an internal dependence, which we can't overcome. */ |
| HAS_INTERNAL_DEP (insn) = 1; |
| |
| return DEP_NODEP; |
| } |
| |
| return add_or_update_dep_1 (dep, resolved_p, mem1, mem2); |
| } |
| |
| /* Ask dependency caches what needs to be done for dependence DEP. |
| Return DEP_CREATED if new dependence should be created and there is no |
| need to try to find one searching the dependencies lists. |
| Return DEP_PRESENT if there already is a dependence described by DEP and |
| hence nothing is to be done. |
| Return DEP_CHANGED if there already is a dependence, but it should be |
| updated to incorporate additional information from DEP. */ |
| static enum DEPS_ADJUST_RESULT |
| ask_dependency_caches (dep_t dep) |
| { |
| int elem_luid = INSN_LUID (DEP_PRO (dep)); |
| int insn_luid = INSN_LUID (DEP_CON (dep)); |
| |
| gcc_assert (true_dependency_cache != NULL |
| && output_dependency_cache != NULL |
| && anti_dependency_cache != NULL |
| && control_dependency_cache != NULL); |
| |
| if (!(current_sched_info->flags & USE_DEPS_LIST)) |
| { |
| enum reg_note present_dep_type; |
| |
| if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)) |
| present_dep_type = REG_DEP_TRUE; |
| else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)) |
| present_dep_type = REG_DEP_OUTPUT; |
| else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)) |
| present_dep_type = REG_DEP_ANTI; |
| else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid)) |
| present_dep_type = REG_DEP_CONTROL; |
| else |
| /* There is no existing dep so it should be created. */ |
| return DEP_CREATED; |
| |
| if ((int) DEP_TYPE (dep) >= (int) present_dep_type) |
| /* DEP does not add anything to the existing dependence. */ |
| return DEP_PRESENT; |
| } |
| else |
| { |
| ds_t present_dep_types = 0; |
| |
| if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)) |
| present_dep_types |= DEP_TRUE; |
| if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)) |
| present_dep_types |= DEP_OUTPUT; |
| if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)) |
| present_dep_types |= DEP_ANTI; |
| if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid)) |
| present_dep_types |= DEP_CONTROL; |
| |
| if (present_dep_types == 0) |
| /* There is no existing dep so it should be created. */ |
| return DEP_CREATED; |
| |
| if (!(current_sched_info->flags & DO_SPECULATION) |
| || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid)) |
| { |
| if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES)) |
| == present_dep_types) |
| /* DEP does not add anything to the existing dependence. */ |
| return DEP_PRESENT; |
| } |
| else |
| { |
| /* Only true dependencies can be data speculative and |
| only anti dependencies can be control speculative. */ |
| gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI)) |
| == present_dep_types); |
| |
| /* if (DEP is SPECULATIVE) then |
| ..we should update DEP_STATUS |
| else |
| ..we should reset existing dep to non-speculative. */ |
| } |
| } |
| |
| return DEP_CHANGED; |
| } |
| |
| /* Set dependency caches according to DEP. */ |
| static void |
| set_dependency_caches (dep_t dep) |
| { |
| int elem_luid = INSN_LUID (DEP_PRO (dep)); |
| int insn_luid = INSN_LUID (DEP_CON (dep)); |
| |
| if (!(current_sched_info->flags & USE_DEPS_LIST)) |
| { |
| switch (DEP_TYPE (dep)) |
| { |
| case REG_DEP_TRUE: |
| bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid); |
| break; |
| |
| case REG_DEP_OUTPUT: |
| bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid); |
| break; |
| |
| case REG_DEP_ANTI: |
| bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid); |
| break; |
| |
| case REG_DEP_CONTROL: |
| bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid); |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| else |
| { |
| ds_t ds = DEP_STATUS (dep); |
| |
| if (ds & DEP_TRUE) |
| bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid); |
| if (ds & DEP_OUTPUT) |
| bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid); |
| if (ds & DEP_ANTI) |
| bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid); |
| if (ds & DEP_CONTROL) |
| bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid); |
| |
| if (ds & SPECULATIVE) |
| { |
| gcc_assert (current_sched_info->flags & DO_SPECULATION); |
| bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid); |
| } |
| } |
| } |
| |
| /* The type of dependence DEP has changed from OLD_TYPE.  Update the |
| dependency caches accordingly. */ |
| static void |
| update_dependency_caches (dep_t dep, enum reg_note old_type) |
| { |
| int elem_luid = INSN_LUID (DEP_PRO (dep)); |
| int insn_luid = INSN_LUID (DEP_CON (dep)); |
| |
| /* Clear the corresponding cache entry because the type of the link |
| may have changed.  Keep the entries if USE_DEPS_LIST is set. */ |
| if (!(current_sched_info->flags & USE_DEPS_LIST)) |
| { |
| switch (old_type) |
| { |
| case REG_DEP_OUTPUT: |
| bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid); |
| break; |
| |
| case REG_DEP_ANTI: |
| bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid); |
| break; |
| |
| case REG_DEP_CONTROL: |
| bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid); |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| set_dependency_caches (dep); |
| } |
| |
| /* Convert a dependence pointed to by SD_IT to be non-speculative. */ |
| static void |
| change_spec_dep_to_hard (sd_iterator_def sd_it) |
| { |
| dep_node_t node = DEP_LINK_NODE (*sd_it.linkp); |
| dep_link_t link = DEP_NODE_BACK (node); |
| dep_t dep = DEP_NODE_DEP (node); |
| rtx_insn *elem = DEP_PRO (dep); |
| rtx_insn *insn = DEP_CON (dep); |
| |
| move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn)); |
| |
| DEP_STATUS (dep) &= ~SPECULATIVE; |
| |
| if (true_dependency_cache != NULL) |
| /* Clear the cache entry. */ |
| bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)], |
| INSN_LUID (elem)); |
| } |
| |
| /* Update DEP to incorporate information from NEW_DEP. |
| SD_IT points to DEP in case it should be moved to another list. |
| MEM1 and MEM2, if nonnull, correspond to memory locations in case a |
| data-speculative dependence should be updated. */ |
| static enum DEPS_ADJUST_RESULT |
| update_dep (dep_t dep, dep_t new_dep, |
| sd_iterator_def sd_it ATTRIBUTE_UNUSED, |
| rtx mem1 ATTRIBUTE_UNUSED, |
| rtx mem2 ATTRIBUTE_UNUSED) |
| { |
| enum DEPS_ADJUST_RESULT res = DEP_PRESENT; |
| enum reg_note old_type = DEP_TYPE (dep); |
| bool was_spec = dep_spec_p (dep); |
| |
| DEP_NONREG (dep) |= DEP_NONREG (new_dep); |
| DEP_MULTIPLE (dep) = 1; |
| |
| /* If this is a more restrictive type of dependence than the |
| existing one, then change the existing dependence to this |
| type. */ |
| if ((int) DEP_TYPE (new_dep) < (int) old_type) |
| { |
| DEP_TYPE (dep) = DEP_TYPE (new_dep); |
| res = DEP_CHANGED; |
| } |
| |
| if (current_sched_info->flags & USE_DEPS_LIST) |
| /* Update DEP_STATUS. */ |
| { |
| ds_t dep_status = DEP_STATUS (dep); |
| ds_t ds = DEP_STATUS (new_dep); |
| ds_t new_status = ds | dep_status; |
| |
| if (new_status & SPECULATIVE) |
| { |
| /* Either existing dep or a dep we're adding or both are |
| speculative. */ |
| if (!(ds & SPECULATIVE) |
| || !(dep_status & SPECULATIVE)) |
| /* The new dep can't be speculative. */ |
| new_status &= ~SPECULATIVE; |
| else |
| { |
| /* Both are speculative. Merge probabilities. */ |
| if (mem1 != NULL) |
| { |
| dw_t dw; |
| |
| dw = estimate_dep_weak (mem1, mem2); |
| ds = set_dep_weak (ds, BEGIN_DATA, dw); |
| } |
| |
| new_status = ds_merge (dep_status, ds); |
| } |
| } |
| |
| ds = new_status; |
| |
| if (dep_status != ds) |
| { |
| DEP_STATUS (dep) = ds; |
| res = DEP_CHANGED; |
| } |
| } |
| |
| if (was_spec && !dep_spec_p (dep)) |
| /* The old dep was speculative, but now it isn't. */ |
| change_spec_dep_to_hard (sd_it); |
| |
| if (true_dependency_cache != NULL |
| && res == DEP_CHANGED) |
| update_dependency_caches (dep, old_type); |
| |
| return res; |
| } |
| |
| /* Add or update a dependence described by DEP. |
| MEM1 and MEM2, if non-null, correspond to memory locations in case of |
| data speculation. |
| |
| The function returns a value indicating if an old entry has been changed |
| or a new entry has been added to insn's backward deps or nothing has |
| been updated at all. */ |
| static enum DEPS_ADJUST_RESULT |
| add_or_update_dep_1 (dep_t new_dep, bool resolved_p, |
| rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED) |
| { |
| bool maybe_present_p = true; |
| bool present_p = false; |
| |
| gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep)) |
| && DEP_PRO (new_dep) != DEP_CON (new_dep)); |
| |
| if (flag_checking) |
| check_dep (new_dep, mem1 != NULL); |
| |
| if (true_dependency_cache != NULL) |
| { |
| switch (ask_dependency_caches (new_dep)) |
| { |
| case DEP_PRESENT: |
| dep_t present_dep; |
| sd_iterator_def sd_it; |
| |
| present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep), |
| DEP_CON (new_dep), |
| resolved_p, &sd_it); |
| DEP_MULTIPLE (present_dep) = 1; |
| return DEP_PRESENT; |
| |
| case DEP_CHANGED: |
| maybe_present_p = true; |
| present_p = true; |
| break; |
| |
| case DEP_CREATED: |
| maybe_present_p = false; |
| present_p = false; |
| break; |
| |
| default: |
| gcc_unreachable (); |
| break; |
| } |
| } |
| |
| /* Check that we don't already have this dependence. */ |
| if (maybe_present_p) |
| { |
| dep_t present_dep; |
| sd_iterator_def sd_it; |
| |
| gcc_assert (true_dependency_cache == NULL || present_p); |
| |
| present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep), |
| DEP_CON (new_dep), |
| resolved_p, &sd_it); |
| |
| if (present_dep != NULL) |
| /* We found an existing dependency between ELEM and INSN. */ |
| return update_dep (present_dep, new_dep, sd_it, mem1, mem2); |
| else |
| /* We didn't find a dep, so it should not be present in the cache. */ |
| gcc_assert (!present_p); |
| } |
| |
| /* Might want to check one level of transitivity to save conses. |
| This check should be done in maybe_add_or_update_dep_1. |
| Since we made it to add_or_update_dep_1, we must create |
| (or update) a link. */ |
| |
| if (mem1 != NULL_RTX) |
| { |
| gcc_assert (sched_deps_info->generate_spec_deps); |
| DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA, |
| estimate_dep_weak (mem1, mem2)); |
| } |
| |
| sd_add_dep (new_dep, resolved_p); |
| |
| return DEP_CREATED; |
| } |
| |
| /* Initialize BACK_LIST_PTR with consumer's backward list and |
| FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true |
| initialize with lists that hold resolved deps. */ |
| static void |
| get_back_and_forw_lists (dep_t dep, bool resolved_p, |
| deps_list_t *back_list_ptr, |
| deps_list_t *forw_list_ptr) |
| { |
| rtx_insn *con = DEP_CON (dep); |
| |
| if (!resolved_p) |
| { |
| if (dep_spec_p (dep)) |
| *back_list_ptr = INSN_SPEC_BACK_DEPS (con); |
| else |
| *back_list_ptr = INSN_HARD_BACK_DEPS (con); |
| |
| *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep)); |
| } |
| else |
| { |
| *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con); |
| *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep)); |
| } |
| } |
| |
| /* Add dependence described by DEP. |
| If RESOLVED_P is true treat the dependence as a resolved one. */ |
| void |
| sd_add_dep (dep_t dep, bool resolved_p) |
| { |
| dep_node_t n = create_dep_node (); |
| deps_list_t con_back_deps; |
| deps_list_t pro_forw_deps; |
| rtx_insn *elem = DEP_PRO (dep); |
| rtx_insn *insn = DEP_CON (dep); |
| |
| gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem); |
| |
| if ((current_sched_info->flags & DO_SPECULATION) == 0 |
| || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep))) |
| DEP_STATUS (dep) &= ~SPECULATIVE; |
| |
| copy_dep (DEP_NODE_DEP (n), dep); |
| |
| get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps); |
| |
| add_to_deps_list (DEP_NODE_BACK (n), con_back_deps); |
| |
| if (flag_checking) |
| check_dep (dep, false); |
| |
| add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps); |
| |
| /* If we are adding a dependency to INSN's LOG_LINKs, then note that |
| in the bitmap caches of dependency information. */ |
| if (true_dependency_cache != NULL) |
| set_dependency_caches (dep); |
| } |
| |
| /* Add or update a backward dependence described by DEP, treating it as |
| resolved if RESOLVED_P is true. |
| This function is a convenience wrapper around add_or_update_dep_1. */ |
| enum DEPS_ADJUST_RESULT |
| sd_add_or_update_dep (dep_t dep, bool resolved_p) |
| { |
| return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX); |
| } |
| |
| /* Resolve the dependence pointed to by SD_IT. |
| SD_IT will advance to the next element. */ |
| void |
| sd_resolve_dep (sd_iterator_def sd_it) |
| { |
| dep_node_t node = DEP_LINK_NODE (*sd_it.linkp); |
| dep_t dep = DEP_NODE_DEP (node); |
| rtx_insn *pro = DEP_PRO (dep); |
| rtx_insn *con = DEP_CON (dep); |
| |
| if (dep_spec_p (dep)) |
| move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con), |
| INSN_RESOLVED_BACK_DEPS (con)); |
| else |
| move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con), |
| INSN_RESOLVED_BACK_DEPS (con)); |
| |
| move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro), |
| INSN_RESOLVED_FORW_DEPS (pro)); |
| } |
| |
| /* Perform the inverse operation of sd_resolve_dep. Restore the dependence |
| pointed to by SD_IT to unresolved state. */ |
| void |
| sd_unresolve_dep (sd_iterator_def sd_it) |
| { |
| dep_node_t node = DEP_LINK_NODE (*sd_it.linkp); |
| dep_t dep = DEP_NODE_DEP (node); |
| rtx_insn *pro = DEP_PRO (dep); |
| rtx_insn *con = DEP_CON (dep); |
| |
| if (dep_spec_p (dep)) |
| move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con), |
| INSN_SPEC_BACK_DEPS (con)); |
| else |
| move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con), |
| INSN_HARD_BACK_DEPS (con)); |
| |
| move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro), |
| INSN_FORW_DEPS (pro)); |
| } |
| |
| /* Make TO depend on all of FROM's producers. |
| If RESOLVED_P is true add dependencies to the resolved lists. */ |
| void |
| sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p) |
| { |
| sd_list_types_def list_type; |
| sd_iterator_def sd_it; |
| dep_t dep; |
| |
| list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK; |
| |
| FOR_EACH_DEP (from, list_type, sd_it, dep) |
| { |
| dep_def _new_dep, *new_dep = &_new_dep; |
| |
| copy_dep (new_dep, dep); |
| DEP_CON (new_dep) = to; |
| sd_add_dep (new_dep, resolved_p); |
| } |
| } |
| |
| /* Remove a dependency referred to by SD_IT. |
| SD_IT will point to the next dependence after removal. */ |
| void |
| sd_delete_dep (sd_iterator_def sd_it) |
| { |
| dep_node_t n = DEP_LINK_NODE (*sd_it.linkp); |
| dep_t dep = DEP_NODE_DEP (n); |
| rtx_insn *pro = DEP_PRO (dep); |
| rtx_insn *con = DEP_CON (dep); |
| deps_list_t con_back_deps; |
| deps_list_t pro_forw_deps; |
| |
| if (true_dependency_cache != NULL) |
| { |
| int elem_luid = INSN_LUID (pro); |
| int insn_luid = INSN_LUID (con); |
| |
| bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid); |
| bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid); |
| bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid); |
| bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid); |
| |
| if (current_sched_info->flags & DO_SPECULATION) |
| bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid); |
| } |
| |
| get_back_and_forw_lists (dep, sd_it.resolved_p, |
| &con_back_deps, &pro_forw_deps); |
| |
| remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps); |
| remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps); |
| |
| delete_dep_node (n); |
| } |
| |
| /* Dump size of the lists. */ |
| #define DUMP_LISTS_SIZE (2) |
| |
| /* Dump dependencies of the lists. */ |
| #define DUMP_LISTS_DEPS (4) |
| |
| /* Dump all information about the lists. */ |
| #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS) |
| |
| /* Dump deps_lists of INSN specified by TYPES to DUMP. |
| FLAGS is a bit mask specifying what information about the lists needs |
| to be printed. |
| If FLAGS has the very first bit set, then dump all information about |
| the lists and propagate this bit into the callee dump functions. */ |
| static void |
| dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags) |
| { |
| sd_iterator_def sd_it; |
| dep_t dep; |
| int all; |
| |
| all = (flags & 1); |
| |
| if (all) |
| flags |= DUMP_LISTS_ALL; |
| |
| fprintf (dump, "["); |
| |
| if (flags & DUMP_LISTS_SIZE) |
| fprintf (dump, "%d; ", sd_lists_size (insn, types)); |
| |
| if (flags & DUMP_LISTS_DEPS) |
| { |
| FOR_EACH_DEP (insn, types, sd_it, dep) |
| { |
| dump_dep (dump, dep, dump_dep_flags | all); |
| fprintf (dump, " "); |
| } |
| } |
| } |
| |
| /* Dump all information about deps_lists of INSN specified by TYPES |
| to STDERR. */ |
| void |
| sd_debug_lists (rtx insn, sd_list_types_def types) |
| { |
| dump_lists (stderr, insn, types, 1); |
| fprintf (stderr, "\n"); |
| } |
| |
| /* A wrapper around add_dependence_1, to add a dependence of CON on |
| PRO, with type DEP_TYPE. This function implements special handling |
| for REG_DEP_CONTROL dependencies. For these, we optionally promote |
| the type to REG_DEP_ANTI if we can determine that predication is |
| impossible; otherwise we add additional true dependencies on the |
| INSN_COND_DEPS list of the jump (which PRO must be). */ |
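| /* Illustrative example (insn numbers and registers are made up): |
| |
| insn 10: sets (reg:CCZ 17) from a compare |
| insn 11: conditional jump on (ne (reg:CCZ 17) (const_int 0)) -- PRO |
| insn 12: a store that is control-dependent on insn 11 -- CON |
| |
| If insn 12 is later predicated on the reverse of insn 11's condition |
| rather than kept after the jump, it must still follow insn 10, which is |
| why the true dependencies from INSN_COND_DEPS of the jump are added. */ |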
| void |
| add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type) |
| { |
| if (dep_type == REG_DEP_CONTROL |
| && !(current_sched_info->flags & DO_PREDICATION)) |
| dep_type = REG_DEP_ANTI; |
| |
| /* A REG_DEP_CONTROL dependence may be eliminated through predication, |
| so we must also make the insn dependent on the setter of the |
| condition. */ |
| if (dep_type == REG_DEP_CONTROL) |
| { |
| rtx_insn *real_pro = pro; |
| rtx_insn *other = real_insn_for_shadow (real_pro); |
| rtx cond; |
| |
| if (other != NULL_RTX) |
| real_pro = other; |
| cond = sched_get_reverse_condition_uncached (real_pro); |
| /* Verify that the insn does not use a different value in |
| the condition register than the one that was present at |
| the jump. */ |
| if (cond == NULL_RTX) |
| dep_type = REG_DEP_ANTI; |
| else if (INSN_CACHED_COND (real_pro) == const_true_rtx) |
| { |
| HARD_REG_SET uses; |
| CLEAR_HARD_REG_SET (uses); |
| note_uses (&PATTERN (con), record_hard_reg_uses, &uses); |
| if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0)))) |
| dep_type = REG_DEP_ANTI; |
| } |
| if (dep_type == REG_DEP_CONTROL) |
| { |
| if (sched_verbose >= 5) |
| fprintf (sched_dump, "making DEP_CONTROL for %d\n", |
| INSN_UID (real_pro)); |
| add_dependence_list (con, INSN_COND_DEPS (real_pro), 0, |
| REG_DEP_TRUE, false); |
| } |
| } |
| |
| add_dependence_1 (con, pro, dep_type); |
| } |
| |
| /* A convenience wrapper to operate on an entire list. HARD should be |
| true if DEP_NONREG should be set on newly created dependencies. */ |
| |
| static void |
| add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond, |
| enum reg_note dep_type, bool hard) |
| { |
| mark_as_hard = hard; |
| for (; list; list = list->next ()) |
| { |
| if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ())) |
| add_dependence (insn, list->insn (), dep_type); |
| } |
| mark_as_hard = false; |
| } |
| |
| /* Similar, but free *LISTP at the same time, when the context |
| is not readonly. HARD should be true if DEP_NONREG should be set on |
| newly created dependencies. */ |
| |
| static void |
| add_dependence_list_and_free (class deps_desc *deps, rtx_insn *insn, |
| rtx_insn_list **listp, |
| int uncond, enum reg_note dep_type, bool hard) |
| { |
| add_dependence_list (insn, *listp, uncond, dep_type, hard); |
| |
| /* We don't want to short-circuit dependencies involving debug |
| insns, because they may cause actual dependencies to be |
| disregarded. */ |
| if (deps->readonly || DEBUG_INSN_P (insn)) |
| return; |
| |
| free_INSN_LIST_list (listp); |
| } |
| |
| /* Remove all occurrences of INSN from LIST. Return the number of |
| occurrences removed. */ |
| |
| static int |
| remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp) |
| { |
| int removed = 0; |
| |
| while (*listp) |
| { |
| if ((*listp)->insn () == insn) |
| { |
| remove_free_INSN_LIST_node (listp); |
| removed++; |
| continue; |
| } |
| |
| listp = (rtx_insn_list **)&XEXP (*listp, 1); |
| } |
| |
| return removed; |
| } |
| |
| /* Same as above, but process two lists at once. */ |
| static int |
| remove_from_both_dependence_lists (rtx_insn *insn, |
| rtx_insn_list **listp, |
| rtx_expr_list **exprp) |
| { |
| int removed = 0; |
| |
| while (*listp) |
| { |
| if (XEXP (*listp, 0) == insn) |
| { |
| remove_free_INSN_LIST_node (listp); |
| remove_free_EXPR_LIST_node (exprp); |
| removed++; |
| continue; |
| } |
| |
| listp = (rtx_insn_list **)&XEXP (*listp, 1); |
| exprp = (rtx_expr_list **)&XEXP (*exprp, 1); |
| } |
| |
| return removed; |
| } |
| |
| /* Clear all dependencies for an insn. */ |
| static void |
| delete_all_dependences (rtx_insn *insn) |
| { |
| sd_iterator_def sd_it; |
| dep_t dep; |
| |
| /* The loop below could be optimized to clear the caches and back_deps |
| in one call, but that would provoke duplication of code from |
| sd_delete_dep (). */ |
| |
| for (sd_it = sd_iterator_start (insn, SD_LIST_BACK); |
| sd_iterator_cond (&sd_it, &dep);) |
| sd_delete_dep (sd_it); |
| } |
| |
| /* All insns in a scheduling group except the first should only have |
| dependencies on the previous insn in the group. So we find the |
| first instruction in the scheduling group by walking the dependence |
| chains backwards. Then we add the dependencies for the group to |
| the previous nonnote insn. */ |
| |
| static void |
| chain_to_prev_insn (rtx_insn *insn) |
| { |
| sd_iterator_def sd_it; |
| dep_t dep; |
| rtx_insn *prev_nonnote; |
| |
| FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep) |
| { |
| rtx_insn *i = insn; |
| rtx_insn *pro = DEP_PRO (dep); |
| |
| do |
| { |
| i = prev_nonnote_insn (i); |
| |
| if (pro == i) |
| goto next_link; |
| } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i)); |
| |
| if (! sched_insns_conditions_mutex_p (i, pro)) |
| add_dependence (i, pro, DEP_TYPE (dep)); |
| next_link:; |
| } |
| |
| delete_all_dependences (insn); |
| |
| prev_nonnote = prev_nonnote_nondebug_insn (insn); |
| if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote) |
| && ! sched_insns_conditions_mutex_p (insn, prev_nonnote)) |
| add_dependence (insn, prev_nonnote, REG_DEP_ANTI); |
| } |
| |
| /* Process an insn's memory dependencies. There are four kinds of |
| dependencies: |
| |
| (0) read dependence: read follows read |
| (1) true dependence: read follows write |
| (2) output dependence: write follows write |
| (3) anti dependence: write follows read |
| |
| We are careful to build only dependencies which actually exist, and |
| use transitivity to avoid building too many links. */ |
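| |
| /* For illustration (pseudo code, purely as an example): |
| |
| (0) read dependence: r1 = [addr]; ... r2 = [addr]; |
| (1) true dependence: [addr] = r1; ... r2 = [addr]; |
| (2) output dependence: [addr] = r1; ... [addr] = r2; |
| (3) anti dependence: r1 = [addr]; ... [addr] = r2; |
| |
| A plain read-after-read pair does not by itself force an ordering; the |
| other three kinds do. */ |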
| |
| /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST. |
| The MEM is a memory reference contained within INSN, which we are saving |
| so that we can do memory aliasing on it. */ |
| |
| static void |
| add_insn_mem_dependence (class deps_desc *deps, bool read_p, |
| rtx_insn *insn, rtx mem) |
| { |
| rtx_insn_list **insn_list; |
| rtx_insn_list *insn_node; |
| rtx_expr_list **mem_list; |
| rtx_expr_list *mem_node; |
| |
| gcc_assert (!deps->readonly); |
| if (read_p) |
| { |
| insn_list = &deps->pending_read_insns; |
| mem_list = &deps->pending_read_mems; |
| if (!DEBUG_INSN_P (insn)) |
| deps->pending_read_list_length++; |
| } |
| else |
| { |
| insn_list = &deps->pending_write_insns; |
| mem_list = &deps->pending_write_mems; |
| deps->pending_write_list_length++; |
| } |
| |
| insn_node = alloc_INSN_LIST (insn, *insn_list); |
| *insn_list = insn_node; |
| |
| if (sched_deps_info->use_cselib) |
| { |
| mem = shallow_copy_rtx (mem); |
| XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0), |
| GET_MODE (mem), insn); |
| } |
| mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list); |
| *mem_list = mem_node; |
| } |
| |
| /* Make a dependency between every memory reference on the pending lists |
| and INSN, thus flushing the pending lists. FOR_READ is true if emitting |
| dependencies for a read operation, similarly with FOR_WRITE. */ |
| |
| static void |
| flush_pending_lists (class deps_desc *deps, rtx_insn *insn, int for_read, |
| int for_write) |
| { |
| if (for_write) |
| { |
| add_dependence_list_and_free (deps, insn, &deps->pending_read_insns, |
| 1, REG_DEP_ANTI, true); |
| if (!deps->readonly) |
| { |
| free_EXPR_LIST_list (&deps->pending_read_mems); |
| deps->pending_read_list_length = 0; |
| } |
| } |
| |
| add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1, |
| for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT, |
| true); |
| |
| add_dependence_list_and_free (deps, insn, |
| &deps->last_pending_memory_flush, 1, |
| for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT, |
| true); |
| |
| add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1, |
| REG_DEP_ANTI, true); |
| |
| if (DEBUG_INSN_P (insn)) |
| { |
| if (for_write) |
| free_INSN_LIST_list (&deps->pending_read_insns); |
| free_INSN_LIST_list (&deps->pending_write_insns); |
| free_INSN_LIST_list (&deps->last_pending_memory_flush); |
| free_INSN_LIST_list (&deps->pending_jump_insns); |
| } |
| |
| if (!deps->readonly) |
| { |
| free_EXPR_LIST_list (&deps->pending_write_mems); |
| deps->pending_write_list_length = 0; |
| |
| deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX); |
| deps->pending_flush_length = 1; |
| } |
| mark_as_hard = false; |
| } |
| |
| /* The instruction whose dependencies we are currently analyzing. */ |
| static rtx_insn *cur_insn = NULL; |
| |
| /* Implement hooks for haifa scheduler. */ |
| |
| static void |
| haifa_start_insn (rtx_insn *insn) |
| { |
| gcc_assert (insn && !cur_insn); |
| |
| cur_insn = insn; |
| } |
| |
| static void |
| haifa_finish_insn (void) |
| { |
| cur_insn = NULL; |
| } |
| |
| void |
| haifa_note_reg_set (int regno) |
| { |
| SET_REGNO_REG_SET (reg_pending_sets, regno); |
| } |
| |
| void |
| haifa_note_reg_clobber (int regno) |
| { |
| SET_REGNO_REG_SET (reg_pending_clobbers, regno); |
| } |
| |
| void |
| haifa_note_reg_use (int regno) |
| { |
| SET_REGNO_REG_SET (reg_pending_uses, regno); |
| } |
| |
| static void |
| haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds) |
| { |
| if (!(ds & SPECULATIVE)) |
| { |
| mem = NULL_RTX; |
| pending_mem = NULL_RTX; |
| } |
| else |
| gcc_assert (ds & BEGIN_DATA); |
| |
| { |
| dep_def _dep, *dep = &_dep; |
| |
| init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds), |
| current_sched_info->flags & USE_DEPS_LIST ? ds : 0); |
| DEP_NONREG (dep) = 1; |
| maybe_add_or_update_dep_1 (dep, false, pending_mem, mem); |
| } |
| |
| } |
| |
| static void |
| haifa_note_dep (rtx_insn *elem, ds_t ds) |
| { |
| dep_def _dep; |
| dep_t dep = &_dep; |
| |
| init_dep (dep, elem, cur_insn, ds_to_dt (ds)); |
| if (mark_as_hard) |
| DEP_NONREG (dep) = 1; |
| maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX); |
| } |
| |
| static void |
| note_reg_use (int r) |
| { |
| if (sched_deps_info->note_reg_use) |
| sched_deps_info->note_reg_use (r); |
| } |
| |
| static void |
| note_reg_set (int r) |
| { |
| if (sched_deps_info->note_reg_set) |
| sched_deps_info->note_reg_set (r); |
| } |
| |
| static void |
| note_reg_clobber (int r) |
| { |
| if (sched_deps_info->note_reg_clobber) |
| sched_deps_info->note_reg_clobber (r); |
| } |
| |
| static void |
| note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds) |
| { |
| if (sched_deps_info->note_mem_dep) |
| sched_deps_info->note_mem_dep (m1, m2, e, ds); |
| } |
| |
| static void |
| note_dep (rtx_insn *e, ds_t ds) |
| { |
| if (sched_deps_info->note_dep) |
| sched_deps_info->note_dep (e, ds); |
| } |
| |
| /* Return the reg_note corresponding to dep_status DS. */ |
| enum reg_note |
| ds_to_dt (ds_t ds) |
| { |
| if (ds & DEP_TRUE) |
| return REG_DEP_TRUE; |
| else if (ds & DEP_OUTPUT) |
| return REG_DEP_OUTPUT; |
| else if (ds & DEP_ANTI) |
| return REG_DEP_ANTI; |
| else |
| { |
| gcc_assert (ds & DEP_CONTROL); |
| return REG_DEP_CONTROL; |
| } |
| } |
| |
| |
| |
| /* Functions for computation of info needed for register pressure |
| sensitive insn scheduling. */ |
| |
| |
| /* Allocate and return reg_use_data structure for REGNO and INSN. */ |
| static struct reg_use_data * |
| create_insn_reg_use (int regno, rtx_insn *insn) |
| { |
| struct reg_use_data *use; |
| |
| use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data)); |
| use->regno = regno; |
| use->insn = insn; |
| use->next_insn_use = INSN_REG_USE_LIST (insn); |
| INSN_REG_USE_LIST (insn) = use; |
| return use; |
| } |
| |
| /* Allocate reg_set_data structure for REGNO and INSN. */ |
| static void |
| create_insn_reg_set (int regno, rtx insn) |
| { |
| struct reg_set_data *set; |
| |
| set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data)); |
| set->regno = regno; |
| set->insn = insn; |
| set->next_insn_set = INSN_REG_SET_LIST (insn); |
| INSN_REG_SET_LIST (insn) = set; |
| } |
| |
| /* Set up insn register uses for INSN and dependency context DEPS. */ |
| static void |
| setup_insn_reg_uses (class deps_desc *deps, rtx_insn *insn) |
| { |
| unsigned i; |
| reg_set_iterator rsi; |
| struct reg_use_data *use, *use2, *next; |
| struct deps_reg *reg_last; |
| |
| EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi) |
| { |
| if (i < FIRST_PSEUDO_REGISTER |
| && TEST_HARD_REG_BIT (ira_no_alloc_regs, i)) |
| continue; |
| |
| if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX |
| && ! REGNO_REG_SET_P (reg_pending_sets, i) |
| && ! REGNO_REG_SET_P (reg_pending_clobbers, i)) |
| /* Ignore a use that is not dying. */ |
| continue; |
| |
| use = create_insn_reg_use (i, insn); |
| use->next_regno_use = use; |
| reg_last = &deps->reg_last[i]; |
| |
| /* Create the circular list of uses of this register. */ |
| for (rtx_insn_list *list = reg_last->uses; list; list = list->next ()) |
| { |
| use2 = create_insn_reg_use (i, list->insn ()); |
| next = use->next_regno_use; |
| use->next_regno_use = use2; |
| use2->next_regno_use = next; |
| } |
| } |
| } |
| |
| /* Register pressure info for the currently processed insn. */ |
| static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES]; |
| |
| /* Return TRUE if INSN has the use structure for REGNO. */ |
| static bool |
| insn_use_p (rtx insn, int regno) |
| { |
| struct reg_use_data *use; |
| |
| for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use) |
| if (use->regno == regno) |
| return true; |
| return false; |
| } |
| |
| /* Update the register pressure info after the birth of pseudo register |
| REGNO in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively, |
| whether the register is set by a CLOBBER or is unused after the insn.  */ |
| static void |
| mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p) |
| { |
| int incr, new_incr; |
| enum reg_class cl; |
| |
| gcc_assert (regno >= FIRST_PSEUDO_REGISTER); |
| cl = sched_regno_pressure_class[regno]; |
| if (cl != NO_REGS) |
| { |
| incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)]; |
| if (clobber_p) |
| { |
| new_incr = reg_pressure_info[cl].clobber_increase + incr; |
| reg_pressure_info[cl].clobber_increase = new_incr; |
| } |
| else if (unused_p) |
| { |
| new_incr = reg_pressure_info[cl].unused_set_increase + incr; |
| reg_pressure_info[cl].unused_set_increase = new_incr; |
| } |
| else |
| { |
| new_incr = reg_pressure_info[cl].set_increase + incr; |
| reg_pressure_info[cl].set_increase = new_incr; |
| if (! insn_use_p (insn, regno)) |
| reg_pressure_info[cl].change += incr; |
| create_insn_reg_set (regno, insn); |
| } |
| gcc_assert (new_incr < (1 << INCREASE_BITS)); |
| } |
| } |
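| |
| /* For example (sizes are illustrative): if REGNO is a pseudo whose mode |
| needs two hard registers of its pressure class, a plain set adds 2 to |
| set_increase, and also adds 2 to change unless INSN itself uses the |
| register; a clobber or an unused set instead adds 2 to |
| clobber_increase or unused_set_increase respectively.  */ |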
| |
| /* Like mark_insn_pseudo_birth except that NREGS says how many hard |
| registers are involved in the birth.  */ |
| static void |
| mark_insn_hard_regno_birth (rtx insn, int regno, int nregs, |
| bool clobber_p, bool unused_p) |
| { |
| enum reg_class cl; |
| int new_incr, last = regno + nregs; |
| |
| while (regno < last) |
| { |
| gcc_assert (regno < FIRST_PSEUDO_REGISTER); |
| if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)) |
| { |
| cl = sched_regno_pressure_class[regno]; |
| if (cl != NO_REGS) |
| { |
| if (clobber_p) |
| { |
| new_incr = reg_pressure_info[cl].clobber_increase + 1; |
| reg_pressure_info[cl].clobber_increase = new_incr; |
| } |
| else if (unused_p) |
| { |
| new_incr = reg_pressure_info[cl].unused_set_increase + 1; |
| reg_pressure_info[cl].unused_set_increase = new_incr; |
| } |
| else |
| { |
| new_incr = reg_pressure_info[cl].set_increase + 1; |
| reg_pressure_info[cl].set_increase = new_incr; |
| if (! insn_use_p (insn, regno)) |
| reg_pressure_info[cl].change += 1; |
| create_insn_reg_set (regno, insn); |
| } |
| gcc_assert (new_incr < (1 << INCREASE_BITS)); |
| } |
| } |
| regno++; |
| } |
| } |
| |
| /* Update the register pressure info after the birth of pseudo or hard |
| register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say, |
| respectively, whether the register is set by a CLOBBER or is unused |
| after the insn.  */ |
| static void |
| mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p) |
| { |
| int regno; |
| |
| if (GET_CODE (reg) == SUBREG) |
| reg = SUBREG_REG (reg); |
| |
| if (! REG_P (reg)) |
| return; |
| |
| regno = REGNO (reg); |
| if (regno < FIRST_PSEUDO_REGISTER) |
| mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg), |
| clobber_p, unused_p); |
| else |
| mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p); |
| } |
| |
| /* Update the register pressure info after death of pseudo register |
| REGNO. */ |
| static void |
| mark_pseudo_death (int regno) |
| { |
| int incr; |
| enum reg_class cl; |
| |
| gcc_assert (regno >= FIRST_PSEUDO_REGISTER); |
| cl = sched_regno_pressure_class[regno]; |
| if (cl != NO_REGS) |
| { |
| incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)]; |
| reg_pressure_info[cl].change -= incr; |
| } |
| } |
| |
| /* Like mark_pseudo_death except that NREGS says how many hard |
| registers are involved in the death.  */ |
| static void |
| mark_hard_regno_death (int regno, int nregs) |
| { |
| enum reg_class cl; |
| int last = regno + nregs; |
| |
| while (regno < last) |
| { |
| gcc_assert (regno < FIRST_PSEUDO_REGISTER); |
| if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)) |
| { |
| cl = sched_regno_pressure_class[regno]; |
| if (cl != NO_REGS) |
| reg_pressure_info[cl].change -= 1; |
| } |
| regno++; |
| } |
| } |
| |
| /* Update the register pressure info after death of pseudo or hard |
| register REG. */ |
| static void |
| mark_reg_death (rtx reg) |
| { |
| int regno; |
| |
| if (GET_CODE (reg) == SUBREG) |
| reg = SUBREG_REG (reg); |
| |
| if (! REG_P (reg)) |
| return; |
| |
| regno = REGNO (reg); |
| if (regno < FIRST_PSEUDO_REGISTER) |
| mark_hard_regno_death (regno, REG_NREGS (reg)); |
| else |
| mark_pseudo_death (regno); |
| } |
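| |
| /* Taken together, the birth and death updates above leave |
| reg_pressure_info[cl].change holding the net register pressure change |
| of the insn for pressure class CL, while the *_increase fields record |
| the individual contributions of clobbers, sets and unused sets.  */ |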
| |
| /* Process SETTER of REG. DATA is an insn containing the setter. */ |
| static void |
| mark_insn_reg_store (rtx reg, const_rtx setter, void *data) |
| { |
| if (setter != NULL_RTX && GET_CODE (setter) != SET) |
| return; |
| mark_insn_reg_birth |
| ((rtx) data, reg, false, |
| find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX); |
| } |
| |
| /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */ |
| static void |
| mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data) |
| { |
| if (GET_CODE (setter) == CLOBBER) |
| mark_insn_reg_birth ((rtx) data, reg, true, false); |
| } |
| |
| /* Set up reg pressure info related to INSN. */ |
| void |
| init_insn_reg_pressure_info (rtx_insn *insn) |
| { |
| int i, len; |
| enum reg_class cl; |
| static struct reg_pressure_data *pressure_info; |
| rtx link; |
| |
| gcc_assert (sched_pressure != SCHED_PRESSURE_NONE); |
| |
| if (! INSN_P (insn)) |
| return; |
| |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| { |
| cl = ira_pressure_classes[i]; |
| reg_pressure_info[cl].clobber_increase = 0; |
| reg_pressure_info[cl].set_increase = 0; |
| reg_pressure_info[cl].unused_set_increase = 0; |
| reg_pressure_info[cl].change = 0; |
| } |
| |
| note_stores (insn, mark_insn_reg_clobber, insn); |
| |
| note_stores (insn, mark_insn_reg_store, insn); |
| |
| if (AUTO_INC_DEC) |
| for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) |
| if (REG_NOTE_KIND (link) == REG_INC) |
| mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn); |
| |
| for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) |
| if (REG_NOTE_KIND (link) == REG_DEAD) |
| mark_reg_death (XEXP (link, 0)); |
| |
| len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num; |
| pressure_info |
| = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len); |
| if (sched_pressure == SCHED_PRESSURE_WEIGHTED) |
| INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num |
| * sizeof (int), 1); |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| { |
| cl = ira_pressure_classes[i]; |
| pressure_info[i].clobber_increase |
| = reg_pressure_info[cl].clobber_increase; |
| pressure_info[i].set_increase = reg_pressure_info[cl].set_increase; |
| pressure_info[i].unused_set_increase |
| = reg_pressure_info[cl].unused_set_increase; |
| pressure_info[i].change = reg_pressure_info[cl].change; |
| } |
| } |
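| |
| /* To summarize the scan above: clobbers are recorded first, then sets |
| (including REG_INC side effects when auto-inc/dec is enabled), and |
| finally REG_DEAD notes.  So for an insn such as |
| (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102))) with REG_DEAD |
| notes for 101 and 102 (registers illustrative, all in one pressure |
| class and each needing a single hard register), the recorded change |
| for that class is the birth of 100 minus the deaths of 101 and 102.  */ |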
| |
| |
| |
| |
| /* Internal variable for the sched_analyze_[12] () functions. |
| If it is true, sched_analyze_[12] is looking at the outermost SET |
| of the insn.  */ |
| static bool can_start_lhs_rhs_p; |
| |
| /* Extend reg info for the deps context DEPS given that |
| we have just generated a register numbered REGNO. */ |
| static void |
| extend_deps_reg_info (class deps_desc *deps, int regno) |
| { |
| int max_regno = regno + 1; |
| |
| gcc_assert (!reload_completed); |
| |
| /* In a readonly context, it would not hurt to extend info, |
| but it should not be needed. */ |
| if (reload_completed && deps->readonly) |
| { |
| deps->max_reg = max_regno; |
| return; |
| } |
| |
| if (max_regno > deps->max_reg) |
| { |
| deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last, |
| max_regno); |
| memset (&deps->reg_last[deps->max_reg], |
| 0, (max_regno - deps->max_reg) |
| * sizeof (struct deps_reg)); |
| deps->max_reg = max_regno; |
| } |
| } |
| |
| /* Extends REG_INFO_P if needed. */ |
| void |
| maybe_extend_reg_info_p (void) |
| { |
| /* Extend REG_INFO_P, if needed. */ |
| if ((unsigned int)max_regno - 1 >= reg_info_p_size) |
| { |
| size_t new_reg_info_p_size = max_regno + 128; |
| |
| gcc_assert (!reload_completed && sel_sched_p ()); |
| |
| reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p, |
| new_reg_info_p_size, |
| reg_info_p_size, |
| sizeof (*reg_info_p)); |
| reg_info_p_size = new_reg_info_p_size; |
| } |
| } |
| |
| /* Analyze a single reference to register (reg:MODE REGNO) in INSN. |
| The type of the reference is specified by REF and can be SET, |
| CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */ |
| |
| static void |
| sched_analyze_reg (class deps_desc *deps, int regno, machine_mode mode, |
| enum rtx_code ref, rtx_insn *insn) |
| { |
| /* We could emit new pseudos in renaming. Extend the reg structures. */ |
| if (!reload_completed && sel_sched_p () |
| && (regno >= max_reg_num () - 1 || regno >= deps->max_reg)) |
| extend_deps_reg_info (deps, regno); |
| |
| maybe_extend_reg_info_p (); |
| |
| /* A hard reg in a wide mode may really be multiple registers. |
| If so, mark all of them just like the first. */ |
| if (regno < FIRST_PSEUDO_REGISTER) |
| { |
| int i = hard_regno_nregs (regno, mode); |
| if (ref == SET) |
| { |
| while (--i >= 0) |
| note_reg_set (regno + i); |
| } |
| else if (ref == USE) |
| { |
| while (--i >= 0) |
| note_reg_use (regno + i); |
| } |
| else |
| { |
| while (--i >= 0) |
| note_reg_clobber (regno + i); |
| } |
| } |
| |
| /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that |
| it does not reload. Ignore these as they have served their |
| purpose already. */ |
| else if (regno >= deps->max_reg) |
| { |
| enum rtx_code code = GET_CODE (PATTERN (insn)); |
| gcc_assert (code == USE || code == CLOBBER); |
| } |
| |
| else |
| { |
| if (ref == SET) |
| note_reg_set (regno); |
| else if (ref == USE) |
| note_reg_use (regno); |
| else |
| note_reg_clobber (regno); |
| |
| /* Pseudos that are REG_EQUIV to something may be replaced |
| by that during reloading. We need only add dependencies for |
| the address in the REG_EQUIV note. */ |
| if (!reload_completed && get_reg_known_equiv_p (regno)) |
| { |
| rtx t = get_reg_known_value (regno); |
| if (MEM_P (t)) |
| sched_analyze_2 (deps, XEXP (t, 0), insn); |
| } |
| |
| /* Don't let it cross a call after scheduling if it doesn't |
| already cross one. */ |
| if (REG_N_CALLS_CROSSED (regno) == 0) |
| { |
| if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn)) |
| deps->sched_before_next_call |
| = alloc_INSN_LIST (insn, deps->sched_before_next_call); |
| else |
| add_dependence_list (insn, deps->last_function_call, 1, |
| REG_DEP_ANTI, false); |
| } |
| } |
| } |
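| |
| /* For instance, a DImode hard register reference on a target with |
| 32-bit words (sizes illustrative) is expanded by the loop above into |
| notifications for both of its component hard registers, whereas a |
| pseudo is notified once under its own register number.  */ |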
| |
| /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC |
| rtx, X, creating all dependencies generated by the write to the |
| destination of X, and reads of everything mentioned. */ |
| |
| static void |
| sched_analyze_1 (class deps_desc *deps, rtx x, rtx_insn *insn) |
| { |
| rtx dest = XEXP (x, 0); |
| enum rtx_code code = GET_CODE (x); |
| bool cslr_p = can_start_lhs_rhs_p; |
| |
| can_start_lhs_rhs_p = false; |
| |
| gcc_assert (dest); |
| if (dest == 0) |
| return; |
| |
| if (cslr_p && sched_deps_info->start_lhs) |
| sched_deps_info->start_lhs (dest); |
| |
| if (GET_CODE (dest) == PARALLEL) |
| { |
| int i; |
| |
| for (i = XVECLEN (dest, 0) - 1; i >= 0; i--) |
| if (XEXP (XVECEXP (dest, 0, i), 0) != 0) |
| sched_analyze_1 (deps, |
| gen_rtx_CLOBBER (VOIDmode, |
| XEXP (XVECEXP (dest, 0, i), 0)), |
| insn); |
| |
| if (cslr_p && sched_deps_info->finish_lhs) |
| sched_deps_info->finish_lhs (); |
| |
| if (code == SET) |
| { |
| can_start_lhs_rhs_p = cslr_p; |
| |
| sched_analyze_2 (deps, SET_SRC (x), insn); |
| |
| can_start_lhs_rhs_p = false; |
| } |
| |
| return; |
| } |
| |
| while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG |
| || GET_CODE (dest) == ZERO_EXTRACT) |
| { |
| if (GET_CODE (dest) == STRICT_LOW_PART |
| || GET_CODE (dest) == ZERO_EXTRACT |
| || read_modify_subreg_p (dest)) |
| { |
| /* These both read and modify the result. We must handle |
| them as writes to get proper dependencies for following |
| instructions. We must handle them as reads to get proper |
| dependencies from this to previous instructions. |
| Thus we need to call sched_analyze_2. */ |
| |
| sched_analyze_2 (deps, XEXP (dest, 0), insn); |
| } |
| if (GET_CODE (dest) == ZERO_EXTRACT) |
| { |
| /* The second and third arguments are values read by this insn. */ |
| sched_analyze_2 (deps, XEXP (dest, 1), insn); |
| sched_analyze_2 (deps, XEXP (dest, 2), insn); |
| } |
| dest = XEXP (dest, 0); |
| } |
| |
| if (REG_P (dest)) |
| { |
| int regno = REGNO (dest); |
| machine_mode mode = GET_MODE (dest); |
| |
| sched_analyze_reg (deps, regno, mode, code, insn); |
| |
| #ifdef STACK_REGS |
| /* Treat all writes to a stack register as modifying the TOS. */ |
| if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG) |
| { |
| /* Avoid analyzing the same register twice. */ |
| if (regno != FIRST_STACK_REG) |
| sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn); |
| |
| add_to_hard_reg_set (&implicit_reg_pending_uses, mode, |
| FIRST_STACK_REG); |
| } |
| #endif |
| } |
| else if (MEM_P (dest)) |
| { |
| /* Writing memory. */ |
| rtx t = dest; |
| |
| if (sched_deps_info->use_cselib) |
| { |
| machine_mode address_mode = get_address_mode (dest); |
| |
| t = shallow_copy_rtx (dest); |
| cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, |
| GET_MODE (t), insn); |
| XEXP (t, 0) |
| = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t), |
| insn); |
| } |
| t = canon_rtx (t); |
| |
| /* Pending lists can't get larger with a readonly context. */ |
| if (!deps->readonly |
| && ((deps->pending_read_list_length + deps->pending_write_list_length) |
| >= param_max_pending_list_length)) |
| { |
| /* Flush all pending reads and writes to prevent the pending lists |
| from getting any larger. Insn scheduling runs too slowly when |
| these lists get long. When compiling GCC with itself, |
| this flush occurs 8 times for sparc, and 10 times for m88k using |
| the default value of 32. */ |
| flush_pending_lists (deps, insn, false, true); |
| } |
| else |
| { |
| rtx_insn_list *pending; |
| rtx_expr_list *pending_mem; |
| |
| pending = deps->pending_read_insns; |
| pending_mem = deps->pending_read_mems; |
| while (pending) |
| { |
| if (anti_dependence (pending_mem->element (), t) |
| && ! sched_insns_conditions_mutex_p (insn, pending->insn ())) |
| note_mem_dep (t, pending_mem->element (), pending->insn (), |
| DEP_ANTI); |
| |
| pending = pending->next (); |
| pending_mem = pending_mem->next (); |
| } |
| |
| pending = deps->pending_write_insns; |
| pending_mem = deps->pending_write_mems; |
| while (pending) |
| { |
| if (output_dependence (pending_mem->element (), t) |
| && ! sched_insns_conditions_mutex_p (insn, pending->insn ())) |
| note_mem_dep (t, pending_mem->element (), |
| pending->insn (), |
| DEP_OUTPUT); |
| |
| pending = pending->next (); |
| pending_mem = pending_mem->next (); |
| } |
| |
| add_dependence_list (insn, deps->last_pending_memory_flush, 1, |
| REG_DEP_ANTI, true); |
| add_dependence_list (insn, deps->pending_jump_insns, 1, |
| REG_DEP_CONTROL, true); |
| |
| if (!deps->readonly) |
| add_insn_mem_dependence (deps, false, insn, dest); |
| } |
| sched_analyze_2 (deps, XEXP (dest, 0), insn); |
| } |
| |
| if (cslr_p && sched_deps_info->finish_lhs) |
| sched_deps_info->finish_lhs (); |
| |
| /* Analyze reads. */ |
| if (GET_CODE (x) == SET) |
| { |
| can_start_lhs_rhs_p = cslr_p; |
| |
| sched_analyze_2 (deps, SET_SRC (x), insn); |
| |
| can_start_lhs_rhs_p = false; |
| } |
| } |
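| |
| /* As an illustration of the memory case above (insn hypothetical): for |
| a store (set (mem:SI (reg:SI 100)) (reg:SI 101)), earlier pending |
| reads that may conflict get anti dependences, earlier pending writes |
| get output dependences, the store is added to the pending write list, |
| and sched_analyze_2 is then called on the address and on the SET_SRC |
| to record the uses of registers 100 and 101.  */ |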
| |
| /* Analyze the uses of memory and registers in rtx X in INSN. */ |
| static void |
| sched_analyze_2 (class deps_desc *deps, rtx x, rtx_insn *insn) |
| { |
| int i; |
| int j; |
| enum rtx_code code; |
| const char *fmt; |
| bool cslr_p = can_start_lhs_rhs_p; |
| |
| can_start_lhs_rhs_p = false; |
| |
| gcc_assert (x); |
| if (x == 0) |
| return; |
| |
| if (cslr_p && sched_deps_info->start_rhs) |
| sched_deps_info->start_rhs (x); |
| |
| code = GET_CODE (x); |
| |
| switch (code) |
| { |
| CASE_CONST_ANY: |
| case SYMBOL_REF: |
| case CONST: |
| case LABEL_REF: |
| /* Ignore constants. */ |
| if (cslr_p && sched_deps_info->finish_rhs) |
| sched_deps_info->finish_rhs (); |
| |
| return; |
| |
| case REG: |
| { |
| int regno = REGNO (x); |
| machine_mode mode = GET_MODE (x); |
| |
| sched_analyze_reg (deps, regno, mode, USE, insn); |
| |
| #ifdef STACK_REGS |
| /* Treat all reads of a stack register as modifying the TOS. */ |
| if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG) |
| { |
| /* Avoid analyzing the same register twice. */ |
| if (regno != FIRST_STACK_REG) |
| sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn); |
| sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn); |
| } |
| #endif |
| |
| if (cslr_p && sched_deps_info->finish_rhs) |
| sched_deps_info->finish_rhs (); |
| |
| return; |
| } |
| |
| case MEM: |
| { |
| /* Reading memory. */ |
| rtx_insn_list *u; |
| rtx_insn_list *pending; |
| rtx_expr_list *pending_mem; |
| rtx t = x; |
| |
| if (sched_deps_info->use_cselib) |
| { |
| machine_mode address_mode = get_address_mode (t); |
| |
| t = shallow_copy_rtx (t); |
| cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, |
| GET_MODE (t), insn); |
| XEXP (t, 0) |
| = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t), |
| insn); |
| } |
| |
| if (!DEBUG_INSN_P (insn)) |
| { |
| t = canon_rtx (t); |
| pending = deps->pending_read_insns; |
| pending_mem = deps->pending_read_mems; |
| while (pending) |
| { |
| if (read_dependence (pending_mem->element (), t) |
| && ! sched_insns_conditions_mutex_p (insn, |
| pending->insn ())) |
| note_mem_dep (t, pending_mem->element (), |
| pending->insn (), |
| DEP_ANTI); |
| |
| pending = pending->next (); |
| pending_mem = pending_mem->next (); |
| } |
| |
| pending = deps->pending_write_insns; |
| pending_mem = deps->pending_write_mems; |
| while (pending) |
| { |
| if (true_dependence (pending_mem->element (), VOIDmode, t) |
| && ! sched_insns_conditions_mutex_p (insn, |
| pending->insn ())) |
| note_mem_dep (t, pending_mem->element (), |
| pending->insn (), |
| sched_deps_info->generate_spec_deps |
| ? BEGIN_DATA | DEP_TRUE : DEP_TRUE); |
| |
| pending = pending->next (); |
| pending_mem = pending_mem->next (); |
| } |
| |
| for (u = deps->last_pending_memory_flush; u; u = u->next ()) |
| add_dependence (insn, u->insn (), REG_DEP_ANTI); |
| |
| for (u = deps->pending_jump_insns; u; u = u->next ()) |
| if (deps_may_trap_p (x)) |
| { |
| if ((sched_deps_info->generate_spec_deps) |
| && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL)) |
| { |
| ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL, |
| MAX_DEP_WEAK); |
| |
| note_dep (u->insn (), ds); |
| } |
| else |
| add_dependence (insn, u->insn (), REG_DEP_CONTROL); |
| } |
| } |
| |
| /* Always add these dependencies to pending_reads, since |
| this insn may be followed by a write. */ |
| if (!deps->readonly) |
| { |
| if ((deps->pending_read_list_length |
| + deps->pending_write_list_length) |
| >= param_max_pending_list_length |
| && !DEBUG_INSN_P (insn)) |
| flush_pending_lists (deps, insn, true, true); |
| add_insn_mem_dependence (deps, true, insn, x); |
| } |
| |
| sched_analyze_2 (deps, XEXP (x, 0), insn); |
| |
| if (cslr_p && sched_deps_info->finish_rhs) |
| sched_deps_info->finish_rhs (); |
| |
| return; |
| } |
| |
| /* Force pending stores to memory in case a trap handler needs them. |
| Also force pending loads from memory; loads and stores can segfault |
| and the signal handler won't be triggered if the trap insn was moved |
| above a load or store insn.  */ |
| case TRAP_IF: |
| flush_pending_lists (deps, insn, true, true); |
| break; |
| |
| case PREFETCH: |
| if (PREFETCH_SCHEDULE_BARRIER_P (x)) |
| reg_pending_barrier = TRUE_BARRIER; |
| /* A prefetch insn contains addresses only.  So if the prefetch |
| address has no registers, there will be no dependencies on the |
| prefetch insn.  This is wrong from a code correctness point of view, |
| as such a prefetch can be moved below a jump insn which usually |
| generates a MOVE_BARRIER preventing insns containing registers or |
| memories from moving through the barrier.  It is also wrong from a |
| generated code performance point of view, as a prefetch without |
| dependencies will tend to be issued later instead of earlier.  It is |
| hard to generate accurate dependencies for prefetch insns, as a |
| prefetch has only the start address, but it is better to have |
| something than nothing.  */ |
| if (!deps->readonly) |
| { |
| rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0)); |
| if (sched_deps_info->use_cselib) |
| cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn); |
| add_insn_mem_dependence (deps, true, insn, x); |
| } |
| break; |
| |
| case UNSPEC_VOLATILE: |
| flush_pending_lists (deps, insn, true, true); |
| /* FALLTHRU */ |
| |
| case ASM_OPERANDS: |
| case ASM_INPUT: |
| { |
| /* Traditional and volatile asm instructions must be considered to use |
| and clobber all hard registers, all pseudo-registers and all of |
| memory. So must TRAP_IF and UNSPEC_VOLATILE operations. |
| |
| Consider for instance a volatile asm that changes the fpu rounding |
| mode. An insn should not be moved across this even if it only uses |
| pseudo-regs because it might give an incorrectly rounded result. */ |
| if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x)) |
| && !DEBUG_INSN_P (insn)) |
| reg_pending_barrier = TRUE_BARRIER; |
| |
| /* For all ASM_OPERANDS, we must traverse the vector of input operands. |
| We cannot just fall through here since then we would be confused |
| by the ASM_INPUT rtx inside ASM_OPERANDS, which, unlike its normal |
| usage, does not indicate a traditional asm.  */ |
| |
| if (code == ASM_OPERANDS) |
| { |
| for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++) |
| sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn); |
| |
| if (cslr_p && sched_deps_info->finish_rhs) |
| sched_deps_info->finish_rhs (); |
| |
| return; |
| } |
| break; |
| } |
| |
| case PRE_DEC: |
| case POST_DEC: |
| case PRE_INC: |
| case POST_INC: |
| /* These both read and modify the result. We must handle them as writes |
| to get proper dependencies for following instructions. We must handle |
| them as reads to get proper dependencies from this to previous |
| instructions. Thus we need to pass them to both sched_analyze_1 |
| and sched_analyze_2. We must call sched_analyze_2 first in order |
| to get the proper antecedent for the read. */ |
| sched_analyze_2 (deps, XEXP (x, 0), insn); |
| sched_analyze_1 (deps, x, insn); |
| |
| if (cslr_p && sched_deps_info->finish_rhs) |
| sched_deps_info->finish_rhs (); |
| |
| return; |
| |
| case POST_MODIFY: |
| case PRE_MODIFY: |
| /* op0 = op0 + op1 */ |
| sched_analyze_2 (deps, XEXP (x, 0), insn); |
| sched_analyze_2 (deps, XEXP (x, 1), insn); |
| sched_analyze_1 (deps, x, insn); |
| |
| if (cslr_p && sched_deps_info->finish_rhs) |
| sched_deps_info->finish_rhs (); |
| |
| return; |
| |
| default: |
| break; |
| } |
| |
| /* Other cases: walk the insn. */ |
| fmt = GET_RTX_FORMAT (code); |
| for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
| { |
| if (fmt[i] == 'e') |
| sched_analyze_2 (deps, XEXP (x, i), insn); |
| else if (fmt[i] == 'E') |
| for (j = 0; j < XVECLEN (x, i); j++) |
| sched_analyze_2 (deps, XVECEXP (x, i, j), insn); |
| } |
| |
| if (cslr_p && sched_deps_info->finish_rhs) |
| sched_deps_info->finish_rhs (); |
| } |
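| |
| /* Similarly, for a load such as (set (reg:SI 100) (mem:SI (reg:SI 101))) |
| (registers hypothetical), the MEM case above creates true dependences |
| on pending writes that may alias, anti dependences on conflicting |
| pending reads, and control or anti dependences on pending jumps and |
| memory flushes, before adding the load to the pending read list and |
| analyzing the address register 101 as a use.  */ |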
| |
| /* Try to group two fusible insns together to prevent the scheduler |
| from scheduling them apart.  */ |
| |
| static void |
| sched_macro_fuse_insns (rtx_insn *insn) |
| { |
| rtx_insn *prev; |
| /* No target hook would return true with a debug insn as either hook |
| operand, and with very large sequences consisting only of debug |
| insns, calling sched_macro_fuse_insns on each of them would have |
| quadratic compile-time complexity.  */ |
| if (DEBUG_INSN_P (insn)) |
| return; |
| prev = prev_nonnote_nondebug_insn (insn); |
| if (!prev) |
| return; |
| |
| if (any_condjump_p (insn)) |
| { |
| unsigned int condreg1, condreg2; |
| rtx cc_reg_1; |
| if (targetm.fixed_condition_code_regs (&condreg1, &condreg2)) |
| { |
| cc_reg_1 = gen_rtx_REG (CCmode, condreg1); |
| if (reg_referenced_p (cc_reg_1, PATTERN (insn)) |
| && modified_in_p (cc_reg_1, prev)) |
| { |
| if (targetm.sched.macro_fusion_pair_p (prev, insn)) |
| SCHED_GROUP_P (insn) = 1; |
| return; |
| } |
| } |
| } |
| |
| if (single_set (insn) && single_set (prev)) |
| { |
| if (targetm.sched.macro_fusion_pair_p (prev, insn)) |
| SCHED_GROUP_P (insn) = 1; |
| } |
| } |
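| |
| /* Setting SCHED_GROUP_P on the second insn keeps it immediately after |
| its predecessor during scheduling, so that, for instance, a compare |
| that sets the fixed condition-code register followed by a conditional |
| jump using it can stay adjacent on targets whose macro_fusion_pair_p |
| hook accepts the pair.  */ |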
| |
| /* Get the implicit reg pending clobbers for INSN and save them in TEMP. */ |
| void |
| get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn) |
| { |
| extract_insn (insn); |
| preprocess_constraints (insn); |
| alternative_mask preferred = get_preferred_alternatives (insn); |
| ira_implicitly_set_insn_hard_regs (temp, preferred); |
| *temp &= ~ira_no_alloc_regs; |
| } |
| |
| /* Analyze an INSN with pattern X to find all dependencies. */ |
| static void |
| sched_analyze_insn (class deps_desc *deps, rtx x, rtx_insn *insn) |
| { |
| RTX_CODE code = GET_CODE (x); |
| rtx link; |
| unsigned i; |
| reg_set_iterator rsi; |
| |
| if (! reload_completed) |
| { |
| HARD_REG_SET temp; |
| get_implicit_reg_pending_clobbers (&temp, insn); |
| implicit_reg_pending_clobbers |= temp; |
| } |
| |
| can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn) |
| && code == SET); |
| |
| /* Group compare and branch insns for macro-fusion. */ |
| if (!deps->readonly |
| && targetm.sched.macro_fusion_p |
| && targetm.sched.macro_fusion_p ()) |
| sched_macro_fuse_insns (insn); |
| |
| if (may_trap_p (x)) |
| /* Avoid moving trapping instructions across function calls that might |
| not always return. */ |
| add_dependence_list (insn, deps->last_function_call_may_noreturn, |
| 1, REG_DEP_ANTI, true); |
| |
| /* We must avoid creating a situation in which two successors of the |
| current block have different unwind info after scheduling. If at any |
| point the two paths re-join this leads to incorrect unwind info. */ |
| /* ??? There are certain situations involving a forced frame pointer in |
| which, with extra effort, we could fix up the unwind info at a later |
| CFG join. However, it seems better to notice these cases earlier |
| during prologue generation and avoid marking the frame pointer setup |
| as frame-related at all. */ |
| if (RTX_FRAME_RELATED_P (insn)) |
| { |
| /* Make sure prologue insn is scheduled before next jump. */ |
| deps->sched_before_next_jump |
| = alloc_INSN_LIST (insn, deps->sched_before_next_jump); |
| |
| /* Make sure epilogue insn is scheduled after preceding jumps. */ |
| add_dependence_list (insn, deps->last_pending_memory_flush, 1, |
| REG_DEP_ANTI, true); |
| add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI, |
| true); |
| } |
| |
| if (code == COND_EXEC) |
| { |
| sched_analyze_2 (deps, COND_EXEC_TEST (x), insn); |
| |
| /* ??? Should be recording conditions so we reduce the number of |
| false dependencies. */ |
| x = COND_EXEC_CODE (x); |
| code = GET_CODE (x); |
| } |
| if (code == SET || code == CLOBBER) |
| { |
| sched_analyze_1 (deps, x, insn); |
| |
| /* Bare clobber insns are used for letting life analysis, reg-stack |
| and others know that a value is dead. Depend on the last call |
| instruction so that reg-stack won't get confused. */ |
| if (code == CLOBBER) |
| add_dependence_list (insn, deps->last_function_call, 1, |
| REG_DEP_OUTPUT, true); |
| } |
| else if (code == PARALLEL) |
| { |
| for (i = XVECLEN (x, 0); i--;) |
| { |
| rtx sub = XVECEXP (x, 0, i); |
| code = GET_CODE (sub); |
| |
| if (code == COND_EXEC) |
| { |
| sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn); |
| sub = COND_EXEC_CODE (sub); |
| code = GET_CODE (sub); |
| } |
| else if (code == SET || code == CLOBBER) |
| sched_analyze_1 (deps, sub, insn); |
| else |
| sched_analyze_2 (deps, sub, insn); |
| } |
| } |
| else |
| sched_analyze_2 (deps, x, insn); |
| |
| /* Mark registers CLOBBERED or used by called function. */ |
| if (CALL_P (insn)) |
| { |
| for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1)) |
| { |
| if (GET_CODE (XEXP (link, 0)) == CLOBBER) |
| sched_analyze_1 (deps, XEXP (link, 0), insn); |
| else if (GET_CODE (XEXP (link, 0)) != SET) |
| sched_analyze_2 (deps, XEXP (link, 0), insn); |
| } |
| /* Don't schedule anything after a tail call; a tail call needs |
| to use at least all call-saved registers.  */ |
| if (SIBLING_CALL_P (insn)) |
| reg_pending_barrier = TRUE_BARRIER; |
| else if (find_reg_note (insn, REG_SETJMP, NULL)) |
| reg_pending_barrier = MOVE_BARRIER; |
| } |
| |
| if (JUMP_P (insn)) |
| { |
| rtx_insn *next = next_nonnote_nondebug_insn (insn); |
| /* ??? For tablejumps, the barrier may appear not immediately after |
| the jump, but after a label and a jump_table_data insn. */ |
| if (next && LABEL_P (next) && NEXT_INSN (next) |
| && JUMP_TABLE_DATA_P (NEXT_INSN (next))) |
| next = NEXT_INSN (NEXT_INSN (next)); |
| if (next && BARRIER_P (next)) |
| reg_pending_barrier = MOVE_BARRIER; |
| else |
| { |
| rtx_insn_list *pending; |
| rtx_expr_list *pending_mem; |
| |
| if (sched_deps_info->compute_jump_reg_dependencies) |
| { |
| (*sched_deps_info->compute_jump_reg_dependencies) |
| (insn, reg_pending_control_uses); |
| |
| /* Make latency of jump equal to 0 by using anti-dependence. */ |
| EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi) |
| { |
| struct deps_reg *reg_last = &deps->reg_last[i]; |
| add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, |
| false); |
| add_dependence_list (insn, reg_last->implicit_sets, |
| 0, REG_DEP_ANTI, false); |
| add_dependence_list (insn, reg_last->clobbers, 0, |
| REG_DEP_ANTI, false); |
| } |
| } |
| |
| /* All memory writes and volatile reads must happen before the |
| jump. Non-volatile reads must happen before the jump iff |
| the result is needed by the above register used mask. */ |
| |
| pending = deps->pending_write_insns; |
| pending_mem = deps->pending_write_mems; |
| while (pending) |
| { |
| if (! sched_insns_conditions_mutex_p (insn, pending->insn ())) |
| add_dependence (insn, pending->insn (), |
| REG_DEP_OUTPUT); |
| pending = pending->next (); |
| pending_mem = pending_mem->next (); |
| } |
| |
| pending = deps->pending_read_insns; |
| pending_mem = deps->pending_read_mems; |
| while (pending) |
| { |
| if (MEM_VOLATILE_P (pending_mem->element ()) |
| && ! sched_insns_conditions_mutex_p (insn, pending->insn ())) |
| add_dependence (insn, pending->insn (), |
| REG_DEP_OUTPUT); |
| pending = pending->next (); |
| pending_mem = pending_mem->next (); |
| } |
| |
| add_dependence_list (insn, deps->last_pending_memory_flush, 1, |
| REG_DEP_ANTI, true); |
| add_dependence_list (insn, deps->pending_jump_insns, 1, |
| REG_DEP_ANTI, true); |
| } |
| } |
| |
| /* If this instruction can throw an exception, then moving it changes |
| where block boundaries fall. This is mighty confusing elsewhere. |
| Therefore, prevent such an instruction from being moved. Same for |
| non-jump instructions that define block boundaries. |
| ??? Unclear whether this is still necessary in EBB mode. If not, |
| add_branch_dependences should be adjusted for RGN mode instead. */ |
| if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn)) |
| || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn))) |
| reg_pending_barrier = MOVE_BARRIER; |
| |
| if (sched_pressure != SCHED_PRESSURE_NONE) |
| { |
| setup_insn_reg_uses (deps, insn); |
| init_insn_reg_pressure_info (insn); |
| } |
| |
| /* Add register dependencies for insn. */ |
| if (DEBUG_INSN_P (insn)) |
| { |
| rtx_insn *prev = deps->last_debug_insn; |
| rtx_insn_list *u; |
| |
| if (!deps->readonly) |
| deps->last_debug_insn = insn; |
| |
| if (prev) |
| add_dependence (insn, prev, REG_DEP_ANTI); |
| |
| add_dependence_list (insn, deps->last_function_call, 1, |
| REG_DEP_ANTI, false); |
| |
| if (!sel_sched_p ()) |
| for (u = deps->last_pending_memory_flush; u; u = u->next ()) |
| add_dependence (insn, u->insn (), REG_DEP_ANTI); |
| |
| EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi) |
| { |
| struct deps_reg *reg_last = &deps->reg_last[i]; |
| add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false); |
| /* There's no point in making REG_DEP_CONTROL dependencies for |
| debug insns. */ |
| add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI, |
| false); |
| |
| if (!deps->readonly) |
| reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); |
| } |
| CLEAR_REG_SET (reg_pending_uses); |
| |
| /* Quite often, a debug insn will refer to stuff in the |
| previous instruction, but the reason we want this |
| dependency here is to make sure the scheduler doesn't |
| gratuitously move a debug insn ahead. This could dirty |
| DF flags and cause additional analysis that wouldn't have |
| occurred in compilation without debug insns, and such |
| additional analysis can modify the generated code. */ |
| prev = PREV_INSN (insn); |
| |
| if (prev && NONDEBUG_INSN_P (prev)) |
| add_dependence (insn, prev, REG_DEP_ANTI); |
| } |
| else |
| { |
| regset_head set_or_clobbered; |
| |
| EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi) |
| { |
| struct deps_reg *reg_last = &deps->reg_last[i]; |
| add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false); |
| add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI, |
| false); |
| add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE, |
| false); |
| |
| if (!deps->readonly) |
| { |
| reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); |
| reg_last->uses_length++; |
| } |
| } |
| |
| for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
| if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)) |
| { |
| struct deps_reg *reg_last = &deps->reg_last[i]; |
| add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false); |
| add_dependence_list (insn, reg_last->implicit_sets, 0, |
| REG_DEP_ANTI, false); |
| add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE, |
| false); |
| |
| if (!deps->readonly) |
| { |
| reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); |
| reg_last->uses_length++; |
| } |
| } |
| |
| if (targetm.sched.exposed_pipeline) |
| { |
| INIT_REG_SET (&set_or_clobbered); |
| bitmap_ior (&set_or_clobbered, reg_pending_clobbers, |
| reg_pending_sets); |
| EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi) |
| { |
| struct deps_reg *reg_last = &deps->reg_last[i]; |
| rtx list; |
| for (list = reg_last->uses; list; list = XEXP (list, 1)) |
| { |
| rtx other = XEXP (list, 0); |
| if (INSN_CACHED_COND (other) != const_true_rtx |
| && refers_to_regno_p (i, INSN_CACHED_COND (other))) |
| INSN_CACHED_COND (other) = const_true_rtx; |
| } |
| } |
| } |
| |
| /* If the current insn is conditional, we can't free any |
| of the lists. */ |
| if (sched_has_condition_p (insn)) |
| { |
| EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi) |
| { |
| struct deps_reg *reg_last = &deps->reg_last[i]; |
| add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT, |
| false); |
| add_dependence_list (insn, reg_last->implicit_sets, 0, |
| REG_DEP_ANTI, false); |
| add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, |
| false); |
| add_dependence_list (insn, reg_last->control_uses, 0, |
| REG_DEP_CONTROL, false); |
| |
| if (!deps->readonly) |
| { |
| reg_last->clobbers |
| = alloc_INSN_LIST (insn, reg_last->clobbers); |
| reg_last->clobbers_length++; |
| } |
| } |
| EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi) |
| { |
| struct deps_reg *reg_last = &deps->reg_last[i]; |
| add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT, |
| false); |
| add_dependence_list (insn, reg_last->implicit_sets, 0, |
| REG_DEP_ANTI, false); |
| add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT, |
| false); |
| add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, |
| false); |
| add_dependence_list (insn, reg_last->control_uses, 0, |
| REG_DEP_CONTROL, false); |
| |
| if (!deps->readonly) |
| reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); |
| } |
| } |
| else |
| { |
| EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi) |
| { |
| struct deps_reg *reg_last = &deps->reg_last[i]; |
| if (reg_last->uses_length >= param_max_pending_list_length |
| || reg_last->clobbers_length >= param_max_pending_list_length) |
| { |
| add_dependence_list_and_free (deps, insn, ®_last->sets, 0, |
| REG_DEP_OUTPUT, false); |
| add_dependence_list_and_free (deps, insn, |
| ®_last->implicit_sets, 0, |
| REG_DEP_ANTI, false); |
| add_dependence_list_and_free (deps, insn, ®_last->uses, 0, |
| REG_DEP_ANTI, false); |
| add_dependence_list_and_free (deps, insn, |
| ®_last->control_uses, 0, |
| REG_DEP_ANTI, false); |
| add_dependence_list_and_free (deps, insn, |
| ®_last->clobbers, 0, |
| REG_DEP_OUTPUT, false); |
| |