| /* Code for RTL transformations to satisfy insn constraints. |
| Copyright (C) 2010-2021 Free Software Foundation, Inc. |
| Contributed by Vladimir Makarov <vmakarov@redhat.com>. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| |
| /* This file contains code for 3 passes: constraint pass, |
| inheritance/split pass, and pass for undoing failed inheritance and |
| split. |
| |
| The major goal of constraint pass is to transform RTL to satisfy |
| insn and address constraints by: |
| o choosing insn alternatives; |
| o generating *reload insns* (or reloads in brief) and *reload |
| pseudos* which will get necessary hard registers later; |
| o substituting pseudos with equivalent values and removing the |
| instructions that initialized those pseudos. |
| |
   The constraint pass has the biggest and most complicated code in LRA.
| There are a lot of important details like: |
| o reuse of input reload pseudos to simplify reload pseudo |
| allocations; |
| o some heuristics to choose insn alternative to improve the |
| inheritance; |
| o early clobbers etc. |
| |
| The pass is mimicking former reload pass in alternative choosing |
| because the reload pass is oriented to current machine description |
| model. It might be changed if the machine description model is |
| changed. |
| |
| There is special code for preventing all LRA and this pass cycling |
| in case of bugs. |
| |
| On the first iteration of the pass we process every instruction and |
| choose an alternative for each one. On subsequent iterations we try |
| to avoid reprocessing instructions if we can be sure that the old |
| choice is still valid. |
| |
   The inheritance/split pass transforms code to achieve
   inheritance and live range splitting.  It is done on a backward
   traversal of EBBs.
| |
| The inheritance optimization goal is to reuse values in hard |
| registers. There is analogous optimization in old reload pass. The |
| inheritance is achieved by following transformation: |
| |
| reload_p1 <- p reload_p1 <- p |
| ... new_p <- reload_p1 |
| ... => ... |
| reload_p2 <- p reload_p2 <- new_p |
| |
| where p is spilled and not changed between the insns. Reload_p1 is |
| also called *original pseudo* and new_p is called *inheritance |
| pseudo*. |
| |
| The subsequent assignment pass will try to assign the same (or |
| another if it is not possible) hard register to new_p as to |
| reload_p1 or reload_p2. |
| |
| If the assignment pass fails to assign a hard register to new_p, |
| this file will undo the inheritance and restore the original code. |
| This is because implementing the above sequence with a spilled |
| new_p would make the code much worse. The inheritance is done in |
| EBB scope. The above is just a simplified example to get an idea |
| of the inheritance as the inheritance is also done for non-reload |
| insns. |
| |
| Splitting (transformation) is also done in EBB scope on the same |
| pass as the inheritance: |
| |
| r <- ... or ... <- r r <- ... or ... <- r |
| ... s <- r (new insn -- save) |
| ... => |
| ... r <- s (new insn -- restore) |
| ... <- r ... <- r |
| |
| The *split pseudo* s is assigned to the hard register of the |
| original pseudo or hard register r. |
| |
| Splitting is done: |
     o In EBBs with high register pressure for global pseudos (living
       in at least 2 BBs) and assigned to hard registers when there
       is more than one reload needing the hard registers;
| o for pseudos needing save/restore code around calls. |
| |
| If the split pseudo still has the same hard register as the |
| original pseudo after the subsequent assignment pass or the |
| original pseudo was split, the opposite transformation is done on |
| the same pass for undoing inheritance. */ |
| |
| #undef REG_OK_STRICT |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "predict.h" |
| #include "df.h" |
| #include "memmodel.h" |
| #include "tm_p.h" |
| #include "expmed.h" |
| #include "optabs.h" |
| #include "regs.h" |
| #include "ira.h" |
| #include "recog.h" |
| #include "output.h" |
| #include "addresses.h" |
| #include "expr.h" |
| #include "cfgrtl.h" |
| #include "rtl-error.h" |
| #include "lra.h" |
| #include "lra-int.h" |
| #include "print-rtl.h" |
| #include "function-abi.h" |
| #include "rtl-iter.h" |
| |
/* Value of LRA_CURR_RELOAD_NUM at the beginning of the BB of the
   current insn.  Remember that LRA_CURR_RELOAD_NUM is the number of
   emitted reload insns.  */
static int bb_reload_num;

/* The current insn being processed, its single set (NULL if the insn
   is not a single set), and cached per-insn data: the containing
   basic block, the insn recog data, the insn static data, and the
   mode of each operand.  */
static rtx_insn *curr_insn;
static rtx curr_insn_set;
static basic_block curr_bb;
static lra_insn_recog_data_t curr_id;
static struct lra_static_insn_data *curr_static_id;
static machine_mode curr_operand_mode[MAX_RECOG_OPERANDS];
/* Mode of the register substituted by its equivalence with VOIDmode
   (e.g. constant) and whose subreg is the given operand of the
   current insn.  VOIDmode in all other cases.  */
static machine_mode original_subreg_reg_mode[MAX_RECOG_OPERANDS];



/* Start numbers for new registers and insns at the start of the
   current constraints pass.  Pseudos/insns at or above these numbers
   were created by this pass (i.e. they are reload pseudos/insns).  */
static int new_regno_start;
static int new_insn_uid_start;
| |
| /* If LOC is nonnull, strip any outer subreg from it. */ |
| static inline rtx * |
| strip_subreg (rtx *loc) |
| { |
| return loc && GET_CODE (*loc) == SUBREG ? &SUBREG_REG (*loc) : loc; |
| } |
| |
| /* Return hard regno of REGNO or if it is was not assigned to a hard |
| register, use a hard register from its allocno class. */ |
| static int |
| get_try_hard_regno (int regno) |
| { |
| int hard_regno; |
| enum reg_class rclass; |
| |
| if ((hard_regno = regno) >= FIRST_PSEUDO_REGISTER) |
| hard_regno = lra_get_regno_hard_regno (regno); |
| if (hard_regno >= 0) |
| return hard_regno; |
| rclass = lra_get_allocno_class (regno); |
| if (rclass == NO_REGS) |
| return -1; |
| return ira_class_hard_regs[rclass][0]; |
| } |
| |
| /* Return the hard regno of X after removing its subreg. If X is not |
| a register or a subreg of a register, return -1. If X is a pseudo, |
| use its assignment. If FINAL_P return the final hard regno which will |
| be after elimination. */ |
| static int |
| get_hard_regno (rtx x, bool final_p) |
| { |
| rtx reg; |
| int hard_regno; |
| |
| reg = x; |
| if (SUBREG_P (x)) |
| reg = SUBREG_REG (x); |
| if (! REG_P (reg)) |
| return -1; |
| if (! HARD_REGISTER_NUM_P (hard_regno = REGNO (reg))) |
| hard_regno = lra_get_regno_hard_regno (hard_regno); |
| if (hard_regno < 0) |
| return -1; |
| if (final_p) |
| hard_regno = lra_get_elimination_hard_regno (hard_regno); |
| if (SUBREG_P (x)) |
| hard_regno += subreg_regno_offset (hard_regno, GET_MODE (reg), |
| SUBREG_BYTE (x), GET_MODE (x)); |
| return hard_regno; |
| } |
| |
| /* If REGNO is a hard register or has been allocated a hard register, |
| return the class of that register. If REGNO is a reload pseudo |
| created by the current constraints pass, return its allocno class. |
| Return NO_REGS otherwise. */ |
| static enum reg_class |
| get_reg_class (int regno) |
| { |
| int hard_regno; |
| |
| if (! HARD_REGISTER_NUM_P (hard_regno = regno)) |
| hard_regno = lra_get_regno_hard_regno (regno); |
| if (hard_regno >= 0) |
| { |
| hard_regno = lra_get_elimination_hard_regno (hard_regno); |
| return REGNO_REG_CLASS (hard_regno); |
| } |
| if (regno >= new_regno_start) |
| return lra_get_allocno_class (regno); |
| return NO_REGS; |
| } |
| |
/* Return true if REG satisfies (or will satisfy) reg class constraint
   CL.  Use elimination first if REG is a hard register.  If REG is a
   reload pseudo created by this constraints pass, assume that it will
   be allocated a hard register from its allocno class, but allow that
   class to be narrowed to CL if it is currently a superset of CL and
   if either:

   - ALLOW_ALL_RELOAD_CLASS_CHANGES_P is true or
   - the instruction we're processing is not a reload move.

   If NEW_CLASS is nonnull, set *NEW_CLASS to the new allocno class of
   REGNO (reg), or NO_REGS if no change in its class was needed.  */
static bool
in_class_p (rtx reg, enum reg_class cl, enum reg_class *new_class,
	    bool allow_all_reload_class_changes_p = false)
{
  enum reg_class rclass, common_class;
  machine_mode reg_mode;
  rtx src;
  int class_size, hard_regno, nregs, i, j;
  int regno = REGNO (reg);

  if (new_class != NULL)
    *new_class = NO_REGS;
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      /* Hard register: test membership after elimination, without
	 modifying REG itself.  */
      rtx final_reg = reg;
      rtx *final_loc = &final_reg;

      lra_eliminate_reg_if_possible (final_loc);
      return TEST_HARD_REG_BIT (reg_class_contents[cl], REGNO (*final_loc));
    }
  reg_mode = GET_MODE (reg);
  rclass = get_reg_class (regno);
  src = curr_insn_set != NULL ? SET_SRC (curr_insn_set) : NULL;
  if (regno < new_regno_start
      /* Do not allow the constraints for reload instructions to
	 influence the classes of new pseudos.  These reloads are
	 typically moves that have many alternatives, and restricting
	 reload pseudos for one alternative may lead to situations
	 where other reload pseudos are no longer allocatable.  */
      || (!allow_all_reload_class_changes_p
	  && INSN_UID (curr_insn) >= new_insn_uid_start
	  && src != NULL
	  && ((REG_P (src) || MEM_P (src))
	      || (GET_CODE (src) == SUBREG
		  && (REG_P (SUBREG_REG (src)) || MEM_P (SUBREG_REG (src)))))))
    /* When we don't know what class will be used finally for reload
       pseudos, we use ALL_REGS.  */
    return ((regno >= new_regno_start && rclass == ALL_REGS)
	    || (rclass != NO_REGS && ira_class_subset_p[rclass][cl]
		&& ! hard_reg_set_subset_p (reg_class_contents[cl],
					    lra_no_alloc_regs)));
  else
    {
      /* REG's class may be narrowed to the intersection class.  */
      common_class = ira_reg_class_subset[rclass][cl];
      if (new_class != NULL)
	*new_class = common_class;
      if (hard_reg_set_subset_p (reg_class_contents[common_class],
				 lra_no_alloc_regs))
	return false;
      /* Check that there are enough allocatable regs: at least one
	 register of the class must provide all the hard registers
	 needed to hold REG_MODE.  */
      class_size = ira_class_hard_regs_num[common_class];
      for (i = 0; i < class_size; i++)
	{
	  hard_regno = ira_class_hard_regs[common_class][i];
	  nregs = hard_regno_nregs (hard_regno, reg_mode);
	  if (nregs == 1)
	    return true;
	  /* All NREGS consecutive registers must be allocatable and in
	     the class.  */
	  for (j = 0; j < nregs; j++)
	    if (TEST_HARD_REG_BIT (lra_no_alloc_regs, hard_regno + j)
		|| ! TEST_HARD_REG_BIT (reg_class_contents[common_class],
					hard_regno + j))
	      break;
	  if (j >= nregs)
	    return true;
	}
      return false;
    }
}
| |
| /* Return true if REGNO satisfies a memory constraint. */ |
| static bool |
| in_mem_p (int regno) |
| { |
| return get_reg_class (regno) == NO_REGS; |
| } |
| |
/* Return 1 if ADDR is a valid memory address for mode MODE in address
   space AS, and check that each pseudo has the proper kind of hard
   reg.  */
static int
valid_address_p (machine_mode mode ATTRIBUTE_UNUSED,
		 rtx addr, addr_space_t as)
{
#ifdef GO_IF_LEGITIMATE_ADDRESS
  /* Old-style targets: the macro jumps to WIN on success.  It only
     supports the generic address space.  */
  lra_assert (ADDR_SPACE_GENERIC_P (as));
  GO_IF_LEGITIMATE_ADDRESS (mode, addr, win);
  return 0;

 win:
  return 1;
#else
  /* 0 means strictness is taken from the REG_OK_STRICT setting of
     this file (undefined above).  */
  return targetm.addr_space.legitimate_address_p (mode, addr, 0, as);
#endif
}
| |
namespace {
  /* Temporarily eliminates registers in an address (for the lifetime
     of the object).  The constructor rewrites the base and index
     terms of the address in place; the destructor restores the
     original registers.  */
  class address_eliminator {
  public:
    address_eliminator (struct address_info *ad);
    ~address_eliminator ();

  private:
    /* The address being transformed.  */
    struct address_info *m_ad;
    /* Location of the (subreg-stripped) base term, or NULL, and the
       original rtx stored there so it can be restored.  */
    rtx *m_base_loc;
    rtx m_base_reg;
    /* Likewise for the index term.  */
    rtx *m_index_loc;
    rtx m_index_reg;
  };
}
| |
/* Apply register elimination to the base and index terms of *AD,
   remembering the original values so the destructor can undo it.  */
address_eliminator::address_eliminator (struct address_info *ad)
  : m_ad (ad),
    m_base_loc (strip_subreg (ad->base_term)),
    m_base_reg (NULL_RTX),
    m_index_loc (strip_subreg (ad->index_term)),
    m_index_reg (NULL_RTX)
{
  if (m_base_loc != NULL)
    {
      m_base_reg = *m_base_loc;
      /* If we have non-legitimate address which is decomposed not in
	 the way we expected, don't do elimination here.  In such case
	 the address will be reloaded and elimination will be done in
	 reload insn finally.  */
      if (REG_P (m_base_reg))
	lra_eliminate_reg_if_possible (m_base_loc);
      /* Keep the duplicate base term (if any) in sync.  */
      if (m_ad->base_term2 != NULL)
	*m_ad->base_term2 = *m_ad->base_term;
    }
  if (m_index_loc != NULL)
    {
      m_index_reg = *m_index_loc;
      if (REG_P (m_index_reg))
	lra_eliminate_reg_if_possible (m_index_loc);
    }
}
| |
/* Restore the original base and index registers saved by the
   constructor, if elimination changed them.  */
address_eliminator::~address_eliminator ()
{
  if (m_base_loc && *m_base_loc != m_base_reg)
    {
      *m_base_loc = m_base_reg;
      /* Keep the duplicate base term (if any) in sync.  */
      if (m_ad->base_term2 != NULL)
	*m_ad->base_term2 = *m_ad->base_term;
    }
  if (m_index_loc && *m_index_loc != m_index_reg)
    *m_index_loc = m_index_reg;
}
| |
/* Return true if the eliminated form of AD is a legitimate target
   address.  If OP is a MEM, AD is the address within OP, otherwise OP
   should be ignored.  CONSTRAINT is one constraint that the operand
   may need to meet.  */
static bool
valid_address_p (rtx op, struct address_info *ad,
		 enum constraint_num constraint)
{
  /* Eliminate registers in AD for the duration of this function.  */
  address_eliminator eliminator (ad);

  /* Allow a memory OP if it matches CONSTRAINT, even if CONSTRAINT is more
     forgiving than "m".
     Need to extract memory from op for special memory constraint,
     i.e. bcst_mem_operand in i386 backend.  */
  if (MEM_P (extract_mem_from_operand (op))
      && insn_extra_relaxed_memory_constraint (constraint)
      && constraint_satisfied_p (op, constraint))
    return true;

  return valid_address_p (ad->mode, *ad->outer, ad->as);
}
| |
| /* For special_memory_operand, it could be false for MEM_P (op), |
| i.e. bcst_mem_operand in i386 backend. |
| Extract and return real memory operand or op. */ |
| rtx |
| extract_mem_from_operand (rtx op) |
| { |
| for (rtx x = op;; x = XEXP (x, 0)) |
| { |
| if (MEM_P (x)) |
| return x; |
| if (GET_RTX_LENGTH (GET_CODE (x)) != 1 |
| || GET_RTX_FORMAT (GET_CODE (x))[0] != 'e') |
| break; |
| } |
| return op; |
| } |
| |
| /* Return true if the eliminated form of memory reference OP satisfies |
| extra (special) memory constraint CONSTRAINT. */ |
| static bool |
| satisfies_memory_constraint_p (rtx op, enum constraint_num constraint) |
| { |
| struct address_info ad; |
| rtx mem = extract_mem_from_operand (op); |
| if (!MEM_P (mem)) |
| return false; |
| |
| decompose_mem_address (&ad, mem); |
| address_eliminator eliminator (&ad); |
| return constraint_satisfied_p (op, constraint); |
| } |
| |
| /* Return true if the eliminated form of address AD satisfies extra |
| address constraint CONSTRAINT. */ |
| static bool |
| satisfies_address_constraint_p (struct address_info *ad, |
| enum constraint_num constraint) |
| { |
| address_eliminator eliminator (ad); |
| return constraint_satisfied_p (*ad->outer, constraint); |
| } |
| |
| /* Return true if the eliminated form of address OP satisfies extra |
| address constraint CONSTRAINT. */ |
| static bool |
| satisfies_address_constraint_p (rtx op, enum constraint_num constraint) |
| { |
| struct address_info ad; |
| |
| decompose_lea_address (&ad, &op); |
| return satisfies_address_constraint_p (&ad, constraint); |
| } |
| |
| /* Initiate equivalences for LRA. As we keep original equivalences |
| before any elimination, we need to make copies otherwise any change |
| in insns might change the equivalences. */ |
| void |
| lra_init_equiv (void) |
| { |
| ira_expand_reg_equiv (); |
| for (int i = FIRST_PSEUDO_REGISTER; i < max_reg_num (); i++) |
| { |
| rtx res; |
| |
| if ((res = ira_reg_equiv[i].memory) != NULL_RTX) |
| ira_reg_equiv[i].memory = copy_rtx (res); |
| if ((res = ira_reg_equiv[i].invariant) != NULL_RTX) |
| ira_reg_equiv[i].invariant = copy_rtx (res); |
| } |
| } |
| |
static rtx loc_equivalence_callback (rtx, const_rtx, void *);

/* Update equivalence for REGNO.  We need to do this as the
   equivalence might contain other pseudos which are changed by their
   equivalences.  */
static void
update_equiv (int regno)
{
  rtx x;

  if ((x = ira_reg_equiv[regno].memory) != NULL_RTX)
    ira_reg_equiv[regno].memory
      = simplify_replace_fn_rtx (x, NULL_RTX, loc_equivalence_callback,
				 NULL_RTX);
  if ((x = ira_reg_equiv[regno].invariant) != NULL_RTX)
    ira_reg_equiv[regno].invariant
      = simplify_replace_fn_rtx (x, NULL_RTX, loc_equivalence_callback,
				 NULL_RTX);
}
| |
| /* If we have decided to substitute X with another value, return that |
| value, otherwise return X. */ |
| static rtx |
| get_equiv (rtx x) |
| { |
| int regno; |
| rtx res; |
| |
| if (! REG_P (x) || (regno = REGNO (x)) < FIRST_PSEUDO_REGISTER |
| || ! ira_reg_equiv[regno].defined_p |
| || ! ira_reg_equiv[regno].profitable_p |
| || lra_get_regno_hard_regno (regno) >= 0) |
| return x; |
| if ((res = ira_reg_equiv[regno].memory) != NULL_RTX) |
| { |
| if (targetm.cannot_substitute_mem_equiv_p (res)) |
| return x; |
| return res; |
| } |
| if ((res = ira_reg_equiv[regno].constant) != NULL_RTX) |
| return res; |
| if ((res = ira_reg_equiv[regno].invariant) != NULL_RTX) |
| return res; |
| gcc_unreachable (); |
| } |
| |
| /* If we have decided to substitute X with the equivalent value, |
| return that value after elimination for INSN, otherwise return |
| X. */ |
| static rtx |
| get_equiv_with_elimination (rtx x, rtx_insn *insn) |
| { |
| rtx res = get_equiv (x); |
| |
| if (x == res || CONSTANT_P (res)) |
| return res; |
| return lra_eliminate_regs_1 (insn, res, GET_MODE (res), |
| false, false, 0, true); |
| } |
| |
| /* Set up curr_operand_mode. */ |
| static void |
| init_curr_operand_mode (void) |
| { |
| int nop = curr_static_id->n_operands; |
| for (int i = 0; i < nop; i++) |
| { |
| machine_mode mode = GET_MODE (*curr_id->operand_loc[i]); |
| if (mode == VOIDmode) |
| { |
| /* The .md mode for address operands is the mode of the |
| addressed value rather than the mode of the address itself. */ |
| if (curr_id->icode >= 0 && curr_static_id->operand[i].is_address) |
| mode = Pmode; |
| else |
| mode = curr_static_id->operand[i].mode; |
| } |
| curr_operand_mode[i] = mode; |
| } |
| } |
| |
| |
| |
/* This page contains code to reuse input reloads.  */

/* Structure describing one input reload of the current insn.  */
struct input_reload
{
  /* True for an input reload of matched operands.  */
  bool match_p;
  /* The value being reloaded.  */
  rtx input;
  /* The reload pseudo used to hold it.  */
  rtx reg;
};

/* The number of elements in the following array.  */
static int curr_insn_input_reloads_num;
/* Array containing info about input reloads.  It is used to find the
   same input reload and reuse the reload pseudo in this case.  */
static struct input_reload curr_insn_input_reloads[LRA_MAX_INSN_RELOADS];
| |
| /* Initiate data concerning reuse of input reloads for the current |
| insn. */ |
| static void |
| init_curr_insn_input_reloads (void) |
| { |
| curr_insn_input_reloads_num = 0; |
| } |
| |
| /* The canonical form of an rtx inside a MEM is not necessarily the same as the |
| canonical form of the rtx outside the MEM. Fix this up in the case that |
| we're reloading an address (and therefore pulling it outside a MEM). */ |
| static rtx |
| canonicalize_reload_addr (rtx addr) |
| { |
| subrtx_var_iterator::array_type array; |
| FOR_EACH_SUBRTX_VAR (iter, array, addr, NONCONST) |
| { |
| rtx x = *iter; |
| if (GET_CODE (x) == MULT && CONST_INT_P (XEXP (x, 1))) |
| { |
| const HOST_WIDE_INT ci = INTVAL (XEXP (x, 1)); |
| const int pwr2 = exact_log2 (ci); |
| if (pwr2 > 0) |
| { |
| /* Rewrite this to use a shift instead, which is canonical when |
| outside of a MEM. */ |
| PUT_CODE (x, ASHIFT); |
| XEXP (x, 1) = GEN_INT (pwr2); |
| } |
| } |
| } |
| |
| return addr; |
| } |
| |
/* Create a new pseudo using MODE, RCLASS, ORIGINAL or reuse an
   existing reload pseudo.  Don't reuse an existing reload pseudo if
   IN_SUBREG_P is true and the reused pseudo should be wrapped up in a
   SUBREG.  The result pseudo is returned through RESULT_REG.  Return
   TRUE if we created a new pseudo, FALSE if we reused an existing
   reload pseudo.  Use TITLE to describe new registers for debug
   purposes.  */
static bool
get_reload_reg (enum op_type type, machine_mode mode, rtx original,
		enum reg_class rclass, bool in_subreg_p,
		const char *title, rtx *result_reg)
{
  int i, regno;
  enum reg_class new_class;
  bool unique_p = false;

  if (type == OP_OUT)
    {
      /* Output reload registers tend to start out with a conservative
	 choice of register class.  Usually this is ALL_REGS, although
	 a target might narrow it (for performance reasons) through
	 targetm.preferred_reload_class.  It's therefore quite common
	 for a reload instruction to require a more restrictive class
	 than the class that was originally assigned to the reload register.

	 In these situations, it's more efficient to refine the choice
	 of register class rather than create a second reload register.
	 This also helps to avoid cycling for registers that are only
	 used by reload instructions.  */
      if (REG_P (original)
	  && (int) REGNO (original) >= new_regno_start
	  && INSN_UID (curr_insn) >= new_insn_uid_start
	  && in_class_p (original, rclass, &new_class, true))
	{
	  unsigned int regno = REGNO (original);
	  if (lra_dump_file != NULL)
	    {
	      fprintf (lra_dump_file, " Reuse r%d for output ", regno);
	      dump_value_slim (lra_dump_file, original, 1);
	    }
	  if (new_class != lra_get_allocno_class (regno))
	    lra_change_class (regno, new_class, ", change to", false);
	  if (lra_dump_file != NULL)
	    fprintf (lra_dump_file, "\n");
	  *result_reg = original;
	  return false;
	}
      *result_reg
	= lra_create_new_reg_with_unique_value (mode, original, rclass, title);
      return true;
    }
  /* Input reload: try to reuse one of the reloads already generated
     for this insn.  Prevent reuse of the value of an expression with
     side effects, e.g. a volatile memory reference.  */
  if (! side_effects_p (original))
    for (i = 0; i < curr_insn_input_reloads_num; i++)
      {
	if (! curr_insn_input_reloads[i].match_p
	    && rtx_equal_p (curr_insn_input_reloads[i].input, original)
	    && in_class_p (curr_insn_input_reloads[i].reg, rclass, &new_class))
	  {
	    rtx reg = curr_insn_input_reloads[i].reg;
	    regno = REGNO (reg);
	    /* If input is equal to original and both are VOIDmode,
	       GET_MODE (reg) might be still different from mode.
	       Ensure we don't return *result_reg with wrong mode.  */
	    if (GET_MODE (reg) != mode)
	      {
		if (in_subreg_p)
		  continue;
		if (maybe_lt (GET_MODE_SIZE (GET_MODE (reg)),
			      GET_MODE_SIZE (mode)))
		  continue;
		reg = lowpart_subreg (mode, reg, GET_MODE (reg));
		if (reg == NULL_RTX || GET_CODE (reg) != SUBREG)
		  continue;
	      }
	    *result_reg = reg;
	    if (lra_dump_file != NULL)
	      {
		fprintf (lra_dump_file, " Reuse r%d for reload ", regno);
		dump_value_slim (lra_dump_file, original, 1);
	      }
	    if (new_class != lra_get_allocno_class (regno))
	      lra_change_class (regno, new_class, ", change to", false);
	    if (lra_dump_file != NULL)
	      fprintf (lra_dump_file, "\n");
	    return false;
	  }
	/* If we have an input reload with a different mode, make sure it
	   will get a different hard reg.  */
	else if (REG_P (original)
		 && REG_P (curr_insn_input_reloads[i].input)
		 && REGNO (original) == REGNO (curr_insn_input_reloads[i].input)
		 && (GET_MODE (original)
		     != GET_MODE (curr_insn_input_reloads[i].input)))
	  unique_p = true;
      }
  /* No reusable reload found: create a fresh pseudo and record it for
     later reuse within this insn.  */
  *result_reg = (unique_p
		 ? lra_create_new_reg_with_unique_value
		 : lra_create_new_reg) (mode, original, rclass, title);
  lra_assert (curr_insn_input_reloads_num < LRA_MAX_INSN_RELOADS);
  curr_insn_input_reloads[curr_insn_input_reloads_num].input = original;
  curr_insn_input_reloads[curr_insn_input_reloads_num].match_p = false;
  curr_insn_input_reloads[curr_insn_input_reloads_num++].reg = *result_reg;
  return true;
}
| |
| |
| /* The page contains major code to choose the current insn alternative |
| and generate reloads for it. */ |
| |
| /* Return the offset from REGNO of the least significant register |
| in (reg:MODE REGNO). |
| |
| This function is used to tell whether two registers satisfy |
| a matching constraint. (reg:MODE1 REGNO1) matches (reg:MODE2 REGNO2) if: |
| |
| REGNO1 + lra_constraint_offset (REGNO1, MODE1) |
| == REGNO2 + lra_constraint_offset (REGNO2, MODE2) */ |
| int |
| lra_constraint_offset (int regno, machine_mode mode) |
| { |
| lra_assert (regno < FIRST_PSEUDO_REGISTER); |
| |
| scalar_int_mode int_mode; |
| if (WORDS_BIG_ENDIAN |
| && is_a <scalar_int_mode> (mode, &int_mode) |
| && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD) |
| return hard_regno_nregs (regno, mode) - 1; |
| return 0; |
| } |
| |
/* Like rtx_equal_p except that it allows a REG and a SUBREG to match
   if they are the same hard reg, and has special hacks for
   auto-increment and auto-decrement.  This is specifically intended for
   process_alt_operands to use in determining whether two operands
   match.  X is the operand whose number is the lower of the two.

   It is supposed that X is the output operand and Y is the input
   operand.  Y_HARD_REGNO is the final hard regno of register Y or
   register in subreg Y as we know it now.  Otherwise, it is a
   negative value.  */
static bool
operands_match_p (rtx x, rtx y, int y_hard_regno)
{
  int i;
  RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (x == y)
    return true;
  if ((code == REG || (code == SUBREG && REG_P (SUBREG_REG (x))))
      && (REG_P (y) || (GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y)))))
    {
      int j;

      /* Both are (subregs of) registers: compare their hard regnos,
	 adjusted by the matching-constraint offset.  */
      i = get_hard_regno (x, false);
      if (i < 0)
	goto slow;

      if ((j = y_hard_regno) < 0)
	goto slow;

      i += lra_constraint_offset (i, GET_MODE (x));
      j += lra_constraint_offset (j, GET_MODE (y));

      return i == j;
    }

  /* If two operands must match, because they are really a single
     operand of an assembler insn, then two post-increments are invalid
     because the assembler insn would increment only once.  On the
     other hand, a post-increment matches ordinary indexing if the
     post-increment is the output operand.  */
  if (code == POST_DEC || code == POST_INC || code == POST_MODIFY)
    return operands_match_p (XEXP (x, 0), y, y_hard_regno);

  /* Two pre-increments are invalid because the assembler insn would
     increment only once.  On the other hand, a pre-increment matches
     ordinary indexing if the pre-increment is the input operand.  */
  if (GET_CODE (y) == PRE_DEC || GET_CODE (y) == PRE_INC
      || GET_CODE (y) == PRE_MODIFY)
    return operands_match_p (x, XEXP (y, 0), -1);

 slow:
  /* Fallback structural comparison for operands that are not both
     (hard) registers.  */
  if (code == REG && REG_P (y))
    return REGNO (x) == REGNO (y);

  /* A REG matches a SUBREG whose inner register is that same REG.  */
  if (code == REG && GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y))
      && x == SUBREG_REG (y))
    return true;
  if (GET_CODE (y) == REG && code == SUBREG && REG_P (SUBREG_REG (x))
      && SUBREG_REG (x) == y)
    return true;

  /* Now we have disposed of all the cases in which different rtx
     codes can match.  */
  if (code != GET_CODE (y))
    return false;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.  */
  if (GET_MODE (x) != GET_MODE (y))
    return false;

  switch (code)
    {
    CASE_CONST_UNIQUE:
      /* Unique constants are shared, so pointer inequality (checked
	 at the top) means real inequality.  */
      return false;

    case CONST_VECTOR:
      if (!same_vector_encodings_p (x, y))
	return false;
      break;

    case LABEL_REF:
      return label_ref_label (x) == label_ref_label (y);
    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    default:
      break;
    }

  /* Compare the elements.  If any pair of corresponding elements fail
     to match, return false for the whole things.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int val, j;
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return false;
	  break;

	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return false;
	  break;

	case 'p':
	  if (maybe_ne (SUBREG_BYTE (x), SUBREG_BYTE (y)))
	    return false;
	  break;

	case 'e':
	  val = operands_match_p (XEXP (x, i), XEXP (y, i), -1);
	  if (val == 0)
	    return false;
	  break;

	case '0':
	  break;

	case 'E':
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return false;
	  for (j = XVECLEN (x, i) - 1; j >= 0; --j)
	    {
	      val = operands_match_p (XVECEXP (x, i, j), XVECEXP (y, i, j), -1);
	      if (val == 0)
		return false;
	    }
	  break;

	/* It is believed that rtx's at this level will never
	   contain anything but integers and other rtx's, except for
	   within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  gcc_unreachable ();
	}
    }
  return true;
}
| |
/* True if X is a constant that can be forced into the constant pool.
   MODE is the mode of the operand, or VOIDmode if not known.  */
#define CONST_POOL_OK_P(MODE, X)		\
  ((MODE) != VOIDmode				\
   && CONSTANT_P (X)				\
   && GET_CODE (X) != HIGH			\
   && GET_MODE_SIZE (MODE).is_constant ()	\
   && !targetm.cannot_force_const_mem (MODE, X))

/* True if C is a non-empty register class that has too few registers
   to be safely used as a reload target class.  A single-register
   class always qualifies; otherwise the target's
   class_likely_spilled_p hook decides.  */
#define SMALL_REGISTER_CLASS_P(C)		\
  (ira_class_hard_regs_num [(C)] == 1		\
   || (ira_class_hard_regs_num [(C)] >= 1	\
       && targetm.class_likely_spilled_p (C)))
| |
| /* If REG is a reload pseudo, try to make its class satisfying CL. */ |
| static void |
| narrow_reload_pseudo_class (rtx reg, enum reg_class cl) |
| { |
| enum reg_class rclass; |
| |
| /* Do not make more accurate class from reloads generated. They are |
| mostly moves with a lot of constraints. Making more accurate |
| class may results in very narrow class and impossibility of find |
| registers for several reloads of one insn. */ |
| if (INSN_UID (curr_insn) >= new_insn_uid_start) |
| return; |
| if (GET_CODE (reg) == SUBREG) |
| reg = SUBREG_REG (reg); |
| if (! REG_P (reg) || (int) REGNO (reg) < new_regno_start) |
| return; |
| if (in_class_p (reg, cl, &rclass) && rclass != cl) |
| lra_change_class (REGNO (reg), rclass, " Change to", true); |
| } |
| |
| /* Searches X for any reference to a reg with the same value as REGNO, |
| returning the rtx of the reference found if any. Otherwise, |
| returns NULL_RTX. */ |
| static rtx |
| regno_val_use_in (unsigned int regno, rtx x) |
| { |
| const char *fmt; |
| int i, j; |
| rtx tem; |
| |
| if (REG_P (x) && lra_reg_info[REGNO (x)].val == lra_reg_info[regno].val) |
| return x; |
| |
| fmt = GET_RTX_FORMAT (GET_CODE (x)); |
| for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) |
| { |
| if (fmt[i] == 'e') |
| { |
| if ((tem = regno_val_use_in (regno, XEXP (x, i)))) |
| return tem; |
| } |
| else if (fmt[i] == 'E') |
| for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
| if ((tem = regno_val_use_in (regno , XVECEXP (x, i, j)))) |
| return tem; |
| } |
| |
| return NULL_RTX; |
| } |
| |
/* Return true if all current insn non-output operands except INS (an
   array of operand numbers with a negative end marker) do not use
   pseudos with the same value as REGNO.  */
static bool
check_conflict_input_operands (int regno, signed char *ins)
{
  int in;
  int n_operands = curr_static_id->n_operands;

  for (int nop = 0; nop < n_operands; nop++)
    if (! curr_static_id->operand[nop].is_operator
	&& curr_static_id->operand[nop].type != OP_OUT)
      {
	/* Skip operands listed in INS.  */
	for (int i = 0; (in = ins[i]) >= 0; i++)
	  if (in == nop)
	    break;
	if (in < 0
	    && regno_val_use_in (regno, *curr_id->operand_loc[nop]) != NULL_RTX)
	  return false;
      }
  return true;
}
| |
/* Generate reloads for matching OUT and INS (array of input operand
   numbers with end marker -1) with reg class GOAL_CLASS, considering
   output operands OUTS (similar array to INS) needing to be in different
   registers.  Add input and output reloads correspondingly to the lists
   *BEFORE and *AFTER.  OUT might be negative.  In this case we generate
   input reloads for matched input operands INS.  EARLY_CLOBBER_P is a flag
   that the output operand is early clobbered for chosen alternative.  */
static void
match_reload (signed char out, signed char *ins, signed char *outs,
	      enum reg_class goal_class, rtx_insn **before,
	      rtx_insn **after, bool early_clobber_p)
{
  bool out_conflict;
  int i, in;
  rtx new_in_reg, new_out_reg, reg;
  machine_mode inmode, outmode;
  rtx in_rtx = *curr_id->operand_loc[ins[0]];
  rtx out_rtx = out < 0 ? in_rtx : *curr_id->operand_loc[out];

  inmode = curr_operand_mode[ins[0]];
  outmode = out < 0 ? inmode : curr_operand_mode[out];
  push_to_sequence (*before);
  if (inmode != outmode)
    {
      /* process_alt_operands has already checked that the mode sizes
	 are ordered.  */
      if (partial_subreg_p (outmode, inmode))
	{
	  /* Input mode is wider: make the new pseudo in INMODE and
	     access the output part through a lowpart subreg.  */
	  reg = new_in_reg
	    = lra_create_new_reg_with_unique_value (inmode, in_rtx,
						    goal_class, "");
	  new_out_reg = gen_lowpart_SUBREG (outmode, reg);
	  LRA_SUBREG_P (new_out_reg) = 1;
	  /* If the input reg is dying here, we can use the same hard
	     register for REG and IN_RTX.  We do it only for original
	     pseudos as reload pseudos can die although original
	     pseudos still live where reload pseudos die.  */
	  if (REG_P (in_rtx) && (int) REGNO (in_rtx) < lra_new_regno_start
	      && find_regno_note (curr_insn, REG_DEAD, REGNO (in_rtx))
	      && (!early_clobber_p
		  || check_conflict_input_operands(REGNO (in_rtx), ins)))
	    lra_assign_reg_val (REGNO (in_rtx), REGNO (reg));
	}
      else
	{
	  /* Output mode is wider: make the new pseudo in OUTMODE and
	     access the input part through a lowpart subreg.  */
	  reg = new_out_reg
	    = lra_create_new_reg_with_unique_value (outmode, out_rtx,
						    goal_class, "");
	  new_in_reg = gen_lowpart_SUBREG (inmode, reg);
	  /* NEW_IN_REG is non-paradoxical subreg.  We don't want
	     NEW_OUT_REG living above.  We add clobber clause for
	     this.  This is just a temporary clobber.  We can remove
	     it at the end of LRA work.  */
	  rtx_insn *clobber = emit_clobber (new_out_reg);
	  LRA_TEMP_CLOBBER_P (PATTERN (clobber)) = 1;
	  LRA_SUBREG_P (new_in_reg) = 1;
	  if (GET_CODE (in_rtx) == SUBREG)
	    {
	      rtx subreg_reg = SUBREG_REG (in_rtx);

	      /* If SUBREG_REG is dying here and sub-registers IN_RTX
		 and NEW_IN_REG are similar, we can use the same hard
		 register for REG and SUBREG_REG.  */
	      if (REG_P (subreg_reg)
		  && (int) REGNO (subreg_reg) < lra_new_regno_start
		  && GET_MODE (subreg_reg) == outmode
		  && known_eq (SUBREG_BYTE (in_rtx), SUBREG_BYTE (new_in_reg))
		  && find_regno_note (curr_insn, REG_DEAD, REGNO (subreg_reg))
		  && (! early_clobber_p
		      || check_conflict_input_operands (REGNO (subreg_reg),
							ins)))
		lra_assign_reg_val (REGNO (subreg_reg), REGNO (reg));
	    }
	}
    }
  else
    {
      /* Pseudos have values -- see comments for lra_reg_info.
	 Different pseudos with the same value do not conflict even if
	 they live in the same place.  When we create a pseudo we
	 assign value of original pseudo (if any) from which we
	 created the new pseudo.  If we create the pseudo from the
	 input pseudo, the new pseudo will have no conflict with the
	 input pseudo which is wrong when the input pseudo lives after
	 the insn and as the new pseudo value is changed by the insn
	 output.  Therefore we create the new pseudo from the output
	 except the case when we have single matched dying input
	 pseudo.

	 We cannot reuse the current output register because we might
	 have a situation like "a <- a op b", where the constraints
	 force the second input operand ("b") to match the output
	 operand ("a").  "b" must then be copied into a new register
	 so that it doesn't clobber the current value of "a".

	 We cannot use the same value if the output pseudo is
	 early clobbered or the input pseudo is mentioned in the
	 output, e.g. as an address part in memory, because
	 output reload will actually extend the pseudo liveness.
	 We don't care about eliminable hard regs here as we are
	 interested only in pseudos.  */

      /* Matching input's register value is the same as one of the other
	 output operand.  Output operands in a parallel insn must be in
	 different registers.  */
      out_conflict = false;
      if (REG_P (in_rtx))
	{
	  for (i = 0; outs[i] >= 0; i++)
	    {
	      rtx other_out_rtx = *curr_id->operand_loc[outs[i]];
	      if (REG_P (other_out_rtx)
		  && (regno_val_use_in (REGNO (in_rtx), other_out_rtx)
		      != NULL_RTX))
		{
		  out_conflict = true;
		  break;
		}
	    }
	}

      /* Reuse the input pseudo's value only for a single, dying,
	 original, non-conflicting matched input; otherwise create a
	 pseudo with a unique value from the output.  */
      new_in_reg = new_out_reg
	= (! early_clobber_p && ins[1] < 0 && REG_P (in_rtx)
	   && (int) REGNO (in_rtx) < lra_new_regno_start
	   && find_regno_note (curr_insn, REG_DEAD, REGNO (in_rtx))
	   && (! early_clobber_p
	       || check_conflict_input_operands (REGNO (in_rtx), ins))
	   && (out < 0
	       || regno_val_use_in (REGNO (in_rtx), out_rtx) == NULL_RTX)
	   && !out_conflict
	   ? lra_create_new_reg (inmode, in_rtx, goal_class, "")
	   : lra_create_new_reg_with_unique_value (outmode, out_rtx,
						   goal_class, ""));
    }
  /* In operand can be got from transformations before processing insn
     constraints.  One example of such transformations is subreg
     reloading (see function simplify_operand_subreg).  The new
     pseudos created by the transformations might have inaccurate
     class (ALL_REGS) and we should make their classes more
     accurate.  */
  narrow_reload_pseudo_class (in_rtx, goal_class);
  lra_emit_move (copy_rtx (new_in_reg), in_rtx);
  *before = get_insns ();
  end_sequence ();
  /* Add the new pseudo to consider values of subsequent input reload
     pseudos.  */
  lra_assert (curr_insn_input_reloads_num < LRA_MAX_INSN_RELOADS);
  curr_insn_input_reloads[curr_insn_input_reloads_num].input = in_rtx;
  curr_insn_input_reloads[curr_insn_input_reloads_num].match_p = true;
  curr_insn_input_reloads[curr_insn_input_reloads_num++].reg = new_in_reg;
  /* Substitute the reload pseudo into each matched input operand,
     choosing the in- or out-mode rtx according to the operand mode.  */
  for (i = 0; (in = ins[i]) >= 0; i++)
    if (GET_MODE (*curr_id->operand_loc[in]) == VOIDmode
	|| GET_MODE (new_in_reg) == GET_MODE (*curr_id->operand_loc[in]))
      *curr_id->operand_loc[in] = new_in_reg;
    else
      {
	lra_assert
	  (GET_MODE (new_out_reg) == GET_MODE (*curr_id->operand_loc[in]));
	*curr_id->operand_loc[in] = new_out_reg;
      }
  lra_update_dups (curr_id, ins);
  if (out < 0)
    return;
  /* See a comment for the input operand above.  */
  narrow_reload_pseudo_class (out_rtx, goal_class);
  if (find_reg_note (curr_insn, REG_UNUSED, out_rtx) == NULL_RTX)
    {
      reg = SUBREG_P (out_rtx) ? SUBREG_REG (out_rtx) : out_rtx;
      start_sequence ();
      /* If we had strict_low_part, use it also in reload to keep other
	 parts unchanged but do it only for regs as strict_low_part
	 has no sense for memory and probably there is no insn pattern
	 to match the reload insn in memory case.  */
      if (out >= 0 && curr_static_id->operand[out].strict_low && REG_P (reg))
	out_rtx = gen_rtx_STRICT_LOW_PART (VOIDmode, out_rtx);
      lra_emit_move (out_rtx, copy_rtx (new_out_reg));
      emit_insn (*after);
      *after = get_insns ();
      end_sequence ();
    }
  *curr_id->operand_loc[out] = new_out_reg;
  lra_update_dup (curr_id, out);
}
| |
/* Return register class which is union of all reg classes in insn
   constraint alternative string starting with P.  Scanning stops at
   the end of the alternative ('#' or ',') or at the terminating NUL.  */
static enum reg_class
reg_class_from_constraints (const char *p)
{
  int c, len;
  enum reg_class op_class = NO_REGS;

  do
    /* The comma expression reads the current constraint letter and its
       length (multi-letter constraints exist) before dispatching.  */
    switch ((c = *p, len = CONSTRAINT_LEN (c, p)), c)
      {
      case '#':
      case ',':
	return op_class;

      case 'g':
	op_class = reg_class_subunion[op_class][GENERAL_REGS];
	break;

      default:
	enum constraint_num cn = lookup_constraint (p);
	enum reg_class cl = reg_class_for_constraint (cn);
	if (cl == NO_REGS)
	  {
	    /* An address constraint contributes the class of registers
	       usable as a base in a generic address.  */
	    if (insn_extra_address_constraint (cn))
	      op_class
		= (reg_class_subunion
		   [op_class][base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
					      ADDRESS, SCRATCH)]);
	    break;
	  }

	op_class = reg_class_subunion[op_class][cl];
	break;
      }
  /* Advance past the constraint just processed; stop at NUL.  */
  while ((p += len), c);
  return op_class;
}
| |
| /* If OP is a register, return the class of the register as per |
| get_reg_class, otherwise return NO_REGS. */ |
| static inline enum reg_class |
| get_op_class (rtx op) |
| { |
| return REG_P (op) ? get_reg_class (REGNO (op)) : NO_REGS; |
| } |
| |
| /* Return generated insn mem_pseudo:=val if TO_P or val:=mem_pseudo |
| otherwise. If modes of MEM_PSEUDO and VAL are different, use |
| SUBREG for VAL to make them equal. */ |
| static rtx_insn * |
| emit_spill_move (bool to_p, rtx mem_pseudo, rtx val) |
| { |
| if (GET_MODE (mem_pseudo) != GET_MODE (val)) |
| { |
| /* Usually size of mem_pseudo is greater than val size but in |
| rare cases it can be less as it can be defined by target |
| dependent macro HARD_REGNO_CALLER_SAVE_MODE. */ |
| if (! MEM_P (val)) |
| { |
| val = gen_lowpart_SUBREG (GET_MODE (mem_pseudo), |
| GET_CODE (val) == SUBREG |
| ? SUBREG_REG (val) : val); |
| LRA_SUBREG_P (val) = 1; |
| } |
| else |
| { |
| mem_pseudo = gen_lowpart_SUBREG (GET_MODE (val), mem_pseudo); |
| LRA_SUBREG_P (mem_pseudo) = 1; |
| } |
| } |
| return to_p ? gen_move_insn (mem_pseudo, val) |
| : gen_move_insn (val, mem_pseudo); |
| } |
| |
/* Process a special case insn (register move), return true if we
   don't need to process it anymore.  INSN should be a single set
   insn.  Set up that RTL was changed through CHANGE_P and that hook
   TARGET_SECONDARY_MEMORY_NEEDED says to use secondary memory through
   SEC_MEM_P.  */
static bool
check_and_process_move (bool *change_p, bool *sec_mem_p ATTRIBUTE_UNUSED)
{
  int sregno, dregno;
  rtx dest, src, dreg, sreg, new_reg, scratch_reg;
  rtx_insn *before;
  enum reg_class dclass, sclass, secondary_class;
  secondary_reload_info sri;

  lra_assert (curr_insn_set != NULL_RTX);
  dreg = dest = SET_DEST (curr_insn_set);
  sreg = src = SET_SRC (curr_insn_set);
  /* Look through subregs to the underlying reg or mem.  */
  if (GET_CODE (dest) == SUBREG)
    dreg = SUBREG_REG (dest);
  if (GET_CODE (src) == SUBREG)
    sreg = SUBREG_REG (src);
  if (! (REG_P (dreg) || MEM_P (dreg)) || ! (REG_P (sreg) || MEM_P (sreg)))
    return false;
  sclass = dclass = NO_REGS;
  if (REG_P (dreg))
    dclass = get_reg_class (REGNO (dreg));
  gcc_assert (dclass < LIM_REG_CLASSES && dclass >= NO_REGS);
  if (dclass == ALL_REGS)
    /* ALL_REGS is used for new pseudos created by transformations
       like reload of SUBREG_REG (see function
       simplify_operand_subreg).  We don't know their class yet.  We
       should figure out the class from processing the insn
       constraints not in this fast path function.  Even if ALL_REGS
       were a right class for the pseudo, secondary_... hooks usually
       are not defined for ALL_REGS.  */
    return false;
  if (REG_P (sreg))
    sclass = get_reg_class (REGNO (sreg));
  gcc_assert (sclass < LIM_REG_CLASSES && sclass >= NO_REGS);
  if (sclass == ALL_REGS)
    /* See comments above.  */
    return false;
  if (sclass == NO_REGS && dclass == NO_REGS)
    return false;
  /* Let the target route the copy through memory if it says so; the
     slow path will then materialize the secondary-memory reload.  */
  if (targetm.secondary_memory_needed (GET_MODE (src), sclass, dclass)
      && ((sclass != NO_REGS && dclass != NO_REGS)
	  || (GET_MODE (src)
	      != targetm.secondary_memory_needed_mode (GET_MODE (src)))))
    {
      *sec_mem_p = true;
      return false;
    }
  if (! REG_P (dreg) || ! REG_P (sreg))
    return false;
  sri.prev_sri = NULL;
  sri.icode = CODE_FOR_nothing;
  sri.extra_cost = 0;
  secondary_class = NO_REGS;
  /* Set up hard register for a reload pseudo for hook
     secondary_reload because some targets just ignore unassigned
     pseudos in the hook.  The fake assignment is undone below.  */
  if (dclass != NO_REGS && lra_get_regno_hard_regno (REGNO (dreg)) < 0)
    {
      dregno = REGNO (dreg);
      reg_renumber[dregno] = ira_class_hard_regs[dclass][0];
    }
  else
    dregno = -1;
  if (sclass != NO_REGS && lra_get_regno_hard_regno (REGNO (sreg)) < 0)
    {
      sregno = REGNO (sreg);
      reg_renumber[sregno] = ira_class_hard_regs[sclass][0];
    }
  else
    sregno = -1;
  /* First ask about an input (copy-from-SCLASS) secondary reload.  */
  if (sclass != NO_REGS)
    secondary_class
      = (enum reg_class) targetm.secondary_reload (false, dest,
						   (reg_class_t) sclass,
						   GET_MODE (src), &sri);
  /* Then ask about an output (copy-to-DCLASS) secondary reload when
     needed, and verify both answers agree.  */
  if (sclass == NO_REGS
      || ((secondary_class != NO_REGS || sri.icode != CODE_FOR_nothing)
	  && dclass != NO_REGS))
    {
      enum reg_class old_sclass = secondary_class;
      secondary_reload_info old_sri = sri;

      sri.prev_sri = NULL;
      sri.icode = CODE_FOR_nothing;
      sri.extra_cost = 0;
      secondary_class
	= (enum reg_class) targetm.secondary_reload (true, src,
						     (reg_class_t) dclass,
						     GET_MODE (src), &sri);
      /* Check the target hook consistency.  */
      lra_assert
	((secondary_class == NO_REGS && sri.icode == CODE_FOR_nothing)
	 || (old_sclass == NO_REGS && old_sri.icode == CODE_FOR_nothing)
	 || (secondary_class == old_sclass && sri.icode == old_sri.icode));
    }
  /* Undo the fake hard register assignments made above.  */
  if (sregno >= 0)
    reg_renumber [sregno] = -1;
  if (dregno >= 0)
    reg_renumber [dregno] = -1;
  if (secondary_class == NO_REGS && sri.icode == CODE_FOR_nothing)
    return false;
  *change_p = true;
  new_reg = NULL_RTX;
  if (secondary_class != NO_REGS)
    new_reg = lra_create_new_reg_with_unique_value (GET_MODE (src), NULL_RTX,
						    secondary_class,
						    "secondary");
  start_sequence ();
  if (sri.icode == CODE_FOR_nothing)
    lra_emit_move (new_reg, src);
  else
    {
      enum reg_class scratch_class;

      /* The icode pattern's operand 2 is a scratch; create a pseudo of
	 the class its constraint demands.  */
      scratch_class = (reg_class_from_constraints
		       (insn_data[sri.icode].operand[2].constraint));
      scratch_reg = (lra_create_new_reg_with_unique_value
		     (insn_data[sri.icode].operand[2].mode, NULL_RTX,
		      scratch_class, "scratch"));
      emit_insn (GEN_FCN (sri.icode) (new_reg != NULL_RTX ? new_reg : dest,
				      src, scratch_reg));
    }
  before = get_insns ();
  end_sequence ();
  lra_process_new_insns (curr_insn, before, NULL, "Inserting the move");
  if (new_reg != NULL_RTX)
    SET_SRC (curr_insn_set) = new_reg;
  else
    {
      /* The emitted pattern already performed the whole copy; the
	 original move is now redundant.  */
      if (lra_dump_file != NULL)
	{
	  fprintf (lra_dump_file, "Deleting move %u\n", INSN_UID (curr_insn));
	  dump_insn_slim (lra_dump_file, curr_insn);
	}
      lra_set_insn_deleted (curr_insn);
      return true;
    }
  return false;
}
| |
/* The following data describe the result of process_alt_operands.
   The data are used in curr_insn_transform to generate reloads.  */

/* The chosen reg classes which should be used for the corresponding
   operands.  */
static enum reg_class goal_alt[MAX_RECOG_OPERANDS];
/* True if the operand should be the same as another operand and that
   other operand does not need a reload.  */
static bool goal_alt_match_win[MAX_RECOG_OPERANDS];
/* True if the operand does not need a reload.  */
static bool goal_alt_win[MAX_RECOG_OPERANDS];
/* True if the operand can be offsetable memory.  */
static bool goal_alt_offmemok[MAX_RECOG_OPERANDS];
/* The number of an operand to which given operand can be matched to.  */
static int goal_alt_matches[MAX_RECOG_OPERANDS];
/* The number of elements in the following array.  */
static int goal_alt_dont_inherit_ops_num;
/* Numbers of operands whose reload pseudos should not be inherited.  */
static int goal_alt_dont_inherit_ops[MAX_RECOG_OPERANDS];
/* True if the insn commutative operands should be swapped.  */
static bool goal_alt_swapped;
/* The chosen insn alternative.  */
static int goal_alt_number;

/* True if the corresponding operand is the result of an equivalence
   substitution.  */
static bool equiv_substition_p[MAX_RECOG_OPERANDS];

/* The following variables are used to choose the best insn
   alternative.  They reflect final characteristics of the best
   alternative.  */

/* Number of necessary reloads and overall cost reflecting the
   previous value and other unpleasantness of the best alternative.  */
static int best_losers, best_overall;
/* Overall number of hard registers used for reloads.  For example, on
   some targets we need 2 general registers to reload DFmode and only
   one floating point register.  */
static int best_reload_nregs;
/* Overall number reflecting distances of previous reloading of the same
   value.  The distances are counted from the current BB start.  It is
   used to improve inheritance chances.  */
static int best_reload_sum;

/* True if the current insn should have no corresponding input or
   output reloads.  */
static bool no_input_reloads_p, no_output_reloads_p;

/* True if we swapped the commutative operands in the current
   insn.  */
static int curr_swapped;
| |
/* If CHECK_ONLY_P is false, arrange for address element *LOC to be a
   register of class CL.  Add any input reloads to list BEFORE.  AFTER
   is nonnull if *LOC is an automodified value; handle that case by
   adding the required output reloads to list AFTER.  Return true if
   the RTL was changed.

   If CHECK_ONLY_P is true, check that the *LOC is a correct address
   register.  Return false if the address register is correct.  */
static bool
process_addr_reg (rtx *loc, bool check_only_p, rtx_insn **before, rtx_insn **after,
		  enum reg_class cl)
{
  int regno;
  enum reg_class rclass, new_class;
  rtx reg;
  rtx new_reg;
  machine_mode mode;
  bool subreg_p, before_p = false;

  subreg_p = GET_CODE (*loc) == SUBREG;
  if (subreg_p)
    {
      reg = SUBREG_REG (*loc);
      mode = GET_MODE (reg);

      /* For mode with size bigger than ptr_mode, there unlikely to be "mov"
	 between two registers with different classes, but there normally will
	 be "mov" which transfers element of vector register into the general
	 register, and this normally will be a subreg which should be reloaded
	 as a whole.  This is particularly likely to be triggered when
	 -fno-split-wide-types specified.  */
      if (!REG_P (reg)
	  || in_class_p (reg, cl, &new_class)
	  || known_le (GET_MODE_SIZE (mode), GET_MODE_SIZE (ptr_mode)))
	loc = &SUBREG_REG (*loc);
    }

  reg = *loc;
  mode = GET_MODE (reg);
  if (! REG_P (reg))
    {
      if (check_only_p)
	return true;
      /* Always reload memory in an address even if the target supports
	 such addresses.  */
      new_reg = lra_create_new_reg_with_unique_value (mode, reg, cl, "address");
      before_p = true;
    }
  else
    {
      regno = REGNO (reg);
      rclass = get_reg_class (regno);
      /* Substitute an equivalent value for the pseudo if one exists;
	 the copy keeps the original equivalence rtx unshared.  */
      if (! check_only_p
	  && (*loc = get_equiv_with_elimination (reg, curr_insn)) != reg)
	{
	  if (lra_dump_file != NULL)
	    {
	      fprintf (lra_dump_file,
		       "Changing pseudo %d in address of insn %u on equiv ",
		       REGNO (reg), INSN_UID (curr_insn));
	      dump_value_slim (lra_dump_file, *loc, 1);
	      fprintf (lra_dump_file, "\n");
	    }
	  *loc = copy_rtx (*loc);
	}
      if (*loc != reg || ! in_class_p (reg, cl, &new_class))
	{
	  if (check_only_p)
	    return true;
	  reg = *loc;
	  if (get_reload_reg (after == NULL ? OP_IN : OP_INOUT,
			      mode, reg, cl, subreg_p, "address", &new_reg))
	    before_p = true;
	}
      else if (new_class != NO_REGS && rclass != new_class)
	{
	  /* The register is usable as-is once its class is narrowed.  */
	  if (check_only_p)
	    return true;
	  lra_change_class (regno, new_class, "	   Change to", true);
	  return false;
	}
      else
	return false;
    }
  if (before_p)
    {
      /* Emit the input reload move before the insn.  */
      push_to_sequence (*before);
      lra_emit_move (new_reg, reg);
      *before = get_insns ();
      end_sequence ();
    }
  *loc = new_reg;
  if (after != NULL)
    {
      /* Automodified address: store the (possibly modified) value
	 back after the insn.  */
      start_sequence ();
      lra_emit_move (before_p ? copy_rtx (reg) : reg, new_reg);
      emit_insn (*after);
      *after = get_insns ();
      end_sequence ();
    }
  return true;
}
| |
/* Insert move insn in simplify_operand_subreg.  BEFORE returns
   the insn to be inserted before curr insn.  AFTER returns the
   insn to be inserted after curr insn.  ORIGREG and NEWREG
   are the original reg and new reg for reload.  Either list pointer
   may be NULL when no move is wanted on that side.  */
static void
insert_move_for_subreg (rtx_insn **before, rtx_insn **after, rtx origreg,
			rtx newreg)
{
  if (before)
    {
      /* Load the reload reg from the original before the insn.  */
      push_to_sequence (*before);
      lra_emit_move (newreg, origreg);
      *before = get_insns ();
      end_sequence ();
    }
  if (after)
    {
      /* Store the reload reg back to the original after the insn.  */
      start_sequence ();
      lra_emit_move (origreg, newreg);
      emit_insn (*after);
      *after = get_insns ();
      end_sequence ();
    }
}
| |
| static int valid_address_p (machine_mode mode, rtx addr, addr_space_t as); |
| static bool process_address (int, bool, rtx_insn **, rtx_insn **); |
| |
/* Make reloads for subreg in operand NOP with internal subreg mode
   REG_MODE, add new reloads for further processing.  Return true if
   any change was done.  */
static bool
simplify_operand_subreg (int nop, machine_mode reg_mode)
{
  int hard_regno, inner_hard_regno;
  rtx_insn *before, *after;
  machine_mode mode, innermode;
  rtx reg, new_reg;
  rtx operand = *curr_id->operand_loc[nop];
  enum reg_class regclass;
  enum op_type type;

  before = after = NULL;

  if (GET_CODE (operand) != SUBREG)
    return false;

  mode = GET_MODE (operand);
  reg = SUBREG_REG (operand);
  innermode = GET_MODE (reg);
  type = curr_static_id->operand[nop].type;
  if (MEM_P (reg))
    {
      /* Try eliminating the subreg by folding it into the MEM and see
	 whether the resulting address is still (or becomes) valid.  */
      const bool addr_was_valid
	= valid_address_p (innermode, XEXP (reg, 0), MEM_ADDR_SPACE (reg));
      alter_subreg (curr_id->operand_loc[nop], false);
      rtx subst = *curr_id->operand_loc[nop];
      lra_assert (MEM_P (subst));
      const bool addr_is_valid = valid_address_p (GET_MODE (subst),
						  XEXP (subst, 0),
						  MEM_ADDR_SPACE (subst));
      if (!addr_was_valid
	  || addr_is_valid
	  || ((get_constraint_type (lookup_constraint
				    (curr_static_id->operand[nop].constraint))
	       != CT_SPECIAL_MEMORY)
	      /* We still can reload address and if the address is
		 valid, we can remove subreg without reloading its
		 inner memory.  */
	      && valid_address_p (GET_MODE (subst),
				  regno_reg_rtx
				  [ira_class_hard_regs
				   [base_reg_class (GET_MODE (subst),
						    MEM_ADDR_SPACE (subst),
						    ADDRESS, SCRATCH)][0]],
				  MEM_ADDR_SPACE (subst))))
	{
	  /* If we change the address for a paradoxical subreg of memory, the
	     new address might violate the necessary alignment or the access
	     might be slow; take this into consideration.  We need not worry
	     about accesses beyond allocated memory for paradoxical memory
	     subregs as we don't substitute such equiv memory (see processing
	     equivalences in function lra_constraints) and because for spilled
	     pseudos we allocate stack memory enough for the biggest
	     corresponding paradoxical subreg.

	     However, do not blindly simplify a (subreg (mem ...)) for
	     WORD_REGISTER_OPERATIONS targets as this may lead to loading junk
	     data into a register when the inner is narrower than outer or
	     missing important data from memory when the inner is wider than
	     outer.  This rule only applies to modes that are no wider than
	     a word.

	     If valid memory becomes invalid after subreg elimination
	     and address might be different we still have to reload
	     memory.
	   */
	  if ((! addr_was_valid
	       || addr_is_valid
	       || known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (innermode)))
	      && !(maybe_ne (GET_MODE_PRECISION (mode),
			     GET_MODE_PRECISION (innermode))
		   && known_le (GET_MODE_SIZE (mode), UNITS_PER_WORD)
		   && known_le (GET_MODE_SIZE (innermode), UNITS_PER_WORD)
		   && WORD_REGISTER_OPERATIONS)
	      && (!(MEM_ALIGN (subst) < GET_MODE_ALIGNMENT (mode)
		    && targetm.slow_unaligned_access (mode, MEM_ALIGN (subst)))
		  || (MEM_ALIGN (reg) < GET_MODE_ALIGNMENT (innermode)
		      && targetm.slow_unaligned_access (innermode,
							MEM_ALIGN (reg)))))
	    return true;

	  /* Undo the alter_subreg substitution; we will reload the MEM
	     instead.  */
	  *curr_id->operand_loc[nop] = operand;

	  /* But if the address was not valid, we cannot reload the MEM without
	     reloading the address first.  */
	  if (!addr_was_valid)
	    process_address (nop, false, &before, &after);

	  /* INNERMODE is fast, MODE slow.  Reload the mem in INNERMODE.  */
	  enum reg_class rclass
	    = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS);
	  if (get_reload_reg (curr_static_id->operand[nop].type, innermode,
			      reg, rclass, TRUE, "slow/invalid mem", &new_reg))
	    {
	      bool insert_before, insert_after;
	      bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg));

	      insert_before = (type != OP_OUT
			       || partial_subreg_p (mode, innermode));
	      insert_after = type != OP_IN;
	      insert_move_for_subreg (insert_before ? &before : NULL,
				      insert_after ? &after : NULL,
				      reg, new_reg);
	    }
	  SUBREG_REG (operand) = new_reg;

	  /* Convert to MODE.  */
	  reg = operand;
	  rclass
	    = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS);
	  if (get_reload_reg (curr_static_id->operand[nop].type, mode, reg,
			      rclass, TRUE, "slow/invalid mem", &new_reg))
	    {
	      bool insert_before, insert_after;
	      bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg));

	      insert_before = type != OP_OUT;
	      insert_after = type != OP_IN;
	      insert_move_for_subreg (insert_before ? &before : NULL,
				      insert_after ? &after : NULL,
				      reg, new_reg);
	    }
	  *curr_id->operand_loc[nop] = new_reg;
	  lra_process_new_insns (curr_insn, before, after,
				 "Inserting slow/invalid mem reload");
	  return true;
	}

      /* If the address was valid and became invalid, prefer to reload
	 the memory.  Typical case is when the index scale should
	 correspond to the memory.  */
      *curr_id->operand_loc[nop] = operand;
      /* Do not return false here as the MEM_P (reg) will be processed
	 later in this function.  */
    }
  else if (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER)
    {
      /* Subreg of a hard register can always be resolved directly.  */
      alter_subreg (curr_id->operand_loc[nop], false);
      return true;
    }
  else if (CONSTANT_P (reg))
    {
      /* Try to simplify subreg of constant.  It is usually result of
	 equivalence substitution.  */
      if (innermode == VOIDmode
	  && (innermode = original_subreg_reg_mode[nop]) == VOIDmode)
	innermode = curr_static_id->operand[nop].mode;
      if ((new_reg = simplify_subreg (mode, reg, innermode,
				      SUBREG_BYTE (operand))) != NULL_RTX)
	{
	  *curr_id->operand_loc[nop] = new_reg;
	  return true;
	}
    }
  /* Put constant into memory when we have mixed modes.  It generates
     a better code in most cases as it does not need a secondary
     reload memory.  It also prevents LRA looping when LRA is using
     secondary reload memory again and again.  */
  if (CONSTANT_P (reg) && CONST_POOL_OK_P (reg_mode, reg)
      && SCALAR_INT_MODE_P (reg_mode) != SCALAR_INT_MODE_P (mode))
    {
      SUBREG_REG (operand) = force_const_mem (reg_mode, reg);
      alter_subreg (curr_id->operand_loc[nop], false);
      return true;
    }
  /* Force a reload of the SUBREG_REG if this is a constant or PLUS or
     if there may be a problem accessing OPERAND in the outer
     mode.  */
  if ((REG_P (reg)
       && REGNO (reg) >= FIRST_PSEUDO_REGISTER
       && (hard_regno = lra_get_regno_hard_regno (REGNO (reg))) >= 0
       /* Don't reload paradoxical subregs because we could be looping
	  having repeatedly final regno out of hard regs range.  */
       && (hard_regno_nregs (hard_regno, innermode)
	   >= hard_regno_nregs (hard_regno, mode))
       && simplify_subreg_regno (hard_regno, innermode,
				 SUBREG_BYTE (operand), mode) < 0
       /* Don't reload subreg for matching reload.  It is actually
	  valid subreg in LRA.  */
       && ! LRA_SUBREG_P (operand))
      || CONSTANT_P (reg) || GET_CODE (reg) == PLUS || MEM_P (reg))
    {
      enum reg_class rclass;

      if (REG_P (reg))
	/* There is a big probability that we will get the same class
	   for the new pseudo and we will get the same insn which
	   means infinite looping.  So spill the new pseudo.  */
	rclass = NO_REGS;
      else
	/* The class will be defined later in curr_insn_transform.  */
	rclass
	  = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS);

      if (get_reload_reg (curr_static_id->operand[nop].type, reg_mode, reg,
			  rclass, TRUE, "subreg reg", &new_reg))
	{
	  bool insert_before, insert_after;
	  bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg));

	  insert_before = (type != OP_OUT
			   || read_modify_subreg_p (operand));
	  insert_after = (type != OP_IN);
	  insert_move_for_subreg (insert_before ? &before : NULL,
				  insert_after ? &after : NULL,
				  reg, new_reg);
	}
      SUBREG_REG (operand) = new_reg;
      lra_process_new_insns (curr_insn, before, after,
			     "Inserting subreg reload");
      return true;
    }
  /* Force a reload for a paradoxical subreg.  For paradoxical subreg,
     IRA allocates hardreg to the inner pseudo reg according to its mode
     instead of the outermode, so the size of the hardreg may not be enough
     to contain the outermode operand, in that case we may need to insert
     reload for the reg.  For the following two types of paradoxical subreg,
     we need to insert reload:
     1. If the op_type is OP_IN, and the hardreg could not be paired with
        other hardreg to contain the outermode operand
        (checked by in_hard_reg_set_p), we need to insert the reload.
     2. If the op_type is OP_OUT or OP_INOUT.

     Here is a paradoxical subreg example showing how the reload is generated:

       (insn 5 4 7 2 (set (reg:TI 106 [ __comp ])
	  (subreg:TI (reg:DI 107 [ __comp ]) 0)) {*movti_internal_rex64}

     In IRA, reg107 is allocated to a DImode hardreg.  We use x86-64 as example
     here, if reg107 is assigned to hardreg R15, because R15 is the last
     hardreg, compiler cannot find another hardreg to pair with R15 to
     contain TImode data.  So we insert a TImode reload reg180 for it.
     After reload is inserted:

       (insn 283 0 0 (set (subreg:DI (reg:TI 180 [orig:107 __comp ] [107]) 0)
	  (reg:DI 107 [ __comp ])) -1
       (insn 5 4 7 2 (set (reg:TI 106 [ __comp ])
	  (subreg:TI (reg:TI 180 [orig:107 __comp ] [107]) 0)) {*movti_internal_rex64}

     Two reload hard registers will be allocated to reg180 to save TImode data
     in LRA_assign.

     For LRA pseudos this should normally be handled by the biggest_mode
     mechanism.  However, it's possible for new uses of an LRA pseudo
     to be introduced after we've allocated it, such as when undoing
     inheritance, and the allocated register might not then be appropriate
     for the new uses.  */
  else if (REG_P (reg)
	   && REGNO (reg) >= FIRST_PSEUDO_REGISTER
	   && paradoxical_subreg_p (operand)
	   && (inner_hard_regno = lra_get_regno_hard_regno (REGNO (reg))) >= 0
	   && ((hard_regno
		= simplify_subreg_regno (inner_hard_regno, innermode,
					 SUBREG_BYTE (operand), mode)) < 0
	       || ((hard_regno_nregs (inner_hard_regno, innermode)
		    < hard_regno_nregs (hard_regno, mode))
		   && (regclass = lra_get_allocno_class (REGNO (reg)))
		   && (type != OP_IN
		       || !in_hard_reg_set_p (reg_class_contents[regclass],
					      mode, hard_regno)
		       || overlaps_hard_reg_set_p (lra_no_alloc_regs,
						   mode, hard_regno)))))
    {
      /* The class will be defined later in curr_insn_transform.  */
      enum reg_class rclass
	= (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS);

      if (get_reload_reg (curr_static_id->operand[nop].type, mode, reg,
			  rclass, TRUE, "paradoxical subreg", &new_reg))
	{
	  rtx subreg;
	  bool insert_before, insert_after;

	  PUT_MODE (new_reg, mode);
	  subreg = gen_lowpart_SUBREG (innermode, new_reg);
	  bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg));

	  insert_before = (type != OP_OUT);
	  insert_after = (type != OP_IN);
	  insert_move_for_subreg (insert_before ? &before : NULL,
				  insert_after ? &after : NULL,
				  reg, subreg);
	}
      SUBREG_REG (operand) = new_reg;
      lra_process_new_insns (curr_insn, before, after,
			     "Inserting paradoxical subreg reload");
      return true;
    }
  return false;
}
| |
| /* Return TRUE if X refers for a hard register from SET. */ |
| static bool |
| uses_hard_regs_p (rtx x, HARD_REG_SET set) |
| { |
| int i, j, x_hard_regno; |
| machine_mode mode; |
| const char *fmt; |
| enum rtx_code code; |
| |
| if (x == NULL_RTX) |
| return false; |
| code = GET_CODE (x); |
| mode = GET_MODE (x); |
| |
| if (code == SUBREG) |
| { |
| /* For all SUBREGs we want to check whether the full multi-register |
| overlaps the set. For normal SUBREGs this means 'get_hard_regno' of |
| the inner register, for paradoxical SUBREGs this means the |
| 'get_hard_regno' of the full SUBREG and for complete SUBREGs either is |
| fine. Use the wider mode for all cases. */ |
| rtx subreg = SUBREG_REG (x); |
| mode = wider_subreg_mode (x); |
| if (mode == GET_MODE (subreg)) |
| { |
| x = subreg; |
| code = GET_CODE (x); |
| } |
| } |
| |
| if (REG_P (x) || SUBREG_P (x)) |
| { |
| x_hard_regno = get_hard_regno (x, true); |
| return (x_hard_regno >= 0 |
| && overlaps_hard_reg_set_p (set, mode, x_hard_regno)); |
| } |
| fmt = GET_RTX_FORMAT (code); |
| for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
| { |
| if (fmt[i] == 'e') |
| { |
| if (uses_hard_regs_p (XEXP (x, i), set)) |
| return true; |
| } |
| else if (fmt[i] == 'E') |
| { |
| for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
| if (uses_hard_regs_p (XVECEXP (x, i, j), set)) |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| /* Return true if OP is a spilled pseudo. */ |
| static inline bool |
| spilled_pseudo_p (rtx op) |
| { |
| return (REG_P (op) |
| && REGNO (op) >= FIRST_PSEUDO_REGISTER && in_mem_p (REGNO (op))); |
| } |
| |
| /* Return true if X is a general constant. */ |
| static inline bool |
| general_constant_p (rtx x) |
| { |
| return CONSTANT_P (x) && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (x)); |
| } |
| |
| static bool |
| reg_in_class_p (rtx reg, enum reg_class cl) |
| { |
| if (cl == NO_REGS) |
| return get_reg_class (REGNO (reg)) == NO_REGS; |
| return in_class_p (reg, cl, NULL); |
| } |
| |
| /* Return true if SET of RCLASS contains no hard regs which can be |
| used in MODE. */ |
| static bool |
| prohibited_class_reg_set_mode_p (enum reg_class rclass, |
| HARD_REG_SET &set, |
| machine_mode mode) |
| { |
| HARD_REG_SET temp; |
| |
| lra_assert (hard_reg_set_subset_p (reg_class_contents[rclass], set)); |
| temp = set & ~lra_no_alloc_regs; |
| return (hard_reg_set_subset_p |
| (temp, ira_prohibited_class_mode_regs[rclass][mode])); |
| } |
| |
| |
/* Used to check validity info about small class input operands.  It
   should be incremented at the start of processing an insn
   alternative.  */
| static unsigned int curr_small_class_check = 0; |
| |
/* Update number of used inputs of class OP_CLASS for operand NOP
   of alternative NALT.  Return true if we have more such class operands
   than the number of available regs.  */
static bool
update_and_check_small_class_inputs (int nop, int nalt,
				     enum reg_class op_class)
{
  /* Per-class counters; an entry is valid only when its
     small_class_check stamp equals curr_small_class_check, which lets
     the counts be reset lazily for each new alternative.  */
  static unsigned int small_class_check[LIM_REG_CLASSES];
  static int small_class_input_nums[LIM_REG_CLASSES];

  if (SMALL_REGISTER_CLASS_P (op_class)
      /* We are interested in classes that became small because some
	 hard regs were fixed, e.g. by a user through GCC options.  */
      && hard_reg_set_intersect_p (reg_class_contents[op_class],
				   ira_no_alloc_regs)
      && (curr_static_id->operand[nop].type != OP_OUT
	  || TEST_BIT (curr_static_id->operand[nop].early_clobber_alts, nalt)))
    {
      if (small_class_check[op_class] == curr_small_class_check)
	small_class_input_nums[op_class]++;
      else
	{
	  /* Stale entry: this is the first such operand seen during the
	     current alternative, so restart the count.  */
	  small_class_check[op_class] = curr_small_class_check;
	  small_class_input_nums[op_class] = 1;
	}
      if (small_class_input_nums[op_class] > ira_class_hard_regs_num[op_class])
	return true;
    }
  return false;
}
| |
| /* Major function to choose the current insn alternative and what |
| operands should be reloaded and how. If ONLY_ALTERNATIVE is not |
| negative we should consider only this alternative. Return false if |
| we cannot choose the alternative or find how to reload the |
| operands. */ |
| static bool |
| process_alt_operands (int only_alternative) |
| { |
| bool ok_p = false; |
| int nop, overall, nalt; |
| int n_alternatives = curr_static_id->n_alternatives; |
| int n_operands = curr_static_id->n_operands; |
| /* LOSERS counts the operands that don't fit this alternative and |
| would require loading. */ |
| int losers; |
| int addr_losers; |
| /* REJECT is a count of how undesirable this alternative says it is |
| if any reloading is required. If the alternative matches exactly |
| then REJECT is ignored, but otherwise it gets this much counted |
| against it in addition to the reloading needed. */ |
| int reject; |
| /* This is defined by '!' or '?' alternative constraint and added to |
| reject. But in some cases it can be ignored. */ |
| int static_reject; |
| int op_reject; |
| /* The number of elements in the following array. */ |
| int early_clobbered_regs_num; |
| /* Numbers of operands which are early clobber registers. */ |
| int early_clobbered_nops[MAX_RECOG_OPERANDS]; |
| enum reg_class curr_alt[MAX_RECOG_OPERANDS]; |
| HARD_REG_SET curr_alt_set[MAX_RECOG_OPERANDS]; |
| bool curr_alt_match_win[MAX_RECOG_OPERANDS]; |
| bool curr_alt_win[MAX_RECOG_OPERANDS]; |
| bool curr_alt_offmemok[MAX_RECOG_OPERANDS]; |
| int curr_alt_matches[MAX_RECOG_OPERANDS]; |
| /* The number of elements in the following array. */ |
| int curr_alt_dont_inherit_ops_num; |
| /* Numbers of operands whose reload pseudos should not be inherited. */ |
| int curr_alt_dont_inherit_ops[MAX_RECOG_OPERANDS]; |
| rtx op; |
| /* The register when the operand is a subreg of register, otherwise the |
| operand itself. */ |
| rtx no_subreg_reg_operand[MAX_RECOG_OPERANDS]; |
| /* The register if the operand is a register or subreg of register, |
| otherwise NULL. */ |
| rtx operand_reg[MAX_RECOG_OPERANDS]; |
| int hard_regno[MAX_RECOG_OPERANDS]; |
| machine_mode biggest_mode[MAX_RECOG_OPERANDS]; |
| int reload_nregs, reload_sum; |
| bool costly_p; |
| enum reg_class cl; |
| |
| /* Calculate some data common for all alternatives to speed up the |
| function. */ |
| for (nop = 0; nop < n_operands; nop++) |
| { |
| rtx reg; |
| |
| op = no_subreg_reg_operand[nop] = *curr_id->operand_loc[nop]; |
| /* The real hard regno of the operand after the allocation. */ |
| hard_regno[nop] = get_hard_regno (op, true); |
| |
| operand_reg[nop] = reg = op; |
| biggest_mode[nop] = GET_MODE (op); |
| if (GET_CODE (op) == SUBREG) |
| { |
| biggest_mode[nop] = wider_subreg_mode (op); |
| operand_reg[nop] = reg = SUBREG_REG (op); |
| } |
| if (! REG_P (reg)) |
| operand_reg[nop] = NULL_RTX; |
| else if (REGNO (reg) >= FIRST_PSEUDO_REGISTER |
| || ((int) REGNO (reg) |
| == lra_get_elimination_hard_regno (REGNO (reg)))) |
| no_subreg_reg_operand[nop] = reg; |
| else |
| operand_reg[nop] = no_subreg_reg_operand[nop] |
| /* Just use natural mode for elimination result. It should |
| be enough for extra constraints hooks. */ |
| = regno_reg_rtx[hard_regno[nop]]; |
| } |
| |
| /* The constraints are made of several alternatives. Each operand's |
| constraint looks like foo,bar,... with commas separating the |
| alternatives. The first alternatives for all operands go |
| together, the second alternatives go together, etc. |
| |
| First loop over alternatives. */ |
| alternative_mask preferred = curr_id->preferred_alternatives; |
| if (only_alternative >= 0) |
| preferred &= ALTERNATIVE_BIT (only_alternative); |
| |
| for (nalt = 0; nalt < n_alternatives; nalt++) |
| { |
| /* Loop over operands for one constraint alternative. */ |
| if (!TEST_BIT (preferred, nalt)) |
| continue; |
| |
| bool matching_early_clobber[MAX_RECOG_OPERANDS]; |
| curr_small_class_check++; |
| overall = losers = addr_losers = 0; |
| static_reject = reject = reload_nregs = reload_sum = 0; |
| for (nop = 0; nop < n_operands; nop++) |
| { |
| int inc = (curr_static_id |
| ->operand_alternative[nalt * n_operands + nop].reject); |
| if (lra_dump_file != NULL && inc != 0) |
| fprintf (lra_dump_file, |
| " Staticly defined alt reject+=%d\n", inc); |
| static_reject += inc; |
| matching_early_clobber[nop] = 0; |
| } |
| reject += static_reject; |
| early_clobbered_regs_num = 0; |
| |
| for (nop = 0; nop < n_operands; nop++) |
| { |
| const char *p; |
| char *end; |
| int len, c, m, i, opalt_num, this_alternative_matches; |
| bool win, did_match, offmemok, early_clobber_p; |
| /* false => this operand can be reloaded somehow for this |
| alternative. */ |
| bool badop; |
| /* true => this operand can be reloaded if the alternative |
| allows regs. */ |
| bool winreg; |
| /* True if a constant forced into memory would be OK for |
| this operand. */ |
| bool constmemok; |
| enum reg_class this_alternative, this_costly_alternative; |
| HARD_REG_SET this_alternative_set, this_costly_alternative_set; |
| bool this_alternative_match_win, this_alternative_win; |
| bool this_alternative_offmemok; |
| bool scratch_p; |
| machine_mode mode; |
| enum constraint_num cn; |
| |
| opalt_num = nalt * n_operands + nop; |
| if (curr_static_id->operand_alternative[opalt_num].anything_ok) |
| { |
| /* Fast track for no constraints at all. */ |
| curr_alt[nop] = NO_REGS; |
| CLEAR_HARD_REG_SET (curr_alt_set[nop]); |
| curr_alt_win[nop] = true; |
| curr_alt_match_win[nop] = false; |
| curr_alt_offmemok[nop] = false; |
| curr_alt_matches[nop] = -1; |
| continue; |
| } |
| |
| op = no_subreg_reg_operand[nop]; |
| mode = curr_operand_mode[nop]; |
| |
| win = did_match = winreg = offmemok = constmemok = false; |
| badop = true; |
| |
| early_clobber_p = false; |
| p = curr_static_id->operand_alternative[opalt_num].constraint; |
| |
| this_costly_alternative = this_alternative = NO_REGS; |
| /* We update set of possible hard regs besides its class |
| because reg class might be inaccurate. For example, |
| union of LO_REGS (l), HI_REGS(h), and STACK_REG(k) in ARM |
| is translated in HI_REGS because classes are merged by |
| pairs and there is no accurate intermediate class. */ |
| CLEAR_HARD_REG_SET (this_alternative_set); |
| CLEAR_HARD_REG_SET (this_costly_alternative_set); |
| this_alternative_win = false; |
| this_alternative_match_win = false; |
| this_alternative_offmemok = false; |
| this_alternative_matches = -1; |
| |
| /* An empty constraint should be excluded by the fast |
| track. */ |
| lra_assert (*p != 0 && *p != ','); |
| |
| op_reject = 0; |
| /* Scan this alternative's specs for this operand; set WIN |
| if the operand fits any letter in this alternative. |
| Otherwise, clear BADOP if this operand could fit some |
| letter after reloads, or set WINREG if this operand could |
| fit after reloads provided the constraint allows some |
| registers. */ |
| costly_p = false; |
| do |
| { |
| switch ((c = *p, len = CONSTRAINT_LEN (c, p)), c) |
| { |
| case '\0': |
| len = 0; |
| break; |
| case ',': |
| c = '\0'; |
| break; |
| |
| case '&': |
| early_clobber_p = true; |
| break; |
| |
| case '$': |
| op_reject += LRA_MAX_REJECT; |
| break; |
| case '^': |
| op_reject += LRA_LOSER_COST_FACTOR; |
| break; |
| |
| case '#': |
| /* Ignore rest of this alternative. */ |
| c = '\0'; |
| break; |
| |
| case '0': case '1': case '2': case '3': case '4': |
| case '5': case '6': case '7': case '8': case '9': |
| { |
| int m_hregno; |
| bool match_p; |
| |
| m = strtoul (p, &end, 10); |
| p = end; |
| len = 0; |
| lra_assert (nop > m); |
| |
| /* Reject matches if we don't know which operand is |
| bigger. This situation would arguably be a bug in |
| an .md pattern, but could also occur in a user asm. */ |
| if (!ordered_p (GET_MODE_SIZE (biggest_mode[m]), |
| GET_MODE_SIZE (biggest_mode[nop]))) |
| break; |
| |
| /* Don't match wrong asm insn operands for proper |
| diagnostic later. */ |
| if (INSN_CODE (curr_insn) < 0 |
| && (curr_operand_mode[m] == BLKmode |
| || curr_operand_mode[nop] == BLKmode) |
| && curr_operand_mode[m] != curr_operand_mode[nop]) |
| break; |
| |
| m_hregno = get_hard_regno (*curr_id->operand_loc[m], false); |
| /* We are supposed to match a previous operand. |
| If we do, we win if that one did. If we do |
| not, count both of the operands as losers. |
| (This is too conservative, since most of the |
| time only a single reload insn will be needed |
| to make the two operands win. As a result, |
| this alternative may be rejected when it is |
| actually desirable.) */ |
| match_p = false; |
| if (operands_match_p (*curr_id->operand_loc[nop], |
| *curr_id->operand_loc[m], m_hregno)) |
| { |
| /* We should reject matching of an early |
| clobber operand if the matching operand is |
| not dying in the insn. */ |
| if (!TEST_BIT (curr_static_id->operand[m] |
| .early_clobber_alts, nalt) |
| || operand_reg[nop] == NULL_RTX |
| || (find_regno_note (curr_insn, REG_DEAD, |
| REGNO (op)) |
| || REGNO (op) == REGNO (operand_reg[m]))) |
| match_p = true; |
| } |
| if (match_p) |
| { |
| /* If we are matching a non-offsettable |
| address where an offsettable address was |
| expected, then we must reject this |
| combination, because we can't reload |
| it. */ |
| if (curr_alt_offmemok[m] |
| && MEM_P (*curr_id->operand_loc[m]) |
| && curr_alt[m] == NO_REGS && ! curr_alt_win[m]) |
| continue; |
| } |
| else |
| { |
| /* If the operands do not match and one |
| operand is INOUT, we can not match them. |
| Try other possibilities, e.g. other |
| alternatives or commutative operand |
| exchange. */ |
| if (curr_static_id->operand[nop].type == OP_INOUT |
| || curr_static_id->operand[m].type == OP_INOUT) |
| break; |
| /* Operands don't match. If the operands are |
| different user defined explicit hard |
| registers, then we cannot make them match |
| when one is early clobber operand. */ |
| if ((REG_P (*curr_id->operand_loc[nop]) |
| || SUBREG_P (*curr_id->operand_loc[nop])) |
| && (REG_P (*curr_id->operand_loc[m]) |
| || SUBREG_P (*curr_id->operand_loc[m]))) |
| { |
| rtx nop_reg = *curr_id->operand_loc[nop]; |
| if (SUBREG_P (nop_reg)) |
| nop_reg = SUBREG_REG (nop_reg); |
| rtx m_reg = *curr_id->operand_loc[m]; |
| if (SUBREG_P (m_reg)) |
| m_reg = SUBREG_REG (m_reg); |
| |
| if (REG_P (nop_reg) |
| && HARD_REGISTER_P (nop_reg) |
| && REG_USERVAR_P (nop_reg) |
| && REG_P (m_reg) |
| && HARD_REGISTER_P (m_reg) |
| && REG_USERVAR_P (m_reg)) |
| { |
| int i; |
| |
| for (i = 0; i < early_clobbered_regs_num; i++) |
| if (m == early_clobbered_nops[i]) |
| break; |
| if (i < early_clobbered_regs_num |
| || early_clobber_p) |
| break; |
| } |
| } |
| /* Both operands must allow a reload register, |
| otherwise we cannot make them match. */ |
| if (curr_alt[m] == NO_REGS) |
| break; |
| /* Retroactively mark the operand we had to |
| match as a loser, if it wasn't already and |
| it wasn't matched to a register constraint |
| (e.g it might be matched by memory). */ |
| if (curr_alt_win[m] |
| && (operand_reg[m] == NULL_RTX |
| || hard_regno[m] < 0)) |
| { |
| losers++; |
| reload_nregs |
| += (ira_reg_class_max_nregs[curr_alt[m]] |
| [GET_MODE (*curr_id->operand_loc[m])]); |
| } |
| |
| /* Prefer matching earlyclobber alternative as |
| it results in less hard regs required for |
| the insn than a non-matching earlyclobber |
| alternative. */ |
| if (TEST_BIT (curr_static_id->operand[m] |
| .early_clobber_alts, nalt)) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Matching earlyclobber alt:" |
| " reject--\n", |
| nop); |
| if (!matching_early_clobber[m]) |
| { |
| reject--; |
| matching_early_clobber[m] = 1; |
| } |
| } |
| /* Otherwise we prefer no matching |
| alternatives because it gives more freedom |
| in RA. */ |
| else if (operand_reg[nop] == NULL_RTX |
| || (find_regno_note (curr_insn, REG_DEAD, |
| REGNO (operand_reg[nop])) |
| == NULL_RTX)) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Matching alt: reject+=2\n", |
| nop); |
| reject += 2; |
| } |
| } |
| /* If we have to reload this operand and some |
| previous operand also had to match the same |
| thing as this operand, we don't know how to do |
| that. */ |
| if (!match_p || !curr_alt_win[m]) |
| { |
| for (i = 0; i < nop; i++) |
| if (curr_alt_matches[i] == m) |
| break; |
| if (i < nop) |
| break; |
| } |
| else |
| did_match = true; |
| |
| this_alternative_matches = m; |
| /* This can be fixed with reloads if the operand |
| we are supposed to match can be fixed with |
| reloads. */ |
| badop = false; |
| this_alternative = curr_alt[m]; |
| this_alternative_set = curr_alt_set[m]; |
| winreg = this_alternative != NO_REGS; |
| break; |
| } |
| |
| case 'g': |
| if (MEM_P (op) |
| || general_constant_p (op) |
| || spilled_pseudo_p (op)) |
| win = true; |
| cl = GENERAL_REGS; |
| goto reg; |
| |
| default: |
| cn = lookup_constraint (p); |
| switch (get_constraint_type (cn)) |
| { |
| case CT_REGISTER: |
| cl = reg_class_for_constraint (cn); |
| if (cl != NO_REGS) |
| goto reg; |
| break; |
| |
| case CT_CONST_INT: |
| if (CONST_INT_P (op) |
| && insn_const_int_ok_for_constraint (INTVAL (op), cn)) |
| win = true; |
| break; |
| |
| case CT_MEMORY: |
| case CT_RELAXED_MEMORY: |
| if (MEM_P (op) |
| && satisfies_memory_constraint_p (op, cn)) |
| win = true; |
| else if (spilled_pseudo_p (op)) |
| win = true; |
| |
| /* If we didn't already win, we can reload constants |
| via force_const_mem or put the pseudo value into |
| memory, or make other memory by reloading the |
| address like for 'o'. */ |
| if (CONST_POOL_OK_P (mode, op) |
| || MEM_P (op) || REG_P (op) |
| /* We can restore the equiv insn by a |
| reload. */ |
| || equiv_substition_p[nop]) |
| badop = false; |
| constmemok = true; |
| offmemok = true; |
| break; |
| |
| case CT_ADDRESS: |
| /* An asm operand with an address constraint |
| that doesn't satisfy address_operand has |
| is_address cleared, so that we don't try to |
| make a non-address fit. */ |
| if (!curr_static_id->operand[nop].is_address) |
| break; |
| /* If we didn't already win, we can reload the address |
| into a base register. */ |
| if (satisfies_address_constraint_p (op, cn)) |
| win = true; |
| cl = base_reg_class (VOIDmode, ADDR_SPACE_GENERIC, |
| ADDRESS, SCRATCH); |
| badop = false; |
| goto reg; |
| |
| case CT_FIXED_FORM: |
| if (constraint_satisfied_p (op, cn)) |
| win = true; |
| break; |
| |
| case CT_SPECIAL_MEMORY: |
| if (satisfies_memory_constraint_p (op, cn)) |
| win = true; |
| else if (spilled_pseudo_p (op)) |
| win = true; |
| break; |
| } |
| break; |
| |
| reg: |
| if (mode == BLKmode) |
| break; |
| this_alternative = reg_class_subunion[this_alternative][cl]; |
| this_alternative_set |= reg_class_contents[cl]; |
| if (costly_p) |
| { |
| this_costly_alternative |
| = reg_class_subunion[this_costly_alternative][cl]; |
| this_costly_alternative_set |= reg_class_contents[cl]; |
| } |
| winreg = true; |
| if (REG_P (op)) |
| { |
| if (hard_regno[nop] >= 0 |
| && in_hard_reg_set_p (this_alternative_set, |
| mode, hard_regno[nop])) |
| win = true; |
| else if (hard_regno[nop] < 0 |
| && in_class_p (op, this_alternative, NULL)) |
| win = true; |
| } |
| break; |
| } |
| if (c != ' ' && c != '\t') |
| costly_p = c == '*'; |
| } |
| while ((p += len), c); |
| |
| scratch_p = (operand_reg[nop] != NULL_RTX |
| && ira_former_scratch_p (REGNO (operand_reg[nop]))); |
| /* Record which operands fit this alternative. */ |
| if (win) |
| { |
| this_alternative_win = true; |
| if (operand_reg[nop] != NULL_RTX) |
| { |
| if (hard_regno[nop] >= 0) |
| { |
| if (in_hard_reg_set_p (this_costly_alternative_set, |
| mode, hard_regno[nop])) |
| { |
| if (lra_dump_file != NULL) |
| fprintf (lra_dump_file, |
| " %d Costly set: reject++\n", |
| nop); |
| reject++; |
| } |
| } |
| else |
| { |
| /* Prefer won reg to spilled pseudo under other |
| equal conditions for possibe inheritance. */ |
| if (! scratch_p) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Non pseudo reload: reject++\n", |
| nop); |
| reject++; |
| } |
| if (in_class_p (operand_reg[nop], |
| this_costly_alternative, NULL)) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Non pseudo costly reload:" |
| " reject++\n", |
| nop); |
| reject++; |
| } |
| } |
| /* We simulate the behavior of old reload here. |
| Although scratches need hard registers and it |
| might result in spilling other pseudos, no reload |
| insns are generated for the scratches. So it |
| might cost something but probably less than old |
| reload pass believes. */ |
| if (scratch_p) |
| { |
| if (lra_dump_file != NULL) |
| fprintf (lra_dump_file, |
| " %d Scratch win: reject+=2\n", |
| nop); |
| reject += 2; |
| } |
| } |
| } |
| else if (did_match) |
| this_alternative_match_win = true; |
| else |
| { |
| int const_to_mem = 0; |
| bool no_regs_p; |
| |
| reject += op_reject; |
| /* Never do output reload of stack pointer. It makes |
| impossible to do elimination when SP is changed in |
| RTL. */ |
| if (op == stack_pointer_rtx && ! frame_pointer_needed |
| && curr_static_id->operand[nop].type != OP_IN) |
| goto fail; |
| |
| /* If this alternative asks for a specific reg class, see if there |
| is at least one allocatable register in that class. */ |
| no_regs_p |
| = (this_alternative == NO_REGS |
| || (hard_reg_set_subset_p |
| (reg_class_contents[this_alternative], |
| lra_no_alloc_regs))); |
| |
| /* For asms, verify that the class for this alternative is possible |
| for the mode that is specified. */ |
| if (!no_regs_p && INSN_CODE (curr_insn) < 0) |
| { |
| int i; |
| for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
| if (targetm.hard_regno_mode_ok (i, mode) |
| && in_hard_reg_set_p (reg_class_contents[this_alternative], |
| mode, i)) |
| break; |
| if (i == FIRST_PSEUDO_REGISTER) |
| winreg = false; |
| } |
| |
| /* If this operand accepts a register, and if the |
| register class has at least one allocatable register, |
| then this operand can be reloaded. */ |
| if (winreg && !no_regs_p) |
| badop = false; |
| |
| if (badop) |
| { |
| if (lra_dump_file != NULL) |
| fprintf (lra_dump_file, |
| " alt=%d: Bad operand -- refuse\n", |
| nalt); |
| goto fail; |
| } |
| |
| if (this_alternative != NO_REGS) |
| { |
| HARD_REG_SET available_regs |
| = (reg_class_contents[this_alternative] |
| & ~((ira_prohibited_class_mode_regs |
| [this_alternative][mode]) |
| | lra_no_alloc_regs)); |
| if (hard_reg_set_empty_p (available_regs)) |
| { |
| /* There are no hard regs holding a value of given |
| mode. */ |
| if (offmemok) |
| { |
| this_alternative = NO_REGS; |
| if (lra_dump_file != NULL) |
| fprintf (lra_dump_file, |
| " %d Using memory because of" |
| " a bad mode: reject+=2\n", |
| nop); |
| reject += 2; |
| } |
| else |
| { |
| if (lra_dump_file != NULL) |
| fprintf (lra_dump_file, |
| " alt=%d: Wrong mode -- refuse\n", |
| nalt); |
| goto fail; |
| } |
| } |
| } |
| |
| /* If not assigned pseudo has a class which a subset of |
| required reg class, it is a less costly alternative |
| as the pseudo still can get a hard reg of necessary |
| class. */ |
| if (! no_regs_p && REG_P (op) && hard_regno[nop] < 0 |
| && (cl = get_reg_class (REGNO (op))) != NO_REGS |
| && ira_class_subset_p[this_alternative][cl]) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Super set class reg: reject-=3\n", nop); |
| reject -= 3; |
| } |
| |
| this_alternative_offmemok = offmemok; |
| if (this_costly_alternative != NO_REGS) |
| { |
| if (lra_dump_file != NULL) |
| fprintf (lra_dump_file, |
| " %d Costly loser: reject++\n", nop); |
| reject++; |
| } |
| /* If the operand is dying, has a matching constraint, |
| and satisfies constraints of the matched operand |
| which failed to satisfy the own constraints, most probably |
| the reload for this operand will be gone. */ |
| if (this_alternative_matches >= 0 |
| && !curr_alt_win[this_alternative_matches] |
| && REG_P (op) |
| && find_regno_note (curr_insn, REG_DEAD, REGNO (op)) |
| && (hard_regno[nop] >= 0 |
| ? in_hard_reg_set_p (this_alternative_set, |
| mode, hard_regno[nop]) |
| : in_class_p (op, this_alternative, NULL))) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Dying matched operand reload: reject++\n", |
| nop); |
| reject++; |
| } |
| else |
| { |
| /* Strict_low_part requires to reload the register |
| not the sub-register. In this case we should |
| check that a final reload hard reg can hold the |
| value mode. */ |
| if (curr_static_id->operand[nop].strict_low |
| && REG_P (op) |
| && hard_regno[nop] < 0 |
| && GET_CODE (*curr_id->operand_loc[nop]) == SUBREG |
| && ira_class_hard_regs_num[this_alternative] > 0 |
| && (!targetm.hard_regno_mode_ok |
| (ira_class_hard_regs[this_alternative][0], |
| GET_MODE (*curr_id->operand_loc[nop])))) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " alt=%d: Strict low subreg reload -- refuse\n", |
| nalt); |
| goto fail; |
| } |
| losers++; |
| } |
| if (operand_reg[nop] != NULL_RTX |
| /* Output operands and matched input operands are |
| not inherited. The following conditions do not |
| exactly describe the previous statement but they |
| are pretty close. */ |
| && curr_static_id->operand[nop].type != OP_OUT |
| && (this_alternative_matches < 0 |
| || curr_static_id->operand[nop].type != OP_IN)) |
| { |
| int last_reload = (lra_reg_info[ORIGINAL_REGNO |
| (operand_reg[nop])] |
| .last_reload); |
| |
| /* The value of reload_sum has sense only if we |
| process insns in their order. It happens only on |
| the first constraints sub-pass when we do most of |
| reload work. */ |
| if (lra_constraint_iter == 1 && last_reload > bb_reload_num) |
| reload_sum += last_reload - bb_reload_num; |
| } |
| /* If this is a constant that is reloaded into the |
| desired class by copying it to memory first, count |
| that as another reload. This is consistent with |
| other code and is required to avoid choosing another |
| alternative when the constant is moved into memory. |
| Note that the test here is precisely the same as in |
| the code below that calls force_const_mem. */ |
| if (CONST_POOL_OK_P (mode, op) |
| && ((targetm.preferred_reload_class |
| (op, this_alternative) == NO_REGS) |
| || no_input_reloads_p)) |
| { |
| const_to_mem = 1; |
| if (! no_regs_p) |
| losers++; |
| } |
| |
| /* Alternative loses if it requires a type of reload not |
| permitted for this insn. We can always reload |
| objects with a REG_UNUSED note. */ |
| if ((curr_static_id->operand[nop].type != OP_IN |
| && no_output_reloads_p |
| && ! find_reg_note (curr_insn, REG_UNUSED, op)) |
| || (curr_static_id->operand[nop].type != OP_OUT |
| && no_input_reloads_p && ! const_to_mem) |
| || (this_alternative_matches >= 0 |
| && (no_input_reloads_p |
| || (no_output_reloads_p |
| && (curr_static_id->operand |
| [this_alternative_matches].type != OP_IN) |
| && ! find_reg_note (curr_insn, REG_UNUSED, |
| no_subreg_reg_operand |
| [this_alternative_matches]))))) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " alt=%d: No input/output reload -- refuse\n", |
| nalt); |
| goto fail; |
| } |
| |
| /* Alternative loses if it required class pseudo cannot |
| hold value of required mode. Such insns can be |
| described by insn definitions with mode iterators. */ |
| if (GET_MODE (*curr_id->operand_loc[nop]) != VOIDmode |
| && ! hard_reg_set_empty_p (this_alternative_set) |
| /* It is common practice for constraints to use a |
| class which does not have actually enough regs to |
| hold the value (e.g. x86 AREG for mode requiring |
| more one general reg). Therefore we have 2 |
| conditions to check that the reload pseudo cannot |
| hold the mode value. */ |
| && (!targetm.hard_regno_mode_ok |
| (ira_class_hard_regs[this_alternative][0], |
| GET_MODE (*curr_id->operand_loc[nop]))) |
| /* The above condition is not enough as the first |
| reg in ira_class_hard_regs can be not aligned for |
| multi-words mode values. */ |
| && (prohibited_class_reg_set_mode_p |
| (this_alternative, this_alternative_set, |
| GET_MODE (*curr_id->operand_loc[nop])))) |
| { |
| if (lra_dump_file != NULL) |
| fprintf (lra_dump_file, |
| " alt=%d: reload pseudo for op %d " |
| "cannot hold the mode value -- refuse\n", |
| nalt, nop); |
| goto fail; |
| } |
| |
| /* Check strong discouragement of reload of non-constant |
| into class THIS_ALTERNATIVE. */ |
| if (! CONSTANT_P (op) && ! no_regs_p |
| && (targetm.preferred_reload_class |
| (op, this_alternative) == NO_REGS |
| || (curr_static_id->operand[nop].type == OP_OUT |
| && (targetm.preferred_output_reload_class |
| (op, this_alternative) == NO_REGS)))) |
| { |
| if (offmemok && REG_P (op)) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Spill pseudo into memory: reject+=3\n", |
| nop); |
| reject += 3; |
| } |
| else |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Non-prefered reload: reject+=%d\n", |
| nop, LRA_MAX_REJECT); |
| reject += LRA_MAX_REJECT; |
| } |
| } |
| |
| if (! (MEM_P (op) && offmemok) |
| && ! (const_to_mem && constmemok)) |
| { |
| /* We prefer to reload pseudos over reloading other |
| things, since such reloads may be able to be |
| eliminated later. So bump REJECT in other cases. |
| Don't do this in the case where we are forcing a |
| constant into memory and it will then win since |
| we don't want to have a different alternative |
| match then. */ |
| if (! (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Non-pseudo reload: reject+=2\n", |
| nop); |
| reject += 2; |
| } |
| |
| if (! no_regs_p) |
| reload_nregs |
| += ira_reg_class_max_nregs[this_alternative][mode]; |
| |
| if (SMALL_REGISTER_CLASS_P (this_alternative)) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Small class reload: reject+=%d\n", |
| nop, LRA_LOSER_COST_FACTOR / 2); |
| reject += LRA_LOSER_COST_FACTOR / 2; |
| } |
| } |
| |
| /* We are trying to spill pseudo into memory. It is |
| usually more costly than moving to a hard register |
| although it might takes the same number of |
| reloads. |
| |
| Non-pseudo spill may happen also. Suppose a target allows both |
| register and memory in the operand constraint alternatives, |
| then it's typical that an eliminable register has a substition |
| of "base + offset" which can either be reloaded by a simple |
| "new_reg <= base + offset" which will match the register |
| constraint, or a similar reg addition followed by further spill |
| to and reload from memory which will match the memory |
| constraint, but this memory spill will be much more costly |
| usually. |
| |
| Code below increases the reject for both pseudo and non-pseudo |
| spill. */ |
| if (no_regs_p |
| && !(MEM_P (op) && offmemok) |
| && !(REG_P (op) && hard_regno[nop] < 0)) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Spill %spseudo into memory: reject+=3\n", |
| nop, REG_P (op) ? "" : "Non-"); |
| reject += 3; |
| if (VECTOR_MODE_P (mode)) |
| { |
| /* Spilling vectors into memory is usually more |
| costly as they contain big values. */ |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Spill vector pseudo: reject+=2\n", |
| nop); |
| reject += 2; |
| } |
| } |
| |
| /* When we use an operand requiring memory in given |
| alternative, the insn should write *and* read the |
| value to/from memory; this is costly in comparison with |
| an insn alternative which does not use memory |
| (e.g. register or immediate operand). We exclude |
| memory operand for such case as we can satisfy the |
| memory constraints by reloading address. */ |
| if (no_regs_p && offmemok && !MEM_P (op)) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " Using memory insn operand %d: reject+=3\n", |
| nop); |
| reject += 3; |
| } |
| |
| /* If reload requires moving value through secondary |
| memory, it will need one more insn at least. */ |
| if (this_alternative != NO_REGS |
| && REG_P (op) && (cl = get_reg_class (REGNO (op))) != NO_REGS |
| && ((curr_static_id->operand[nop].type != OP_OUT |
| && targetm.secondary_memory_needed (GET_MODE (op), cl, |
| this_alternative)) |
| || (curr_static_id->operand[nop].type != OP_IN |
| && (targetm.secondary_memory_needed |
| (GET_MODE (op), this_alternative, cl))))) |
| losers++; |
| |
| if (MEM_P (op) && offmemok) |
| addr_losers++; |
| else |
| { |
| /* Input reloads can be inherited more often than |
| output reloads can be removed, so penalize output |
| reloads. */ |
| if (!REG_P (op) || curr_static_id->operand[nop].type != OP_IN) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Non input pseudo reload: reject++\n", |
| nop); |
| reject++; |
| } |
| |
| if (curr_static_id->operand[nop].type == OP_INOUT) |
| { |
| if (lra_dump_file != NULL) |
| fprintf |
| (lra_dump_file, |
| " %d Input/Output reload: reject+=%d\n", |
| nop, LRA_LOSER_COST_FACTOR); |
| reject += LRA_LOSER_COST_FACTOR; |
| } |
| } |
| } |
| |
| if (early_clobber_p && ! scratch_p) |
| { |
| if (lra_dump_file != NULL) |
| fprintf (lra_dump_file, |
| " %d Early clobber: reject++\n", nop); |
| reject++; |
| } |
| /* ??? We check early clobbers after processing all operands |
| (see loop below) and there we update the costs more. |
| Should we update the cost (perhaps approximately) here |
| because of early clobber register reloads, or is this too |
| rare or unimportant to be worth doing? */ |
| overall = (losers * LRA_LOSER_COST_FACTOR + reject |
| - (addr_losers == losers ? static_reject : 0)); |
| if ((best_losers == 0 || losers != 0) && best_overall < overall) |
| { |
| if (lra_dump_file != NULL) |
| fprintf (lra_dump_file, |
| " alt=%d,overall=%d,losers=%d -- refuse\n", |
| nalt, overall, losers); |
| goto fail; |
| } |
| |
| if (update_and_check_small_class_inputs (nop, nalt, |
| this_alternative)) |
| { |
| if (lra_dump_file != NULL) |
| fprintf (lra_dump_file, |
| " alt=%d, not enough small class regs -- refuse\n", |
| nalt); |
| goto fail; |
| } |
| curr_alt[nop] = this_alternative; |
| curr_alt_set[nop] = this_alternative_set; |
| curr_alt_win[nop] = this_alternative_win; |
| curr_alt_match_win[nop] = this_alternative_match_win; |
| curr_alt_offmemok[nop] = this_alternative_offmemok; |
| curr_alt_matches[nop] = this_alternative_matches; |
| |
| if (this_alternative_matches >= 0 |
| && !did_match && !this_alternative_win) |
| curr_alt_win[this_alternative_matches] = false; |
| |
| if (early_clobber_p && operand_reg[nop] != NULL_RTX) |
| early_clobbered_nops[early_clobbered_regs_num++] = nop; |
| } |
| |
| if (curr_insn_set != NULL_RTX && n_operands == 2 |
| /* Prevent processing non-move insns. */ |
| && (GET_CODE (SET_SRC (curr_insn_set)) == SUBREG |
| || SET_SRC (curr_insn_set) == no_subreg_reg_operand[1]) |
| && ((! curr_alt_win[0] && ! curr_alt_win[1] |
| && REG_P (no_subreg_reg_operand[0]) |
| && REG_P (no_subreg_reg_operand[1]) |
| && (reg_in_class_p (no_subreg_reg_operand[0], curr_alt[1]) |
| || reg_in_class_p (no_subreg_reg_operand[1], curr_alt[0]))) |
| || (! curr_alt_win[0] && curr_alt_win[1] |
| && REG_P (no_subreg_reg_operand[1]) |
| /* Check that we reload memory not the memory |
| address. */ |
| && ! (curr_alt_offmemok[0] |
| && MEM_P (no_subreg_reg_operand[0])) |
| && reg_in_class_p (no_subreg_reg_operand[1], curr_alt[0])) |
| || (curr_alt_win[0] && ! curr_alt_win[1] |
| && REG_P (no_subreg_reg_operand[0]) |
| /* Check that we reload memory not the memory |
| address. */ |
| && ! (curr_alt_offmemok[1] |
| && MEM_P (no_subreg_reg_operand[1])) |
| && reg_in_class_p (no_subreg_reg_operand[0], curr_alt[1]) |
| && (! CONST_POOL_OK_P (curr_operand_mode[1], |
| no_subreg_reg_operand[1]) |
| || (targetm.preferred_reload_class |
| (no_subreg_reg_operand[1], |
| (enum reg_class) curr_alt[1]) != NO_REGS)) |
| /* If it is a result of recent elimination in move |
| insn we can transform it into an add still by |
| using this alternative. */ |
| && GET_CODE (no_subreg_reg_operand[1]) != PLUS |
| /* Likewise if the source has been replaced with an |
| equivalent value. This only happens once -- the reload |
| will use the equivalent value instead of the register it |
| replaces -- so there should be no danger of cycling. */ |
| && !equiv_substition_p[1]))) |
| { |
| /* We have a move insn and a new reload insn will be similar |
| to the current insn. We should avoid such situation as |
| it results in LRA cycling. */ |
| if (lra_dump_file != NULL) |
| fprintf (lra_dump_file, |
| " Cycle danger: overall += LRA_MAX_REJECT\n"); |
| overall += LRA_MAX_REJECT; |
| } |
| ok_p = true; |
| curr_alt_dont_inherit_ops_num = 0; |
| for (nop = 0; nop < early_clobbered_regs_num; nop++) |
| { |
| int i, j, clobbered_hard_regno, first_conflict_j, last_conflict_j; |
| HARD_REG_SET temp_set; |
| |
| i = early_clobbered_nops[nop]; |
| if ((! curr_alt_win[i] && ! curr_alt_match_win[i]) |
| || hard_regno[i] < 0) |
| continue; |
| lra_assert (operand_reg[i] != NULL_RTX); |
| clobbered_hard_regno = hard_regno[i]; |
| CLEAR_HARD_REG_SET (temp_set); |
| add_to_hard_reg_set (&temp_set, biggest_mode[i], clobbered_hard_regno); |
| first_conflict_j = last_conflict_j = -1; |
| for (j = 0; j < n_operands; j++) |
| if (j == i |
| /* We don't want process insides of match_operator and |
| match_parallel because otherwise we would process |
| their operands once again generating a wrong |
| code. */ |
| || curr_static_id->operand[j].is_operator) |
| continue; |
| else if ((curr_alt_matches[j] == i && curr_alt_match_win[j]) |
| || (curr_alt_matches[i] == j && curr_alt_match_win[i])) |
| continue; |
| /* If we don't reload j-th operand, check conflicts. */ |
| else if ((curr_alt_win[j] || curr_alt_match_win[j]) |
| && uses_hard_regs_p (*curr_id->operand_loc[j], temp_set)) |
| { |
| if (first_conflict_j < 0) |
| first_conflict_j = j; |
| last_conflict_j = j; |
| /* The earlyclobber operand and the conflicting operand |
| cannot both be user-defined hard registers. */ |
| if (HARD_REGISTER_P (operand_reg |