| /* Integrated Register Allocator (IRA) entry point. |
| Copyright (C) 2006-2020 Free Software Foundation, Inc. |
| Contributed by Vladimir Makarov <vmakarov@redhat.com>. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| /* The integrated register allocator (IRA) is a |
| regional register allocator performing graph coloring on a top-down |
| traversal of nested regions.  Graph coloring in a region is based |
| on the Chaitin-Briggs algorithm.  It is called integrated because |
| register coalescing, register live range splitting, and choosing a |
| better hard register are done on-the-fly during coloring.  Register |
| coalescing and choosing a cheaper hard register are done by hard |
| register preferencing during hard register assignment.  The live |
| range splitting is a byproduct of the regional register allocation. |
| |
| Major IRA notions are: |
| |
| o *Region* is a part of the CFG where graph coloring based on |
| the Chaitin-Briggs algorithm is done.  IRA can work on any set |
| of nested CFG regions forming a tree.  Currently the regions |
| are the entire function for the root region and natural loops |
| for the other regions.  Therefore the data structure |
| representing a region is called loop_tree_node. |
| |
| o *Allocno class* is a register class used for allocation of a |
| given allocno.  It means that only hard registers of the given |
| register class can be assigned to the given allocno.  In |
| reality, an even smaller subset of (*profitable*) hard |
| registers can be assigned.  In rare cases, the subset can be |
| even smaller because our modification of the Chaitin-Briggs |
| algorithm requires that the sets of hard registers assignable |
| to allocnos form a forest, i.e. the sets can be ordered in a |
| way where any previous set either does not intersect the given |
| set or is a superset of the given set. |
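| |
| As an illustrative sketch (the register names are |
| hypothetical), the sets {r0}, {r0, r1}, {r0, r1, r2, r3} and |
| {r4, r5} form such a forest: {r0} and {r0, r1} are nested |
| inside {r0, r1, r2, r3}, while {r4, r5} is disjoint from all |
| of them.  Adding {r1, r4} would break the forest property |
| because it intersects {r0, r1, r2, r3} without being a subset |
| or superset of it. |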
| |
| o *Pressure class* is a register class belonging to a set of |
| register classes containing all of the hard-registers available |
| for register allocation.  The set of all pressure classes for a |
| target is defined in the corresponding machine-description file |
| according to some criteria.  Register pressure is calculated |
| only for pressure classes and it affects some IRA decisions |
| such as forming allocation regions. |
| |
| o *Allocno* represents the live range of a pseudo-register in a |
| region. Besides the obvious attributes like the corresponding |
| pseudo-register number, allocno class, conflicting allocnos and |
| conflicting hard-registers, there are a few allocno attributes |
| which are important for understanding the allocation algorithm: |
| |
| - *Live ranges*.  This is a list of ranges of *program points* |
| where the allocno lives.  Program points represent places |
| where a pseudo can be born or become dead (there are |
| approximately two times more program points than the insns) |
| and they are represented by integers starting with 0.  The |
| live ranges are used to find conflicts between allocnos. |
| They also play a very important role in the transformation of |
| the IRA internal representation of several regions into a |
| one-region representation.  The latter is used during the |
| reload pass because each allocno represents all of the |
| corresponding pseudo-registers. |
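| |
| For example (the numbers are hypothetical), an allocno with |
| live ranges [4..9] and [14..17] conflicts with an allocno |
| whose only range is [8..12], because the ranges intersect at |
| points 8 and 9, but not with one whose only range is |
| [10..13]. |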
| |
| - *Hard-register costs*. This is a vector of size equal to the |
| number of available hard-registers of the allocno class. The |
| cost of a callee-clobbered hard-register for an allocno is |
| increased by the cost of save/restore code around the calls |
| through the given allocno's life. If the allocno is a move |
| instruction operand and another operand is a hard-register of |
| the allocno class, the cost of the hard-register is decreased |
| by the move cost. |
| |
| When an allocno is assigned, the hard-register with minimal |
| full cost is used. Initially, a hard-register's full cost is |
| the corresponding value from the hard-register's cost vector. |
| If the allocno is connected by a *copy* (see below) to |
| another allocno which has just received a hard-register, the |
| cost of the hard-register is decreased. Before choosing a |
| hard-register for an allocno, the allocno's current costs of |
| the hard-registers are modified by the conflict hard-register |
| costs of all of the conflicting allocnos which are not |
| assigned yet. |
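| |
| A small worked example (all numbers hypothetical): suppose an |
| allocno's cost vector for {r0, r1} starts as {10, 10}, the |
| allocno lives through one call and r0 is callee-clobbered, |
| adding a save/restore cost of 6 to r0, and the allocno is |
| connected by a copy to an allocno which has just received r1, |
| subtracting a move cost of 4 from r1.  The full costs become |
| {16, 6}, so r1 is chosen. |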
| |
| - *Conflict hard-register costs*. This is a vector of the same |
| size as the hard-register costs vector. To permit an |
| unassigned allocno to get a better hard-register, IRA uses |
| this vector to calculate the final full cost of the |
| available hard-registers. Conflict hard-register costs of an |
| unassigned allocno are also changed with a change of the |
| hard-register cost of the allocno when a copy involving the |
| allocno is processed as described above. This is done to |
| show other unassigned allocnos that a given allocno prefers |
| some hard-registers in order to remove the move instruction |
| corresponding to the copy. |
| |
| o *Cap*. If a pseudo-register does not live in a region but |
| lives in a nested region, IRA creates a special allocno called |
| a cap in the outer region. A region cap is also created for a |
| subregion cap. |
| |
| o *Copy*.  Allocnos can be connected by copies.  Copies are used |
| to modify hard-register costs for allocnos during coloring. |
| Such modifications reflect a preference to use the same |
| hard-register for the allocnos connected by copies.  Usually |
| copies are created for move insns (in this case it results in |
| register coalescing). But IRA also creates copies for operands |
| of an insn which should be assigned to the same hard-register |
| due to constraints in the machine description (it usually |
| results in removing a move generated in reload to satisfy |
| the constraints) and copies referring to the allocno which is |
| the output operand of an instruction and the allocno which is |
| an input operand dying in the instruction (creation of such |
| copies results in less register shuffling). IRA *does not* |
| create copies between the same register allocnos from different |
| regions because we use another technique for propagating |
| hard-register preference on the borders of regions. |
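| |
| For instance, a move insn like (set (reg:SI 100) (reg:SI 101)) |
| gives rise to a copy connecting the allocnos of pseudos 100 |
| and 101; if both are assigned the same hard register, the move |
| becomes trivial and can be removed.  (The pseudo numbers are |
| only illustrative.) |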
| |
| Allocnos (including caps) for the upper region in the region tree |
| *accumulate* information important for coloring from allocnos with |
| the same pseudo-register from nested regions. This includes |
| hard-register and memory costs, conflicts with hard-registers, |
| allocno conflicts, allocno copies and more. *Thus, attributes for |
| allocnos in a region have the same values as if the region had no |
| subregions*. It means that attributes for allocnos in the |
| outermost region corresponding to the function have the same values |
| as though the allocation used only one region which is the entire |
| function.  It also means that we can look at IRA's work as if |
| IRA first did the allocation for the entire function, then |
| improved the allocation for loops, then their subloops, and so |
| on. |
| |
| IRA major passes are: |
| |
| o Building IRA internal representation which consists of the |
| following subpasses: |
| |
| * First, IRA builds regions and creates allocnos (file |
| ira-build.c) and initializes most of their attributes. |
| |
| * Then IRA finds an allocno class for each allocno and |
| calculates its initial (non-accumulated) cost of memory and |
| each hard-register of its allocno class (file ira-cost.c). |
| |
| * IRA creates live ranges of each allocno, calculates register |
| pressure for each pressure class in each region, sets up |
| conflict hard registers for each allocno and info about calls |
| the allocno lives through (file ira-lives.c). |
| |
| * IRA removes low register pressure loops from the regions |
| mostly to speed IRA up (file ira-build.c). |
| |
| * IRA propagates accumulated allocno info from lower region |
| allocnos to corresponding upper region allocnos (file |
| ira-build.c). |
| |
| * IRA creates all caps (file ira-build.c). |
| |
| * Having live-ranges of allocnos and their classes, IRA creates |
| conflicting allocnos for each allocno.  Conflicting allocnos |
| are stored as a bit vector or an array of pointers to the |
| conflicting allocnos, whichever is more profitable (file |
| ira-conflicts.c).  At this point IRA creates allocno copies. |
| |
| o Coloring.  Now IRA has all the necessary info to start the |
| graph coloring process.  It is done in each region on a |
| top-down traversal of the region tree (file ira-color.c). |
| There are the following subpasses: |
| |
| * Finding profitable hard registers of the corresponding |
| allocno class for each allocno.  For example, only |
| callee-saved hard registers are frequently profitable for |
| allocnos living through calls.  If the profitable hard |
| register set of an allocno does not form a tree based on the |
| subset relation, we use some approximation to form the tree. |
| This approximation is used to figure out trivial colorability |
| of allocnos.  The approximation is needed only in pretty rare |
| cases. |
| |
| * Putting allocnos onto the coloring stack. IRA uses Briggs |
| optimistic coloring which is a major improvement over |
| Chaitin's coloring. Therefore IRA does not spill allocnos at |
| this point. There is some freedom in the order of putting |
| allocnos on the stack which can affect the final result of |
| the allocation. IRA uses some heuristics to improve the |
| order.  The major one is to form *threads* from colorable |
| allocnos and push them onto the stack by threads.  A thread is |
| a set of non-conflicting colorable allocnos connected by |
| copies.  The thread contains allocnos from the colorable |
| bucket or colorable allocnos already pushed onto the coloring |
| stack.  Pushing thread allocnos one after another onto the |
| stack increases the chances of removing copies when the |
| allocnos get the same hard reg. |
| |
| We also use a modification of the Chaitin-Briggs algorithm |
| which works for intersected register classes of allocnos.  To |
| figure out trivial colorability of allocnos, the |
| above-mentioned tree of hard register sets is used.  To get an |
| idea of how the algorithm works, consider an i386 example: an |
| allocno to which any general hard register can be assigned. |
| If the allocno conflicts with eight allocnos to which only the |
| EAX register can be assigned, the given allocno is still |
| trivially colorable because all conflicting allocnos might be |
| assigned only to EAX and all other general hard registers are |
| still free. |
| |
| To get an idea of the trivial colorability criterion used, it |
| is also useful to read the article "Graph-Coloring Register |
| Allocation for Irregular Architectures" by Michael D. Smith |
| and Glenn Holloway.  The major difference between the |
| article's approach and the approach used in IRA is that |
| Smith's approach takes register classes only from the machine |
| description while IRA calculates register classes from the |
| intermediate code too (e.g. an explicit usage of hard |
| registers in RTL code for parameter passing can result in the |
| creation of additional register classes which contain or |
| exclude the hard registers).  That makes the IRA approach |
| useful for improving coloring even for architectures with |
| regular register files, and in fact some benchmarking shows |
| that the improvement for regular class architectures is even |
| bigger than for irregular ones.  Another difference is that |
| Smith's approach chooses the intersection of classes of all |
| insn operands in which a given pseudo occurs.  IRA can use |
| bigger classes if doing so is still more profitable than |
| memory usage. |
| |
| * Popping the allocnos from the stack and assigning them hard |
| registers.  If IRA cannot assign a hard register to an |
| allocno and the allocno is coalesced, IRA undoes the |
| coalescing and puts the uncoalesced allocnos onto the stack in |
| the hope that some such allocnos will get a hard register |
| separately.  If IRA fails to assign a hard register, or memory |
| is more profitable for the allocno, IRA spills the allocno. |
| IRA assigns the allocno the hard-register with minimal full |
| allocation cost, which reflects the cost of usage of the |
| hard-register for the allocno and the cost of usage of the |
| hard-register for allocnos conflicting with the given allocno. |
| |
| * Chaitin-Briggs coloring assigns as many pseudos as possible |
| to hard registers.  After coloring we try to improve the |
| allocation from the cost point of view.  We improve the |
| allocation by spilling some allocnos and assigning the freed |
| hard registers to other allocnos if that decreases the overall |
| allocation cost. |
| |
| * After assigning allocnos in the region, IRA modifies the |
| hard register and memory costs for the corresponding allocnos |
| in the subregions to reflect the cost of possible loads, |
| stores, or moves on the border of the region and its |
| subregions.  When the default regional allocation algorithm is |
| used (-fira-algorithm=mixed), IRA just propagates the |
| assignment for allocnos if the register pressure in the region |
| for the corresponding pressure class is less than the number |
| of available hard registers for the given pressure class. |
| |
| o Spill/restore code moving.  When IRA performs an allocation |
| by traversing regions in top-down order, it does not know what |
| happens below in the region tree.  Therefore, sometimes IRA |
| misses opportunities to perform a better allocation.  A simple |
| optimization tries to improve allocation in a region having |
| subregions and contained in another region.  If the |
| corresponding allocnos in the subregion are spilled, it spills |
| the region allocno if it is profitable.  The optimization |
| implements a simple iterative algorithm performing profitable |
| transformations while they are still possible.  It is fast in |
| practice, so there is no real need for a better time complexity |
| algorithm. |
| |
| o Code change. After coloring, two allocnos representing the |
| same pseudo-register outside and inside a region respectively |
| may be assigned to different locations (hard-registers or |
| memory). In this case IRA creates and uses a new |
| pseudo-register inside the region and adds code to move allocno |
| values on the region's borders. This is done during top-down |
| traversal of the regions (file ira-emit.c). In some |
| complicated cases IRA can create a new allocno to move allocno |
| values (e.g. when a swap of values stored in two hard-registers |
| is needed). At this stage, the new allocno is marked as |
| spilled. IRA still creates the pseudo-register and the moves |
| on the region borders even when both allocnos were assigned to |
| the same hard-register. If the reload pass spills a |
| pseudo-register for some reason, the effect will be smaller |
| because another allocno will still be in the hard-register. In |
| most cases, this is better than spilling both allocnos.  If |
| reload does not change the allocation for the two |
| pseudo-registers, the trivial move will be removed by |
| post-reload optimizations. IRA does not generate moves for |
| allocnos assigned to the same hard register when the default |
| regional allocation algorithm is used and the register pressure |
| in the region for the corresponding pressure class is less than |
| the number of available hard registers for the given pressure |
| class. |
| IRA also does some optimizations to remove redundant stores and |
| to reduce code duplication on the region borders. |
| |
| o Flattening internal representation. After changing code, IRA |
| transforms its internal representation for several regions into |
| one region representation (file ira-build.c). This process is |
| called IR flattening.  Such a process is more complicated than |
| rebuilding the IR would be, but it is much faster. |
| |
| o After IR flattening, IRA tries to assign hard registers to all |
| spilled allocnos. This is implemented by a simple and fast |
| priority coloring algorithm (see function |
| ira_reassign_conflict_allocnos::ira-color.c). Here new allocnos |
| created during the code change pass can be assigned to hard |
| registers. |
| |
| o At the end IRA calls the reload pass. The reload pass |
| communicates with IRA through several functions in file |
| ira-color.c to improve its decisions in |
| |
| * sharing stack slots for the spilled pseudos based on IRA info |
| about pseudo-register conflicts. |
| |
| * reassigning hard-registers to all spilled pseudos at the end |
| of each reload iteration. |
| |
| * choosing a better hard-register to spill based on IRA info |
| about pseudo-register live ranges and the register pressure |
| in places where the pseudo-register lives. |
| |
| IRA uses a lot of data representing the target processors. These |
| data are initialized in file ira.c. |
| |
| If the function has no loops (or the loops are ignored when |
| -fira-algorithm=CB is used), we have classic Chaitin-Briggs |
| coloring (only instead of a separate pass of coalescing, we use |
| hard register preferencing).  In such a case, IRA works much |
| faster because many things are not done (like IR flattening, the |
| spill/restore optimization, and the code change). |
| |
| Literature worth reading for a better understanding of the code: |
| |
| o Preston Briggs, Keith D. Cooper, Linda Torczon. Improvements to |
| Graph Coloring Register Allocation. |
| |
| o David Callahan, Brian Koblenz. Register allocation via |
| hierarchical graph coloring. |
| |
| o Keith Cooper, Anshuman Dasgupta, Jason Eckhardt. Revisiting Graph |
| Coloring Register Allocation: A Study of the Chaitin-Briggs and |
| Callahan-Koblenz Algorithms. |
| |
| o Guei-Yuan Lueh, Thomas Gross, and Ali-Reza Adl-Tabatabai. Global |
| Register Allocation Based on Graph Fusion. |
| |
| o Michael D. Smith and Glenn Holloway.  Graph-Coloring Register |
| Allocation for Irregular Architectures. |
| |
| o Vladimir Makarov. The Integrated Register Allocator for GCC. |
| |
| o Vladimir Makarov. The top-down register allocator for irregular |
| register file architectures. |
| |
| */ |
| |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "df.h" |
| #include "memmodel.h" |
| #include "tm_p.h" |
| #include "insn-config.h" |
| #include "regs.h" |
| #include "ira.h" |
| #include "ira-int.h" |
| #include "diagnostic-core.h" |
| #include "cfgrtl.h" |
| #include "cfgbuild.h" |
| #include "cfgcleanup.h" |
| #include "expr.h" |
| #include "tree-pass.h" |
| #include "output.h" |
| #include "reload.h" |
| #include "cfgloop.h" |
| #include "lra.h" |
| #include "dce.h" |
| #include "dbgcnt.h" |
| #include "rtl-iter.h" |
| #include "shrink-wrap.h" |
| #include "print-rtl.h" |
| |
| struct target_ira default_target_ira; |
| class target_ira_int default_target_ira_int; |
| #if SWITCHABLE_TARGET |
| struct target_ira *this_target_ira = &default_target_ira; |
| class target_ira_int *this_target_ira_int = &default_target_ira_int; |
| #endif |
| |
| /* A modified value of flag `-fira-verbose' used internally. */ |
| int internal_flag_ira_verbose; |
| |
| /* Dump file of the allocator if it is not NULL. */ |
| FILE *ira_dump_file; |
| |
| /* The number of elements in the following array. */ |
| int ira_spilled_reg_stack_slots_num; |
| |
| /* The following array contains info about spilled pseudo-registers |
| stack slots used in current function so far. */ |
| class ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots; |
| |
| /* Correspondingly overall cost of the allocation, overall cost before |
| reload, cost of the allocnos assigned to hard-registers, cost of |
| the allocnos assigned to memory, cost of loads, stores and register |
| move insns generated for pseudo-register live range splitting (see |
| ira-emit.c). */ |
| int64_t ira_overall_cost, overall_cost_before; |
| int64_t ira_reg_cost, ira_mem_cost; |
| int64_t ira_load_cost, ira_store_cost, ira_shuffle_cost; |
| int ira_move_loops_num, ira_additional_jumps_num; |
| |
| /* All registers that can be eliminated. */ |
| |
| HARD_REG_SET eliminable_regset; |
| |
| /* Value of max_reg_num () before IRA work start. This value helps |
| us to recognize a situation when new pseudos were created during |
| IRA work. */ |
| static int max_regno_before_ira; |
| |
| /* Temporary hard reg set used for various calculations.  */ |
| static HARD_REG_SET temp_hard_regset; |
| |
| #define last_mode_for_init_move_cost \ |
| (this_target_ira_int->x_last_mode_for_init_move_cost) |
| |
| |
| /* The function sets up the map IRA_REG_MODE_HARD_REGSET. */ |
| static void |
| setup_reg_mode_hard_regset (void) |
| { |
| int i, m, hard_regno; |
| |
| for (m = 0; m < NUM_MACHINE_MODES; m++) |
| for (hard_regno = 0; hard_regno < FIRST_PSEUDO_REGISTER; hard_regno++) |
| { |
| CLEAR_HARD_REG_SET (ira_reg_mode_hard_regset[hard_regno][m]); |
| for (i = hard_regno_nregs (hard_regno, (machine_mode) m) - 1; |
| i >= 0; i--) |
| if (hard_regno + i < FIRST_PSEUDO_REGISTER) |
| SET_HARD_REG_BIT (ira_reg_mode_hard_regset[hard_regno][m], |
| hard_regno + i); |
| } |
| } |
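| |
| /* For example, on a 32-bit target where hard_regno_nregs (0, DImode) |
| is 2, IRA_REG_MODE_HARD_REGSET[0][DImode] contains hard registers 0 |
| and 1: a DImode value starting in register 0 also occupies register |
| 1.  (The concrete numbers depend on the target.)  */ |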
| |
| |
| #define no_unit_alloc_regs \ |
| (this_target_ira_int->x_no_unit_alloc_regs) |
| |
| /* Set up the arrays IRA_CLASS_HARD_REGS, IRA_CLASS_HARD_REGS_NUM, |
| IRA_CLASS_HARD_REG_INDEX and IRA_NON_ORDERED_CLASS_HARD_REGS.  */ |
| static void |
| setup_class_hard_regs (void) |
| { |
| int cl, i, hard_regno, n; |
| HARD_REG_SET processed_hard_reg_set; |
| |
| ira_assert (SHRT_MAX >= FIRST_PSEUDO_REGISTER); |
| for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--) |
| { |
| temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs; |
| CLEAR_HARD_REG_SET (processed_hard_reg_set); |
| for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
| { |
| ira_non_ordered_class_hard_regs[cl][i] = -1; |
| ira_class_hard_reg_index[cl][i] = -1; |
| } |
| for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
| { |
| #ifdef REG_ALLOC_ORDER |
| hard_regno = reg_alloc_order[i]; |
| #else |
| hard_regno = i; |
| #endif |
| if (TEST_HARD_REG_BIT (processed_hard_reg_set, hard_regno)) |
| continue; |
| SET_HARD_REG_BIT (processed_hard_reg_set, hard_regno); |
| if (! TEST_HARD_REG_BIT (temp_hard_regset, hard_regno)) |
| ira_class_hard_reg_index[cl][hard_regno] = -1; |
| else |
| { |
| ira_class_hard_reg_index[cl][hard_regno] = n; |
| ira_class_hard_regs[cl][n++] = hard_regno; |
| } |
| } |
| ira_class_hard_regs_num[cl] = n; |
| for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
| if (TEST_HARD_REG_BIT (temp_hard_regset, i)) |
| ira_non_ordered_class_hard_regs[cl][n++] = i; |
| ira_assert (ira_class_hard_regs_num[cl] == n); |
| } |
| } |
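| |
| /* As an illustration (a hypothetical target): if REG_ALLOC_ORDER is |
| {2, 0, 1, ...} and a class contains allocatable hard regs 0, 1 and |
| 2, then IRA_CLASS_HARD_REGS for the class is {2, 0, 1}, |
| IRA_NON_ORDERED_CLASS_HARD_REGS is {0, 1, 2}, and |
| IRA_CLASS_HARD_REG_INDEX maps hard reg 2 to 0, hard reg 0 to 1, and |
| hard reg 1 to 2.  */ |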
| |
| /* Set up global variables defining info about hard registers for the |
| allocation. These depend on USE_HARD_FRAME_P whose TRUE value means |
| that we can use the hard frame pointer for the allocation. */ |
| static void |
| setup_alloc_regs (bool use_hard_frame_p) |
| { |
| #ifdef ADJUST_REG_ALLOC_ORDER |
| ADJUST_REG_ALLOC_ORDER; |
| #endif |
| no_unit_alloc_regs = fixed_nonglobal_reg_set; |
| if (! use_hard_frame_p) |
| add_to_hard_reg_set (&no_unit_alloc_regs, Pmode, |
| HARD_FRAME_POINTER_REGNUM); |
| setup_class_hard_regs (); |
| } |
| |
| |
| |
| #define alloc_reg_class_subclasses \ |
| (this_target_ira_int->x_alloc_reg_class_subclasses) |
| |
| /* Initialize the table of subclasses of each reg class. */ |
| static void |
| setup_reg_subclasses (void) |
| { |
| int i, j; |
| HARD_REG_SET temp_hard_regset2; |
| |
| for (i = 0; i < N_REG_CLASSES; i++) |
| for (j = 0; j < N_REG_CLASSES; j++) |
| alloc_reg_class_subclasses[i][j] = LIM_REG_CLASSES; |
| |
| for (i = 0; i < N_REG_CLASSES; i++) |
| { |
| if (i == (int) NO_REGS) |
| continue; |
| |
| temp_hard_regset = reg_class_contents[i] & ~no_unit_alloc_regs; |
| if (hard_reg_set_empty_p (temp_hard_regset)) |
| continue; |
| for (j = 0; j < N_REG_CLASSES; j++) |
| if (i != j) |
| { |
| enum reg_class *p; |
| |
| temp_hard_regset2 = reg_class_contents[j] & ~no_unit_alloc_regs; |
| if (! hard_reg_set_subset_p (temp_hard_regset, |
| temp_hard_regset2)) |
| continue; |
| p = &alloc_reg_class_subclasses[j][0]; |
| while (*p != LIM_REG_CLASSES) p++; |
| *p = (enum reg_class) i; |
| } |
| } |
| } |
| |
| |
| |
| /* Set up IRA_CLASS_SUBSET_P, IRA_MEMORY_MOVE_COST and |
| IRA_MAX_MEMORY_MOVE_COST.  */ |
| static void |
| setup_class_subset_and_memory_move_costs (void) |
| { |
| int cl, cl2, mode, cost; |
| HARD_REG_SET temp_hard_regset2; |
| |
| for (mode = 0; mode < MAX_MACHINE_MODE; mode++) |
| ira_memory_move_cost[mode][NO_REGS][0] |
| = ira_memory_move_cost[mode][NO_REGS][1] = SHRT_MAX; |
| for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--) |
| { |
| if (cl != (int) NO_REGS) |
| for (mode = 0; mode < MAX_MACHINE_MODE; mode++) |
| { |
| ira_max_memory_move_cost[mode][cl][0] |
| = ira_memory_move_cost[mode][cl][0] |
| = memory_move_cost ((machine_mode) mode, |
| (reg_class_t) cl, false); |
| ira_max_memory_move_cost[mode][cl][1] |
| = ira_memory_move_cost[mode][cl][1] |
| = memory_move_cost ((machine_mode) mode, |
| (reg_class_t) cl, true); |
| /* Costs for NO_REGS are used in cost calculation on the |
| 1st pass when the preferred register classes are not |
| known yet. In this case we take the best scenario. */ |
| if (ira_memory_move_cost[mode][NO_REGS][0] |
| > ira_memory_move_cost[mode][cl][0]) |
| ira_max_memory_move_cost[mode][NO_REGS][0] |
| = ira_memory_move_cost[mode][NO_REGS][0] |
| = ira_memory_move_cost[mode][cl][0]; |
| if (ira_memory_move_cost[mode][NO_REGS][1] |
| > ira_memory_move_cost[mode][cl][1]) |
| ira_max_memory_move_cost[mode][NO_REGS][1] |
| = ira_memory_move_cost[mode][NO_REGS][1] |
| = ira_memory_move_cost[mode][cl][1]; |
| } |
| } |
| for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--) |
| for (cl2 = (int) N_REG_CLASSES - 1; cl2 >= 0; cl2--) |
| { |
| temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs; |
| temp_hard_regset2 = reg_class_contents[cl2] & ~no_unit_alloc_regs; |
| ira_class_subset_p[cl][cl2] |
| = hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2); |
| if (! hard_reg_set_empty_p (temp_hard_regset2) |
| && hard_reg_set_subset_p (reg_class_contents[cl2], |
| reg_class_contents[cl])) |
| for (mode = 0; mode < MAX_MACHINE_MODE; mode++) |
| { |
| cost = ira_memory_move_cost[mode][cl2][0]; |
| if (cost > ira_max_memory_move_cost[mode][cl][0]) |
| ira_max_memory_move_cost[mode][cl][0] = cost; |
| cost = ira_memory_move_cost[mode][cl2][1]; |
| if (cost > ira_max_memory_move_cost[mode][cl][1]) |
| ira_max_memory_move_cost[mode][cl][1] = cost; |
| } |
| } |
| for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--) |
| for (mode = 0; mode < MAX_MACHINE_MODE; mode++) |
| { |
| ira_memory_move_cost[mode][cl][0] |
| = ira_max_memory_move_cost[mode][cl][0]; |
| ira_memory_move_cost[mode][cl][1] |
| = ira_max_memory_move_cost[mode][cl][1]; |
| } |
| setup_reg_subclasses (); |
| } |
| |
| |
| |
| /* Define the following macro if allocation through malloc is |
| preferable.  */ |
| #define IRA_NO_OBSTACK |
| |
| #ifndef IRA_NO_OBSTACK |
| /* Obstack used for storing all dynamic data (except bitmaps) of the |
| IRA. */ |
| static struct obstack ira_obstack; |
| #endif |
| |
| /* Obstack used for storing all bitmaps of the IRA. */ |
| static struct bitmap_obstack ira_bitmap_obstack; |
| |
| /* Allocate memory of size LEN for IRA data. */ |
| void * |
| ira_allocate (size_t len) |
| { |
| void *res; |
| |
| #ifndef IRA_NO_OBSTACK |
| res = obstack_alloc (&ira_obstack, len); |
| #else |
| res = xmalloc (len); |
| #endif |
| return res; |
| } |
| |
| /* Free memory ADDR allocated for IRA data. */ |
| void |
| ira_free (void *addr ATTRIBUTE_UNUSED) |
| { |
| #ifndef IRA_NO_OBSTACK |
| /* do nothing */ |
| #else |
| free (addr); |
| #endif |
| } |
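| |
| /* A minimal usage sketch (illustrative only; the variable name is |
| hypothetical): |
| |
| int *costs = (int *) ira_allocate (n * sizeof (int)); |
| ... fill and use COSTS ... |
| ira_free (costs); |
| |
| With IRA_NO_OBSTACK defined (the default above), this pairs |
| xmalloc/free; otherwise the data would live on IRA's obstack and |
| ira_free would be a no-op.  */ |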
| |
| |
| /* Allocate and return a bitmap for IRA.  */ |
| bitmap |
| ira_allocate_bitmap (void) |
| { |
| return BITMAP_ALLOC (&ira_bitmap_obstack); |
| } |
| |
| /* Free bitmap B allocated for IRA. */ |
| void |
| ira_free_bitmap (bitmap b ATTRIBUTE_UNUSED) |
| { |
| /* do nothing */ |
| } |
| |
| |
| |
| /* Output information about allocation of all allocnos (except for |
| caps) into file F. */ |
| void |
| ira_print_disposition (FILE *f) |
| { |
| int i, n, max_regno; |
| ira_allocno_t a; |
| basic_block bb; |
| |
| fprintf (f, "Disposition:"); |
| max_regno = max_reg_num (); |
| for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) |
| for (a = ira_regno_allocno_map[i]; |
| a != NULL; |
| a = ALLOCNO_NEXT_REGNO_ALLOCNO (a)) |
| { |
| if (n % 4 == 0) |
| fprintf (f, "\n"); |
| n++; |
| fprintf (f, " %4d:r%-4d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a)); |
| if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL) |
| fprintf (f, "b%-3d", bb->index); |
| else |
| fprintf (f, "l%-3d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num); |
| if (ALLOCNO_HARD_REGNO (a) >= 0) |
| fprintf (f, " %3d", ALLOCNO_HARD_REGNO (a)); |
| else |
| fprintf (f, " mem"); |
| } |
| fprintf (f, "\n"); |
| } |
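| |
| /* A fragment of the disposition dump printed above might look like |
| this (the numbers are illustrative): |
| |
| Disposition: |
| 0:r73 l0 1 1:r74 l0 mem 2:r75 b3 0 |
| |
| i.e. allocno 0 for pseudo r73 in loop 0 got hard reg 1, allocno 1 |
| was spilled to memory, and allocno 2 in basic block 3 got hard |
| reg 0.  */ |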
| |
| /* Output information about allocation of all allocnos into |
| stderr.  */ |
| void |
| ira_debug_disposition (void) |
| { |
| ira_print_disposition (stderr); |
| } |
| |
| |
| |
| /* Set up ira_stack_reg_pressure_class which is the biggest pressure |
| register class containing stack registers or NO_REGS if there are |
| no stack registers. To find this class, we iterate through all |
| register pressure classes and choose the first register pressure |
| class containing all the stack registers and having the biggest |
| size. */ |
| static void |
| setup_stack_reg_pressure_class (void) |
| { |
| ira_stack_reg_pressure_class = NO_REGS; |
| #ifdef STACK_REGS |
| { |
| int i, best, size; |
| enum reg_class cl; |
| HARD_REG_SET temp_hard_regset2; |
| |
| CLEAR_HARD_REG_SET (temp_hard_regset); |
| for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++) |
| SET_HARD_REG_BIT (temp_hard_regset, i); |
| best = 0; |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| { |
| cl = ira_pressure_classes[i]; |
| temp_hard_regset2 = temp_hard_regset & reg_class_contents[cl]; |
| size = hard_reg_set_size (temp_hard_regset2); |
| if (best < size) |
| { |
| best = size; |
| ira_stack_reg_pressure_class = cl; |
| } |
| } |
| } |
| #endif |
| } |
| |
| /* Find pressure classes, which are register classes for which we |
| calculate register pressure in IRA, in register pressure |
| sensitive insn scheduling, and in register pressure sensitive |
| loop invariant motion. |
| |
| To make register pressure calculation easy, we always use |
| non-intersected register pressure classes.  A move between hard |
| registers of one register pressure class should not be more |
| expensive than a load and store of the hard registers.  Most |
| likely an allocno class will be a subset of a register pressure |
| class, and in many cases equal to a register pressure class. |
| That makes usage of register pressure classes a good |
| approximation for finding high register pressure.  */ |
| static void |
| setup_pressure_classes (void) |
| { |
| int cost, i, n, curr; |
| int cl, cl2; |
| enum reg_class pressure_classes[N_REG_CLASSES]; |
| int m; |
| HARD_REG_SET temp_hard_regset2; |
| bool insert_p; |
| |
| if (targetm.compute_pressure_classes) |
| n = targetm.compute_pressure_classes (pressure_classes); |
| else |
| { |
| n = 0; |
| for (cl = 0; cl < N_REG_CLASSES; cl++) |
| { |
| if (ira_class_hard_regs_num[cl] == 0) |
| continue; |
| if (ira_class_hard_regs_num[cl] != 1 |
| /* A register class without subclasses may contain a few |
| hard registers and movement between them is costly |
| (e.g. SPARC FPCC registers).  We should still consider it |
| as a candidate for a pressure class.  */ |
| && alloc_reg_class_subclasses[cl][0] < cl) |
| { |
| /* Check that the moves between any hard registers of the |
| current class are not more expensive for a legal mode |
| than load/store of the hard registers of the current |
| class.  Such a class is a potential candidate to be a |
| register pressure class.  */ |
| for (m = 0; m < NUM_MACHINE_MODES; m++) |
| { |
| temp_hard_regset |
| = (reg_class_contents[cl] |
| & ~(no_unit_alloc_regs |
| | ira_prohibited_class_mode_regs[cl][m])); |
| if (hard_reg_set_empty_p (temp_hard_regset)) |
| continue; |
| ira_init_register_move_cost_if_necessary ((machine_mode) m); |
| cost = ira_register_move_cost[m][cl][cl]; |
| if (cost <= ira_max_memory_move_cost[m][cl][1] |
| || cost <= ira_max_memory_move_cost[m][cl][0]) |
| break; |
| } |
| if (m >= NUM_MACHINE_MODES) |
| continue; |
| } |
| curr = 0; |
| insert_p = true; |
| temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs; |
| /* Remove the pressure classes added so far which are subsets |
| of the current candidate class.  Prefer GENERAL_REGS as a |
| pressure register class to another class containing the same |
| allocatable hard registers.  We do this because machine |
| dependent cost hooks might give wrong costs for the latter |
| class but always give the right cost for the former class |
| (GENERAL_REGS).  */ |
| for (i = 0; i < n; i++) |
| { |
| cl2 = pressure_classes[i]; |
| temp_hard_regset2 = (reg_class_contents[cl2] |
| & ~no_unit_alloc_regs); |
| if (hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2) |
| && (temp_hard_regset != temp_hard_regset2 |
| || cl2 == (int) GENERAL_REGS)) |
| { |
| pressure_classes[curr++] = (enum reg_class) cl2; |
| insert_p = false; |
| continue; |
| } |
| if (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset) |
| && (temp_hard_regset2 != temp_hard_regset |
| || cl == (int) GENERAL_REGS)) |
| continue; |
| if (temp_hard_regset2 == temp_hard_regset) |
| insert_p = false; |
| pressure_classes[curr++] = (enum reg_class) cl2; |
| } |
| /* If the current candidate is a subset of a pressure class |
| added so far, don't add it to the list of the pressure |
| classes.  */ |
| if (insert_p) |
| pressure_classes[curr++] = (enum reg_class) cl; |
| n = curr; |
| } |
| } |
| #ifdef ENABLE_IRA_CHECKING |
| { |
| HARD_REG_SET ignore_hard_regs; |
| |
| /* Check pressure classes correctness: here we check that hard |
| registers from all register pressure classes contain all hard |
| registers available for the allocation.  */ |
| CLEAR_HARD_REG_SET (temp_hard_regset); |
| CLEAR_HARD_REG_SET (temp_hard_regset2); |
| ignore_hard_regs = no_unit_alloc_regs; |
| for (cl = 0; cl < LIM_REG_CLASSES; cl++) |
| { |
| /* For some targets (like MIPS with MD_REGS), there are some |
| classes with hard registers available for allocation but |
| not able to hold a value of any mode.  */ |
| for (m = 0; m < NUM_MACHINE_MODES; m++) |
| if (contains_reg_of_mode[cl][m]) |
| break; |
| if (m >= NUM_MACHINE_MODES) |
| { |
| ignore_hard_regs |= reg_class_contents[cl]; |
| continue; |
| } |
| for (i = 0; i < n; i++) |
| if ((int) pressure_classes[i] == cl) |
| break; |
| temp_hard_regset2 |= reg_class_contents[cl]; |
| if (i < n) |
| temp_hard_regset |= reg_class_contents[cl]; |
| } |
| for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
| /* Some targets (like SPARC with ICC reg) have allocatable regs |
| for which no reg class is defined. */ |
| if (REGNO_REG_CLASS (i) == NO_REGS) |
| SET_HARD_REG_BIT (ignore_hard_regs, i); |
| temp_hard_regset &= ~ignore_hard_regs; |
| temp_hard_regset2 &= ~ignore_hard_regs; |
| ira_assert (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset)); |
| } |
| #endif |
| ira_pressure_classes_num = 0; |
| for (i = 0; i < n; i++) |
| { |
| cl = (int) pressure_classes[i]; |
| ira_reg_pressure_class_p[cl] = true; |
| ira_pressure_classes[ira_pressure_classes_num++] = (enum reg_class) cl; |
| } |
| setup_stack_reg_pressure_class (); |
| } |
| |
| /* Set up IRA_UNIFORM_CLASS_P.  A uniform class is a register class |
| whose register move cost between any registers of the class is |
| the same as for all its subclasses.  We use the data to speed up |
| the 2nd pass of calculations of allocno costs.  */ |
| static void |
| setup_uniform_class_p (void) |
| { |
| int i, cl, cl2, m; |
| |
| for (cl = 0; cl < N_REG_CLASSES; cl++) |
| { |
| ira_uniform_class_p[cl] = false; |
| if (ira_class_hard_regs_num[cl] == 0) |
| continue; |
| /* We cannot use alloc_reg_class_subclasses here because move |
| cost hooks do not take into account that some registers are |
| unavailable for the subtarget.  E.g. for i686, INT_SSE_REGS |
| is an element of alloc_reg_class_subclasses for GENERAL_REGS |
| because SSE regs are unavailable.  */ |
| for (i = 0; (cl2 = reg_class_subclasses[cl][i]) != LIM_REG_CLASSES; i++) |
| { |
| if (ira_class_hard_regs_num[cl2] == 0) |
| continue; |
| for (m = 0; m < NUM_MACHINE_MODES; m++) |
| if (contains_reg_of_mode[cl][m] && contains_reg_of_mode[cl2][m]) |
| { |
| ira_init_register_move_cost_if_necessary ((machine_mode) m); |
| if (ira_register_move_cost[m][cl][cl] |
| != ira_register_move_cost[m][cl2][cl2]) |
| break; |
| } |
| if (m < NUM_MACHINE_MODES) |
| break; |
| } |
| if (cl2 == LIM_REG_CLASSES) |
| ira_uniform_class_p[cl] = true; |
| } |
| } |
| |
| /* Set up IRA_ALLOCNO_CLASSES, IRA_ALLOCNO_CLASSES_NUM, |
| IRA_IMPORTANT_CLASSES, and IRA_IMPORTANT_CLASSES_NUM. |
| |
| A target may have many subtargets, and not all target hard |
| registers can be used for allocation, e.g. the x86 port in 32-bit |
| mode cannot use the hard registers introduced in x86-64 like |
| r8-r15.  Some classes might have the same allocatable hard |
| registers, e.g. INDEX_REGS and GENERAL_REGS in the x86 port in |
| 32-bit mode.  To decrease the effort of various calculations, we |
| introduce allocno classes which contain unique non-empty sets of |
| allocatable hard-registers. |
| |
| Pseudo class cost calculation in ira-costs.c is very expensive. |
| Therefore we are trying to decrease the number of classes involved |
| in such calculation.  Register classes used in the cost |
| calculation are called important classes.  They are allocno |
| classes and other non-empty classes whose allocatable hard |
| register sets are inside of an allocno class hard register set. |
| At first sight, it looks like they are just allocno classes.  That |
| is not true.  In the example of the x86 port in 32-bit mode, the |
| allocno classes will contain GENERAL_REGS but not LEGACY_REGS |
| (because the allocatable hard registers are the same for both |
| classes).  The important classes will contain GENERAL_REGS and |
| LEGACY_REGS.  This is done because a machine description insn |
| constraint may refer to LEGACY_REGS and the code in ira-costs.c is |
| mostly based on investigation of the insn constraints.  */ |
| static void |
| setup_allocno_and_important_classes (void) |
| { |
| int i, j, n, cl; |
| bool set_p; |
| HARD_REG_SET temp_hard_regset2; |
| static enum reg_class classes[LIM_REG_CLASSES + 1]; |
| |
| n = 0; |
| /* Collect classes which contain unique sets of allocatable hard |
| registers. Prefer GENERAL_REGS to other classes containing the |
| same set of hard registers. */ |
| for (i = 0; i < LIM_REG_CLASSES; i++) |
| { |
| temp_hard_regset = reg_class_contents[i] & ~no_unit_alloc_regs; |
| for (j = 0; j < n; j++) |
| { |
| cl = classes[j]; |
| temp_hard_regset2 = reg_class_contents[cl] & ~no_unit_alloc_regs; |
| if (temp_hard_regset == temp_hard_regset2) |
| break; |
| } |
| if (j >= n || targetm.additional_allocno_class_p (i)) |
| classes[n++] = (enum reg_class) i; |
| else if (i == GENERAL_REGS) |
| /* Prefer general regs.  For the i386 example, it means that |
| we prefer GENERAL_REGS over INDEX_REGS or LEGACY_REGS |
| (all of them consist of the same available hard |
| registers).  */ |
| classes[j] = (enum reg_class) i; |
| } |
| classes[n] = LIM_REG_CLASSES; |
| |
| /* Set up classes which can be used for allocnos as classes |
| containing non-empty unique sets of allocatable hard |
| registers. */ |
| ira_allocno_classes_num = 0; |
| for (i = 0; (cl = classes[i]) != LIM_REG_CLASSES; i++) |
| if (ira_class_hard_regs_num[cl] > 0) |
| ira_allocno_classes[ira_allocno_classes_num++] = (enum reg_class) cl; |
| ira_important_classes_num = 0; |
| /* Add non-allocno classes containing a non-empty set of |
| allocatable hard regs.  */ |
| for (cl = 0; cl < N_REG_CLASSES; cl++) |
| if (ira_class_hard_regs_num[cl] > 0) |
| { |
| temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs; |
| set_p = false; |
| for (j = 0; j < ira_allocno_classes_num; j++) |
| { |
| temp_hard_regset2 = (reg_class_contents[ira_allocno_classes[j]] |
| & ~no_unit_alloc_regs); |
| if ((enum reg_class) cl == ira_allocno_classes[j]) |
| break; |
| else if (hard_reg_set_subset_p (temp_hard_regset, |
| temp_hard_regset2)) |
| set_p = true; |
| } |
| if (set_p && j >= ira_allocno_classes_num) |
| ira_important_classes[ira_important_classes_num++] |
| = (enum reg_class) cl; |
| } |
| /* Now add allocno classes to the important classes. */ |
| for (j = 0; j < ira_allocno_classes_num; j++) |
| ira_important_classes[ira_important_classes_num++] |
| = ira_allocno_classes[j]; |
| for (cl = 0; cl < N_REG_CLASSES; cl++) |
| { |
| ira_reg_allocno_class_p[cl] = false; |
| ira_reg_pressure_class_p[cl] = false; |
| } |
| for (j = 0; j < ira_allocno_classes_num; j++) |
| ira_reg_allocno_class_p[ira_allocno_classes[j]] = true; |
| setup_pressure_classes (); |
| setup_uniform_class_p (); |
| } |
| |
| /* Set up the translation in CLASS_TRANSLATE of all classes into a |
| class given by array CLASSES of length CLASSES_NUM.  The function |
| is used to translate any reg class to an allocno class or to a |
| pressure class.  This translation is necessary for some |
| calculations when we can use only allocno or pressure classes and |
| such translation represents an approximate representation of all |
| classes. |
| |
| The translation in the case when the allocatable hard register set |
| of a given class is a subset of the allocatable hard register set |
| of a class in CLASSES is pretty simple.  We use the smallest class |
| from CLASSES containing the given class.  If the allocatable hard |
| register set of a given class is not a subset of any corresponding |
| set of a class from CLASSES, we use the cheapest (from the |
| load/store point of view) class from CLASSES whose set intersects |
| with the given class set.  */ |
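| /* For instance (purely illustrative): with CLASSES = {GENERAL_REGS, |
| FLOAT_REGS}, a class whose allocatable registers all lie inside |
| GENERAL_REGS translates to GENERAL_REGS, while a class straddling |
| both translates to whichever of the two has the cheaper memory |
| moves.  */ |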
| static void |
| setup_class_translate_array (enum reg_class *class_translate, |
| int classes_num, enum reg_class *classes) |
| { |
| int cl, mode; |
| enum reg_class aclass, best_class, *cl_ptr; |
| int i, cost, min_cost, best_cost; |
| |
| for (cl = 0; cl < N_REG_CLASSES; cl++) |
| class_translate[cl] = NO_REGS; |
| |
| for (i = 0; i < classes_num; i++) |
| { |
| aclass = classes[i]; |
| for (cl_ptr = &alloc_reg_class_subclasses[aclass][0]; |
| (cl = *cl_ptr) != LIM_REG_CLASSES; |
| cl_ptr++) |
| if (class_translate[cl] == NO_REGS) |
| class_translate[cl] = aclass; |
| class_translate[aclass] = aclass; |
| } |
| /* For classes which are not fully covered by one of the given |
| classes (in other words covered by more than one given class), |
| use the cheapest class.  */ |
| for (cl = 0; cl < N_REG_CLASSES; cl++) |
| { |
| if (cl == NO_REGS || class_translate[cl] != NO_REGS) |
| continue; |
| best_class = NO_REGS; |
| best_cost = INT_MAX; |
| for (i = 0; i < classes_num; i++) |
| { |
| aclass = classes[i]; |
| temp_hard_regset = (reg_class_contents[aclass] |
| & reg_class_contents[cl] |
| & ~no_unit_alloc_regs); |
| if (! hard_reg_set_empty_p (temp_hard_regset)) |
| { |
| min_cost = INT_MAX; |
| for (mode = 0; mode < MAX_MACHINE_MODE; mode++) |
| { |
| cost = (ira_memory_move_cost[mode][aclass][0] |
| + ira_memory_move_cost[mode][aclass][1]); |
| if (min_cost > cost) |
| min_cost = cost; |
| } |
| if (best_class == NO_REGS || best_cost > min_cost) |
| { |
| best_class = aclass; |
| best_cost = min_cost; |
| } |
| } |
| } |
| class_translate[cl] = best_class; |
| } |
| } |
| |
| /* Set up array IRA_ALLOCNO_CLASS_TRANSLATE and |
| IRA_PRESSURE_CLASS_TRANSLATE. */ |
| static void |
| setup_class_translate (void) |
| { |
| setup_class_translate_array (ira_allocno_class_translate, |
| ira_allocno_classes_num, ira_allocno_classes); |
| setup_class_translate_array (ira_pressure_class_translate, |
| ira_pressure_classes_num, ira_pressure_classes); |
| } |
| |
| /* Order numbers of allocno classes in original target allocno class |
| array, -1 for non-allocno classes. */ |
| static int allocno_class_order[N_REG_CLASSES]; |
| |
| /* The function used to sort the important classes. */ |
| static int |
| comp_reg_classes_func (const void *v1p, const void *v2p) |
| { |
| enum reg_class cl1 = *(const enum reg_class *) v1p; |
| enum reg_class cl2 = *(const enum reg_class *) v2p; |
| enum reg_class tcl1, tcl2; |
| int diff; |
| |
| tcl1 = ira_allocno_class_translate[cl1]; |
| tcl2 = ira_allocno_class_translate[cl2]; |
| if (tcl1 != NO_REGS && tcl2 != NO_REGS |
| && (diff = allocno_class_order[tcl1] - allocno_class_order[tcl2]) != 0) |
| return diff; |
| return (int) cl1 - (int) cl2; |
| } |
| |
| /* For the correct work of function setup_reg_class_relations we |
| need to reorder the important classes according to the order of |
| their allocno classes.  It places important classes containing |
| the same allocatable hard register set adjacent to each other, |
| with the allocno class for that hard register set right after |
| the other important classes with the same set. |
| |
| In the example from the comments of function |
| setup_allocno_and_important_classes, it places LEGACY_REGS and |
| GENERAL_REGS close to each other, with GENERAL_REGS after |
| LEGACY_REGS.  */ |
| static void |
| reorder_important_classes (void) |
| { |
| int i; |
| |
| for (i = 0; i < N_REG_CLASSES; i++) |
| allocno_class_order[i] = -1; |
| for (i = 0; i < ira_allocno_classes_num; i++) |
| allocno_class_order[ira_allocno_classes[i]] = i; |
| qsort (ira_important_classes, ira_important_classes_num, |
| sizeof (enum reg_class), comp_reg_classes_func); |
| for (i = 0; i < ira_important_classes_num; i++) |
| ira_important_class_nums[ira_important_classes[i]] = i; |
| } |
| |
| /* Set up IRA_REG_CLASS_SUBUNION, IRA_REG_CLASS_SUPERUNION, |
| IRA_REG_CLASS_SUPER_CLASSES, IRA_REG_CLASSES_INTERSECT, and |
| IRA_REG_CLASSES_INTERSECT_P. For the meaning of the relations, |
| please see corresponding comments in ira-int.h. */ |
| static void |
| setup_reg_class_relations (void) |
| { |
| int i, cl1, cl2, cl3; |
| HARD_REG_SET intersection_set, union_set, temp_set2; |
| bool important_class_p[N_REG_CLASSES]; |
| |
| memset (important_class_p, 0, sizeof (important_class_p)); |
| for (i = 0; i < ira_important_classes_num; i++) |
| important_class_p[ira_important_classes[i]] = true; |
| for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++) |
| { |
| ira_reg_class_super_classes[cl1][0] = LIM_REG_CLASSES; |
| for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++) |
| { |
| ira_reg_classes_intersect_p[cl1][cl2] = false; |
| ira_reg_class_intersect[cl1][cl2] = NO_REGS; |
| ira_reg_class_subset[cl1][cl2] = NO_REGS; |
| temp_hard_regset = reg_class_contents[cl1] & ~no_unit_alloc_regs; |
| temp_set2 = reg_class_contents[cl2] & ~no_unit_alloc_regs; |
| if (hard_reg_set_empty_p (temp_hard_regset) |
| && hard_reg_set_empty_p (temp_set2)) |
| { |
| /* Both classes have no allocatable hard registers |
| -- take all class hard registers into account and use |
| reg_class_subunion and reg_class_superunion.  */ |
| for (i = 0;; i++) |
| { |
| cl3 = reg_class_subclasses[cl1][i]; |
| if (cl3 == LIM_REG_CLASSES) |
| break; |
| if (reg_class_subset_p (ira_reg_class_intersect[cl1][cl2], |
| (enum reg_class) cl3)) |
| ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3; |
| } |
| ira_reg_class_subunion[cl1][cl2] = reg_class_subunion[cl1][cl2]; |
| ira_reg_class_superunion[cl1][cl2] = reg_class_superunion[cl1][cl2]; |
| continue; |
| } |
| ira_reg_classes_intersect_p[cl1][cl2] |
| = hard_reg_set_intersect_p (temp_hard_regset, temp_set2); |
| if (important_class_p[cl1] && important_class_p[cl2] |
| && hard_reg_set_subset_p (temp_hard_regset, temp_set2)) |
| { |
| /* CL1 and CL2 are important classes and CL1's allocatable |
| hard register set is inside of CL2's allocatable hard |
| registers -- record CL2 as a super class of CL1.  */ |
| enum reg_class *p; |
| |
| p = &ira_reg_class_super_classes[cl1][0]; |
| while (*p != LIM_REG_CLASSES) |
| p++; |
| *p++ = (enum reg_class) cl2; |
| *p = LIM_REG_CLASSES; |
| } |
| ira_reg_class_subunion[cl1][cl2] = NO_REGS; |
| ira_reg_class_superunion[cl1][cl2] = NO_REGS; |
| intersection_set = (reg_class_contents[cl1] |
| & reg_class_contents[cl2] |
| & ~no_unit_alloc_regs); |
| union_set = ((reg_class_contents[cl1] | reg_class_contents[cl2]) |
| & ~no_unit_alloc_regs); |
| for (cl3 = 0; cl3 < N_REG_CLASSES; cl3++) |
| { |
| temp_hard_regset = reg_class_contents[cl3] & ~no_unit_alloc_regs; |
| if (hard_reg_set_subset_p (temp_hard_regset, intersection_set)) |
| { |
| /* CL3 allocatable hard register set is inside of |
| intersection of allocatable hard register sets |
| of CL1 and CL2. */ |
| if (important_class_p[cl3]) |
| { |
| temp_set2 |
| = (reg_class_contents |
| [ira_reg_class_intersect[cl1][cl2]]); |
| temp_set2 &= ~no_unit_alloc_regs; |
| if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2) |
| /* If the allocatable hard register sets are |
| the same, prefer GENERAL_REGS or the |
| smallest class for debugging |
| purposes. */ |
| || (temp_hard_regset == temp_set2 |
| && (cl3 == GENERAL_REGS |
| || ((ira_reg_class_intersect[cl1][cl2] |
| != GENERAL_REGS) |
| && hard_reg_set_subset_p |
| (reg_class_contents[cl3], |
| reg_class_contents |
| [(int) |
| ira_reg_class_intersect[cl1][cl2]]))))) |
| ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3; |
| } |
| temp_set2 |
| = (reg_class_contents[ira_reg_class_subset[cl1][cl2]] |
| & ~no_unit_alloc_regs); |
| if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2) |
| /* Ignore unavailable hard registers and prefer |
| smallest class for debugging purposes. */ |
| || (temp_hard_regset == temp_set2 |
| && hard_reg_set_subset_p |
| (reg_class_contents[cl3], |
| reg_class_contents |
| [(int) ira_reg_class_subset[cl1][cl2]]))) |
| ira_reg_class_subset[cl1][cl2] = (enum reg_class) cl3; |
| } |
| if (important_class_p[cl3] |
| && hard_reg_set_subset_p (temp_hard_regset, union_set)) |
| { |
| /* CL3 allocatable hard register set is inside of |
| union of allocatable hard register sets of CL1 |
| and CL2. */ |
| temp_set2 |
| = (reg_class_contents[ira_reg_class_subunion[cl1][cl2]] |
| & ~no_unit_alloc_regs); |
| if (ira_reg_class_subunion[cl1][cl2] == NO_REGS |
| || (hard_reg_set_subset_p (temp_set2, temp_hard_regset) |
| |
| && (temp_set2 != temp_hard_regset |
| || cl3 == GENERAL_REGS |
| /* If the allocatable hard register sets are the |
| same, prefer GENERAL_REGS or the smallest |
| class for debugging purposes. */ |
| || (ira_reg_class_subunion[cl1][cl2] != GENERAL_REGS |
| && hard_reg_set_subset_p |
| (reg_class_contents[cl3], |
| reg_class_contents |
| [(int) ira_reg_class_subunion[cl1][cl2]]))))) |
| ira_reg_class_subunion[cl1][cl2] = (enum reg_class) cl3; |
| } |
| if (hard_reg_set_subset_p (union_set, temp_hard_regset)) |
| { |
| /* CL3 allocatable hard register set contains union |
| of allocatable hard register sets of CL1 and |
| CL2. */ |
| temp_set2 |
| = (reg_class_contents[ira_reg_class_superunion[cl1][cl2]] |
| & ~no_unit_alloc_regs); |
| if (ira_reg_class_superunion[cl1][cl2] == NO_REGS |
| || (hard_reg_set_subset_p (temp_hard_regset, temp_set2) |
| |
| && (temp_set2 != temp_hard_regset |
| || cl3 == GENERAL_REGS |
| /* If the allocatable hard register sets are the |
| same, prefer GENERAL_REGS or the smallest |
| class for debugging purposes. */ |
| || (ira_reg_class_superunion[cl1][cl2] != GENERAL_REGS |
| && hard_reg_set_subset_p |
| (reg_class_contents[cl3], |
| reg_class_contents |
| [(int) ira_reg_class_superunion[cl1][cl2]]))))) |
| ira_reg_class_superunion[cl1][cl2] = (enum reg_class) cl3; |
| } |
| } |
| } |
| } |
| } |
| |
| /* Output all uniform and important classes into file F. */ |
| static void |
| print_uniform_and_important_classes (FILE *f) |
| { |
| int i, cl; |
| |
| fprintf (f, "Uniform classes:\n"); |
| for (cl = 0; cl < N_REG_CLASSES; cl++) |
| if (ira_uniform_class_p[cl]) |
| fprintf (f, " %s", reg_class_names[cl]); |
| fprintf (f, "\nImportant classes:\n"); |
| for (i = 0; i < ira_important_classes_num; i++) |
| fprintf (f, " %s", reg_class_names[ira_important_classes[i]]); |
| fprintf (f, "\n"); |
| } |
| |
| /* Output all possible allocno or pressure classes and their |
| translation map into file F. */ |
| static void |
| print_translated_classes (FILE *f, bool pressure_p) |
| { |
| int classes_num = (pressure_p |
| ? ira_pressure_classes_num : ira_allocno_classes_num); |
| enum reg_class *classes = (pressure_p |
| ? ira_pressure_classes : ira_allocno_classes); |
| enum reg_class *class_translate = (pressure_p |
| ? ira_pressure_class_translate |
| : ira_allocno_class_translate); |
| int i; |
| |
| fprintf (f, "%s classes:\n", pressure_p ? "Pressure" : "Allocno"); |
| for (i = 0; i < classes_num; i++) |
| fprintf (f, " %s", reg_class_names[classes[i]]); |
| fprintf (f, "\nClass translation:\n"); |
| for (i = 0; i < N_REG_CLASSES; i++) |
| fprintf (f, " %s -> %s\n", reg_class_names[i], |
| reg_class_names[class_translate[i]]); |
| } |
| |
| /* Output all possible allocno and pressure classes and the |
| translation maps into stderr.  */ |
| void |
| ira_debug_allocno_classes (void) |
| { |
| print_uniform_and_important_classes (stderr); |
| print_translated_classes (stderr, false); |
| print_translated_classes (stderr, true); |
| } |
| |
| /* Set up different arrays concerning class subsets, allocno and |
| important classes. */ |
| static void |
| find_reg_classes (void) |
| { |
| setup_allocno_and_important_classes (); |
| setup_class_translate (); |
| reorder_important_classes (); |
| setup_reg_class_relations (); |
| } |
| |
| |
| |
| /* Set up the array IRA_HARD_REGNO_ALLOCNO_CLASS.  */ |
| static void |
| setup_hard_regno_aclass (void) |
| { |
| int i; |
| |
| for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
| { |
| #if 1 |
| ira_hard_regno_allocno_class[i] |
| = (TEST_HARD_REG_BIT (no_unit_alloc_regs, i) |
| ? NO_REGS |
| : ira_allocno_class_translate[REGNO_REG_CLASS (i)]); |
| #else |
| int j; |
| enum reg_class cl; |
| ira_hard_regno_allocno_class[i] = NO_REGS; |
| for (j = 0; j < ira_allocno_classes_num; j++) |
| { |
| cl = ira_allocno_classes[j]; |
| if (ira_class_hard_reg_index[cl][i] >= 0) |
| { |
| ira_hard_regno_allocno_class[i] = cl; |
| break; |
| } |
| } |
| #endif |
| } |
| } |
| |
| |
| |
| /* Form IRA_REG_CLASS_MAX_NREGS and IRA_REG_CLASS_MIN_NREGS maps. */ |
| static void |
| setup_reg_class_nregs (void) |
| { |
| int i, cl, cl2, m; |
| |
| for (m = 0; m < MAX_MACHINE_MODE; m++) |
| { |
| for (cl = 0; cl < N_REG_CLASSES; cl++) |
| ira_reg_class_max_nregs[cl][m] |
| = ira_reg_class_min_nregs[cl][m] |
| = targetm.class_max_nregs ((reg_class_t) cl, (machine_mode) m); |
| for (cl = 0; cl < N_REG_CLASSES; cl++) |
| for (i = 0; |
| (cl2 = alloc_reg_class_subclasses[cl][i]) != LIM_REG_CLASSES; |
| i++) |
| if (ira_reg_class_min_nregs[cl2][m] |
| < ira_reg_class_min_nregs[cl][m]) |
| ira_reg_class_min_nregs[cl][m] = ira_reg_class_min_nregs[cl2][m]; |
| } |
| } |
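| |
| /* For example, on a typical 32-bit target |
| IRA_REG_CLASS_MAX_NREGS[GENERAL_REGS][DImode] would be 2, since a |
| DImode value needs two word-sized registers.  (Illustrative; the |
| actual values come from the class_max_nregs target hook.)  */ |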
| |
| |
| |
| /* Set up IRA_PROHIBITED_CLASS_MODE_REGS and IRA_CLASS_SINGLETON. |
| This function is called once IRA_CLASS_HARD_REGS has been initialized. */ |
| static void |
| setup_prohibited_class_mode_regs (void) |
| { |
| int j, k, hard_regno, cl, last_hard_regno, count; |
| |
| for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--) |
| { |
| temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs; |
| for (j = 0; j < NUM_MACHINE_MODES; j++) |
| { |
| count = 0; |
| last_hard_regno = -1; |
| CLEAR_HARD_REG_SET (ira_prohibited_class_mode_regs[cl][j]); |
| for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--) |
| { |
| hard_regno = ira_class_hard_regs[cl][k]; |
| if (!targetm.hard_regno_mode_ok (hard_regno, (machine_mode) j)) |
| SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j], |
| hard_regno); |
| else if (in_hard_reg_set_p (temp_hard_regset, |
| (machine_mode) j, hard_regno)) |
| { |
| last_hard_regno = hard_regno; |
| count++; |
| } |
| } |
| ira_class_singleton[cl][j] = (count == 1 ? last_hard_regno : -1); |
| } |
| } |
| } |
| |
| /* Clarify IRA_PROHIBITED_CLASS_MODE_REGS by excluding hard registers |
| spanning from one register pressure class to another one. It is |
| called after defining the pressure classes. */ |
| static void |
| clarify_prohibited_class_mode_regs (void) |
| { |
| int j, k, hard_regno, cl, pclass, nregs; |
| |
| for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--) |
| for (j = 0; j < NUM_MACHINE_MODES; j++) |
| { |
| CLEAR_HARD_REG_SET (ira_useful_class_mode_regs[cl][j]); |
| for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--) |
| { |
| hard_regno = ira_class_hard_regs[cl][k]; |
| if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j], hard_regno)) |
| continue; |
| nregs = hard_regno_nregs (hard_regno, (machine_mode) j); |
| if (hard_regno + nregs > FIRST_PSEUDO_REGISTER) |
| { |
| SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j], |
| hard_regno); |
| continue; |
| } |
| pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)]; |
| for (nregs--; nregs >= 0; nregs--) |
| if (((enum reg_class) pclass |
| != ira_pressure_class_translate[REGNO_REG_CLASS |
| (hard_regno + nregs)])) |
| { |
| SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j], |
| hard_regno); |
| break; |
| } |
| if (!TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j], |
| hard_regno)) |
| add_to_hard_reg_set (&ira_useful_class_mode_regs[cl][j], |
| (machine_mode) j, hard_regno); |
| } |
| } |
| } |
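| |
| /* For example (schematic): if a multi-register TImode value placed |
| in hard register N would also occupy N+1, and N+1 belongs to a |
| different pressure class than N, the loop above prohibits N for |
| TImode, because register pressure could not be accounted |
| consistently for such a value. */ |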
| |
| /* Allocate and initialize IRA_REGISTER_MOVE_COST, IRA_MAY_MOVE_IN_COST |
| and IRA_MAY_MOVE_OUT_COST for MODE. */ |
| void |
| ira_init_register_move_cost (machine_mode mode) |
| { |
| static unsigned short last_move_cost[N_REG_CLASSES][N_REG_CLASSES]; |
| bool all_match = true; |
| unsigned int i, cl1, cl2; |
| HARD_REG_SET ok_regs; |
| |
| ira_assert (ira_register_move_cost[mode] == NULL |
| && ira_may_move_in_cost[mode] == NULL |
| && ira_may_move_out_cost[mode] == NULL); |
| CLEAR_HARD_REG_SET (ok_regs); |
| for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
| if (targetm.hard_regno_mode_ok (i, mode)) |
| SET_HARD_REG_BIT (ok_regs, i); |
| |
| /* Note that we might be asked about the move costs of modes that |
| cannot be stored in any hard register, for example if an inline |
| asm tries to create a register operand with an impossible mode. |
| We therefore can't assert have_regs_of_mode[mode] here. */ |
| for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++) |
| for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++) |
| { |
| int cost; |
| if (!hard_reg_set_intersect_p (ok_regs, reg_class_contents[cl1]) |
| || !hard_reg_set_intersect_p (ok_regs, reg_class_contents[cl2])) |
| { |
| if ((ira_reg_class_max_nregs[cl1][mode] |
| > ira_class_hard_regs_num[cl1]) |
| || (ira_reg_class_max_nregs[cl2][mode] |
| > ira_class_hard_regs_num[cl2])) |
| cost = 65535; |
| else |
| cost = (ira_memory_move_cost[mode][cl1][0] |
| + ira_memory_move_cost[mode][cl2][1]) * 2; |
| } |
| else |
| { |
| cost = register_move_cost (mode, (enum reg_class) cl1, |
| (enum reg_class) cl2); |
| ira_assert (cost < 65535); |
| } |
| all_match &= (last_move_cost[cl1][cl2] == cost); |
| last_move_cost[cl1][cl2] = cost; |
| } |
| if (all_match && last_mode_for_init_move_cost != -1) |
| { |
| ira_register_move_cost[mode] |
| = ira_register_move_cost[last_mode_for_init_move_cost]; |
| ira_may_move_in_cost[mode] |
| = ira_may_move_in_cost[last_mode_for_init_move_cost]; |
| ira_may_move_out_cost[mode] |
| = ira_may_move_out_cost[last_mode_for_init_move_cost]; |
| return; |
| } |
| last_mode_for_init_move_cost = mode; |
| ira_register_move_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES); |
| ira_may_move_in_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES); |
| ira_may_move_out_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES); |
| for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++) |
| for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++) |
| { |
| int cost; |
| enum reg_class *p1, *p2; |
| |
| if (last_move_cost[cl1][cl2] == 65535) |
| { |
| ira_register_move_cost[mode][cl1][cl2] = 65535; |
| ira_may_move_in_cost[mode][cl1][cl2] = 65535; |
| ira_may_move_out_cost[mode][cl1][cl2] = 65535; |
| } |
| else |
| { |
| cost = last_move_cost[cl1][cl2]; |
| |
| for (p2 = &reg_class_subclasses[cl2][0]; |
| *p2 != LIM_REG_CLASSES; p2++) |
| if (ira_class_hard_regs_num[*p2] > 0 |
| && (ira_reg_class_max_nregs[*p2][mode] |
| <= ira_class_hard_regs_num[*p2])) |
| cost = MAX (cost, ira_register_move_cost[mode][cl1][*p2]); |
| |
| for (p1 = &reg_class_subclasses[cl1][0]; |
| *p1 != LIM_REG_CLASSES; p1++) |
| if (ira_class_hard_regs_num[*p1] > 0 |
| && (ira_reg_class_max_nregs[*p1][mode] |
| <= ira_class_hard_regs_num[*p1])) |
| cost = MAX (cost, ira_register_move_cost[mode][*p1][cl2]); |
| |
| ira_assert (cost <= 65535); |
| ira_register_move_cost[mode][cl1][cl2] = cost; |
| |
| if (ira_class_subset_p[cl1][cl2]) |
| ira_may_move_in_cost[mode][cl1][cl2] = 0; |
| else |
| ira_may_move_in_cost[mode][cl1][cl2] = cost; |
| |
| if (ira_class_subset_p[cl2][cl1]) |
| ira_may_move_out_cost[mode][cl1][cl2] = 0; |
| else |
| ira_may_move_out_cost[mode][cl1][cl2] = cost; |
| } |
| } |
| } |
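| |
| /* A usage sketch (callers normally go through the inline wrapper |
| ira_init_register_move_cost_if_necessary from ira-int.h rather |
| than calling the function above directly): |
| |
| ira_init_register_move_cost_if_necessary (mode); |
| cost = ira_register_move_cost[mode][cl1][cl2]; |
| |
| The wrapper allocates the tables for MODE only on first use. */ |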
| |
| |
| |
| /* This is called once per compiler invocation. It sets up |
| arrays whose values do not depend on the function being |
| compiled. */ |
| void |
| ira_init_once (void) |
| { |
| ira_init_costs_once (); |
| lra_init_once (); |
| |
| ira_use_lra_p = targetm.lra_p (); |
| } |
| |
| /* Free ira_register_move_cost, ira_may_move_in_cost and |
| ira_may_move_out_cost for each mode. */ |
| void |
| target_ira_int::free_register_move_costs (void) |
| { |
| int mode, i; |
| |
| /* Reset move_cost and friends, making sure we only free shared |
| table entries once. */ |
| for (mode = 0; mode < MAX_MACHINE_MODE; mode++) |
| if (x_ira_register_move_cost[mode]) |
| { |
| for (i = 0; |
| i < mode && (x_ira_register_move_cost[i] |
| != x_ira_register_move_cost[mode]); |
| i++) |
| ; |
| if (i == mode) |
| { |
| free (x_ira_register_move_cost[mode]); |
| free (x_ira_may_move_in_cost[mode]); |
| free (x_ira_may_move_out_cost[mode]); |
| } |
| } |
| memset (x_ira_register_move_cost, 0, sizeof x_ira_register_move_cost); |
| memset (x_ira_may_move_in_cost, 0, sizeof x_ira_may_move_in_cost); |
| memset (x_ira_may_move_out_cost, 0, sizeof x_ira_may_move_out_cost); |
| last_mode_for_init_move_cost = -1; |
| } |
| |
| target_ira_int::~target_ira_int () |
| { |
| free_ira_costs (); |
| free_register_move_costs (); |
| } |
| |
| /* This is called every time the register-related information |
| changes. */ |
| void |
| ira_init (void) |
| { |
| this_target_ira_int->free_register_move_costs (); |
| setup_reg_mode_hard_regset (); |
| setup_alloc_regs (flag_omit_frame_pointer != 0); |
| setup_class_subset_and_memory_move_costs (); |
| setup_reg_class_nregs (); |
| setup_prohibited_class_mode_regs (); |
| find_reg_classes (); |
| clarify_prohibited_class_mode_regs (); |
| setup_hard_regno_aclass (); |
| ira_init_costs (); |
| } |
| |
| |
| #define ira_prohibited_mode_move_regs_initialized_p \ |
| (this_target_ira_int->x_ira_prohibited_mode_move_regs_initialized_p) |
| |
| /* Set up IRA_PROHIBITED_MODE_MOVE_REGS. */ |
| static void |
| setup_prohibited_mode_move_regs (void) |
| { |
| int i, j; |
| rtx test_reg1, test_reg2, move_pat; |
| rtx_insn *move_insn; |
| |
| if (ira_prohibited_mode_move_regs_initialized_p) |
| return; |
| ira_prohibited_mode_move_regs_initialized_p = true; |
| test_reg1 = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1); |
| test_reg2 = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 2); |
| move_pat = gen_rtx_SET (test_reg1, test_reg2); |
| move_insn = gen_rtx_INSN (VOIDmode, 0, 0, 0, move_pat, 0, -1, 0); |
| for (i = 0; i < NUM_MACHINE_MODES; i++) |
| { |
| SET_HARD_REG_SET (ira_prohibited_mode_move_regs[i]); |
| for (j = 0; j < FIRST_PSEUDO_REGISTER; j++) |
| { |
| if (!targetm.hard_regno_mode_ok (j, (machine_mode) i)) |
| continue; |
| set_mode_and_regno (test_reg1, (machine_mode) i, j); |
| set_mode_and_regno (test_reg2, (machine_mode) i, j); |
| INSN_CODE (move_insn) = -1; |
| recog_memoized (move_insn); |
| if (INSN_CODE (move_insn) < 0) |
| continue; |
| extract_insn (move_insn); |
| /* We don't know whether the move will be in code that is optimized |
| for size or speed, so consider all enabled alternatives. */ |
| if (! constrain_operands (1, get_enabled_alternatives (move_insn))) |
| continue; |
| CLEAR_HARD_REG_BIT (ira_prohibited_mode_move_regs[i], j); |
| } |
| } |
| } |
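| |
| /* The table computed above is consulted as, e.g. (illustrative): |
| |
| if (TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs[mode], regno)) |
| ... no recognized move insn can copy MODE in REGNO ... */ |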
| |
| |
| |
| /* Extract INSN and return the set of alternatives that we should consider. |
| This excludes any alternatives whose constraints are obviously impossible |
| to meet (e.g. because the constraint requires a constant and the operand |
| is nonconstant). It also excludes alternatives that are bound to need |
| a spill or reload, as long as we have other alternatives that match |
| exactly. */ |
| alternative_mask |
| ira_setup_alts (rtx_insn *insn) |
| { |
| int nop, nalt; |
| bool curr_swapped; |
| const char *p; |
| int commutative = -1; |
| |
| extract_insn (insn); |
| preprocess_constraints (insn); |
| alternative_mask preferred = get_preferred_alternatives (insn); |
| alternative_mask alts = 0; |
| alternative_mask exact_alts = 0; |
| /* Check that a hard reg set is big enough to hold all the |
| alternatives. It is hard to imagine a situation in which the |
| assertion would fail. */ |
| ira_assert (recog_data.n_alternatives |
| <= (int) MAX (sizeof (HARD_REG_ELT_TYPE) * CHAR_BIT, |
| FIRST_PSEUDO_REGISTER)); |
| for (nop = 0; nop < recog_data.n_operands; nop++) |
| if (recog_data.constraints[nop][0] == '%') |
| { |
| commutative = nop; |
| break; |
| } |
| for (curr_swapped = false;; curr_swapped = true) |
| { |
| for (nalt = 0; nalt < recog_data.n_alternatives; nalt++) |
| { |
| if (!TEST_BIT (preferred, nalt) || TEST_BIT (exact_alts, nalt)) |
| continue; |
| |
| const operand_alternative *op_alt |
| = &recog_op_alt[nalt * recog_data.n_operands]; |
| int this_reject = 0; |
| for (nop = 0; nop < recog_data.n_operands; nop++) |
| { |
| int c, len; |
| |
| this_reject += op_alt[nop].reject; |
| |
| rtx op = recog_data.operand[nop]; |
| p = op_alt[nop].constraint; |
| if (*p == 0 || *p == ',') |
| continue; |
| |
| bool win_p = false; |
| do |
| switch (c = *p, len = CONSTRAINT_LEN (c, p), c) |
| { |
| case '#': |
| case ',': |
| c = '\0'; |
| /* FALLTHRU */ |
| case '\0': |
| len = 0; |
| break; |
| |
| case '%': |
| /* The commutative modifier is handled above. */ |
| break; |
| |
| case '0': case '1': case '2': case '3': case '4': |
| case '5': case '6': case '7': case '8': case '9': |
| { |
| rtx other = recog_data.operand[c - '0']; |
| if (MEM_P (other) |
| ? rtx_equal_p (other, op) |
| : REG_P (op) || SUBREG_P (op)) |
| goto op_success; |
| win_p = true; |
| } |
| break; |
| |
| case 'g': |
| goto op_success; |
| |
| default: |
| { |
| enum constraint_num cn = lookup_constraint (p); |
| switch (get_constraint_type (cn)) |
| { |
| case CT_REGISTER: |
| if (reg_class_for_constraint (cn) != NO_REGS) |
| { |
| if (REG_P (op) || SUBREG_P (op)) |
| goto op_success; |
| win_p = true; |
| } |
| break; |
| |
| case CT_CONST_INT: |
| if (CONST_INT_P (op) |
| && (insn_const_int_ok_for_constraint |
| (INTVAL (op), cn))) |
| goto op_success; |
| break; |
| |
| case CT_ADDRESS: |
| goto op_success; |
| |
| case CT_MEMORY: |
| case CT_SPECIAL_MEMORY: |
| if (MEM_P (op)) |
| goto op_success; |
| win_p = true; |
| break; |
| |
| case CT_FIXED_FORM: |
| if (constraint_satisfied_p (op, cn)) |
| goto op_success; |
| break; |
| } |
| break; |
| } |
| } |
| while (p += len, c); |
| if (!win_p) |
| break; |
| /* We can make the alternative match by spilling a register |
| to memory or loading something into a register. Count a |
| cost of one reload (the equivalent of the '?' constraint). */ |
| this_reject += 6; |
| op_success: |
| ; |
| } |
| |
| if (nop >= recog_data.n_operands) |
| { |
| alts |= ALTERNATIVE_BIT (nalt); |
| if (this_reject == 0) |
| exact_alts |= ALTERNATIVE_BIT (nalt); |
| } |
| } |
| if (commutative < 0) |
| break; |
| /* Swap the operands and later swap them back, so that recog_data |
| is left unchanged. */ |
| std::swap (recog_data.operand[commutative], |
| recog_data.operand[commutative + 1]); |
| if (curr_swapped) |
| break; |
| } |
| return exact_alts ? exact_alts : alts; |
| } |
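| |
| /* A usage sketch (hypothetical caller, for illustration only): |
| |
| alternative_mask alts = ira_setup_alts (insn); |
| for (int nalt = 0; nalt < recog_data.n_alternatives; nalt++) |
| if (TEST_BIT (alts, nalt)) |
| ... consider alternative NALT ... */ |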
| |
| /* Return the number of the output non-early-clobber operand which |
| must in any case be the same as the operand with number OP_NUM |
| (or a negative value if there is no such operand). ALTS is the |
| mask of alternatives that we should consider. */ |
| int |
| ira_get_dup_out_num (int op_num, alternative_mask alts) |
| { |
| int curr_alt, c, original, dup; |
| bool ignore_p, use_commut_op_p; |
| const char *str; |
| |
| if (op_num < 0 || recog_data.n_alternatives == 0) |
| return -1; |
| /* We should find duplications only for input operands. */ |
| if (recog_data.operand_type[op_num] != OP_IN) |
| return -1; |
| str = recog_data.constraints[op_num]; |
| use_commut_op_p = false; |
| for (;;) |
| { |
| rtx op = recog_data.operand[op_num]; |
| |
| for (curr_alt = 0, ignore_p = !TEST_BIT (alts, curr_alt), |
| original = -1;;) |
| { |
| c = *str; |
| if (c == '\0') |
| break; |
| if (c == '#') |
| ignore_p = true; |
| else if (c == ',') |
| { |
| curr_alt++; |
| ignore_p = !TEST_BIT (alts, curr_alt); |
| } |
| else if (! ignore_p) |
| switch (c) |
| { |
| case 'g': |
| goto fail; |
| default: |
| { |
| enum constraint_num cn = lookup_constraint (str); |
| enum reg_class cl = reg_class_for_constraint (cn); |
| if (cl != NO_REGS |
| && !targetm.class_likely_spilled_p (cl)) |
| goto fail; |
| if (constraint_satisfied_p (op, cn)) |
| goto fail; |
| break; |
| } |
| |
| case '0': case '1': case '2': case '3': case '4': |
| case '5': case '6': case '7': case '8': case '9': |
| if (original != -1 && original != c) |
| goto fail; |
| original = c; |
| break; |
| } |
| str += CONSTRAINT_LEN (c, str); |
| } |
| if (original == -1) |
| goto fail; |
| dup = original - '0'; |
| if (recog_data.operand_type[dup] == OP_OUT) |
| return dup; |
| fail: |
| if (use_commut_op_p) |
| break; |
| use_commut_op_p = true; |
| if (recog_data.constraints[op_num][0] == '%') |
| str = recog_data.constraints[op_num + 1]; |
| else if (op_num > 0 && recog_data.constraints[op_num - 1][0] == '%') |
| str = recog_data.constraints[op_num - 1]; |
| else |
| break; |
| } |
| return -1; |
| } |
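| |
| /* Example (schematic): for an insn whose constraints are |
| operand 0: "=r" operand 1: "0" operand 2: "r" |
| ira_get_dup_out_num (1, alts) returns 0, because input operand 1 |
| is constrained to match output operand 0. */ |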
| |
| |
| |
| /* Search forward to see if the source register of a copy insn dies |
| before either it or the destination register is modified, but don't |
| scan past the end of the basic block. If so, we can replace the |
| source with the destination and let the source die in the copy |
| insn. |
| |
| This will reduce the number of registers live in that range and may |
| enable coalescing of the destination and the source, often saving |
| one register in addition to eliminating a register-register copy. */ |
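| |
| /* For example (schematic, not target-specific RTL): |
| |
| r100 <- r200 ;; the copy insn |
| ... ;; neither r100 nor r200 is modified here |
| use r200 ;; r200 dies here |
| |
| becomes |
| |
| r100 <- r200 ;; r200 now dies in the copy |
| ... |
| use r100 |
| |
| after which r100 and r200 no longer conflict and may be |
| coalesced. */ |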
| |
| static void |
| decrease_live_ranges_number (void) |
| { |
| basic_block bb; |
| rtx_insn *insn; |
| rtx set, src, dest, dest_death, note; |
| rtx_insn *p, *q; |
| int sregno, dregno; |
| |
| if (! flag_expensive_optimizations) |
| return; |
| |
| if (ira_dump_file) |
| fprintf (ira_dump_file, "Starting decreasing number of live ranges...\n"); |
| |
| FOR_EACH_BB_FN (bb, cfun) |
| FOR_BB_INSNS (bb, insn) |
| { |
| set = single_set (insn); |
| if (! set) |
| continue; |
| src = SET_SRC (set); |
| dest = SET_DEST (set); |
| if (! REG_P (src) || ! REG_P (dest) |
| || find_reg_note (insn, REG_DEAD, src)) |
| continue; |
| sregno = REGNO (src); |
| dregno = REGNO (dest); |
| |
| /* We don't want to mess with hard regs if register classes |
| are small. */ |
| if (sregno == dregno |
| || (targetm.small_register_classes_for_mode_p (GET_MODE (src)) |
| && (sregno < FIRST_PSEUDO_REGISTER |
| || dregno < FIRST_PSEUDO_REGISTER)) |
| /* We don't see all updates to SP if they are in an |
| auto-inc memory reference, so we must disallow this |
| optimization on them. */ |
| || sregno == STACK_POINTER_REGNUM |
| || dregno == STACK_POINTER_REGNUM) |
| continue; |
| |
| dest_death = NULL_RTX; |
| |
| for (p = NEXT_INSN (insn); p; p = NEXT_INSN (p)) |
| { |
| if (! INSN_P (p)) |
| continue; |
| if (BLOCK_FOR_INSN (p) != bb) |
| break; |
| |
| if (reg_set_p (src, p) || reg_set_p (dest, p) |
| /* If SRC is an asm-declared register, it must not be |
| replaced in any asm. Unfortunately, the REG_EXPR |
| tree for the asm variable may be absent in the SRC |
| rtx, so we can't check the actual register |
| declaration easily (the asm operand will have it, |
| though). To avoid complicating the test for a rare |
| case, we just don't perform register replacement |
| for a hard reg mentioned in an asm. */ |
| || (sregno < FIRST_PSEUDO_REGISTER |
| && asm_noperands (PATTERN (p)) >= 0 |
| && reg_overlap_mentioned_p (src, PATTERN (p))) |
| /* Don't change hard registers used by a call. */ |
| || (CALL_P (p) && sregno < FIRST_PSEUDO_REGISTER |
| && find_reg_fusage (p, USE, src)) |
| /* Don't change a USE of a register. */ |
| || (GET_CODE (PATTERN (p)) == USE |
| && reg_overlap_mentioned_p (src, XEXP (PATTERN (p), 0)))) |
| break; |
| |
| /* See if all of SRC dies in P. This test is slightly |
| more conservative than it needs to be. */ |
| if ((note = find_regno_note (p, REG_DEAD, sregno)) |
| && GET_MODE (XEXP (note, 0)) == GET_MODE (src)) |
| { |
| int failed = 0; |
| |
| /* We can do the optimization. Scan forward from INSN |
| again, replacing regs as we go. Set FAILED if a |
| replacement can't be done. In that case, we can't |
| move the death note for SRC. This should be |
| rare. */ |
| |
| /* Scan the real insns from just after INSN up to and including P. */ |
| for (q = next_real_insn (insn); |
| q != next_real_insn (p); |
| q = next_real_insn (q)) |
| { |
| if (reg_overlap_mentioned_p (src, PATTERN (q))) |
| { |
| /* If SRC is a hard register, we might miss |
| some overlapping registers with |
| validate_replace_rtx, so we would have to |
| undo it. We can't if DEST is present in |
| the insn, so fail in that combination of |
| cases. */ |
| if (sregno < FIRST_PSEUDO_REGISTER |
| && reg_mentioned_p (dest, PATTERN (q))) |
| failed = 1; |
| |
| /* Attempt to replace all uses. */ |
| else if (!validate_replace_rtx (src, dest, q)) |
| failed = 1; |
| |
| /* If this succeeded, but some part of the |
| register is still present, undo the |
| replacement. */ |
| else if (sregno < FIRST_PSEUDO_REGISTER |
| && reg_overlap_mentioned_p (src, PATTERN (q))) |
| { |
| validate_replace_rtx (dest, src, q); |
| failed = 1; |
| } |
| } |
| |
| /* If DEST dies here, remove the death note and |
| save it for later. Make sure ALL of DEST dies |
| here; again, this is overly conservative. */ |
| if (! dest_death |
| && (dest_death = find_regno_note (q, REG_DEAD, dregno))) |
| { |
| if (GET_MODE (XEXP (dest_death, 0)) == GET_MODE (dest)) |
| remove_note (q, dest_death); |
| else |
| { |
| failed = 1; |
| dest_death = 0; |
| } |
| } |
| } |
| |
| if (! failed) |
| { |
| /* Move death note of SRC from P to INSN. */ |
| remove_note (p, note); |
| XEXP (note, 1) = REG_NOTES (insn); |
| REG_NOTES (insn) = note; |
| } |
| |
| /* DEST is also dead if INSN has a REG_UNUSED note for |
| DEST. */ |
| if (! dest_death |
| && (dest_death |
| = find_regno_note (insn, REG_UNUSED, dregno))) |
| { |
| PUT_REG_NOTE_KIND (dest_death, REG_DEAD); |
| remove_note (insn, dest_death); |
| } |
| |
| /* Put death note of DEST on P if we saw it die. */ |
| if (dest_death) |
| { |
| XEXP (dest_death, 1) = REG_NOTES (p); |
| REG_NOTES (p) = dest_death; |
| } |
| break; |
| } |
| |
| /* If SRC is a hard register which is set or killed in |
| some other way, we can't do this optimization. */ |
| else if (sregno < FIRST_PSEUDO_REGISTER && dead_or_set_p (p, src)) |
| break; |
| } |
| } |
| } |
| |
| |
| |
| /* Return true if REGNO is a particularly bad choice for reloading X. */ |
| static bool |
| ira_bad_reload_regno_1 (int regno, rtx x) |
| { |
| int x_regno, n, i; |
| ira_allocno_t a; |
| enum reg_class pref; |
| |
| /* We only deal with pseudo regs. */ |
| if (! x || GET_CODE (x) != REG) |
| return false; |
| |
| x_regno = REGNO (x); |
| if (x_regno < FIRST_PSEUDO_REGISTER) |
| return false; |
| |
| /* If the pseudo prefers REGNO explicitly, then do not consider |
| REGNO a bad spill choice. */ |
| pref = reg_preferred_class (x_regno); |
| if (reg_class_size[pref] == 1) |
| return !TEST_HARD_REG_BIT (reg_class_contents[pref], regno); |
| |
| /* If the pseudo conflicts with REGNO, then we consider REGNO a |
| poor choice for a reload regno. */ |
| a = ira_regno_allocno_map[x_regno]; |
| n = ALLOCNO_NUM_OBJECTS (a); |
| for (i = 0; i < n; i++) |
| { |
| ira_object_t obj = ALLOCNO_OBJECT (a, i); |
| if (TEST_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno)) |
| return true; |
| } |
| return false; |
| } |
| |
| /* Return true if REGNO is a particularly bad choice for reloading |
| IN or OUT. */ |
| bool |
| ira_bad_reload_regno (int regno, rtx in, rtx out) |
| { |
| return (ira_bad_reload_regno_1 (regno, in) |
| || ira_bad_reload_regno_1 (regno, out)); |
| } |
| |
| /* Record in crtl->asm_clobbers the hard registers clobbered by |
| asm statements. */ |
| static void |
| compute_regs_asm_clobbered (void) |
| { |
| basic_block bb; |
| |
| FOR_EACH_BB_FN (bb, cfun) |
| { |
| rtx_insn *insn; |
| FOR_BB_INSNS_REVERSE (bb, insn) |
| { |
| df_ref def; |
| |
| if (NONDEBUG_INSN_P (insn) && asm_noperands (PATTERN (insn)) >= 0) |
| FOR_EACH_INSN_DEF (def, insn) |
| { |
| unsigned int dregno = DF_REF_REGNO (def); |
| if (HARD_REGISTER_NUM_P (dregno)) |
| add_to_hard_reg_set (&crtl->asm_clobbers, |
| GET_MODE (DF_REF_REAL_REG (def)), |
| dregno); |
| } |
| } |
| } |
| } |
| |
| |
| /* Set up ELIMINABLE_REGSET, IRA_NO_ALLOC_REGS, and |
| REGS_EVER_LIVE. */ |
| void |
| ira_setup_eliminable_regset (void) |
| { |
| int i; |
| static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS; |
| int fp_reg_count = hard_regno_nregs (HARD_FRAME_POINTER_REGNUM, Pmode); |
| |
| /* Set up is_leaf, as frame_pointer_required may use it. This |
| function is called by sched_init before IRA if scheduling is |
| enabled. */ |
| crtl->is_leaf = leaf_function_p (); |
| |
| /* FIXME: If EXIT_IGNORE_STACK is set, we will not save and restore |
| sp for alloca. So we can't eliminate the frame pointer in that |
| case. At some point, we should improve this by emitting the |
| sp-adjusting insns for this case. */ |
| frame_pointer_needed |
| = (! flag_omit_frame_pointer |
| || (cfun->calls_alloca && EXIT_IGNORE_STACK) |
| /* We need the frame pointer to catch stack overflow exceptions if |
| the stack pointer is moving (as for the alloca case just above). */ |
| || (STACK_CHECK_MOVING_SP |
| && flag_stack_check |
| && flag_exceptions |
| && cfun->can_throw_non_call_exceptions) |
| || crtl->accesses_prior_frames |
| || (SUPPORTS_STACK_ALIGNMENT && crtl->stack_realign_needed) |
| || targetm.frame_pointer_required ()); |
| |
| /* The chance that FRAME_POINTER_NEEDED will change after inspecting |
| the RTL is very small. So if we use the frame pointer for RA and |
| the RTL actually prevents this, we will spill the pseudos assigned |
| to the frame pointer in LRA. */ |
| |
| if (frame_pointer_needed) |
| for (i = 0; i < fp_reg_count; i++) |
| df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM + i, true); |
| |
| ira_no_alloc_regs = no_unit_alloc_regs; |
| CLEAR_HARD_REG_SET (eliminable_regset); |
| |
| compute_regs_asm_clobbered (); |
| |
| /* Build the regset of all eliminable registers and show we can't |
| use those that we already know won't be eliminated. */ |
| for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++) |
| { |
| bool cannot_elim |
| = (! targetm.can_eliminate (eliminables[i].from, eliminables[i].to) |
| || (eliminables[i].to == STACK_POINTER_REGNUM && frame_pointer_needed)); |
| |
| if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, eliminables[i].from)) |
| { |
| SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from); |
| |
| if (cannot_elim) |
| SET_HARD_REG_BIT (ira_no_alloc_regs, eliminables[i].from); |
| } |
| else if (cannot_elim) |
| error ("%s cannot be used in %<asm%> here", |
| reg_names[eliminables[i].from]); |
| else |
| df_set_regs_ever_live (eliminables[i].from, true); |
| } |
| if (!HARD_FRAME_POINTER_IS_FRAME_POINTER) |
| { |
| for (i = 0; i < fp_reg_count; i++) |
| if (global_regs[HARD_FRAME_POINTER_REGNUM + i]) |
| /* Nothing to do: the register is already treated as live |
| where appropriate, and cannot be eliminated. */ |
| ; |
| else if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, |
| HARD_FRAME_POINTER_REGNUM + i)) |
| { |
| SET_HARD_REG_BIT (eliminable_regset, |
| HARD_FRAME_POINTER_REGNUM + i); |
| if (frame_pointer_needed) |
| SET_HARD_REG_BIT (ira_no_alloc_regs, |
| HARD_FRAME_POINTER_REGNUM + i); |
| } |
| else if (frame_pointer_needed) |
| error ("%s cannot be used in %<asm%> here", |
| reg_names[HARD_FRAME_POINTER_REGNUM + i]); |
| else |
| df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM + i, true); |
| } |
| } |
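| |
| /* For example (the exact pairs are target-defined): ELIMINABLE_REGS |
| typically contains { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }; |
| the loop above then records FRAME_POINTER_REGNUM in |
| eliminable_regset, and also in ira_no_alloc_regs when the |
| elimination is known to be impossible. */ |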
| |
| |
| |
| /* Vector of substitutions of register numbers, |
| used to map pseudo regs into hardware regs. |
| This is set up as a result of register allocation. |
| Element N is the hard reg assigned to pseudo reg N, |
| or is -1 if no hard reg was assigned. |
| If N is a hard reg number, element N is N. */ |
| short *reg_renumber; |
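| |
| /* For example (hypothetical numbers): reg_renumber[100] == 3 means |
| pseudo 100 was assigned hard register 3, while |
| reg_renumber[101] == -1 means pseudo 101 got no hard register. */ |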
| |
| /* Set up REG_RENUMBER and CALLER_SAVE_NEEDED (used by reload) from |
| the allocation found by IRA. */ |
| static void |
| setup_reg_renumber (void) |
| { |
| int regno, hard_regno; |
| ira_allocno_t a; |
| ira_allocno_iterator ai; |
| |
| caller_save_needed = 0; |
| FOR_EACH_ALLOCNO (a, ai) |
| { |
| if (ira_use_lra_p && ALLOCNO_CAP_MEMBER (a) != NULL) |
| continue; |
| /* There are no caps at this point. */ |
| ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL); |
| if (! ALLOCNO_ASSIGNED_P (a)) |
| /* It can happen if A is not referenced but is partially anticipated |
| somewhere in a region. */ |
| ALLOCNO_ASSIGNED_P (a) = true; |
| ira_free_allocno_updated_costs (a); |
| hard_regno = ALLOCNO_HARD_REGNO (a); |
| regno = ALLOCNO_REGNO (a); |
| reg_renumber[regno] = (hard_regno < 0 ? -1 : hard_regno); |
| if (hard_regno >= 0) |
| { |
| int i, nwords; |
| enum reg_class pclass; |
| ira_object_t obj; |
| |
| pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)]; |
| nwords = ALLOCNO_NUM_OBJECTS (a); |
| for (i = 0; i < nwords; i++) |
| { |
| obj = ALLOCNO_OBJECT (a, i); |
| OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |
| |= ~reg_class_contents[pclass]; |
| } |
| if (ira_need_caller_save_p (a, hard_regno)) |
| { |
| ira_assert (!optimize || flag_caller_saves |
| || (ALLOCNO_CALLS_CROSSED_NUM (a) |
| == ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)) |
| || regno >= ira_reg_equiv_len |
| || ira_equiv_no_lvalue_p (regno)); |
| caller_save_needed = 1; |
| } |
| } |
| } |
| } |
| |
| /* Set up allocno assignment flags for further allocation |
| improvements. */ |
| static void |
| setup_allocno_assignment_flags (void) |
| { |
| int hard_regno; |
| ira_allocno_t a; |
| ira_allocno_iterator ai; |
| |
| FOR_EACH_ALLOCNO (a, ai) |
| { |
| if (! ALLOCNO_ASSIGNED_P (a)) |
| /* It can happen if A is not referenced but is partially anticipated |
| somewhere in a region. */ |
| ira_free_allocno_updated_costs (a); |
| hard_regno = ALLOCNO_HARD_REGNO (a); |
| /* Don't assign hard registers to allocnos which are the destination |
| of a removed store at the end of a loop. It makes no sense to keep |
| the same value in different hard registers. It is also |
| impossible to assign hard registers correctly to such |
| allocnos because the cost info and the info about intersected |
| calls are incorrect for them. */ |
| ALLOCNO_ASSIGNED_P (a) = (hard_regno >= 0 |
| || ALLOCNO_EMIT_DATA (a)->mem_optimized_dest_p |
| || (ALLOCNO_MEMORY_COST (a) |
| - ALLOCNO_CLASS_COST (a)) < 0); |
| ira_assert |
| (hard_regno < 0 |
| || ira_hard_reg_in_set_p (hard_regno, ALLOCNO_MODE (a), |
| reg_class_contents[ALLOCNO_CLASS (a)])); |
| } |
| } |
| |
| /* Evaluate overall allocation cost and the costs for using hard |
| registers and memory for allocnos. */ |
| static void |
| calculate_allocation_cost (void) |
| { |
| int hard_regno, cost; |
| ira_allocno_t a; |
| ira_allocno_iterator ai; |
| |
| ira_overall_cost = ira_reg_cost = ira_mem_cost = 0; |
| FOR_EACH_ALLOCNO (a, ai) |
| { |
| hard_regno = ALLOCNO_HARD_REGNO (a); |
| ira_assert (hard_regno < 0 |
| || (ira_hard_reg_in_set_p |
| (hard_regno, ALLOCNO_MODE (a), |
| reg_class_contents[ALLOCNO_CLASS (a)]))); |
| if (hard_regno < 0) |
| { |
| cost = ALLOCNO_MEMORY_COST (a); |
| ira_mem_cost += cost; |
| } |
| else if (ALLOCNO_HARD_REG_COSTS (a) != NULL) |
| { |
| cost = (ALLOCNO_HARD_REG_COSTS (a) |
| [ira_class_hard_reg_index |
| [ALLOCNO_CLASS (a)][hard_regno]]); |
| ira_reg_cost += cost; |
| } |
| else |
| { |
| cost = ALLOCNO_CLASS_COST (a); |
| ira_reg_cost += cost; |
| } |
| ira_overall_cost += cost; |
| } |
| |
| if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL) |
| { |
| fprintf (ira_dump_file, |
| "+++Costs: overall %" PRId64 |
| ", reg %" PRId64 |
| ", mem %" PRId64 |
| ", ld %" PRId64 |
| ", st %" PRId64 |
| ", move %" PRId64, |
| ira_overall_cost, ira_reg_cost, ira_mem_cost, |
| ira_load_cost, ira_store_cost, ira_shuffle_cost); |
| fprintf (ira_dump_file, "\n+++ move loops %d, new jumps %d\n", |
| ira_move_loops_num, ira_additional_jumps_num); |
| } |
| |
| } |
| |
| #ifdef ENABLE_IRA_CHECKING |
| /* Check the correctness of the allocation. We need this because of |
| the complicated code that transforms the more-than-one-region |
| internal representation into a one-region representation. */ |
| static void |
| check_allocation (void) |
| { |
| ira_allocno_t a; |
| int hard_regno, nregs, conflict_nregs; |
| ira_allocno_iterator ai; |
| |
| FOR_EACH_ALLOCNO (a, ai) |
| { |
| int n = ALLOCNO_NUM_OBJECTS (a); |
| int i; |
| |
| if (ALLOCNO_CAP_MEMBER (a) != NULL |
| || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0) |
| continue; |
| nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (a)); |
| if (nregs == 1) |
| /* We allocated a single hard register. */ |
| n = 1; |
| else if (n > 1) |
| /* We allocated multiple hard registers, and we will test |
| conflicts at the granularity of single hard regs. */ |
| nregs = 1; |
| |
| for (i = 0; i < n; i++) |
| { |
| ira_object_t obj = ALLOCNO_OBJECT (a, i); |
| ira_object_t conflict_obj; |
| ira_object_conflict_iterator oci; |
| int this_regno = hard_regno; |
| if (n > 1) |
| { |
| if (REG_WORDS_BIG_ENDIAN) |
| this_regno += n - i - 1; |
| else |
| this_regno += i; |
| } |
| FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci) |
| { |
| ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj); |
| int conflict_hard_regno = ALLOCNO_HARD_REGNO (conflict_a); |
| if (conflict_hard_regno < 0) |
| continue; |
| |
| conflict_nregs = hard_regno_nregs (conflict_hard_regno, |
| ALLOCNO_MODE (conflict_a)); |
| |
| if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1 |
| && conflict_nregs == ALLOCNO_NUM_OBJECTS (conflict_a)) |
| { |
| if (REG_WORDS_BIG_ENDIAN) |
| conflict_hard_regno += (ALLOCNO_NUM_OBJECTS (conflict_a) |
| - OBJECT_SUBWORD (conflict_obj) - 1); |
| else |
| conflict_hard_regno += OBJECT_SUBWORD (conflict_obj); |
| conflict_nregs = 1; |
| } |
| |
| if ((conflict_hard_regno <= this_regno |
| && this_regno < conflict_hard_regno + conflict_nregs) |
| || (this_regno <= conflict_hard_regno |
| && conflict_hard_regno < this_regno + nregs)) |
| { |
| fprintf (stderr, "bad allocation for %d and %d\n", |
| ALLOCNO_REGNO (a), ALLOCNO_REGNO (conflict_a)); |
| gcc_unreachable (); |
| } |
| } |
| } |
| } |
| } |
| #endif |
| |
| /* Allocate REG_EQUIV_INIT. Set it up from IRA_REG_EQUIV, which must |
| already have been calculated. */ |
| static void |
| setup_reg_equiv_init (void) |
| { |
| int i; |
| int max_regno = max_reg_num (); |
| |
| for (i = 0; i < max_regno; i++) |
| reg_equiv_init (i) = ira_reg_equiv[i].init_insns; |
| } |
| |
| /* Update the equivalence info for register TO_REGNO after its value |
| was moved from FROM_REGNO. INSNS are the insns which were |
| generated for the movement. It is assumed that FROM_REGNO and |
| TO_REGNO always have the same value at the point of any move |
| containing such registers. This function is used to update equiv |
| info for register shuffles on the region borders and for caller |
| save/restore insns. */ |
| void |
| ira_update_equiv_info_by_shuffle_insn (int to_regno, int from_regno, rtx_insn *insns) |
| { |
| rtx_insn *insn; |
| rtx x, note; |
| |
| if (! ira_reg_equiv[from_regno].defined_p |
| && (! ira_reg_equiv[to_regno].defined_p |
| || ((x = ira_reg_equiv[to_regno].memory) != NULL_RTX |
| && ! MEM_READONLY_P (x)))) |
| return; |
| insn = insns; |
| if (NEXT_INSN (insn) != NULL_RTX) |
| { |
| if (! ira_reg_equiv[to_regno].defined_p) |
| { |
| ira_assert (ira_reg_equiv[to_regno].init_insns == NULL_RTX); |
| return; |
| } |
| ira_reg_equiv[to_regno].defined_p = false; |
| ira_reg_equiv[to_regno].memory |
| = ira_reg_equiv[to_regno].constant |
| = ira_reg_equiv[to_regno].invariant |
| = ira_reg_equiv[to_regno].init_insns = NULL; |
| if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL) |
| fprintf (ira_dump_file, |
| " Invalidating equiv info for reg %d\n", to_regno); |
| return; |
| } |
| /* It is possible that FROM_REGNO still has no equivalence because |
| in shuffles to_regno<-from_regno and from_regno<-to_regno the 2nd |
| insn was not processed yet. */ |
| if (ira_reg_equiv[from_regno].defined_p) |
| { |
| ira_reg_equiv[to_regno].defined_p = true; |
| if ((x = ira_reg_equiv[from_regno].memory) != NULL_RTX) |
| { |
| ira_assert (ira_reg_equiv[from_regno].invariant == NULL_RTX |
| && ira_reg_equiv[from_regno].constant == NULL_RTX); |
| ira_assert (ira_reg_equiv[to_regno].memory == NULL_RTX |
| || rtx_equal_p (ira_reg_equiv[to_regno].memory, x)); |
| ira_reg_equiv[to_regno].memory = x; |
| if (! MEM_READONLY_P (x)) |
| /* We don't add the insn to the init-insn list because a memory |
| equivalence only says which memory it is better to use |
| when the pseudo is spilled. */ |
| return; |
| } |
| else if ((x = ira_reg_equiv[from_regno].constant) != NULL_RTX) |
| { |
| ira_assert (ira_reg_equiv[from_regno].invariant == NULL_RTX); |
| ira_assert (ira_reg_equiv[to_regno].constant == NULL_RTX |
| || rtx_equal_p (ira_reg_equiv[to_regno].constant, x)); |
| ira_reg_equiv[to_regno].constant = x; |
| } |
| else |
| { |
| x = ira_reg_equiv[from_regno].invariant; |
| ira_assert (x != NULL_RTX); |
| ira_assert (ira_reg_equiv[to_regno].invariant == NULL_RTX |
| || rtx_equal_p (ira_reg_equiv[to_regno].invariant, x)); |
| ira_reg_equiv[to_regno].invariant = x; |
| } |
| if (find_reg_note (insn, REG_EQUIV, x) == NULL_RTX) |
| { |
| note = set_unique_reg_note (insn, REG_EQUIV, copy_rtx (x)); |
| gcc_assert (note != NULL_RTX); |
| if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL) |
| { |
| fprintf (ira_dump_file, |
| " Adding equiv note to insn %u for reg %d ", |
| INSN_UID (insn), to_regno); |
| dump_value_slim (ira_dump_file, x, 1); |
| fprintf (ira_dump_file, "\n"); |
| } |
| } |
| } |
| ira_reg_equiv[to_regno].init_insns |
| = gen_rtx_INSN_LIST (VOIDmode, insn, |
| ira_reg_equiv[to_regno].init_insns); |
| if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL) |
| fprintf (ira_dump_file, |
| " Adding equiv init move insn %u to reg %d\n", |
| INSN_UID (insn), to_regno); |
| } |
| |
| /* Fix the values of array REG_EQUIV_INIT after the live range |
| splitting done by IRA. */ |
| static void |
| fix_reg_equiv_init (void) |
| { |
| int max_regno = max_reg_num (); |
| int i, new_regno, max; |
| rtx set; |
| rtx_insn_list *x, *next, *prev; |
| rtx_insn *insn; |
| |
| if (max_regno_before_ira < max_regno) |
| { |
| max = vec_safe_length (reg_equivs); |
| grow_reg_equivs (); |
| for (i = FIRST_PSEUDO_REGISTER; i < max; i++) |
| for (prev = NULL, x = reg_equiv_init (i); |
| x != NULL_RTX; |
| x = next) |
| { |
| next = x->next (); |
| insn = x->insn (); |
| set = single_set (insn); |
| ira_assert (set != NULL_RTX |
| && (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set)))); |
| if (REG_P (SET_DEST (set)) |
| && ((int) REGNO (SET_DEST (set)) == i |
| || (int) ORIGINAL_REGNO (SET_DEST (set)) == i)) |
| new_regno = REGNO (SET_DEST (set)); |
| else if (REG_P (SET_SRC (set)) |
| && ((int) REGNO (SET_SRC (set)) == i |
| || (int) ORIGINAL_REGNO (SET_SRC (set)) == i)) |
| new_regno = REGNO (SET_SRC (set)); |
| else |
| gcc_unreachable (); |
| if (new_regno == i) |
| prev = x; |
| else |
| { |
| /* Remove the wrong list element. */ |
| if (prev == NULL_RTX) |
| reg_equiv_init (i) = next; |
| else |
| XEXP (prev, 1) = next; |
| XEXP (x, 1) = reg_equiv_init (new_regno); |
| reg_equiv_init (new_regno) = x; |
| } |
| } |
| } |
| } |
| |
| #ifdef ENABLE_IRA_CHECKING |
| /* Print redundant memory-memory copies. */ |
| static void |
| print_redundant_copies (void) |
| { |
| int hard_regno; |
| ira_allocno_t a; |
| ira_copy_t cp, next_cp; |
| ira_allocno_iterator ai; |
| |
| FOR_EACH_ALLOCNO (a, ai) |
| { |
| if (ALLOCNO_CAP_MEMBER (a) != NULL) |
| /* It is a cap. */ |
| continue; |
| hard_regno = ALLOCNO_HARD_REGNO (a); |
| if (hard_regno >= 0) |
| continue; |
| for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp) |
| if (cp->first == a) |
| next_cp = cp->next_first_allocno_copy; |
| else |
| { |
| next_cp = cp->next_second_allocno_copy; |
| if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL |
| && cp->insn != NULL_RTX |
| && ALLOCNO_HARD_REGNO (cp->first) == hard_regno) |
| fprintf (ira_dump_file, |
| " Redundant move from %d(freq %d):%d\n", |
| INSN_UID (cp->insn), cp->freq, hard_regno); |
| } |
| } |
| } |
| #endif |
| |
| /* Set up the preferred and alternative classes for the new |
| pseudo-registers created by IRA, starting with START. */ |
| static void |
| setup_preferred_alternate_classes_for_new_pseudos (int start) |
| { |
| int i, old_regno; |
| int max_regno = max_reg_num (); |
| |
| for (i = start; i < max_regno; i++) |
| { |
| old_regno = ORIGINAL_REGNO (regno_reg_rtx[i]); |
| ira_assert (i != old_regno); |
| setup_reg_classes (i, reg_preferred_class (old_regno), |
| reg_alternate_class (old_regno), |
| reg_allocno_class (old_regno)); |
| if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL) |
| fprintf (ira_dump_file, |
| " New r%d: setting preferred %s, alternative %s\n", |
| i, reg_class_names[reg_preferred_class (old_regno)], |
| reg_class_names[reg_alternate_class (old_regno)]); |
| } |
| } |
| |
| |
| /* The number of entries allocated in reg_info. */ |
| static int allocated_reg_info_size; |
| |
| /* Regional allocation can create new pseudo-registers. This function |
| expands some arrays for pseudo-registers. */ |
| static void |
| expand_reg_info (void) |
| { |
| int i; |
| int size = max_reg_num (); |
| |
| resize_reg_info (); |
| for (i = allocated_reg_info_size; i < size; i++) |
| setup_reg_classes (i, GENERAL_REGS, ALL_REGS, GENERAL_REGS); |
| setup_preferred_alternate_classes_for_new_pseudos (allocated_reg_info_size); |
| allocated_reg_info_size = size; |
| } |
| |
| /* Return TRUE if the register pressure in the function is too high. |
| It is used to decide whether stack slot sharing is worth doing. */ |
| static bool |
| too_high_register_pressure_p (void) |
| { |
| int i; |
| enum reg_class pclass; |
| |
| for (i = 0; i < ira_pressure_classes_num; i++) |
| { |
| pclass = ira_pressure_classes[i]; |
| if (ira_loop_tree_root->reg_pressure[pclass] > 10000) |
| return true; |
| } |
| return false; |
| } |
| |
| |
| |
| /* Indicate that hard register number FROM was eliminated and replaced with |
| an offset from hard register number TO. The status of hard registers live |
| at the start of a basic block is updated by replacing a use of FROM with |
| a use of TO. */ |
| |
| void |
| mark_elimination (int from, int to) |
| { |
| basic_block bb; |
| bitmap r; |
| |
| FOR_EACH_BB_FN (bb, cfun) |
| { |
| r = DF_LR_IN (bb); |
| if (bitmap_bit_p (r, from)) |
| { |
| bitmap_clear_bit (r, from); |
| bitmap_set_bit (r, to); |
| } |
| if (! df_live) |
| continue; |
| r = DF_LIVE_IN (bb); |
| if (bitmap_bit_p (r, from)) |
| { |
| bitmap_clear_bit (r, from); |
| bitmap_set_bit (r, to); |
| } |
| } |
| } |
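| |
| /* A usage sketch (hedged: the actual callers are the elimination |
| passes in reload/LRA): after replacing the frame pointer with the |
| stack pointer, one would call |
| |
| mark_elimination (FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM); |
| |
| so that the DF live-in sets refer to the stack pointer instead. */ |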
| |
| |
| |
| /* The length of the following array. */ |
| int ira_reg_equiv_len; |
| |
| /* Info about equiv. info for each register. */ |
| struct ira_reg_equiv_s *ira_reg_equiv; |
| |
| /* Expand ira_reg_equiv if necessary. */ |
| void |
| ira_expand_reg_equiv (void) |
| { |
| int old = ira_reg_equiv_len; |
| |
| if (ira_reg_equiv_len > max_reg_num ()) |
| return; |
| ira_reg_equiv_len = max_reg_num () * 3 / 2 + 1; |
| ira_reg_equiv |
| = (struct ira_reg_equiv_s *) xrealloc (ira_reg_equiv, |
| ira_reg_equiv_len |
| * sizeof (struct ira_reg_equiv_s)); |
| gcc_assert (old < ira_reg_equiv_len); |
| memset (ira_reg_equiv + old, 0, |
| sizeof (struct ira_reg_equiv_s) * (ira_reg_equiv_len - old)); |
| } |
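| |
| /* The table grows geometrically (to 1.5 * max_reg_num () + 1 entries) |
| so that repeated creation of new pseudos amortizes to a constant |
| number of reallocations per register; the new tail is zeroed. */ |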
| |
| static void |
| init_reg_equiv (void) |
| { |
| ira_reg_equiv_len = 0; |
| ira_reg_equiv = NULL; |
| ira_expand_reg_equiv (); |
| } |
| |
| static void |
| finish_reg_equiv (void) |
| { |
| free (ira_reg_equiv); |
| } |
| |
| |
| |
| struct equivalence |
| { |
| /* Set when a REG_EQUIV note is found or created. Used to |
| keep track of what memory accesses might be created later, |
| e.g. by reload. */ |
| rtx replacement; |
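| /* Pointer to the SET_SRC of the single set that initializes this |
| register; the REPLACE flag below says whether an attempt should |
| be made to substitute it for uses of the register. */ |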
| rtx *src_p; |
| |
| /* The list of the insns that initialize this register. |
| |
| NULL indicates we know nothing about this register's equivalence |
| properties. |
| |
| An INSN_LIST with a NULL insn indicates this pseudo is already |
| known to not have a valid equivalence. */ |
| rtx_insn_list *init_insns; |
| |
| /* Loop depth is used to recognize equivalences which appear |
| to be present within the same loop (or in an inner loop). */ |
| short loop_depth; |
| /* Nonzero if this had a preexisting REG_EQUIV note. */ |
| unsigned char is_arg_equivalence : 1; |
| /* Set when an attempt should be made to replace a register |
| with the associated src_p entry. */ |
| unsigned char replace : 1; |
| /* Set if this register has no known equivalence. */ |
| unsigned char no_equiv : 1; |
| /* Set if this register is mentioned in a paradoxical subreg. */ |
| unsigned char pdx_subregs : 1; |
| }; |
| |
| /* reg_equiv[N] (where N is a pseudo reg number) is the equivalence |
| structure for that register. */ |
| static struct equivalence *reg_equiv; |
| |
| /* Used for communication between the following two functions. */ |
| struct equiv_mem_data |
| { |
| /* A MEM that we wish to ensure remains unchanged. */ |
| rtx equiv_mem; |
| |
| /* Set true if EQUIV_MEM is modified. */ |
| bool equiv_mem_modified; |
| }; |
| |
| /* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified. |
| Called via note_stores. */ |
| static void |
| validate_equiv_mem_from_store (rtx dest, const_rtx set ATTRIBUTE_UNUSED, |
| void *data) |
| { |
| struct equiv_mem_data *info = (struct equiv_mem_data *) data; |
| |
| if ((REG_P (dest) |
| && reg_overlap_mentioned_p (dest, info->equiv_mem)) |
| || (MEM_P (dest) |
| && anti_dependence (info->equiv_mem, dest))) |
| info->equiv_mem_modified = true; |
| } |
| |
| enum valid_equiv { valid_none, valid_combine, valid_reload }; |
| |
| /* Verify that no store between START and the death of REG invalidates |
| MEMREF. MEMREF is invalidated by modifying a register used in MEMREF, |
| by storing into an overlapping memory location, or with a non-const |
| CALL_INSN. |
| |
| Return VALID_RELOAD if MEMREF remains valid for both reload and |
| combine_and_move_insns, VALID_COMBINE if it is valid only for |
| combine_and_move_insns, and VALID_NONE otherwise. */ |
| static enum valid_equiv |
| validate_equiv_mem (rtx_insn *start, rtx reg, rtx memref) |
| { |
| rtx_insn *insn; |
| rtx note; |
| struct equiv_mem_data info = { memref, false }; |
| enum valid_equiv ret = valid_reload; |
| |
| /* If the memory reference has side effects or is volatile, it isn't a |
| valid equivalence. */ |
| if (side_effects_p (memref)) |
| return valid_none; |
| |
| for (insn = start; insn; insn = NEXT_INSN (insn)) |
| { |
| if (!INSN_P (insn)) |
| continue; |
| |
| if (find_reg_note (insn, REG_DEAD, reg)) |
| return ret; |
| |
| if (CALL_P (insn)) |
| { |
| /* We can combine a reg def from one insn into a reg use in |
| another over a call if the memory is readonly or the call |
| const/pure. However, we can't set reg_equiv notes up for |
| reload over any call. The problem is the equivalent form |
| may reference a pseudo which gets assigned a call |
| clobbered hard reg. When we later replace REG with its |
| equivalent form, the value in the call-clobbered reg has |
| been changed and all hell breaks loose. */ |
| ret = valid_combine; |
| if (!MEM_READONLY_P (memref) |
| && !RTL_CONST_OR_PURE_CALL_P (insn)) |
| return valid_none; |
| } |
| |
| note_stores (insn, validate_equiv_mem_from_store, &info); |
| if (info.equiv_mem_modified) |
| return valid_none; |
| |
| /* If a register mentioned in MEMREF is modified via an |
| auto-increment, we lose the equivalence. Do the same if one |
| dies; although we could extend the life, it doesn't seem worth |
| the trouble. */ |
| |
| for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) |
| if ((REG_NOTE_KIND (note) == REG_INC |
| || REG_NOTE_KIND (note) == REG_DEAD) |
| && REG_P (XEXP (note, 0)) |
| && reg_overlap_mentioned_p (XEXP (note, 0), memref)) |
| return valid_none; |
| } |
| |
| return valid_none; |
| } |
| |
| /* Return nonzero if X might vary; zero if X is known to be invariant. */ |
| static int |
| equiv_init_varies_p (rtx x) |
| { |
| RTX_CODE code = GET_CODE (x); |
| int i; |
| const char *fmt; |
| |
| switch (code) |
| { |
| case MEM: |
| return !MEM_READONLY_P (x) || equiv_init_varies_p (XEXP (x, 0)); |
| |
| case CONST: |
| CASE_CONST_ANY: |
| case SYMBOL_REF: |
| case LABEL_REF: |
| return 0; |
| |
| case REG: |
| return reg_equiv[REGNO (x)].replace == 0 && rtx_varies_p (x, 0); |
| |
| case ASM_OPERANDS: |
| if (MEM_VOLATILE_P (x)) |
| return 1; |
| |
| /* Fall through. */ |
| |
| default: |
| break; |
| } |
| |
| fmt = GET_RTX_FORMAT (code); |
| for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
| if (fmt[i] == 'e') |
| { |
| if (equiv_init_varies_p (XEXP (x, i))) |
| return 1; |
| } |
| else if (fmt[i] == 'E') |
| { |
| int j; |
| for (j = 0; j < XVECLEN (x, i); j++) |
| if (equiv_init_varies_p (XVECEXP (x, i, j))) |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
| /* Return nonzero if X (used to initialize register REGNO) is movable. |
| X is movable only if the registers it uses have equivalent |
| initializations that appear to be within the same loop (or in an |
| inner loop) and are movable, or if they are not candidates for |
| local_alloc and do not vary. */ |
| static int |
| equiv_init_movable_p (rtx x, int regno) |
| { |
| int i, j; |
| const char *fmt; |
| enum rtx_code code = GET_CODE (x); |
| |
| switch (code) |
| { |
| case SET: |
| return equiv_init_movable_p (SET_SRC (x), regno); |
| |
| case CC0: |
| case CLOBBER: |
| return 0; |
| |
| case PRE_INC: |
| case PRE_DEC: |
| case POST_INC: |
| case POST_DEC: |
| case PRE_MODIFY: |
| case POST_MODIFY: |
| return 0; |
| |
| case REG: |
| return ((reg_equiv[REGNO (x)].loop_depth >= reg_equiv[regno].loop_depth |
| && reg_equiv[REGNO (x)].replace) |
| || (REG_BASIC_BLOCK (REGNO (x)) < NUM_FIXED_BLOCKS |
| && ! rtx_varies_p (x, 0))); |
| |
| case UNSPEC_VOLATILE: |
| return 0; |
| |
| case ASM_OPERANDS: |
| if (MEM_VOLATILE_P (x)) |
| return 0; |
| |
| /* Fall through. */ |
| |
| default: |
| break; |
| } |
| |
| fmt = GET_RTX_FORMAT (code); |
| for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
| switch (fmt[i]) |
| { |
| case 'e': |
| if (! equiv_init_movable_p (XEXP (x, i), regno)) |
| return 0; |
| break; |
| case 'E': |
| for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
| if (! equiv_init_movable_p (XVECEXP (x, i, j), regno)) |
| return 0; |
| break; |
| } |
| |
| return 1; |
| } |
| |
| static bool memref_referenced_p (rtx memref, rtx x, bool read_p); |
| |
| /* Auxiliary function for memref_referenced_p. Process the setting |
| of X when checking whether a store to MEMREF is referenced. */ |
| static bool |
| process_set_for_memref_referenced_p (rtx memref, rtx x) |
| { |
| /* If we are setting a MEM, it doesn't count (its address does), but any |
| other SET_DEST that has a MEM in it is referencing the MEM. */ |
| if (MEM_P (x)) |
| { |
| if (memref_referenced_p (memref, XEXP (x, 0), true)) |
| return true; |
| } |
| else if (memref_referenced_p (memref, x, false)) |
| return true; |
| |
| return false; |
| } |
| |
| /* Return TRUE if X references a memory location (as a read if READ_P) |
| that would be affected by a store to MEMREF. */ |
| static bool |
| memref_referenced_p (rtx memref, rtx x, bool read_p) |
| { |
| int i, j; |
| const char *fmt; |
| enum rtx_code code = GET_CODE (x); |
| |
| switch (code) |
| { |
| case CONST: |
| case LABEL_REF: |
| case SYMBOL_REF: |
| CASE_CONST_ANY: |
| case PC: |
| case CC0: |
| c
|