| /* Global common subexpression elimination/Partial redundancy elimination |
| and global constant/copy propagation for GNU compiler. |
| Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 |
| Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 2, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING. If not, write to the Free |
| Software Foundation, 59 Temple Place - Suite 330, Boston, MA |
| 02111-1307, USA. */ |
| |
| /* TODO |
| - reordering of memory allocation and freeing to be more space efficient |
| - do rough calc of how many regs are needed in each block, and a rough |
| calc of how many regs are available in each class and use that to |
| throttle back the code in cases where RTX_COST is minimal. |
| - a store to the same address as a load does not kill the load if the |
| source of the store is also the destination of the load. Handling this |
| allows more load motion, particularly out of loops. |
| - ability to realloc sbitmap vectors would allow one initial computation |
| of reg_set_in_block with only subsequent additions, rather than |
| recomputing it for each pass |
| |
| */ |
| |
| /* References searched while implementing this. |
| |
| Compilers Principles, Techniques and Tools |
| Aho, Sethi, Ullman |
| Addison-Wesley, 1988 |
| |
| Global Optimization by Suppression of Partial Redundancies |
| E. Morel, C. Renvoise |
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979
| |
| A Portable Machine-Independent Global Optimizer - Design and Measurements |
| Frederick Chow |
| Stanford Ph.D. thesis, Dec. 1983 |
| |
| A Fast Algorithm for Code Movement Optimization |
| D.M. Dhamdhere |
| SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988 |
| |
| A Solution to a Problem with Morel and Renvoise's |
| Global Optimization by Suppression of Partial Redundancies |
| K-H Drechsler, M.P. Stadel |
| ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988 |
| |
| Practical Adaptation of the Global Optimization |
| Algorithm of Morel and Renvoise |
| D.M. Dhamdhere |
| ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991 |
| |
| Efficiently Computing Static Single Assignment Form and the Control |
| Dependence Graph |
| R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck |
| ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991 |
| |
| Lazy Code Motion |
| J. Knoop, O. Ruthing, B. Steffen |
| ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI |
| |
| What's In a Region? Or Computing Control Dependence Regions in Near-Linear |
| Time for Reducible Flow Control |
| Thomas Ball |
| ACM Letters on Programming Languages and Systems, |
| Vol. 2, Num. 1-4, Mar-Dec 1993 |
| |
| An Efficient Representation for Sparse Sets |
| Preston Briggs, Linda Torczon |
| ACM Letters on Programming Languages and Systems, |
| Vol. 2, Num. 1-4, Mar-Dec 1993 |
| |
| A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion |
| K-H Drechsler, M.P. Stadel |
| ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993 |
| |
| Partial Dead Code Elimination |
| J. Knoop, O. Ruthing, B. Steffen |
| ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994 |
| |
| Effective Partial Redundancy Elimination |
| P. Briggs, K.D. Cooper |
| ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994 |
| |
| The Program Structure Tree: Computing Control Regions in Linear Time |
| R. Johnson, D. Pearson, K. Pingali |
| ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994 |
| |
| Optimal Code Motion: Theory and Practice |
| J. Knoop, O. Ruthing, B. Steffen |
| ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994 |
| |
   The Power of Assignment Motion
| J. Knoop, O. Ruthing, B. Steffen |
| ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI |
| |
   Global Code Motion / Global Value Numbering
| C. Click |
| ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI |
| |
| Value Driven Redundancy Elimination |
| L.T. Simpson |
| Rice University Ph.D. thesis, Apr. 1996 |
| |
| Value Numbering |
| L.T. Simpson |
| Massively Scalar Compiler Project, Rice University, Sep. 1996 |
| |
| High Performance Compilers for Parallel Computing |
| Michael Wolfe |
| Addison-Wesley, 1996 |
| |
| Advanced Compiler Design and Implementation |
| Steven Muchnick |
| Morgan Kaufmann, 1997 |
| |
| Building an Optimizing Compiler |
| Robert Morgan |
| Digital Press, 1998 |
| |
| People wishing to speed up the code here should read: |
| Elimination Algorithms for Data Flow Analysis |
| B.G. Ryder, M.C. Paull |
| ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986 |
| |
| How to Analyze Large Programs Efficiently and Informatively |
| D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck |
| ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI |
| |
| People wishing to do something different can find various possibilities |
| in the above papers and elsewhere. |
| */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "tm.h" |
| #include "toplev.h" |
| |
| #include "rtl.h" |
| #include "tree.h" |
| #include "tm_p.h" |
| #include "regs.h" |
| #include "hard-reg-set.h" |
| #include "flags.h" |
| #include "real.h" |
| #include "insn-config.h" |
| #include "recog.h" |
| #include "basic-block.h" |
| #include "output.h" |
| #include "function.h" |
| #include "expr.h" |
| #include "except.h" |
| #include "ggc.h" |
| #include "params.h" |
| #include "cselib.h" |
| #include "intl.h" |
| #include "obstack.h" |
| |
/* Propagate flow information through back edges and thus enable PRE to
   move loop-invariant calculations out of loops.
| |
| Originally this tended to create worse overall code, but several |
| improvements during the development of PRE seem to have made following |
| back edges generally a win. |
| |
   Note that much of the loop invariant code motion done here would normally
   be done by loop.c, which has more heuristics for when to move invariants
| out of loops. At some point we might need to move some of those |
| heuristics into gcse.c. */ |
| |
| /* We support GCSE via Partial Redundancy Elimination. PRE optimizations |
| are a superset of those done by GCSE. |
| |
| We perform the following steps: |
| |
| 1) Compute basic block information. |
| |
| 2) Compute table of places where registers are set. |
| |
| 3) Perform copy/constant propagation. |
| |
| 4) Perform global cse. |
| |
| 5) Perform another pass of copy/constant propagation. |
| |
| Two passes of copy/constant propagation are done because the first one |
| enables more GCSE and the second one helps to clean up the copies that |
| GCSE creates. This is needed more for PRE than for Classic because Classic |
| GCSE will try to use an existing register containing the common |
| subexpression rather than create a new one. This is harder to do for PRE |
| because of the code motion (which Classic GCSE doesn't do). |
| |
| Expressions we are interested in GCSE-ing are of the form |
| (set (pseudo-reg) (expression)). |
| Function want_to_gcse_p says what these are. |
| |
| PRE handles moving invariant expressions out of loops (by treating them as |
| partially redundant). |
| |
| Eventually it would be nice to replace cse.c/gcse.c with SSA (static single |
| assignment) based GVN (global value numbering). L. T. Simpson's paper |
| (Rice University) on value numbering is a useful reference for this. |
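
   As a minimal illustration (hypothetical RTL, register numbers invented),
   suppose block BB3 is reached from BB1, which already computes
   (plus (reg 1) (reg 2)), and from BB2, which does not:

     before PRE:                         after PRE:

     BB1: (set (reg 100)                 BB1: (set (reg 100)
                (plus (reg 1) (reg 2)))             (plus (reg 1) (reg 2)))
                                              (set (reg 200) (reg 100))
     BB2: ...                            BB2: ...
                                              (set (reg 200)
                                                   (plus (reg 1) (reg 2)))
     BB3: (set (reg 101)                 BB3: (set (reg 101) (reg 200))
                (plus (reg 1) (reg 2)))

   The computation in BB3 is partially redundant; inserting it on the BB2
   path makes it fully redundant, and the redundant computation is then
   replaced by a copy from the new pseudo (reg 200).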
| |
| ********************** |
| |
| We used to support multiple passes but there are diminishing returns in |
| doing so. The first pass usually makes 90% of the changes that are doable. |
| A second pass can make a few more changes made possible by the first pass. |
| Experiments show any further passes don't make enough changes to justify |
| the expense. |
| |
| A study of spec92 using an unlimited number of passes: |
| [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83, |
| [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2, |
| [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1 |
| |
   It was found that doing copy propagation between passes enables further
   substitutions.
| |
   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.  The parameter
   max-gcse-passes can be modified if one wants to experiment.
| |
| ********************** |
| |
| The steps for PRE are: |
| |
| 1) Build the hash table of expressions we wish to GCSE (expr_hash_table). |
| |
| 2) Perform the data flow analysis for PRE. |
| |
| 3) Delete the redundant instructions |
| |
| 4) Insert the required copies [if any] that make the partially |
| redundant instructions fully redundant. |
| |
| 5) For other reaching expressions, insert an instruction to copy the value |
| to a newly created pseudo that will reach the redundant instruction. |
| |
| The deletion is done first so that when we do insertions we |
| know which pseudo reg to use. |
| |
| Various papers have argued that PRE DFA is expensive (O(n^2)) and others |
| argue it is not. The number of iterations for the algorithm to converge |
| is typically 2-4 so I don't view it as that expensive (relatively speaking). |
| |
| PRE GCSE depends heavily on the second CSE pass to clean up the copies |
| we create. To make an expression reach the place where it's redundant, |
| the result of the expression is copied to a new register, and the redundant |
| expression is deleted by replacing it with this new register. Classic GCSE |
| doesn't have this problem as much as it computes the reaching defs of |
| each register in each block and thus can try to use an existing register. |
| |
| ********************** |
| |
   A fair bit of simplicity is gained by creating small functions for simple
   tasks, even when the function is only called in one place.  This may
   measurably slow things down [or may not] by creating more function call
   overhead than is necessary.  The source is laid out so that it's trivial
   to make the affected functions inline, so that one can measure what
   speedup, if any, can be achieved, and maybe later, when things settle,
   things can be rearranged.
| |
| Help stamp out big monolithic functions! */ |
| |
| /* GCSE global vars. */ |
| |
| /* -dG dump file. */ |
| static FILE *gcse_file; |
| |
| /* Note whether or not we should run jump optimization after gcse. We |
| want to do this for two cases. |
| |
| * If we changed any jumps via cprop. |
| |
| * If we added any labels via edge splitting. */ |
| |
| static int run_jump_opt_after_gcse; |
| |
| /* Bitmaps are normally not included in debugging dumps. |
| However it's useful to be able to print them from GDB. |
| We could create special functions for this, but it's simpler to |
| just allow passing stderr to the dump_foo fns. Since stderr can |
| be a macro, we store a copy here. */ |
| static FILE *debug_stderr; |
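
/* As an example, the expression hash table can be printed from a debugger
   along these lines (an illustrative session, not a recorded one):

     (gdb) call dump_hash_table (debug_stderr, "expr", &expr_hash_table)  */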
| |
| /* An obstack for our working variables. */ |
| static struct obstack gcse_obstack; |
| |
| struct reg_use {rtx reg_rtx; }; |
| |
| /* Hash table of expressions. */ |
| |
| struct expr |
| { |
| /* The expression (SET_SRC for expressions, PATTERN for assignments). */ |
| rtx expr; |
| /* Index in the available expression bitmaps. */ |
| int bitmap_index; |
| /* Next entry with the same hash. */ |
| struct expr *next_same_hash; |
| /* List of anticipatable occurrences in basic blocks in the function. |
| An "anticipatable occurrence" is one that is the first occurrence in the |
| basic block, the operands are not modified in the basic block prior |
| to the occurrence and the output is not used between the start of |
| the block and the occurrence. */ |
| struct occr *antic_occr; |
  /* List of available occurrences in basic blocks in the function.
| An "available occurrence" is one that is the last occurrence in the |
| basic block and the operands are not modified by following statements in |
| the basic block [including this insn]. */ |
| struct occr *avail_occr; |
| /* Non-null if the computation is PRE redundant. |
| The value is the newly created pseudo-reg to record a copy of the |
| expression in all the places that reach the redundant copy. */ |
| rtx reaching_reg; |
| }; |
| |
| /* Occurrence of an expression. |
| There is one per basic block. If a pattern appears more than once the |
| last appearance is used [or first for anticipatable expressions]. */ |
| |
| struct occr |
| { |
| /* Next occurrence of this expression. */ |
| struct occr *next; |
| /* The insn that computes the expression. */ |
| rtx insn; |
| /* Nonzero if this [anticipatable] occurrence has been deleted. */ |
| char deleted_p; |
| /* Nonzero if this [available] occurrence has been copied to |
| reaching_reg. */ |
| /* ??? This is mutually exclusive with deleted_p, so they could share |
| the same byte. */ |
| char copied_p; |
| }; |
| |
| /* Expression and copy propagation hash tables. |
| Each hash table is an array of buckets. |
| ??? It is known that if it were an array of entries, structure elements |
| `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is |
| not clear whether in the final analysis a sufficient amount of memory would |
| be saved as the size of the available expression bitmaps would be larger |
| [one could build a mapping table without holes afterwards though]. |
| Someday I'll perform the computation and figure it out. */ |
| |
| struct hash_table |
| { |
| /* The table itself. |
| This is an array of `expr_hash_table_size' elements. */ |
| struct expr **table; |
| |
| /* Size of the hash table, in elements. */ |
| unsigned int size; |
| |
| /* Number of hash table elements. */ |
| unsigned int n_elems; |
| |
  /* Whether the table is the expression hash table or the copy
     propagation one.  */
| int set_p; |
| }; |
| |
| /* Expression hash table. */ |
| static struct hash_table expr_hash_table; |
| |
| /* Copy propagation hash table. */ |
| static struct hash_table set_hash_table; |
| |
| /* Mapping of uids to cuids. |
| Only real insns get cuids. */ |
| static int *uid_cuid; |
| |
| /* Highest UID in UID_CUID. */ |
| static int max_uid; |
| |
| /* Get the cuid of an insn. */ |
| #ifdef ENABLE_CHECKING |
#define INSN_CUID(INSN) \
  (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
| #else |
| #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)]) |
| #endif |
| |
| /* Number of cuids. */ |
| static int max_cuid; |
| |
| /* Mapping of cuids to insns. */ |
| static rtx *cuid_insn; |
| |
| /* Get insn from cuid. */ |
| #define CUID_INSN(CUID) (cuid_insn[CUID]) |
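
/* For example (illustrative UIDs): if the only real insns in a function
   have UIDs 3, 7 and 8, their cuids are 0, 1 and 2 respectively, and any
   notes or barriers between them map to the cuid that the next real insn
   will receive.  */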
| |
| /* Maximum register number in function prior to doing gcse + 1. |
| Registers created during this pass have regno >= max_gcse_regno. |
| This is named with "gcse" to not collide with global of same name. */ |
| static unsigned int max_gcse_regno; |
| |
| /* Table of registers that are modified. |
| |
| For each register, each element is a list of places where the pseudo-reg |
| is set. |
| |
| For simplicity, GCSE is done on sets of pseudo-regs only. PRE GCSE only |
| requires knowledge of which blocks kill which regs [and thus could use |
| a bitmap instead of the lists `reg_set_table' uses]. |
| |
   `reg_set_table' could be turned into an array of bitmaps (num-bbs x
   num-regs) [however perhaps it may be useful to keep the data as is].  One
| advantage of recording things this way is that `reg_set_table' is fairly |
| sparse with respect to pseudo regs but for hard regs could be fairly dense |
| [relatively speaking]. And recording sets of pseudo-regs in lists speeds |
| up functions like compute_transp since in the case of pseudo-regs we only |
| need to iterate over the number of times a pseudo-reg is set, not over the |
| number of basic blocks [clearly there is a bit of a slow down in the cases |
| where a pseudo is set more than once in a block, however it is believed |
| that the net effect is to speed things up]. This isn't done for hard-regs |
| because recording call-clobbered hard-regs in `reg_set_table' at each |
| function call can consume a fair bit of memory, and iterating over |
| hard-regs stored this way in compute_transp will be more expensive. */ |
| |
| typedef struct reg_set |
| { |
| /* The next setting of this register. */ |
| struct reg_set *next; |
| /* The insn where it was set. */ |
| rtx insn; |
| } reg_set; |
| |
| static reg_set **reg_set_table; |
| |
| /* Size of `reg_set_table'. |
| The table starts out at max_gcse_regno + slop, and is enlarged as |
| necessary. */ |
| static int reg_set_table_size; |
| |
| /* Amount to grow `reg_set_table' by when it's full. */ |
| #define REG_SET_TABLE_SLOP 100 |
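
/* A minimal sketch (not compiled; the helper name is invented) of how a
   consumer such as compute_transp can walk `reg_set_table' for a
   pseudo-reg, visiting only the insns that set it rather than every
   basic block:  */
#if 0
static void
example_reset_transp (int regno, int indx, sbitmap *transp)
{
  struct reg_set *r;

  /* Clear the transparency bit of expression INDX in each block
     containing a set of REGNO.  */
  for (r = reg_set_table[regno]; r != NULL; r = r->next)
    RESET_BIT (transp[BLOCK_NUM (r->insn)], indx);
}
#endif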
| |
/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except themselves,
   i.e. loads and stores to a single location.  We can then allow movement
   of these MEM refs with a little special allowance: all stores copy the
   same value to the reaching reg used for the loads.  This means all values
   used to store into memory must have no side effects so we can re-issue
   the setter value.
   Store motion uses this structure as an expression table to track stores
   which look interesting, and might be movable towards the exit block.  */
| |
| struct ls_expr |
| { |
| struct expr * expr; /* Gcse expression reference for LM. */ |
| rtx pattern; /* Pattern of this mem. */ |
| rtx pattern_regs; /* List of registers mentioned by the mem. */ |
| rtx loads; /* INSN list of loads seen. */ |
| rtx stores; /* INSN list of stores seen. */ |
| struct ls_expr * next; /* Next in the list. */ |
| int invalid; /* Invalid for some reason. */ |
| int index; /* If it maps to a bitmap index. */ |
| unsigned int hash_index; /* Index when in a hash table. */ |
| rtx reaching_reg; /* Register to use when re-writing. */ |
| }; |
| |
| /* Array of implicit set patterns indexed by basic block index. */ |
| static rtx *implicit_sets; |
| |
| /* Head of the list of load/store memory refs. */ |
| static struct ls_expr * pre_ldst_mems = NULL; |
| |
| /* Bitmap containing one bit for each register in the program. |
| Used when performing GCSE to track which registers have been set since |
| the start of the basic block. */ |
| static regset reg_set_bitmap; |
| |
| /* For each block, a bitmap of registers set in the block. |
| This is used by expr_killed_p and compute_transp. |
| It is computed during hash table computation and not by compute_sets |
| as it includes registers added since the last pass (or between cprop and |
| gcse) and it's currently not easy to realloc sbitmap vectors. */ |
| static sbitmap *reg_set_in_block; |
| |
/* Array, indexed by basic block number, of lists of insns which modify
   memory within the corresponding block.  */
| static rtx * modify_mem_list; |
| bitmap modify_mem_list_set; |
| |
| /* This array parallels modify_mem_list, but is kept canonicalized. */ |
| static rtx * canon_modify_mem_list; |
bitmap canon_modify_mem_list_set;

/* Various variables for statistics gathering.  */
| |
| /* Memory used in a pass. |
| This isn't intended to be absolutely precise. Its intent is only |
| to keep an eye on memory usage. */ |
| static int bytes_used; |
| |
| /* GCSE substitutions made. */ |
| static int gcse_subst_count; |
| /* Number of copy instructions created. */ |
| static int gcse_create_count; |
| /* Number of constants propagated. */ |
| static int const_prop_count; |
/* Number of copies propagated.  */
| static int copy_prop_count; |
| |
| /* These variables are used by classic GCSE. |
| Normally they'd be defined a bit later, but `rd_gen' needs to |
| be declared sooner. */ |
| |
| /* Each block has a bitmap of each type. |
   The length of each block's bitmap is:
| |
| max_cuid - for reaching definitions |
| n_exprs - for available expressions |
| |
   Thus we view the bitmaps as two-dimensional arrays, i.e.
| rd_kill[block_num][cuid_num] |
| ae_kill[block_num][expr_num] */ |
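
/* For instance (illustrative): TEST_BIT (rd_kill[bb->index], cuid) asks
   whether the definition with that cuid is killed in block BB, and
   TEST_BIT (ae_kill[bb->index], indx) asks the same of expression INDX.  */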
| |
/* For reaching defs.  */
static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out;

/* For available exprs.  */
static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
| |
| /* Objects of this type are passed around by the null-pointer check |
| removal routines. */ |
| struct null_pointer_info |
| { |
| /* The basic block being processed. */ |
| basic_block current_block; |
| /* The first register to be handled in this pass. */ |
| unsigned int min_reg; |
| /* One greater than the last register to be handled in this pass. */ |
| unsigned int max_reg; |
| sbitmap *nonnull_local; |
| sbitmap *nonnull_killed; |
| }; |
| |
| static void compute_can_copy (void); |
| static void *gmalloc (size_t) ATTRIBUTE_MALLOC; |
| static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC; |
| static void *grealloc (void *, size_t); |
| static void *gcse_alloc (unsigned long); |
| static void alloc_gcse_mem (rtx); |
| static void free_gcse_mem (void); |
| static void alloc_reg_set_mem (int); |
| static void free_reg_set_mem (void); |
| static int get_bitmap_width (int, int, int); |
| static void record_one_set (int, rtx); |
| static void replace_one_set (int, rtx, rtx); |
| static void record_set_info (rtx, rtx, void *); |
| static void compute_sets (rtx); |
| static void hash_scan_insn (rtx, struct hash_table *, int); |
| static void hash_scan_set (rtx, rtx, struct hash_table *); |
| static void hash_scan_clobber (rtx, rtx, struct hash_table *); |
| static void hash_scan_call (rtx, rtx, struct hash_table *); |
| static int want_to_gcse_p (rtx); |
| static bool gcse_constant_p (rtx); |
| static int oprs_unchanged_p (rtx, rtx, int); |
| static int oprs_anticipatable_p (rtx, rtx); |
| static int oprs_available_p (rtx, rtx); |
| static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int, |
| struct hash_table *); |
| static void insert_set_in_table (rtx, rtx, struct hash_table *); |
| static unsigned int hash_expr (rtx, enum machine_mode, int *, int); |
| static unsigned int hash_expr_1 (rtx, enum machine_mode, int *); |
| static unsigned int hash_string_1 (const char *); |
| static unsigned int hash_set (int, int); |
| static int expr_equiv_p (rtx, rtx); |
| static void record_last_reg_set_info (rtx, int); |
| static void record_last_mem_set_info (rtx); |
| static void record_last_set_info (rtx, rtx, void *); |
| static void compute_hash_table (struct hash_table *); |
| static void alloc_hash_table (int, struct hash_table *, int); |
| static void free_hash_table (struct hash_table *); |
| static void compute_hash_table_work (struct hash_table *); |
| static void dump_hash_table (FILE *, const char *, struct hash_table *); |
| static struct expr *lookup_expr (rtx, struct hash_table *); |
| static struct expr *lookup_set (unsigned int, struct hash_table *); |
| static struct expr *next_set (unsigned int, struct expr *); |
| static void reset_opr_set_tables (void); |
| static int oprs_not_set_p (rtx, rtx); |
| static void mark_call (rtx); |
| static void mark_set (rtx, rtx); |
| static void mark_clobber (rtx, rtx); |
| static void mark_oprs_set (rtx); |
| static void alloc_cprop_mem (int, int); |
| static void free_cprop_mem (void); |
| static void compute_transp (rtx, int, sbitmap *, int); |
| static void compute_transpout (void); |
| static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *, |
| struct hash_table *); |
| static void compute_cprop_data (void); |
| static void find_used_regs (rtx *, void *); |
| static int try_replace_reg (rtx, rtx, rtx); |
| static struct expr *find_avail_set (int, rtx); |
| static int cprop_jump (basic_block, rtx, rtx, rtx, rtx); |
| static void mems_conflict_for_gcse_p (rtx, rtx, void *); |
| static int load_killed_in_block_p (basic_block, int, rtx, int); |
| static void canon_list_insert (rtx, rtx, void *); |
| static int cprop_insn (rtx, int); |
| static int cprop (int); |
| static void find_implicit_sets (void); |
| static int one_cprop_pass (int, int, int); |
| static bool constprop_register (rtx, rtx, rtx, int); |
| static struct expr *find_bypass_set (int, int); |
| static bool reg_killed_on_edge (rtx, edge); |
| static int bypass_block (basic_block, rtx, rtx); |
| static int bypass_conditional_jumps (void); |
| static void alloc_pre_mem (int, int); |
| static void free_pre_mem (void); |
| static void compute_pre_data (void); |
| static int pre_expr_reaches_here_p (basic_block, struct expr *, |
| basic_block); |
| static void insert_insn_end_bb (struct expr *, basic_block, int); |
| static void pre_insert_copy_insn (struct expr *, rtx); |
| static void pre_insert_copies (void); |
| static int pre_delete (void); |
| static int pre_gcse (void); |
| static int one_pre_gcse_pass (int); |
| static void add_label_notes (rtx, rtx); |
| static void alloc_code_hoist_mem (int, int); |
| static void free_code_hoist_mem (void); |
| static void compute_code_hoist_vbeinout (void); |
| static void compute_code_hoist_data (void); |
| static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *); |
| static void hoist_code (void); |
| static int one_code_hoisting_pass (void); |
| static void alloc_rd_mem (int, int); |
| static void free_rd_mem (void); |
| static void handle_rd_kill_set (rtx, int, basic_block); |
| static void compute_kill_rd (void); |
| static void compute_rd (void); |
| static void alloc_avail_expr_mem (int, int); |
| static void free_avail_expr_mem (void); |
| static void compute_ae_gen (struct hash_table *); |
| static int expr_killed_p (rtx, basic_block); |
| static void compute_ae_kill (sbitmap *, sbitmap *, struct hash_table *); |
| static int expr_reaches_here_p (struct occr *, struct expr *, basic_block, |
| int); |
| static rtx computing_insn (struct expr *, rtx); |
| static int def_reaches_here_p (rtx, rtx); |
| static int can_disregard_other_sets (struct reg_set **, rtx, int); |
| static int handle_avail_expr (rtx, struct expr *); |
| static int classic_gcse (void); |
| static int one_classic_gcse_pass (int); |
| static void invalidate_nonnull_info (rtx, rtx, void *); |
| static int delete_null_pointer_checks_1 (unsigned int *, sbitmap *, sbitmap *, |
| struct null_pointer_info *); |
| static rtx process_insert_insn (struct expr *); |
| static int pre_edge_insert (struct edge_list *, struct expr **); |
| static int expr_reaches_here_p_work (struct occr *, struct expr *, |
| basic_block, int, char *); |
| static int pre_expr_reaches_here_p_work (basic_block, struct expr *, |
| basic_block, char *); |
| static struct ls_expr * ldst_entry (rtx); |
| static void free_ldst_entry (struct ls_expr *); |
| static void free_ldst_mems (void); |
| static void print_ldst_list (FILE *); |
| static struct ls_expr * find_rtx_in_ldst (rtx); |
| static int enumerate_ldsts (void); |
| static inline struct ls_expr * first_ls_expr (void); |
| static inline struct ls_expr * next_ls_expr (struct ls_expr *); |
| static int simple_mem (rtx); |
| static void invalidate_any_buried_refs (rtx); |
| static void compute_ld_motion_mems (void); |
| static void trim_ld_motion_mems (void); |
| static void update_ld_motion_stores (struct expr *); |
| static void reg_set_info (rtx, rtx, void *); |
| static void reg_clear_last_set (rtx, rtx, void *); |
| static bool store_ops_ok (rtx, int *); |
| static rtx extract_mentioned_regs (rtx); |
| static rtx extract_mentioned_regs_helper (rtx, rtx); |
| static void find_moveable_store (rtx, int *, int *); |
| static int compute_store_table (void); |
| static bool load_kills_store (rtx, rtx, int); |
| static bool find_loads (rtx, rtx, int); |
| static bool store_killed_in_insn (rtx, rtx, rtx, int); |
| static bool store_killed_after (rtx, rtx, rtx, basic_block, int *, rtx *); |
| static bool store_killed_before (rtx, rtx, rtx, basic_block, int *); |
| static void build_store_vectors (void); |
| static void insert_insn_start_bb (rtx, basic_block); |
| static int insert_store (struct ls_expr *, edge); |
| static void remove_reachable_equiv_notes (basic_block, struct ls_expr *); |
| static void replace_store_insn (rtx, rtx, basic_block, struct ls_expr *); |
| static void delete_store (struct ls_expr *, basic_block); |
| static void free_store_memory (void); |
| static void store_motion (void); |
| static void free_insn_expr_list_list (rtx *); |
| static void clear_modify_mem_tables (void); |
| static void free_modify_mem_tables (void); |
| static rtx gcse_emit_move_after (rtx, rtx, rtx); |
| static void local_cprop_find_used_regs (rtx *, void *); |
static bool do_local_cprop (rtx, rtx, int, rtx *);
static bool adjust_libcall_notes (rtx, rtx, rtx, rtx *);
| static void local_cprop_pass (int); |
| static bool is_too_expensive (const char *); |
| |
| |
| /* Entry point for global common subexpression elimination. |
| F is the first instruction in the function. */ |
| |
| int |
| gcse_main (rtx f, FILE *file) |
| { |
| int changed, pass; |
| /* Bytes used at start of pass. */ |
| int initial_bytes_used; |
| /* Maximum number of bytes used by a pass. */ |
| int max_pass_bytes; |
| /* Point to release obstack data from for each pass. */ |
| char *gcse_obstack_bottom; |
| |
| /* We do not construct an accurate cfg in functions which call |
| setjmp, so just punt to be safe. */ |
| if (current_function_calls_setjmp) |
| return 0; |
| |
| /* Assume that we do not need to run jump optimizations after gcse. */ |
| run_jump_opt_after_gcse = 0; |
| |
| /* For calling dump_foo fns from gdb. */ |
| debug_stderr = stderr; |
| gcse_file = file; |
| |
  /* Record the highest register number in the function before this pass;
     registers created during gcse will have regno >= max_gcse_regno.  */
| max_gcse_regno = max_reg_num (); |
| |
| if (file) |
| dump_flow_info (file); |
| |
| /* Return if there's nothing to do, or it is too expensive. */ |
| if (n_basic_blocks <= 1 || is_too_expensive (_("GCSE disabled"))) |
| return 0; |
| |
| gcc_obstack_init (&gcse_obstack); |
| bytes_used = 0; |
| |
| /* We need alias. */ |
| init_alias_analysis (); |
| /* Record where pseudo-registers are set. This data is kept accurate |
| during each pass. ??? We could also record hard-reg information here |
| [since it's unchanging], however it is currently done during hash table |
| computation. |
| |
| It may be tempting to compute MEM set information here too, but MEM sets |
| will be subject to code motion one day and thus we need to compute |
| information about memory sets when we build the hash tables. */ |
| |
| alloc_reg_set_mem (max_gcse_regno); |
| compute_sets (f); |
| |
| pass = 0; |
| initial_bytes_used = bytes_used; |
| max_pass_bytes = 0; |
| gcse_obstack_bottom = gcse_alloc (1); |
| changed = 1; |
| while (changed && pass < MAX_GCSE_PASSES) |
| { |
| changed = 0; |
| if (file) |
| fprintf (file, "GCSE pass %d\n\n", pass + 1); |
| |
| /* Initialize bytes_used to the space for the pred/succ lists, |
| and the reg_set_table data. */ |
| bytes_used = initial_bytes_used; |
| |
| /* Each pass may create new registers, so recalculate each time. */ |
| max_gcse_regno = max_reg_num (); |
| |
| alloc_gcse_mem (f); |
| |
| /* Don't allow constant propagation to modify jumps |
| during this pass. */ |
| changed = one_cprop_pass (pass + 1, 0, 0); |
| |
| if (optimize_size) |
| changed |= one_classic_gcse_pass (pass + 1); |
| else |
| { |
| changed |= one_pre_gcse_pass (pass + 1); |
| /* We may have just created new basic blocks. Release and |
| recompute various things which are sized on the number of |
| basic blocks. */ |
| if (changed) |
| { |
| free_modify_mem_tables (); |
| modify_mem_list = gcalloc (last_basic_block, sizeof (rtx)); |
| canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx)); |
| } |
| free_reg_set_mem (); |
| alloc_reg_set_mem (max_reg_num ()); |
| compute_sets (f); |
| run_jump_opt_after_gcse = 1; |
| } |
| |
| if (max_pass_bytes < bytes_used) |
| max_pass_bytes = bytes_used; |
| |
| /* Free up memory, then reallocate for code hoisting. We can |
| not re-use the existing allocated memory because the tables |
| will not have info for the insns or registers created by |
| partial redundancy elimination. */ |
| free_gcse_mem (); |
| |
| /* It does not make sense to run code hoisting unless we are optimizing |
| for code size -- it rarely makes programs faster, and can make |
| them bigger if we did partial redundancy elimination (when optimizing |
| for space, we use a classic gcse algorithm instead of partial |
| redundancy algorithms). */ |
| if (optimize_size) |
| { |
| max_gcse_regno = max_reg_num (); |
| alloc_gcse_mem (f); |
| changed |= one_code_hoisting_pass (); |
| free_gcse_mem (); |
| |
| if (max_pass_bytes < bytes_used) |
| max_pass_bytes = bytes_used; |
| } |
| |
| if (file) |
| { |
| fprintf (file, "\n"); |
| fflush (file); |
| } |
| |
| obstack_free (&gcse_obstack, gcse_obstack_bottom); |
| pass++; |
| } |
| |
| /* Do one last pass of copy propagation, including cprop into |
| conditional jumps. */ |
| |
| max_gcse_regno = max_reg_num (); |
| alloc_gcse_mem (f); |
| /* This time, go ahead and allow cprop to alter jumps. */ |
| one_cprop_pass (pass + 1, 1, 0); |
| free_gcse_mem (); |
| |
| if (file) |
| { |
| fprintf (file, "GCSE of %s: %d basic blocks, ", |
| current_function_name (), n_basic_blocks); |
| fprintf (file, "%d pass%s, %d bytes\n\n", |
| pass, pass > 1 ? "es" : "", max_pass_bytes); |
| } |
| |
| obstack_free (&gcse_obstack, NULL); |
| free_reg_set_mem (); |
| /* We are finished with alias. */ |
| end_alias_analysis (); |
| allocate_reg_info (max_reg_num (), FALSE, FALSE); |
| |
| if (!optimize_size && flag_gcse_sm) |
| store_motion (); |
| |
  return run_jump_opt_after_gcse;
| } |
| |
| /* Misc. utilities. */ |
| |
| /* Nonzero for each mode that supports (set (reg) (reg)). |
| This is trivially true for integer and floating point values. |
| It may or may not be true for condition codes. */ |
| static char can_copy[(int) NUM_MACHINE_MODES]; |
| |
| /* Compute which modes support reg/reg copy operations. */ |
| |
| static void |
| compute_can_copy (void) |
| { |
| int i; |
| #ifndef AVOID_CCMODE_COPIES |
| rtx reg, insn; |
| #endif |
| memset (can_copy, 0, NUM_MACHINE_MODES); |
| |
| start_sequence (); |
| for (i = 0; i < NUM_MACHINE_MODES; i++) |
| if (GET_MODE_CLASS (i) == MODE_CC) |
| { |
| #ifdef AVOID_CCMODE_COPIES |
| can_copy[i] = 0; |
| #else |
| reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1); |
| insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg)); |
| if (recog (PATTERN (insn), insn, NULL) >= 0) |
| can_copy[i] = 1; |
| #endif |
| } |
| else |
| can_copy[i] = 1; |
| |
| end_sequence (); |
| } |
| |
| /* Returns whether the mode supports reg/reg copy operations. */ |
| |
| bool |
| can_copy_p (enum machine_mode mode) |
| { |
| static bool can_copy_init_p = false; |
| |
| if (! can_copy_init_p) |
| { |
| compute_can_copy (); |
| can_copy_init_p = true; |
| } |
| |
| return can_copy[mode] != 0; |
| } |
| |
| /* Cover function to xmalloc to record bytes allocated. */ |
| |
| static void * |
| gmalloc (size_t size) |
| { |
| bytes_used += size; |
| return xmalloc (size); |
| } |
| |
| /* Cover function to xcalloc to record bytes allocated. */ |
| |
| static void * |
| gcalloc (size_t nelem, size_t elsize) |
| { |
| bytes_used += nelem * elsize; |
| return xcalloc (nelem, elsize); |
| } |
| |
| /* Cover function to xrealloc. |
| We don't record the additional size since we don't know it. |
| It won't affect memory usage stats much anyway. */ |
| |
| static void * |
| grealloc (void *ptr, size_t size) |
| { |
| return xrealloc (ptr, size); |
| } |
| |
| /* Cover function to obstack_alloc. */ |
| |
| static void * |
| gcse_alloc (unsigned long size) |
| { |
| bytes_used += size; |
| return obstack_alloc (&gcse_obstack, size); |
| } |
| |
| /* Allocate memory for the cuid mapping array, |
| and reg/memory set tracking tables. |
| |
| This is called at the start of each pass. */ |
| |
| static void |
| alloc_gcse_mem (rtx f) |
| { |
| int i; |
| rtx insn; |
| |
| /* Find the largest UID and create a mapping from UIDs to CUIDs. |
| CUIDs are like UIDs except they increase monotonically, have no gaps, |
| and only apply to real insns. */ |
| |
| max_uid = get_max_uid (); |
| uid_cuid = gcalloc (max_uid + 1, sizeof (int)); |
| for (insn = f, i = 0; insn; insn = NEXT_INSN (insn)) |
| { |
| if (INSN_P (insn)) |
| uid_cuid[INSN_UID (insn)] = i++; |
| else |
| uid_cuid[INSN_UID (insn)] = i; |
| } |
| |
| /* Create a table mapping cuids to insns. */ |
| |
| max_cuid = i; |
| cuid_insn = gcalloc (max_cuid + 1, sizeof (rtx)); |
| for (insn = f, i = 0; insn; insn = NEXT_INSN (insn)) |
| if (INSN_P (insn)) |
| CUID_INSN (i++) = insn; |
| |
| /* Allocate vars to track sets of regs. */ |
| reg_set_bitmap = BITMAP_XMALLOC (); |
| |
| /* Allocate vars to track sets of regs, memory per block. */ |
| reg_set_in_block = sbitmap_vector_alloc (last_basic_block, max_gcse_regno); |
| /* Allocate array to keep a list of insns which modify memory in each |
| basic block. */ |
| modify_mem_list = gcalloc (last_basic_block, sizeof (rtx)); |
| canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx)); |
| modify_mem_list_set = BITMAP_XMALLOC (); |
| canon_modify_mem_list_set = BITMAP_XMALLOC (); |
| } |
| |
| /* Free memory allocated by alloc_gcse_mem. */ |
| |
| static void |
| free_gcse_mem (void) |
| { |
| free (uid_cuid); |
| free (cuid_insn); |
| |
| BITMAP_XFREE (reg_set_bitmap); |
| |
| sbitmap_vector_free (reg_set_in_block); |
| free_modify_mem_tables (); |
| BITMAP_XFREE (modify_mem_list_set); |
| BITMAP_XFREE (canon_modify_mem_list_set); |
| } |
| |
| /* Many of the global optimization algorithms work by solving dataflow |
| equations for various expressions. Initially, some local value is |
| computed for each expression in each block. Then, the values across the |
| various blocks are combined (by following flow graph edges) to arrive at |
| global values. Conceptually, each set of equations is independent. We |
| may therefore solve all the equations in parallel, solve them one at a |
| time, or pick any intermediate approach. |
| |
| When you're going to need N two-dimensional bitmaps, each X (say, the |
| number of blocks) by Y (say, the number of expressions), call this |
| function. It's not important what X and Y represent; only that Y |
| correspond to the things that can be done in parallel. This function will |
| return an appropriate chunking factor C; you should solve C sets of |
| equations in parallel. By going through this function, we can easily |
| trade space against time; by solving fewer equations in parallel we use |
| less space. */ |
| |
| static int |
| get_bitmap_width (int n, int x, int y) |
| { |
| /* It's not really worth figuring out *exactly* how much memory will |
| be used by a particular choice. The important thing is to get |
| something approximately right. */ |
| size_t max_bitmap_memory = 10 * 1024 * 1024; |
| |
| /* The number of bytes we'd use for a single column of minimum |
| width. */ |
| size_t column_size = n * x * sizeof (SBITMAP_ELT_TYPE); |
| |
| /* Often, it's reasonable just to solve all the equations in |
| parallel. */ |
| if (column_size * SBITMAP_SET_SIZE (y) <= max_bitmap_memory) |
| return y; |
| |
| /* Otherwise, pick the largest width we can, without going over the |
| limit. */ |
| return SBITMAP_ELT_BITS * ((max_bitmap_memory + column_size - 1) |
| / column_size); |
| } |
| |
| /* Compute the local properties of each recorded expression. |
| |
| Local properties are those that are defined by the block, irrespective of |
| other blocks. |
| |
| An expression is transparent in a block if its operands are not modified |
| in the block. |
| |
   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation was moved to the beginning of the block.
| |
| We call this routine for cprop, pre and code hoisting. They all compute |
| basically the same information and thus can easily share this code. |
| |
| TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local |
| properties. If NULL, then it is not necessary to compute or record that |
| particular property. |
| |
   TABLE controls which hash table to look at.  If it is the set hash table,
   additionally TRANSP is computed as ~TRANSP, since this is really cprop's
   ABSALTERED.  */
| |
| static void |
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct hash_table *table)
| { |
| unsigned int i; |
| |
| /* Initialize any bitmaps that were passed in. */ |
| if (transp) |
| { |
| if (table->set_p) |
| sbitmap_vector_zero (transp, last_basic_block); |
| else |
| sbitmap_vector_ones (transp, last_basic_block); |
| } |
| |
| if (comp) |
| sbitmap_vector_zero (comp, last_basic_block); |
| if (antloc) |
| sbitmap_vector_zero (antloc, last_basic_block); |
| |
| for (i = 0; i < table->size; i++) |
| { |
| struct expr *expr; |
| |
| for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash) |
| { |
| int indx = expr->bitmap_index; |
| struct occr *occr; |
| |
| /* The expression is transparent in this block if it is not killed. |
| We start by assuming all are transparent [none are killed], and |
| then reset the bits for those that are. */ |
| if (transp) |
| compute_transp (expr->expr, indx, transp, table->set_p); |
| |
| /* The occurrences recorded in antic_occr are exactly those that |
| we want to set to nonzero in ANTLOC. */ |
| if (antloc) |
| for (occr = expr->antic_occr; occr != NULL; occr = occr->next) |
| { |
| SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx); |
| |
| /* While we're scanning the table, this is a good place to |
| initialize this. */ |
| occr->deleted_p = 0; |
| } |
| |
| /* The occurrences recorded in avail_occr are exactly those that |
| we want to set to nonzero in COMP. */ |
| if (comp) |
| for (occr = expr->avail_occr; occr != NULL; occr = occr->next) |
| { |
| SET_BIT (comp[BLOCK_NUM (occr->insn)], indx); |
| |
| /* While we're scanning the table, this is a good place to |
| initialize this. */ |
| occr->copied_p = 0; |
| } |
| |
| /* While we're scanning the table, this is a good place to |
| initialize this. */ |
| expr->reaching_reg = 0; |
| } |
| } |
| } |
| |
| /* Register set information. |
| |
| `reg_set_table' records where each register is set or otherwise |
| modified. */ |
| |
| static struct obstack reg_set_obstack; |
| |
| static void |
| alloc_reg_set_mem (int n_regs) |
| { |
| reg_set_table_size = n_regs + REG_SET_TABLE_SLOP; |
| reg_set_table = gcalloc (reg_set_table_size, sizeof (struct reg_set *)); |
| |
| gcc_obstack_init (®_set_obstack); |
| } |
| |
| static void |
| free_reg_set_mem (void) |
| { |
| free (reg_set_table); |
| obstack_free (®_set_obstack, NULL); |
| } |
| |
| /* An OLD_INSN that used to set REGNO was replaced by NEW_INSN. |
| Update the corresponding `reg_set_table' entry accordingly. |
| We assume that NEW_INSN is not already recorded in reg_set_table[regno]. */ |
| |
| static void |
| replace_one_set (int regno, rtx old_insn, rtx new_insn) |
| { |
| struct reg_set *reg_info; |
| if (regno >= reg_set_table_size) |
| return; |
| for (reg_info = reg_set_table[regno]; reg_info; reg_info = reg_info->next) |
| if (reg_info->insn == old_insn) |
| { |
| reg_info->insn = new_insn; |
| break; |
| } |
| } |
| |
| /* Record REGNO in the reg_set table. */ |
| |
| static void |
| record_one_set (int regno, rtx insn) |
| { |
| /* Allocate a new reg_set element and link it onto the list. */ |
| struct reg_set *new_reg_info; |
| |
| /* If the table isn't big enough, enlarge it. */ |
| if (regno >= reg_set_table_size) |
| { |
| int new_size = regno + REG_SET_TABLE_SLOP; |
| |
| reg_set_table = grealloc (reg_set_table, |
| new_size * sizeof (struct reg_set *)); |
| memset (reg_set_table + reg_set_table_size, 0, |
| (new_size - reg_set_table_size) * sizeof (struct reg_set *)); |
| reg_set_table_size = new_size; |
| } |
| |
| new_reg_info = obstack_alloc (®_set_obstack, sizeof (struct reg_set)); |
| bytes_used += sizeof (struct reg_set); |
| new_reg_info->insn = insn; |
| new_reg_info->next = reg_set_table[regno]; |
| reg_set_table[regno] = new_reg_info; |
| } |
| |
| /* Called from compute_sets via note_stores to handle one SET or CLOBBER in |
| an insn. The DATA is really the instruction in which the SET is |
| occurring. */ |
| |
| static void |
| record_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data) |
| { |
| rtx record_set_insn = (rtx) data; |
| |
| if (GET_CODE (dest) == REG && REGNO (dest) >= FIRST_PSEUDO_REGISTER) |
| record_one_set (REGNO (dest), record_set_insn); |
| } |
| |
| /* Scan the function and record each set of each pseudo-register. |
| |
| This is called once, at the start of the gcse pass. See the comments for |
| `reg_set_table' for further documentation. */ |
| |
| static void |
| compute_sets (rtx f) |
| { |
| rtx insn; |
| |
| for (insn = f; insn != 0; insn = NEXT_INSN (insn)) |
| if (INSN_P (insn)) |
| note_stores (PATTERN (insn), record_set_info, insn); |
| } |
| |
| /* Hash table support. */ |
| |
| struct reg_avail_info |
| { |
| basic_block last_bb; |
| int first_set; |
| int last_set; |
| }; |
| |
| static struct reg_avail_info *reg_avail_info; |
| static basic_block current_bb; |
| |
| |
| /* See whether X, the source of a set, is something we want to consider for |
| GCSE. */ |
| |
| static GTY(()) rtx test_insn; |
| static int |
| want_to_gcse_p (rtx x) |
| { |
| int num_clobbers = 0; |
| int icode; |
| |
| switch (GET_CODE (x)) |
| { |
| case REG: |
| case SUBREG: |
| case CONST_INT: |
| case CONST_DOUBLE: |
| case CONST_VECTOR: |
| case CALL: |
| case CONSTANT_P_RTX: |
| return 0; |
| |
| default: |
| break; |
| } |
| |
| /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */ |
| if (general_operand (x, GET_MODE (x))) |
| return 1; |
| else if (GET_MODE (x) == VOIDmode) |
| return 0; |
| |
| /* Otherwise, check if we can make a valid insn from it. First initialize |
| our test insn if we haven't already. */ |
| if (test_insn == 0) |
| { |
| test_insn |
| = make_insn_raw (gen_rtx_SET (VOIDmode, |
| gen_rtx_REG (word_mode, |
| FIRST_PSEUDO_REGISTER * 2), |
| const0_rtx)); |
| NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0; |
| } |
| |
| /* Now make an insn like the one we would make when GCSE'ing and see if |
| valid. */ |
| PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x)); |
| SET_SRC (PATTERN (test_insn)) = x; |
| return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0 |
| && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode))); |
| } |
| |
| /* Return nonzero if the operands of expression X are unchanged from the |
| start of INSN's basic block up to but not including INSN (if AVAIL_P == 0), |
| or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */ |
| |
| static int |
| oprs_unchanged_p (rtx x, rtx insn, int avail_p) |
| { |
| int i, j; |
| enum rtx_code code; |
| const char *fmt; |
| |
| if (x == 0) |
| return 1; |
| |
| code = GET_CODE (x); |
| switch (code) |
| { |
| case REG: |
| { |
| struct reg_avail_info *info = ®_avail_info[REGNO (x)]; |
| |
| if (info->last_bb != current_bb) |
| return 1; |
| if (avail_p) |
| return info->last_set < INSN_CUID (insn); |
| else |
| return info->first_set >= INSN_CUID (insn); |
| } |
| |
| case MEM: |
| if (load_killed_in_block_p (current_bb, INSN_CUID (insn), |
| x, avail_p)) |
| return 0; |
| else |
| return oprs_unchanged_p (XEXP (x, 0), insn, avail_p); |
| |
| case PRE_DEC: |
| case PRE_INC: |
| case POST_DEC: |
| case POST_INC: |
| case PRE_MODIFY: |
| case POST_MODIFY: |
| return 0; |
| |
| case PC: |
| case CC0: /*FIXME*/ |
| case CONST: |
| case CONST_INT: |
| case CONST_DOUBLE: |
| case CONST_VECTOR: |
| case SYMBOL_REF: |
| case LABEL_REF: |
| case ADDR_VEC: |
| case ADDR_DIFF_VEC: |
| return 1; |
| |
| default: |
| break; |
| } |
| |
| for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--) |
| { |
| if (fmt[i] == 'e') |
| { |
| /* If we are about to do the last recursive call needed at this |
| level, change it into iteration. This function is called enough |
| to be worth it. */ |
| if (i == 0) |
| return oprs_unchanged_p (XEXP (x, i), insn, avail_p); |
| |
| else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p)) |
| return 0; |
| } |
| else if (fmt[i] == 'E') |
| for (j = 0; j < XVECLEN (x, i); j++) |
| if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p)) |
| return 0; |
| } |
| |
| return 1; |
| } |
| |
| /* Used for communication between mems_conflict_for_gcse_p and |
| load_killed_in_block_p. Nonzero if mems_conflict_for_gcse_p finds a |
| conflict between two memory references. */ |
| static int gcse_mems_conflict_p; |
| |
| /* Used for communication between mems_conflict_for_gcse_p and |
| load_killed_in_block_p. A memory reference for a load instruction, |
| mems_conflict_for_gcse_p will see if a memory store conflicts with |
| this memory load. */ |
| static rtx gcse_mem_operand; |
| |
| /* DEST is the output of an instruction. If it is a memory reference, and |
| possibly conflicts with the load found in gcse_mem_operand, then set |
| gcse_mems_conflict_p to a nonzero value. */ |
| |
| static void |
| mems_conflict_for_gcse_p (rtx dest, rtx setter ATTRIBUTE_UNUSED, |
| void *data ATTRIBUTE_UNUSED) |
| { |
| while (GET_CODE (dest) == SUBREG |
| || GET_CODE (dest) == ZERO_EXTRACT |
| || GET_CODE (dest) == SIGN_EXTRACT |
| || GET_CODE (dest) == STRICT_LOW_PART) |
| dest = XEXP (dest, 0); |
| |
| /* If DEST is not a MEM, then it will not conflict with the load. Note |
| that function calls are assumed to clobber memory, but are handled |
| elsewhere. */ |
| if (GET_CODE (dest) != MEM) |
| return; |
| |
| /* If we are setting a MEM in our list of specially recognized MEMs, |
| don't mark as killed this time. */ |
| |
| if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL) |
| { |
| if (!find_rtx_in_ldst (dest)) |
| gcse_mems_conflict_p = 1; |
| return; |
| } |
| |
| if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand, |
| rtx_addr_varies_p)) |
| gcse_mems_conflict_p = 1; |
| } |
| |
| /* Return nonzero if the expression in X (a memory reference) is killed |
| in block BB before or after the insn with the CUID in UID_LIMIT. |
| AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills |
| before UID_LIMIT. |
| |
| To check the entire block, set UID_LIMIT to max_uid + 1 and |
| AVAIL_P to 0. */ |
| |
| static int |
| load_killed_in_block_p (basic_block bb, int uid_limit, rtx x, int avail_p) |
| { |
| rtx list_entry = modify_mem_list[bb->index]; |
| while (list_entry) |
| { |
| rtx setter; |
| /* Ignore entries in the list that do not apply. */ |
| if ((avail_p |
| && INSN_CUID (XEXP (list_entry, 0)) < uid_limit) |
| || (! avail_p |
| && INSN_CUID (XEXP (list_entry, 0)) > uid_limit)) |
| { |
| list_entry = XEXP (list_entry, 1); |
| continue; |
| } |
| |
| setter = XEXP (list_entry, 0); |
| |
| /* If SETTER is a call everything is clobbered. Note that calls |
| to pure functions are never put on the list, so we need not |
| worry about them. */ |
| if (GET_CODE (setter) == CALL_INSN) |
| return 1; |
| |
| /* SETTER must be an INSN of some kind that sets memory. Call |
| note_stores to examine each hunk of memory that is modified. |
| |
| The note_stores interface is pretty limited, so we have to |
| communicate via global variables. Yuk. */ |
| gcse_mem_operand = x; |
| gcse_mems_conflict_p = 0; |
| note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL); |
| if (gcse_mems_conflict_p) |
| return 1; |
| list_entry = XEXP (list_entry, 1); |
| } |
| return 0; |
| } |
| |
| /* Return nonzero if the operands of expression X are unchanged from |
| the start of INSN's basic block up to but not including INSN. */ |
| |
| static int |
| oprs_anticipatable_p (rtx x, rtx insn) |
| { |
| return oprs_unchanged_p (x, insn, 0); |
| } |
| |
| /* Return nonzero if the operands of expression X are unchanged from |
| INSN to the end of INSN's basic block. */ |
| |
| static int |
| oprs_available_p (rtx x, rtx insn) |
| { |
| return oprs_unchanged_p (x, insn, 1); |
| } |
| |
| /* Hash expression X. |
| |
| MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean |
| indicating if a volatile operand is found or if the expression contains |
| something we don't want to insert in the table. HASH_TABLE_SIZE is |
| the current size of the hash table to be probed. |
| |
| ??? One might want to merge this with canon_hash. Later. */ |
| |
| static unsigned int |
| hash_expr (rtx x, enum machine_mode mode, int *do_not_record_p, |
| int hash_table_size) |
| { |
| unsigned int hash; |
| |
| *do_not_record_p = 0; |
| |
| hash = hash_expr_1 (x, mode, do_not_record_p); |
| return hash % hash_table_size; |
| } |
| |
| /* Hash a string. Just add its bytes up. */ |
| |
| static inline unsigned |
| hash_string_1 (const char *ps) |
| { |
| unsigned hash = 0; |
| const unsigned char *p = (const unsigned char *) ps; |
| |
| if (p) |
| while (*p) |
| hash += *p++; |
| |
| return hash; |
| } |
| |
| /* Subroutine of hash_expr to do the actual work. */ |
| |
| static unsigned int |
| hash_expr_1 (rtx x, enum machine_mode mode, int *do_not_record_p) |
| { |
| int i, j; |
| unsigned hash = 0; |
| enum rtx_code code; |
| const char *fmt; |
| |
| /* Used to turn recursion into iteration. We can't rely on GCC's |
| tail-recursion elimination since we need to keep accumulating values |
| in HASH. */ |
| |
| if (x == 0) |
| return hash; |
| |
| repeat: |
| code = GET_CODE (x); |
| switch (code) |
| { |
| case REG: |
| hash += ((unsigned int) REG << 7) + REGNO (x); |
| return hash; |
| |
| case CONST_INT: |
| hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode |
| + (unsigned int) INTVAL (x)); |
| return hash; |
| |
| case CONST_DOUBLE: |
| /* This is like the general case, except that it only counts |
| the integers representing the constant. */ |
| hash += (unsigned int) code + (unsigned int) GET_MODE (x); |
| if (GET_MODE (x) != VOIDmode) |
| for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++) |
| hash += (unsigned int) XWINT (x, i); |
| else |
| hash += ((unsigned int) CONST_DOUBLE_LOW (x) |
| + (unsigned int) CONST_DOUBLE_HIGH (x)); |
| return hash; |
| |
| case CONST_VECTOR: |
| { |
| int units; |
| rtx elt; |
| |
| units = CONST_VECTOR_NUNITS (x); |
| |
| for (i = 0; i < units; ++i) |
| { |
| elt = CONST_VECTOR_ELT (x, i); |
| hash += hash_expr_1 (elt, GET_MODE (elt), do_not_record_p); |
| } |
| |
| return hash; |
| } |
| |
| /* Assume there is only one rtx object for any given label. */ |
| case LABEL_REF: |
| /* We don't hash on the address of the CODE_LABEL to avoid bootstrap |
| differences and differences between each stage's debugging dumps. */ |
| hash += (((unsigned int) LABEL_REF << 7) |
| + CODE_LABEL_NUMBER (XEXP (x, 0))); |
| return hash; |
| |
| case SYMBOL_REF: |
| { |
| /* Don't hash on the symbol's address to avoid bootstrap differences. |
| Different hash values may cause expressions to be recorded in |
| different orders and thus different registers to be used in the |
| final assembler. This also avoids differences in the dump files |
| between various stages. */ |
| unsigned int h = 0; |
| const unsigned char *p = (const unsigned char *) XSTR (x, 0); |
| |
| while (*p) |
| h += (h << 7) + *p++; /* ??? revisit */ |
| |
| hash += ((unsigned int) SYMBOL_REF << 7) + h; |
| return hash; |
| } |
| |
| case MEM: |
| if (MEM_VOLATILE_P (x)) |
| { |
| *do_not_record_p = 1; |
| return 0; |
| } |
| |
| hash += (unsigned int) MEM; |
      /* We used to use the alias set for hashing, but this is not good,
	 since the alias set may differ between -fprofile-arcs and
	 -fbranch-probabilities compilations, causing the profiles to fail
	 to match.  */
| x = XEXP (x, 0); |
| goto repeat; |
| |
| case PRE_DEC: |
| case PRE_INC: |
| case POST_DEC: |
| case POST_INC: |
| case PC: |
| case CC0: |
| case CALL: |
| case UNSPEC_VOLATILE: |
| *do_not_record_p = 1; |
| return 0; |
| |
| case ASM_OPERANDS: |
| if (MEM_VOLATILE_P (x)) |
| { |
| *do_not_record_p = 1; |
| return 0; |
| } |
| else |
| { |
| /* We don't want to take the filename and line into account. */ |
| hash += (unsigned) code + (unsigned) GET_MODE (x) |
| + hash_string_1 (ASM_OPERANDS_TEMPLATE (x)) |
| + hash_string_1 (ASM_OPERANDS_OUTPUT_CONSTRAINT (x)) |
| + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x); |
| |
| if (ASM_OPERANDS_INPUT_LENGTH (x)) |
| { |
| for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++) |
| { |
| hash += (hash_expr_1 (ASM_OPERANDS_INPUT (x, i), |
| GET_MODE (ASM_OPERANDS_INPUT (x, i)), |
| do_not_record_p) |
| + hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT |
| (x, i))); |
| } |
| |
| hash += hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0)); |
| x = ASM_OPERANDS_INPUT (x, 0); |
| mode = GET_MODE (x); |
| goto repeat; |
| } |
| return hash; |
| } |
| |
| default: |
| break; |
| } |
| |
| hash += (unsigned) code + (unsigned) GET_MODE (x); |
| for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--) |
| { |
| if (fmt[i] == 'e') |
| { |
| /* If we are about to do the last recursive call |
| needed at this level, change it into iteration. |
| This function is called enough to be worth it. */ |
| if (i == 0) |
| { |
| x = XEXP (x, i); |
| goto repeat; |
| } |
| |
| hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p); |
| if (*do_not_record_p) |
| return 0; |
| } |
| |
| else if (fmt[i] == 'E') |
| for (j = 0; j < XVECLEN (x, i); j++) |
| { |
| hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p); |
| if (*do_not_record_p) |
| return 0; |
| } |
| |
| else if (fmt[i] == 's') |
| hash += hash_string_1 (XSTR (x, i)); |
| else if (fmt[i] == 'i') |
| hash += (unsigned int) XINT (x, i); |
| else |
| abort (); |
| } |
| |
| return hash; |
| } |
| |
| /* Hash a set of register REGNO. |
| |
| Sets are hashed on the register that is set. This simplifies the PRE copy |
| propagation code. |
| |
| ??? May need to make things more elaborate. Later, as necessary. */ |
| |
| static unsigned int |
| hash_set (int regno, int hash_table_size) |
| { |
| unsigned int hash; |
| |
| hash = regno; |
| return hash % hash_table_size; |
| } |
| |
/* Return nonzero if X is equivalent to Y.
| ??? Borrowed from cse.c. Might want to remerge with cse.c. Later. */ |
| |
| static int |
| expr_equiv_p (rtx x, rtx y) |
| { |
| int i, j; |
| enum rtx_code code; |
| const char *fmt; |
| |
| if (x == y) |
| return 1; |
| |
| if (x == 0 || y == 0) |
| return 0; |
| |
| code = GET_CODE (x); |
| if (code != GET_CODE (y)) |
| return 0; |
| |
| /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */ |
| if (GET_MODE (x) != GET_MODE (y)) |
| return 0; |
| |
| switch (code) |
| { |
| case PC: |
| case CC0: |
| case CONST_INT: |
| return 0; |
| |
| case LABEL_REF: |
| return XEXP (x, 0) == XEXP (y, 0); |
| |
| case SYMBOL_REF: |
| return XSTR (x, 0) == XSTR (y, 0); |
| |
| case REG: |
| return REGNO (x) == REGNO (y); |
| |
| case MEM: |
      /* Can't merge two expressions in different alias sets, since we might
	 decide that an expression is transparent in a block when it isn't,
	 because it is set through the other alias set.  */
| if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y)) |
| return 0; |
| |
| /* A volatile mem should not be considered equivalent to any other. */ |
| if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y)) |
| return 0; |
| break; |
| |
| /* For commutative operations, check both orders. */ |
| case PLUS: |
| case MULT: |
| case AND: |
| case IOR: |
| case XOR: |
| case NE: |
| case EQ: |
| return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0)) |
| && expr_equiv_p (XEXP (x, 1), XEXP (y, 1))) |
| || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1)) |
| && expr_equiv_p (XEXP (x, 1), XEXP (y, 0)))); |
| |
| case ASM_OPERANDS: |
| /* We don't use the generic code below because we want to |
| disregard filename and line numbers. */ |
| |
| /* A volatile asm isn't equivalent to any other. */ |
| if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y)) |
| return 0; |
| |
| if (GET_MODE (x) != GET_MODE (y) |
| || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y)) |
| || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x), |
| ASM_OPERANDS_OUTPUT_CONSTRAINT (y)) |
| || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y) |
| || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y)) |
| return 0; |
| |
| if (ASM_OPERANDS_INPUT_LENGTH (x)) |
| { |
| for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--) |
| if (! expr_equiv_p (ASM_OPERANDS_INPUT (x, i), |
| ASM_OPERANDS_INPUT (y, i)) |
| || strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i), |
| ASM_OPERANDS_INPUT_CONSTRAINT (y, i))) |
| return 0; |
| } |
| |
| return 1; |
| |
| default: |
| break; |
| } |
| |
| /* Compare the elements. If any pair of corresponding elements |
| fail to match, return 0 for the whole thing. */ |
| |
| fmt = GET_RTX_FORMAT (code); |
| for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
| { |
| switch (fmt[i]) |
| { |
| case 'e': |
| if (! expr_equiv_p (XEXP (x, i), XEXP (y, i))) |
| return 0; |
| break; |
| |
| case 'E': |
| if (XVECLEN (x, i) != XVECLEN (y, i)) |
| return 0; |
| for (j = 0; j < XVECLEN (x, i); j++) |
| if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j))) |
| return 0; |
| break; |
| |
| case 's': |
| if (strcmp (XSTR (x, i), XSTR (y, i))) |
| return 0; |
| break; |
| |
| case 'i': |
| if (XINT (x, i) != XINT (y, i)) |
| return 0; |
| break; |
| |
| case 'w': |
| if (XWINT (x, i) != XWINT (y, i)) |
| return 0; |
| break; |
| |
| case '0': |
| break; |
| |
| default: |
| abort (); |
| } |
| } |
| |
| return 1; |
| } |
| |
| /* Insert expression X in INSN in the hash TABLE. |
| If it is already present, record it as the last occurrence in INSN's |
| basic block. |
| |
| MODE is the mode of the value X is being stored into. |
| It is only used if X is a CONST_INT. |
| |
| ANTIC_P is nonzero if X is an anticipatable expression. |
| AVAIL_P is nonzero if X is an available expression. */ |
| |
| static void |
| insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p, |
| int avail_p, struct hash_table *table) |
| { |
| int found, do_not_record_p; |
| unsigned int hash; |
| struct expr *cur_expr, *last_expr = NULL; |
| struct occr *antic_occr, *avail_occr; |
| struct occr *last_occr = NULL; |
| |
| hash = hash_expr (x, mode, &do_not_record_p, table->size); |
| |
| /* Do not insert expression in table if it contains volatile operands, |
| or if hash_expr determines the expression is something we don't want |
| to or can't handle. */ |
| if (do_not_record_p) |
| return; |
| |
| cur_expr = table->table[hash]; |
| found = 0; |
| |
| while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x))) |
| { |
| /* If the expression isn't found, save a pointer to the end of |
| the list. */ |
| last_expr = cur_expr; |
| cur_expr = cur_expr->next_same_hash; |
| } |
| |
| if (! found) |
| { |
| cur_expr = gcse_alloc (sizeof (struct expr)); |
| bytes_used += sizeof (struct expr); |
| if (table->table[hash] == NULL) |
| /* This is the first pattern that hashed to this index. */ |
| table->table[hash] = cur_expr; |
| else |
| /* Add EXPR to end of this hash chain. */ |
| last_expr->next_same_hash = cur_expr; |
| |
| /* Set the fields of the expr element. */ |
| cur_expr->expr = x; |
| cur_expr->bitmap_index = table->n_elems++; |
| cur_expr->next_same_hash = NULL; |
| cur_expr->antic_occr = NULL; |
| cur_expr->avail_occr = NULL; |
| } |
| |
| /* Now record the occurrence(s). */ |
| if (antic_p) |
| { |
| antic_occr = cur_expr->antic_occr; |
| |
| /* Search for another occurrence in the same basic block. */ |
| while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn)) |
| { |
| /* If an occurrence isn't found, save a pointer to the end of |
| the list. */ |
| last_occr = antic_occr; |
| antic_occr = antic_occr->next; |
| } |
| |
| if (antic_occr) |
| /* Found another instance of the expression in the same basic block. |
| Prefer the currently recorded one. We want the first one in the |
| block and the block is scanned from start to end. */ |
| ; /* nothing to do */ |
| else |
| { |
| /* First occurrence of this expression in this basic block. */ |
| antic_occr = gcse_alloc (sizeof (struct occr)); |
| bytes_used += sizeof (struct occr); |
| /* First occurrence of this expression in any block? */ |
| if (cur_expr->antic_occr == NULL) |
| cur_expr->antic_occr = antic_occr; |
| else |
| last_occr->next = antic_occr; |
| |
| antic_occr->insn = insn; |
| antic_occr->next = NULL; |
| } |
| } |
| |
| if (avail_p) |
| { |
| avail_occr = cur_expr->avail_occr; |
| |
| /* Search for another occurrence in the same basic block. */ |
| while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn)) |
| { |
| /* If an occurrence isn't found, save a pointer to the end of |
| the list. */ |
| last_occr = avail_occr; |
| avail_occr = avail_occr->next; |
| } |
| |
| if (avail_occr) |
| /* Found another instance of the expression in the same basic block. |
| Prefer this occurrence to the currently recorded one. We want |
| the last one in the block and the block is scanned from start |
| to end. */ |
| avail_occr->insn = insn; |
| else |
| { |
| /* First occurrence of this expression in this basic block. */ |
| avail_occr = gcse_alloc (sizeof (struct occr)); |
| bytes_used += sizeof (struct occr); |
| |
| /* First occurrence of this expression in any block? */ |
| if (cur_expr->avail_occr == NULL) |
| cur_expr->avail_occr = avail_occr; |
| else |
| last_occr->next = avail_occr; |
| |
| avail_occr->insn = insn; |
| avail_occr->next = NULL; |
| } |
| } |
| } |
| |
| /* Insert pattern X in INSN in the hash table. |
| X is a SET of a reg to either another reg or a constant. |
| If it is already present, record it as the last occurrence in INSN's |
| basic block. */ |
| |
| static void |
| insert_set_in_table (rtx x, rtx insn, struct hash_table *table) |
| { |
| int found; |
| unsigned int hash; |
| struct expr *cur_expr, *last_expr = NULL; |
| struct occr *cur_occr, *last_occr = NULL; |
| |
| if (GET_CODE (x) != SET |
| || GET_CODE (SET_DEST (x)) != REG) |
| abort (); |
| |
| hash = hash_set (REGNO (SET_DEST (x)), table->size); |
| |
| cur_expr = table->table[hash]; |
| found = 0; |
| |
| while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x))) |
| { |
| /* If the expression isn't found, save a pointer to the end of |
| the list. */ |
| last_expr = cur_expr; |
| cur_expr = cur_expr->next_same_hash; |
| } |
| |
| if (! found) |
| { |
| cur_expr = gcse_alloc (sizeof (struct expr)); |
| bytes_used += sizeof (struct expr); |
| if (table->table[hash] == NULL) |
| /* This is the first pattern that hashed to this index. */ |
| table->table[hash] = cur_expr; |
| else |
| /* Add EXPR to end of this hash chain. */ |
| last_expr->next_same_hash = cur_expr; |
| |
| /* Set the fields of the expr element. |
| We must copy X because it can be modified when copy propagation is |
| performed on its operands. */ |
| cur_expr->expr = copy_rtx (x); |
| cur_expr->bitmap_index = table->n_elems++; |
| cur_expr->next_same_hash = NULL; |
| cur_expr->antic_occr = NULL; |
| cur_expr->avail_occr = NULL; |
| } |
| |
| /* Now record the occurrence. */ |
| cur_occr = cur_expr->avail_occr; |
| |
| /* Search for another occurrence in the same basic block. */ |
| while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn)) |
| { |
| /* If an occurrence isn't found, save a pointer to the end of |
| the list. */ |
| last_occr = cur_occr; |
| cur_occr = cur_occr->next; |
| } |
| |
| if (cur_occr) |
| /* Found another instance of the expression in the same basic block. |
| Prefer this occurrence to the currently recorded one. We want the |
| last one in the block and the block is scanned from start to end. */ |
| cur_occr->insn = insn; |
| else |
| { |
| /* First occurrence of this expression in this basic block. */ |
| cur_occr = gcse_alloc (sizeof (struct occr)); |
| bytes_used += sizeof (struct occr); |
| |
| /* First occurrence of this expression in any block? */ |
| if (cur_expr->avail_occr == NULL) |
| cur_expr->avail_occr = cur_occr; |
| else |
| last_occr->next = cur_occr; |
| |
| cur_occr->insn = insn; |
| cur_occr->next = NULL; |
| } |
| } |
| |
| /* Determine whether the rtx X should be treated as a constant for |
| the purposes of GCSE's constant propagation. */ |
| |
| static bool |
| gcse_constant_p (rtx x) |
| { |
| /* Consider a COMPARE of two integers constant. */ |
| if (GET_CODE (x) == COMPARE |
| && GET_CODE (XEXP (x, 0)) == CONST_INT |
| && GET_CODE (XEXP (x, 1)) == CONST_INT) |
    return true;

  /* Consider a COMPARE of two identical registers to be constant
     if they are not floating-point registers.  */
  if (GET_CODE (x) == COMPARE
| && GET_CODE (XEXP (x, 0)) == REG |
| && GET_CODE (XEXP (x, 1)) == REG |
| && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1)) |
| && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0))) |
| && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1)))) |
| return true; |
| |
| if (GET_CODE (x) == CONSTANT_P_RTX) |
| return false; |
| |
| return CONSTANT_P (x); |
| } |
| |
| /* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or |
| expression one). */ |
| |
| static void |
| hash_scan_set (rtx pat, rtx insn, struct hash_table *table) |
| { |
| rtx src = SET_SRC (pat); |
| rtx dest = SET_DEST (pat); |
| rtx note; |
| |
| if (GET_CODE (src) == CALL) |
| hash_scan_call (src, insn, table); |
| |
| else if (GET_CODE (dest) == REG) |
| { |
| unsigned int regno = REGNO (dest); |
| rtx tmp; |
| |
      /* If this is a single set and we are doing constant propagation,
	 see if a REG_EQUAL/REG_EQUIV note shows it to be equivalent to a
	 constant.  */
| if (table->set_p && (note = find_reg_equal_equiv_note (insn)) != 0 |
| && gcse_constant_p (XEXP (note, 0))) |
| src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src); |
| |
| /* Only record sets of pseudo-regs in the hash table. */ |
| if (! table->set_p |
| && regno >= FIRST_PSEUDO_REGISTER |
| /* Don't GCSE something if we can't do a reg/reg copy. */ |
| && can_copy_p (GET_MODE (dest)) |
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH_REGION notes so disable GCSE on these
	     for now.  */
| && !find_reg_note (insn, REG_EH_REGION, NULL_RTX) |
| /* Is SET_SRC something we want to gcse? */ |
| && want_to_gcse_p (src) |
| /* Don't CSE a nop. */ |
| && ! set_noop_p (pat) |
	  /* Don't GCSE if it has an attached REG_EQUIV note.
	     At this point only function parameters should have REG_EQUIV
	     notes, and if the argument slot is used somewhere explicitly,
	     it means the address of the parameter has been taken, so we
	     should not extend the lifetime of the pseudo.  */
| && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0 |
| || GET_CODE (XEXP (note, 0)) != MEM)) |
| { |
| /* An expression is not anticipatable if its operands are |
| modified before this insn or if this is not the only SET in |
| this insn. */ |
| int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn); |
| /* An expression is not available if its operands are |
| subsequently modified, including this insn. It's also not |
| available if this is a branch, because we can't insert |
| a set after the branch. */ |
| int avail_p = (oprs_available_p (src, insn) |
| && ! JUMP_P (insn)); |
| |
| insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p, table); |
| } |
| |
| /* Record sets for constant/copy propagation. */ |
| else if (table->set_p |
| && regno >= FIRST_PSEUDO_REGISTER |
| && ((GET_CODE (src) == REG |
| && REGNO (src) >= FIRST_PSEUDO_REGISTER |
| && can_copy_p (GET_MODE (dest)) |
| && REGNO (src) != regno) |
| || gcse_constant_p (src)) |
| /* A copy is not available if its src or dest is subsequently |
| modified. Here we want to search from INSN+1 on, but |
| oprs_available_p searches from INSN on. */ |
| && (insn == BB_END (BLOCK_FOR_INSN (insn)) |
| || ((tmp = next_nonnote_insn (insn)) != NULL_RTX |
| && oprs_available_p (pat, tmp)))) |
| insert_set_in_table (pat, insn, table); |
| } |
  /* In the case of a store we want to consider the memory value to be
     available in the REG that was stored into memory.  This makes it
     possible to remove loads that are made redundant by stores to the
     same location.  */
| else if (flag_gcse_las && GET_CODE (src) == REG && GET_CODE (dest) == MEM) |
| { |
| unsigned int regno = REGNO (src); |
| |
| /* Do not do this for constant/copy propagation. */ |
| if (! table->set_p |
| /* Only record sets of pseudo-regs in the hash table. */ |
| && regno >= FIRST_PSEUDO_REGISTER |
| /* Don't GCSE something if we can't do a reg/reg copy. */ |
| && can_copy_p (GET_MODE (src)) |
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH_REGION notes so disable GCSE on these
	     for now.  */
| && ! find_reg_note (insn, REG_EH_REGION, NULL_RTX) |
| /* Is SET_DEST something we want to gcse? */ |
| && want_to_gcse_p (dest) |
| /* Don't CSE a nop. */ |
| && ! set_noop_p (pat) |
	  /* Don't GCSE if it has an attached REG_EQUIV note.
	     At this point only function parameters should have REG_EQUIV
	     notes, and if the argument slot is used somewhere explicitly,
	     it means the address of the parameter has been taken, so we
	     should not extend the lifetime of the pseudo.  */
| && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0 |
| || GET_CODE (XEXP (note, 0)) != MEM)) |
| { |
| /* Stores are never anticipatable. */ |
| int antic_p = 0; |
| /* An expression is not available if its operands are |
| subsequently modified, including this insn. It's also not |
| available if this is a branch, because we can't insert |
| a set after the branch. */ |
| int avail_p = oprs_available_p (dest, insn) |
| && ! JUMP_P (insn); |
| |
| /* Record the memory expression (DEST) in the hash table. */ |
| insert_expr_in_table (dest, GET_MODE (dest), insn, |
| antic_p, avail_p, table); |
| } |
| } |
| } |
| |
| static void |
| hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, |
| struct hash_table *table ATTRIBUTE_UNUSED) |
| { |
| /* Currently nothing to do. */ |
| } |
| |
| static void |
| hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, |
| struct hash_table *table ATTRIBUTE_UNUSED) |
| { |
| /* Currently nothing to do. */ |
| } |
| |
| /* Process INSN and add hash table entries as appropriate. |
| |
| Only available expressions that set a single pseudo-reg are recorded. |
| |
| Single sets in a PARALLEL could be handled, but it's an extra complication |
| that isn't dealt with right now. The trick is handling the CLOBBERs that |
| are also in the PARALLEL. Later. |
| |
| If SET_P is nonzero, this is for the assignment hash table, |
| otherwise it is for the expression hash table. |
   If IN_LIBCALL_BLOCK is nonzero, we are in a libcall block and should
   not record any expressions.  */
| |
| static void |
| hash_scan_insn (rtx insn, struct hash_table *table, int in_libcall_block) |
| { |
| rtx pat = PATTERN (insn); |
| int i; |
| |
| if (in_libcall_block) |
| return; |
| |
| /* Pick out the sets of INSN and for other forms of instructions record |
| what's been modified. */ |
| |
| if (GET_CODE (pat) == SET) |
| hash_scan_set (pat, insn, table); |
| else if (GET_CODE (pat) == PARALLEL) |
| for (i = 0; i < XVECLEN (pat, 0); i++) |
| { |
| rtx x = XVECEXP (pat, 0, i); |
| |
| if (GET_CODE (x) == SET) |
| hash_scan_set (x, insn, table); |
| else if (GET_CODE (x) == CLOBBER) |
| hash_scan_clobber (x, insn, table); |
| else if (GET_CODE (x) == CALL) |
| hash_scan_call (x, insn, table); |
| } |
| |
| else if (GET_CODE (pat) == CLOBBER) |
| hash_scan_clobber (pat, insn, table); |
| else if (GET_CODE (pat) == CALL) |
| hash_scan_call (pat, insn, table); |
| } |
| |
| static void |
| dump_hash_table (FILE *file, const char *name, struct hash_table *table) |
| { |
| int i; |
| /* Flattened out table, so it's printed in proper order. */ |
| struct expr **flat_table; |
| unsigned int *hash_val; |
| struct expr *expr; |
| |
| flat_table = xcalloc (table->n_elems, sizeof (struct expr *)); |
| hash_val = xmalloc (table->n_elems * sizeof (unsigned int)); |
| |
| for (i = 0; i < (int) table->size; i++) |
| for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash) |
| { |
| flat_table[expr->bitmap_index] = expr; |
| hash_val[expr->bitmap_index] = i; |
| } |
| |
| fprintf (file, "%s hash table (%d buckets, %d entries)\n", |
| name, table->size, table->n_elems); |
| |
| for (i = 0; i < (int) table->n_elems; i++) |
| if (flat_table[i] != 0) |
| { |
| expr = flat_table[i]; |
| fprintf (file, "Index %d (hash value %d)\n ", |
| expr->bitmap_index, hash_val[i]); |
| print_rtl (file, expr->expr); |
| fprintf (file, "\n"); |
| } |
| |
| fprintf (file, "\n"); |
| |
| free (flat_table); |
| free (hash_val); |
| } |
| |
| /* Record register first/last/block set information for REGNO in INSN. |
| |
| first_set records the first place in the block where the register |
| is set and is used to compute "anticipatability". |
| |
| last_set records the last place in the block where the register |
| is set and is used to compute "availability". |
| |
| last_bb records the block for which first_set and last_set are |
| valid, as a quick test to invalidate them. |
| |
| reg_set_in_block records whether the register is set in the block |
| and is used to compute "transparency". */ |
| |
| static void |
| record_last_reg_set_info (rtx insn, int regno) |
| { |
| struct reg_avail_info *info = ®_avail_info[regno]; |
| int cuid = INSN_CUID (insn); |
| |
| info->last_set = cuid; |
| if (info->last_bb != current_bb) |
| { |
| info->last_bb = current_bb; |
| info->first_set = cuid; |
| SET_BIT (reg_set_in_block[current_bb->index], regno); |
| } |
| } |
| |
| |
| /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn. |
| Note we store a pair of elements in the list, so they have to be |
| taken off pairwise. */ |
| |
| static void |
| canon_list_insert (rtx dest ATTRIBUTE_UNUSED, rtx unused1 ATTRIBUTE_UNUSED, |
| void * v_insn) |
| { |
| rtx dest_addr, insn; |
| int bb; |
| |
| while (GET_CODE (dest) == SUBREG |
| || GET_CODE (dest) == ZERO_EXTRACT |
| || GET_CODE (dest) == SIGN_EXTRACT |
| || GET_CODE (dest) == STRICT_LOW_PART) |
| dest = XEXP (dest, 0); |
| |
| /* If DEST is not a MEM, then it will not conflict with a load. Note |
| that function calls are assumed to clobber memory, but are handled |
| elsewhere. */ |
| |
| if (GET_CODE (dest) != MEM) |
| return; |
| |
| dest_addr = get_addr (XEXP (dest, 0)); |
| dest_addr = canon_rtx (dest_addr); |
| insn = (rtx) v_insn; |
| bb = BLOCK_NUM (insn); |
| |
| canon_modify_mem_list[bb] = |
| alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]); |
| canon_modify_mem_list[bb] = |
| alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]); |
| bitmap_set_bit (canon_modify_mem_list_set, bb); |
| } |
| |
| /* Record memory modification information for INSN. We do not actually care |
| about the memory location(s) that are set, or even how they are set (consider |
| a CALL_INSN). We merely need to record which insns modify memory. */ |
| |
| static void |
| record_last_mem_set_info (rtx insn) |
| { |
| int bb = BLOCK_NUM (insn); |
| |
| /* load_killed_in_block_p will handle the case of calls clobbering |
| everything. */ |
| modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]); |
| bitmap_set_bit (modify_mem_list_set, bb); |
| |
| if (GET_CODE (insn) == CALL_INSN) |
| { |
      /* Note that traversals of this list (other than for freeing)
	 stop after encountering a CALL_INSN, so there is no need to
	 insert a pair of items, as canon_list_insert does.  */
| canon_modify_mem_list[bb] = |
| alloc_INSN_LIST (insn, canon_modify_mem_list[bb]); |
| bitmap_set_bit (canon_modify_mem_list_set, bb); |
| } |
| else |
| note_stores (PATTERN (insn), canon_list_insert, (void*) insn); |
| } |
| |
| /* Called from compute_hash_table via note_stores to handle one |
| SET or CLOBBER in an insn. DATA is really the instruction in which |
| the SET is taking place. */ |
| |
| static void |
| record_last_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data) |
| { |
| rtx last_set_insn = (rtx) data; |
| |
| if (GET_CODE (dest) == SUBREG) |
| dest = SUBREG_REG (dest); |
| |
| if (GET_CODE (dest) == REG) |
| record_last_reg_set_info (last_set_insn, REGNO (dest)); |
| else if (GET_CODE (dest) == MEM |
| /* Ignore pushes, they clobber nothing. */ |
| && ! push_operand (dest, GET_MODE (dest))) |
| record_last_mem_set_info (last_set_insn); |
| } |
| |
| /* Top level function to create an expression or assignment hash table. |
| |
| Expression entries are placed in the hash table if |
| - they are of the form (set (pseudo-reg) src), |
| - src is something we want to perform GCSE on, |
| - none of the operands are subsequently modified in the block |
| |
| Assignment entries are placed in the hash table if |
| - they are of the form (set (pseudo-reg) src), |
| - src is something we want to perform const/copy propagation on, |
| - none of the operands or target are subsequently modified in the block |
| |
| Currently src must be a pseudo-reg or a const_int. |
| |
| TABLE is the table computed. */ |
| |
| static void |
| compute_hash_table_work (struct hash_table *table) |
| { |
| unsigned int i; |
| |
| /* While we compute the hash table we also compute a bit array of which |
| registers are set in which blocks. |
| ??? This isn't needed during const/copy propagation, but it's cheap to |
| compute. Later. */ |
| sbitmap_vector_zero (reg_set_in_block, last_basic_block); |
| |
  /* Re-cache any INSN_LIST nodes we have allocated.  */
| clear_modify_mem_tables (); |
| /* Some working arrays used to track first and last set in each block. */ |
| reg_avail_info = gmalloc (max_gcse_regno * sizeof (struct reg_avail_info)); |
| |
| for (i = 0; i < max_gcse_regno; ++i) |
| reg_avail_info[i].last_bb = NULL; |
| |
| FOR_EACH_BB (current_bb) |
| { |
| rtx insn; |
| unsigned int regno; |
| int in_libcall_block; |
| |
| /* First pass over the instructions records information used to |
| determine when registers and memory are first and last set. |
| ??? hard-reg reg_set_in_block computation |
| could be moved to compute_sets since they currently don't change. */ |
| |
| for (insn = BB_HEAD (current_bb); |
| insn && insn != NEXT_INSN (BB_END (current_bb)); |
| insn = NEXT_INSN (insn)) |
| { |
| if (! INSN_P (insn)) |
| continue; |
| |
| if (GET_CODE (insn) == CALL_INSN) |
| { |
| bool clobbers_all = false; |
| #ifdef NON_SAVING_SETJMP |
| if (NON_SAVING_SETJMP |
| && find_reg_note (insn, REG_SETJMP, NULL_RTX)) |
| clobbers_all = true; |
| #endif |
| |
| for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) |
| if (clobbers_all |
| || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)) |
| record_last_reg_set_info (insn, regno); |
| |
| mark_call (insn); |
| } |
| |
| note_stores (PATTERN (insn), record_last_set_info, insn); |
| } |
| |
| /* Insert implicit sets in the hash table. */ |
| if (table->set_p |
| && implicit_sets[current_bb->index] != NULL_RTX) |
| hash_scan_set (implicit_sets[current_bb->index], |
| BB_HEAD (current_bb), table); |
| |
| /* The next pass builds the hash table. */ |
| |
| for (insn = BB_HEAD (current_bb), in_libcall_block = 0; |
| insn && insn != NEXT_INSN (BB_END (current_bb)); |
| insn = NEXT_INSN (insn)) |
| if (INSN_P (insn)) |
| { |
| if (find_reg_note (insn, REG_LIBCALL, NULL_RTX)) |
| in_libcall_block = 1; |
| else if (table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX)) |
| in_libcall_block = 0; |
| hash_scan_insn (insn, table, in_libcall_block); |
| if (!table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX)) |
| in_libcall_block = 0; |
| } |
| } |
| |
| free (reg_avail_info); |
| reg_avail_info = NULL; |
| } |
| |
| /* Allocate space for the set/expr hash TABLE. |
| N_INSNS is the number of instructions in the function. |
| It is used to determine the number of buckets to use. |
| SET_P determines whether set or expression table will |
| be created. */ |
| |
| static void |
| alloc_hash_table (int n_insns, struct hash_table *table, int set_p) |
| { |
| int n; |
| |
| table->size = n_insns / 4; |
| if (table->size < 11) |
| table->size = 11; |
| |
| /* Attempt to maintain efficient use of hash table. |
| Making it an odd number is simplest for now. |
| ??? Later take some measurements. */ |
| table->size |= 1; |
| n = table->size * sizeof (struct expr *); |
| table->table = gmalloc (n); |
| table->set_p = set_p; |
| } |
| |
| /* Free things allocated by alloc_hash_table. */ |
| |
| static void |
| free_hash_table (struct hash_table *table) |
| { |
| free (table->table); |
| } |
| |
/* Compute the hash TABLE, either the set table used for copy/const
   propagation or the expression table.  */
| |
| static void |
| compute_hash_table (struct hash_table *table) |
| { |
| /* Initialize count of number of entries in hash table. */ |
| table->n_elems = 0; |
| memset (table->table, 0, table->size * sizeof (struct expr *)); |
| |
| compute_hash_table_work (table); |
| } |
| |
| /* Expression tracking support. */ |
| |
/* Look up pattern PAT in the expression TABLE.
| The result is a pointer to the table entry, or NULL if not found. */ |
| |
| static struct expr * |
| lookup_expr (rtx pat, struct hash_table *table) |
| { |
| int do_not_record_p; |
| unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p, |
| table->size); |
| struct expr *expr; |
| |
| if (do_not_record_p) |
| return NULL; |
| |
| expr = table->table[hash]; |
| |
| while (expr && ! expr_equiv_p (expr->expr, pat)) |
| expr = expr->next_same_hash; |
| |
| return expr; |
| } |
| |
/* Look up REGNO in the set TABLE.  The result is a pointer to the
| table entry, or NULL if not found. */ |
| |
| static struct expr * |
| lookup_set (unsigned int regno, struct hash_table *table) |
| { |
| unsigned int hash = hash_set (regno, table->size); |
| struct expr *expr; |
| |
| expr = table->table[hash]; |
| |
| while (expr && REGNO (SET_DEST (expr->expr)) != regno) |
| expr = expr->next_same_hash; |
| |
| return expr; |
| } |
| |
| /* Return the next entry for REGNO in list EXPR. */ |
| |
| static struct expr * |
| next_set (unsigned int regno, struct expr *expr) |
| { |
| do |
| expr = expr->next_same_hash; |
| while (expr && REGNO (SET_DEST (expr->expr)) != regno); |
| |
| return expr; |
| } |
| |
| /* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node |
| types may be mixed. */ |
| |
| static void |
| free_insn_expr_list_list (rtx *listp) |
| { |
| rtx list, next; |
| |
| for (list = *listp; list ; list = next) |
| { |
| next = XEXP (list, 1); |
| if (GET_CODE (list) == EXPR_LIST) |
| free_EXPR_LIST_node (list); |
| else |
| free_INSN_LIST_node (list); |
| } |
| |
| *listp = NULL; |
| } |
| |
| /* Clear canon_modify_mem_list and modify_mem_list tables. */ |
| static void |
| clear_modify_mem_tables (void) |
| { |
| int i; |
| |
| EXECUTE_IF_SET_IN_BITMAP |
| (modify_mem_list_set, 0, i, free_INSN_LIST_list (modify_mem_list + i)); |
| bitmap_clear (modify_mem_list_set); |
| |
| EXECUTE_IF_SET_IN_BITMAP |
| (canon_modify_mem_list_set, 0, i, |
| free_insn_expr_list_list (canon_modify_mem_list + i)); |
| bitmap_clear (canon_modify_mem_list_set); |
| } |
| |
| /* Release memory used by modify_mem_list_set and canon_modify_mem_list_set. */ |
| |
| static void |
| free_modify_mem_tables (void) |
| { |
| clear_modify_mem_tables (); |
| free (modify_mem_list); |
| free (canon_modify_mem_list); |
| modify_mem_list = 0; |
| canon_modify_mem_list = 0; |
| } |
| |
| /* Reset tables used to keep track of what's still available [since the |
| start of the block]. */ |
| |
| static void |
| reset_opr_set_tables (void) |
| { |
| /* Maintain a bitmap of which regs have been set since beginning of |
| the block. */ |
| CLEAR_REG_SET (reg_set_bitmap); |
| |
| /* Also keep a record of the last instruction to modify memory. |
| For now this is very trivial, we only record whether any memory |
| location has been modified. */ |
| clear_modify_mem_tables (); |
| } |
| |
| /* Return nonzero if the operands of X are not set before INSN in |
| INSN's basic block. */ |
| |
| static int |
| oprs_not_set_p (rtx x, rtx insn) |
| { |
| int i, j; |
| enum rtx_code code; |
| const char *fmt; |
| |
| if (x == 0) |
| return 1; |
| |
| code = GET_CODE (x); |
| switch (code) |
| { |
| case PC: |
| case CC0: |
| case CONST: |
| case CONST_INT: |
| case CONST_DOUBLE: |
| case CONST_VECTOR: |
| case SYMBOL_REF: |
| case LABEL_REF: |
| case ADDR_VEC: |
| case ADDR_DIFF_VEC: |
| return 1; |
| |
| case MEM: |
| if (load_killed_in_block_p (BLOCK_FOR_INSN (insn), |
| INSN_CUID (insn), x, 0)) |
| return 0; |
| else |
| return oprs_not_set_p (XEXP (x, 0), insn); |
| |
| case REG: |
| return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x)); |
| |
| default: |
| break; |
| } |
| |
| for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--) |
| { |
| if (fmt[i] == 'e') |
| { |
| /* If we are about to do the last recursive call |
| needed at this level, change it into iteration. |
| This function is called enough to be worth it. */ |
| if (i == 0) |
| return oprs_not_set_p (XEXP (x, i), insn); |
| |
| if (! oprs_not_set_p (XEXP (x, i), insn)) |
| return 0; |
| } |
| else if (fmt[i] == 'E') |
| for (j = 0; j < XVECLEN (x, i); j++) |
| if (! oprs_not_set_p (XVECEXP (x, i, j), insn)) |
| return 0; |
| } |
| |
| return 1; |
| } |
| |
| /* Mark things set by a CALL. */ |
| |
| static void |
| mark_call (rtx insn) |
| { |
| if (! CONST_OR_PURE_CALL_P (insn)) |
| record_last_mem_set_info (insn); |
| } |
| |
| /* Mark things set by a SET. */ |
| |
| static void |
| mark_set (rtx pat, rtx insn) |
| { |
| rtx dest = SET_DEST (pat); |
| |
| while (GET_CODE (dest) == SUBREG |
| || GET_CODE (dest) == ZERO_EXTRACT |
| || GET_CODE (dest) == SIGN_EXTRACT |
| || GET_CODE (dest) == STRICT_LOW_PART) |
| dest = XEXP (dest, 0); |
| |
| if (GET_CODE (dest) == REG) |
| SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest)); |
| else if (GET_CODE (dest) == MEM) |
| record_last_mem_set_info (insn); |
| |
| if (GET_CODE (SET_SRC (pat)) == CALL) |
| mark_call (insn); |
| } |
| |
| /* Record things set by a CLOBBER. */ |
| |
| static void |
| mark_clobber (rtx pat, rtx insn) |
| { |
| rtx clob = XEXP (pat, 0); |
| |
| while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART) |
| clob = XEXP (clob, 0); |
| |
| if (GET_CODE (clob) == REG) |
| SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob)); |
| else |
| record_last_mem_set_info (insn); |
| } |
| |
| /* Record things set by INSN. |
| This data is used by oprs_not_set_p. */ |
| |
| static void |
| mark_oprs_set (rtx insn) |
| { |
| rtx pat = PATTERN (insn); |
| int i; |
| |
| if (GET_CODE (pat) == SET) |
| mark_set (pat, insn); |
| else if (GET_CODE (pat) == PARALLEL) |
| for (i = 0; i < XVECLEN (pat, 0); i++) |
| { |
| rtx x = XVECEXP (pat, 0, i); |
| |
| if (GET_CODE (x) == SET) |
| mark_set (x, insn); |
| else if (GET_CODE (x) == CLOBBER) |
| mark_clobber (x, insn); |
| else if (GET_CODE (x) == CALL) |
| mark_call (insn); |
| } |
| |
| else if (GET_CODE (pat) == CLOBBER) |
| mark_clobber (pat, insn); |
| else if (GET_CODE (pat) == CALL) |
| mark_call (insn); |
| } |
| |
| |
| /* Classic GCSE reaching definition support. */ |
| |
| /* Allocate reaching def variables. */ |
| |
| static void |
| alloc_rd_mem (int n_blocks, int n_insns) |
| { |
| rd_kill = sbitmap_vector_alloc (n_blocks, n_insns); |
| sbitmap_vector_zero (rd_kill, n_blocks); |
| |
| rd_gen = sbitmap_vector_alloc (n_blocks, n_insns); |
| sbitmap_vector_zero (rd_gen, n_blocks); |
| |
| reaching_defs = sbitmap_vector_alloc (n_blocks, n_insns); |
| sbitmap_vector_zero (reaching_defs, n_blocks); |
| |
| rd_out = sbitmap_vector_alloc (n_blocks, n_insns); |
| sbitmap_vector_zero (rd_out, n_blocks); |
| } |
| |
| /* Free reaching def variables. */ |
| |
| static void |
| free_rd_mem (void) |
| { |
| sbitmap_vector_free (rd_kill); |
| sbitmap_vector_free (rd_gen); |
| sbitmap_vector_free (reaching_defs); |
| sbitmap_vector_free (rd_out); |
| } |
| |
/* INSN sets REGNO in BB.  Mark as killed in BB every other definition of
   REGNO, i.e. each setting of REGNO that occurs in some other block.  */
| |
| static void |
| handle_rd_kill_set (rtx insn, int regno, basic_block bb) |
| { |
| struct reg_set *this_reg; |
| |
| for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg ->next) |
| if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn)) |
| SET_BIT (rd_kill[bb->index], INSN_CUID (this_reg->insn)); |
| } |
| |
/* Compute the kill set for reaching definitions.  */
| |
| static void |
| compute_kill_rd (void) |
| { |
| int cuid; |
| unsigned int regno; |
| int i; |
| basic_block bb; |
| |
| /* For each block |
       For each set bit in `gen' of the block (i.e. each insn which
| generates a definition in the block) |
| Call the reg set by the insn corresponding to that bit regx |
| Look at the linked list starting at reg_set_table[regx] |
| For each setting of regx in the linked list, which is not in |
| this block |
| Set the bit in `kill' corresponding to that insn. */ |
| FOR_EACH_BB (bb) |
| for (cuid = 0; cuid < max_cuid; cuid++) |
| if (TEST_BIT (rd_gen[bb->index], cuid)) |
| { |
| rtx insn = CUID_INSN (cuid); |
| rtx pat = PATTERN (insn); |
| |
| if (GET_CODE (insn) == CALL_INSN) |
| { |
| for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) |
| if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)) |
| handle_rd_kill_set (insn, regno, bb); |
| } |
| |
| if (GET_CODE (pat) == PARALLEL) |
| { |
| for (i = XVECLEN (pat, 0) - 1; i >= 0; i--) |
| { |
| enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i)); |
| |
| if ((code == SET || code == CLOBBER) |
| && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG) |
| handle_rd_kill_set (insn, |
| REGNO (XEXP (XVECEXP (pat, 0, i), 0)), |
| bb); |
| } |
| } |
| else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG) |
| /* Each setting of this register outside of this block |
| must be marked in the set of kills in this block. */ |
| handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), bb); |
| } |
| } |
| |
| /* Compute the reaching definitions as in |
| Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman, |
| Chapter 10. It is the same algorithm as used for computing available |
| expressions but applied to the gens and kills of reaching definitions. */ |
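
/* In dataflow terms, the loop below iterates

     reaching_defs[bb] = union over predecessors p of rd_out[p]
     rd_out[bb]        = rd_gen[bb] | (reaching_defs[bb] & ~rd_kill[bb])

   to a fixed point, with rd_out[bb] seeded from rd_gen[bb].  */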
| |
| static void |
| compute_rd (void) |
| { |
| int changed, passes; |
| basic_block bb; |
| |
| FOR_EACH_BB (bb) |
| sbitmap_copy (rd_out[bb->index] /*dst*/, rd_gen[bb->index] /*src*/); |
| |
| passes = 0; |
| changed = 1; |
| while (changed) |
| { |
| changed = 0; |
| FOR_EACH_BB (bb) |
| { |
| sbitmap_union_of_preds (reaching_defs[bb->index], rd_out, bb->index); |
| changed |= sbitmap_union_of_diff_cg (rd_out[bb->index], rd_gen[bb->index], |
| reaching_defs[bb->index], rd_kill[bb->index]); |
| } |
| passes++; |
| } |
| |
| if (gcse_file) |
| fprintf (gcse_file, "reaching def computation: %d passes\n", passes); |
| } |
| |
| /* Classic GCSE available expression support. */ |
| |
| /* Allocate memory for available expression computation. */ |
| |
| static void |
| alloc_avail_expr_mem (int n_blocks, int n_exprs) |
| { |
| ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs); |
| sbitmap_vector_zero (ae_kill, n_blocks); |
| |
| ae_gen = sbitmap_vector_alloc (n_blocks, n_exprs); |
| sbitmap_vector_zero (ae_gen, n_blocks); |
| |
| ae_in = sbitmap_vector_alloc (n_blocks, n_exprs); |
| sbitmap_vector_zero (ae_in, n_blocks); |
| |
| ae_out = sbitmap_vector_alloc (n_blocks, n_exprs); |
| sbitmap_vector_zero (ae_out, n_blocks); |
| } |
| |
| static void |
| free_avail_expr_mem (void) |
| { |
| sbitmap_vector_free (ae_kill); |
| sbitmap_vector_free (ae_gen); |
| sbitmap_vector_free (ae_in); |
| sbitmap_vector_free (ae_out); |
| } |
| |
| /* Compute the set of available expressions generated in each basic block. */ |
| |
| static void |
| compute_ae_gen (struct hash_table *expr_hash_table) |
| { |
| unsigned int i; |
| struct expr *expr; |
| struct occr *occr; |
| |
| /* For each recorded occurrence of each expression, set ae_gen[bb][expr]. |
| This is all we have to do because an expression is not recorded if it |
| is not available, and the only expressions we want to work with are the |
| ones that are recorded. */ |
| for (i = 0; i < expr_hash_table->size; i++) |
| for (expr = expr_hash_table->table[i]; expr != 0; expr = expr->next_same_hash) |
| for (occr = expr->avail_occr; occr != 0; occr = occr->next) |
| SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index); |
| } |
| |
| /* Return nonzero if expression X is killed in BB. */ |
| |
| static int |
| expr_killed_p (rtx x, basic_block bb) |
| { |
| int i, j; |
| enum rtx_code code; |
| const char *fmt; |
| |
| if (x == 0) |
| return 1; |
| |
| code = GET_CODE (x); |
| switch (code) |
| { |
| case REG: |
| return TEST_BIT (reg_set_in_block[bb->index], REGNO (x)); |
| |
| case MEM: |
| if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0)) |
| return 1; |
| else |
| return expr_killed_p (XEXP (x, 0), bb); |
| |
| case PC: |
| case CC0: /*FIXME*/ |
| case CONST: |
| case CONST_INT: |
| case CONST_DOUBLE: |
| case CONST_VECTOR: |
| case SYMBOL_REF: |
| case LABEL_REF: |
| case ADDR_VEC: |
| case ADDR_DIFF_VEC: |
| return 0; |
| |
| default: |
| break; |
| } |
| |
| for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--) |
| { |
| if (fmt[i] == 'e') |
| { |
| /* If we are about to do the last recursive call |
| needed at this level, change it into iteration. |
| This function is called enough to be worth it. */ |
| if (i == 0) |
| return expr_killed_p (XEXP (x, i), bb); |
| else if (expr_killed_p (XEXP (x, i), bb)) |
| return 1; |
| } |
| else if (fmt[i] == 'E') |
| for (j = 0; j < XVECLEN (x, i); j++) |
| if (expr_killed_p (XVECEXP (x, i, j), bb)) |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
| /* Compute the set of available expressions killed in each basic block. */ |
| |
| static void |
| compute_ae_kill (sbitmap *ae_gen, sbitmap *ae_kill, |
| struct hash_table *expr_hash_table) |
| { |
| basic_block bb; |
| unsigned int i; |
| struct expr *expr; |
| |
| FOR_EACH_BB (bb) |
| for (i = 0; i < expr_hash_table->size; i++) |
| for (expr = expr_hash_table->table[i]; expr; expr = expr->next_same_hash) |
| { |
| /* Skip EXPR if generated in this block. */ |
| if (TEST_BIT (ae_gen[bb->index], expr->bitmap_index)) |
| continue; |
| |
| if (expr_killed_p (expr->expr, bb)) |
| SET_BIT (ae_kill[bb->index], expr->bitmap_index); |
| } |
| } |
| |
| /* Actually perform the Classic GCSE optimizations. */ |
| |
| /* Return nonzero if occurrence OCCR of expression EXPR reaches block BB. |
| |
| CHECK_SELF_LOOP is nonzero if we should consider a block reaching itself |
| as a positive reach. We want to do this when there are two computations |
| of the expression in the block. |
| |
| VISITED is a pointer to a working buffer for tracking which BB's have |
| been visited. It is NULL for the top-level call. |
| |
| We treat reaching expressions that go through blocks containing the same |
| reaching expression as "not reaching". E.g. if EXPR is generated in blocks |
| 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block |
| 2 as not reaching. The intent is to improve the probability of finding |
| only one reaching expression and to reduce register lifetimes by picking |
| the closest such expression. */ |
| |
| static int |
| expr_reaches_here_p_work (struct occr *occr, struct expr *expr, |
| basic_block bb, int check_self_loop, char *visited) |
| { |
| edge pred; |
| |
| for (pred = bb->pred; pred != NULL; pred = pred->pred_next) |
| { |
| basic_block pred_bb = pred->src; |
| |
| if (visited[pred_bb->index]) |
| /* This predecessor has already been visited. Nothing to do. */ |
| ; |
| else if (pred_bb == bb) |
| { |
| /* BB loops on itself. */ |
| if (check_self_loop |
| && TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index) |
| && BLOCK_NUM (occr->insn) == pred_bb->index) |
| return 1; |
| |
| visited[pred_bb->index] = 1; |
| } |
| |
| /* Ignore this predecessor if it kills the expression. */ |
| else if (TEST_BIT (ae_kill[pred_bb->index], expr->bitmap_index)) |
| visited[pred_bb->index] = 1; |
| |
| /* Does this predecessor generate this expression? */ |
| else if (TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index)) |
| { |
| /* Is this the occurrence we're looking for? |
| Note that there's only one generating occurrence per block |
| so we just need to check the block number. */ |
| if (BLOCK_NUM (occr->insn) == pred_bb->index) |
| return 1; |
| |
| visited[pred_bb->index] = 1; |
| } |
| |
| /* Neither gen nor kill. */ |
| else |
| { |
| visited[pred_bb->index] = 1; |
| if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop, |
| visited)) |
| |
| return 1; |
| } |
| } |
| |
| /* All paths have been checked. */ |
| return 0; |
| } |
| |
| /* This wrapper for expr_reaches_here_p_work() is to ensure that any |
| memory allocated for that function is returned. */ |
| |
| static int |
| expr_reaches_here_p (struct occr *occr, struct expr *expr, basic_block bb, |
| int check_self_loop) |
| { |
| int rval; |
| char *visited = xcalloc (last_basic_block, 1); |
| |
| rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited); |
| |
| free (visited); |
| return rval; |
| } |
| |
| /* Return the instruction that computes EXPR that reaches INSN's basic block. |
| If there is more than one such instruction, return NULL. |
| |
| Called only by handle_avail_expr. */ |
| |
| static rtx |
| computing_insn (struct expr *expr, rtx insn) |
| { |
| basic_block bb = BLOCK_FOR_INSN (insn); |
| |
| if (expr->avail_occr->next == NULL) |
| { |
| if (BLOCK_FOR_INSN (expr->avail_occr->insn) == bb) |
| /* The available expression is actually itself |
| (i.e. a loop in the flow graph) so do nothing. */ |
| return NULL; |
| |
      /* FIXME: This is the case where we found a pattern that was created
	 by a substitution that took place.  */
| } |
| else |
| { |
| /* Pattern is computed more than once. |
| Search backwards from this insn to see how many of these |
| computations actually reach this insn. */ |
| struct occr *occr; |
| rtx insn_computes_expr = NULL; |
| int can_reach = 0; |
| |
| for (occr = expr->avail_occr; occr != NULL; occr = occr->next) |
| { |
| if (BLOCK_FOR_INSN (occr->insn) == bb) |
| { |
| /* The expression is generated in this block. |
| The only time we care about this is when the expression |
| is generated later in the block [and thus there's a loop]. |
| We let the normal cse pass handle the other cases. */ |
| if (INSN_CUID (insn) < INSN_CUID (occr->insn) |
| && expr_reaches_here_p (occ
|