| /* Variable tracking routines for the GNU compiler. |
| Copyright (C) 2002-2021 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it |
| under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT |
| ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
| or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public |
| License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| /* This file contains the variable tracking pass.  It computes where |
| variables are located (which registers or where in memory) at each position |
| in the instruction stream and emits notes describing the locations. |
| Debug information (DWARF2 location lists) is then generated from |
| these notes. |
| With this debug information, it is possible to show variables |
| even when debugging optimized code. |
| |
| How does the variable tracking pass work? |
| |
| First, it scans the RTL code for uses, stores and clobbers (register/memory |
| references in instructions), for call insns and for stack adjustments, |
| separately for each basic block, and saves them in an array of micro |
| operations. |
| The micro operations of one instruction are ordered so that |
| pre-modifying stack adjustment < use < use with no var < call insn |
| < clobber < set < post-modifying stack adjustment |
| |
| Then, a forward dataflow analysis is performed to find out how the |
| locations of variables change through the code and to propagate the |
| variable locations along the control flow graph. |
| The IN set for basic block BB is computed as the union of the OUT sets |
| of BB's predecessors; the OUT set for BB is computed by copying the IN |
| set for BB and updating it according to the micro operations in BB. |
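| |
| Schematically (an illustrative restatement of the equations above, |
| not code from this pass): |
| |
| IN (BB) = union of OUT (PRED) over all predecessors PRED of BB |
| OUT (BB) = result of applying BB's micro operations, in order, |
| to a copy of IN (BB) |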
| |
| The IN and OUT sets for basic blocks consist of a current stack adjustment |
| (used for adjusting offsets of variables addressed using the stack pointer), |
| the table of structures describing the locations of parts of a variable, |
| and, for each physical register, a linked list of the variable parts |
| stored in that register, i.e. a list of triplets (reg, decl, offset) where |
| decl is REG_EXPR (reg) and offset is REG_OFFSET (reg).  The linked list |
| makes it possible to efficiently delete the appropriate variable parts |
| when we set or clobber the register. |
| |
| There may be more than one variable part in a register.  The linked lists |
| should be pretty short, so they are a good data structure here. |
| For example, in the following code the register allocator may assign the |
| same register to variables A and B, and both of them are then stored in |
| that register in CODE: |
| |
| if (cond) |
| set A; |
| else |
| set B; |
| CODE; |
| if (cond) |
| use A; |
| else |
| use B; |
| |
| Finally, the NOTE_INSN_VAR_LOCATION notes describing the variable locations |
| are emitted at appropriate positions in the RTL code.  Each such note |
| describes the location of one variable at the point in the instruction |
| stream where the note is.  There is no need to emit a note for each |
| variable before each instruction; we only emit these notes where the |
| location of a variable changes (this means that we also emit notes for |
| changes between the OUT set of the previous block and the IN set of the |
| current block). |
| |
| The notes consist of two parts: |
| 1. the declaration (from REG_EXPR or MEM_EXPR) |
| 2. the location of a variable - it is either a simple register/memory |
| reference (for simple variables, for example int), |
| or a parallel of register/memory references (for large variables |
| which consist of several parts, for example long long). |
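| |
| For example (an illustrative sketch of how such notes look in RTL |
| dumps; the register numbers and modes are made up), an int variable a |
| in one register and a long long variable b split across two registers |
| could be described as: |
| |
| (var_location a (reg:SI 1)) |
| (var_location b (parallel [(expr_list (reg:SI 1) (const_int 0)) |
| (expr_list (reg:SI 2) (const_int 4))])) |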
| |
| */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "cfghooks.h" |
| #include "alloc-pool.h" |
| #include "tree-pass.h" |
| #include "memmodel.h" |
| #include "tm_p.h" |
| #include "insn-config.h" |
| #include "regs.h" |
| #include "emit-rtl.h" |
| #include "recog.h" |
| #include "diagnostic.h" |
| #include "varasm.h" |
| #include "stor-layout.h" |
| #include "cfgrtl.h" |
| #include "cfganal.h" |
| #include "reload.h" |
| #include "calls.h" |
| #include "tree-dfa.h" |
| #include "tree-ssa.h" |
| #include "cselib.h" |
| #include "tree-pretty-print.h" |
| #include "rtl-iter.h" |
| #include "fibonacci_heap.h" |
| #include "print-rtl.h" |
| #include "function-abi.h" |
| |
| typedef fibonacci_heap <long, basic_block_def> bb_heap_t; |
| |
| /* var-tracking.c assumes that the tree code with the same numeric value as |
| the VALUE rtx code never appears in REG_EXPR/MEM_EXPRs and isn't a decl. |
| Currently that value is the same as IDENTIFIER_NODE, which has such |
| a property.  If this compile-time assertion ever fails, make sure that |
| the new tree code that equals (int) VALUE has the same property. */ |
| extern char check_value_val[(int) VALUE == (int) IDENTIFIER_NODE ? 1 : -1]; |
| |
| /* Type of micro operation. */ |
| enum micro_operation_type |
| { |
| MO_USE, /* Use location (REG or MEM). */ |
| MO_USE_NO_VAR, /* Use location which is not associated with a variable |
| or the variable is not trackable. */ |
| MO_VAL_USE, /* Use location which is associated with a value. */ |
| MO_VAL_LOC, /* Use location which appears in a debug insn. */ |
| MO_VAL_SET, /* Set location associated with a value. */ |
| MO_SET, /* Set location. */ |
| MO_COPY, /* Copy the same portion of a variable from one |
| location to another. */ |
| MO_CLOBBER, /* Clobber location. */ |
| MO_CALL, /* Call insn. */ |
| MO_ADJUST /* Adjust stack pointer. */ |
| |
| }; |
| |
| static const char * const ATTRIBUTE_UNUSED |
| micro_operation_type_name[] = { |
| "MO_USE", |
| "MO_USE_NO_VAR", |
| "MO_VAL_USE", |
| "MO_VAL_LOC", |
| "MO_VAL_SET", |
| "MO_SET", |
| "MO_COPY", |
| "MO_CLOBBER", |
| "MO_CALL", |
| "MO_ADJUST" |
| }; |
| |
| /* Where shall the note be emitted? BEFORE or AFTER the instruction. |
| Notes emitted as AFTER_CALL are to take effect during the call, |
| rather than after the call. */ |
| enum emit_note_where |
| { |
| EMIT_NOTE_BEFORE_INSN, |
| EMIT_NOTE_AFTER_INSN, |
| EMIT_NOTE_AFTER_CALL_INSN |
| }; |
| |
| /* Structure holding information about micro operation. */ |
| struct micro_operation |
| { |
| /* Type of micro operation. */ |
| enum micro_operation_type type; |
| |
| /* The instruction which the micro operation is in, for MO_USE, |
| MO_USE_NO_VAR, MO_CALL and MO_ADJUST, or the subsequent |
| instruction or note in the original flow (before any var-tracking |
| notes are inserted, to simplify emission of notes), for MO_SET |
| and MO_CLOBBER. */ |
| rtx_insn *insn; |
| |
| union { |
| /* Location. For MO_SET and MO_COPY, this is the SET that |
| performs the assignment, if known, otherwise it is the target |
| of the assignment. For MO_VAL_USE and MO_VAL_SET, it is a |
| CONCAT of the VALUE and the LOC associated with it. For |
| MO_VAL_LOC, it is a CONCAT of the VALUE and the VAR_LOCATION |
| associated with it. */ |
| rtx loc; |
| |
| /* Stack adjustment. */ |
| HOST_WIDE_INT adjust; |
| } u; |
| }; |
| |
| |
| /* A declaration of a variable, or an RTL value being handled like a |
| declaration. */ |
| typedef void *decl_or_value; |
| |
| /* Return true if a decl_or_value DV is a DECL or NULL. */ |
| static inline bool |
| dv_is_decl_p (decl_or_value dv) |
| { |
| return !dv || (int) TREE_CODE ((tree) dv) != (int) VALUE; |
| } |
| |
| /* Return true if a decl_or_value is a VALUE rtl. */ |
| static inline bool |
| dv_is_value_p (decl_or_value dv) |
| { |
| return dv && !dv_is_decl_p (dv); |
| } |
| |
| /* Return the decl in the decl_or_value. */ |
| static inline tree |
| dv_as_decl (decl_or_value dv) |
| { |
| gcc_checking_assert (dv_is_decl_p (dv)); |
| return (tree) dv; |
| } |
| |
| /* Return the value in the decl_or_value. */ |
| static inline rtx |
| dv_as_value (decl_or_value dv) |
| { |
| gcc_checking_assert (dv_is_value_p (dv)); |
| return (rtx)dv; |
| } |
| |
| /* Return the opaque pointer in the decl_or_value. */ |
| static inline void * |
| dv_as_opaque (decl_or_value dv) |
| { |
| return dv; |
| } |
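| |
| /* For example (an illustrative sketch only; process_value and |
| process_decl are hypothetical callers, not functions in this file), |
| code holding a decl_or_value discriminates it like this: |
| |
| if (dv_is_value_p (dv)) |
| process_value (dv_as_value (dv)); // a cselib VALUE rtx |
| else |
| process_decl (dv_as_decl (dv)); // a tree declaration or NULL |
| */ |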
| |
| |
| /* Description of location of a part of a variable.  The content of a physical |
| register is described by a chain of these structures. |
| The chains are pretty short (usually 1 or 2 elements), and thus a |
| chain is a good data structure here. */ |
| struct attrs |
| { |
| /* Pointer to next member of the list. */ |
| attrs *next; |
| |
| /* The rtx of register. */ |
| rtx loc; |
| |
| /* The declaration corresponding to LOC. */ |
| decl_or_value dv; |
| |
| /* Offset from start of DECL. */ |
| HOST_WIDE_INT offset; |
| }; |
| |
| /* Structure for chaining the locations. */ |
| struct location_chain |
| { |
| /* Next element in the chain. */ |
| location_chain *next; |
| |
| /* The location (REG, MEM or VALUE). */ |
| rtx loc; |
| |
| /* The "value" stored in this location. */ |
| rtx set_src; |
| |
| /* Initialized? */ |
| enum var_init_status init; |
| }; |
| |
| /* A vector of loc_exp_dep holds the active dependencies of a one-part |
| DV on VALUEs, i.e., the VALUEs expanded so as to form the current |
| location of DV.  Each entry is also part of VALUE's linked-list of |
| backlinks back to DV. */ |
| struct loc_exp_dep |
| { |
| /* The dependent DV. */ |
| decl_or_value dv; |
| /* The dependency VALUE or DEBUG_EXPR. */ |
| rtx value; |
| /* The next entry in VALUE's backlinks list. */ |
| struct loc_exp_dep *next; |
| /* A pointer to the pointer to this entry (head or prev's next) in |
| the doubly-linked list. */ |
| struct loc_exp_dep **pprev; |
| }; |
| |
| |
| /* This data structure holds information about the depth of a variable |
| expansion. */ |
| struct expand_depth |
| { |
| /* This measures the complexity of the expanded expression. It |
| grows by one for each level of expansion that adds more than one |
| operand. */ |
| int complexity; |
| /* This counts the number of ENTRY_VALUE expressions in an |
| expansion. We want to minimize their use. */ |
| int entryvals; |
| }; |
| |
| /* Type for dependencies actively used when expanding FROM into cur_loc. */ |
| typedef vec<loc_exp_dep, va_heap, vl_embed> deps_vec; |
| |
| /* This data structure is allocated for one-part variables at the time |
| of emitting notes. */ |
| struct onepart_aux |
| { |
| /* Doubly-linked list of dependent DVs. These are DVs whose cur_loc |
| computation used the expansion of this variable, and that ought |
| to be notified should this variable change. If the DV's cur_loc |
| expanded to NULL, all components of the loc list are regarded as |
| active, so that any changes in them give us a chance to get a |
| location. Otherwise, only components of the loc that expanded to |
| non-NULL are regarded as active dependencies. */ |
| loc_exp_dep *backlinks; |
| /* This holds the LOC that was expanded into cur_loc. We need only |
| mark a one-part variable as changed if the FROM loc is removed, |
| or if it has no known location and a loc is added, or if it gets |
| a change notification from any of its active dependencies. */ |
| rtx from; |
| /* The depth of the cur_loc expression. */ |
| expand_depth depth; |
| /* Dependencies actively used when expanding FROM into cur_loc. */ |
| deps_vec deps; |
| }; |
| |
| /* Structure describing one part of a variable. */ |
| struct variable_part |
| { |
| /* Chain of locations of the part. */ |
| location_chain *loc_chain; |
| |
| /* Location which was last emitted to location list. */ |
| rtx cur_loc; |
| |
| union variable_aux |
| { |
| /* The offset in the variable, if !var->onepart. */ |
| HOST_WIDE_INT offset; |
| |
| /* Pointer to auxiliary data, if var->onepart and emit_notes. */ |
| struct onepart_aux *onepaux; |
| } aux; |
| }; |
| |
| /* Maximum number of location parts. */ |
| #define MAX_VAR_PARTS 16 |
| |
| /* Enumeration type used to discriminate various types of one-part |
| variables. */ |
| enum onepart_enum |
| { |
| /* Not a one-part variable. */ |
| NOT_ONEPART = 0, |
| /* A one-part DECL that is not a DEBUG_EXPR_DECL. */ |
| ONEPART_VDECL = 1, |
| /* A DEBUG_EXPR_DECL. */ |
| ONEPART_DEXPR = 2, |
| /* A VALUE. */ |
| ONEPART_VALUE = 3 |
| }; |
| |
| /* Structure describing where the variable is located. */ |
| struct variable |
| { |
| /* The declaration of the variable, or an RTL value being handled |
| like a declaration. */ |
| decl_or_value dv; |
| |
| /* Reference count. */ |
| int refcount; |
| |
| /* Number of variable parts. */ |
| char n_var_parts; |
| |
| /* What type of DV this is, according to enum onepart_enum. */ |
| ENUM_BITFIELD (onepart_enum) onepart : CHAR_BIT; |
| |
| /* True if this variable_def struct is currently in the |
| changed_variables hash table. */ |
| bool in_changed_variables; |
| |
| /* The variable parts. */ |
| variable_part var_part[1]; |
| }; |
| |
| /* Pointer to the BB's information specific to variable tracking pass. */ |
| #define VTI(BB) ((variable_tracking_info *) (BB)->aux) |
| |
| /* Return MEM_OFFSET (MEM) as a HOST_WIDE_INT, or 0 if we can't. */ |
| |
| static inline HOST_WIDE_INT |
| int_mem_offset (const_rtx mem) |
| { |
| HOST_WIDE_INT offset; |
| if (MEM_OFFSET_KNOWN_P (mem) && MEM_OFFSET (mem).is_constant (&offset)) |
| return offset; |
| return 0; |
| } |
| |
| #if CHECKING_P && (GCC_VERSION >= 2007) |
| |
| /* Access VAR's Ith part's offset, checking that it's not a one-part |
| variable. */ |
| #define VAR_PART_OFFSET(var, i) __extension__ \ |
| (*({ variable *const __v = (var); \ |
| gcc_checking_assert (!__v->onepart); \ |
| &__v->var_part[(i)].aux.offset; })) |
| |
| /* Access VAR's one-part auxiliary data, checking that it is a |
| one-part variable. */ |
| #define VAR_LOC_1PAUX(var) __extension__ \ |
| (*({ variable *const __v = (var); \ |
| gcc_checking_assert (__v->onepart); \ |
| &__v->var_part[0].aux.onepaux; })) |
| |
| #else |
| #define VAR_PART_OFFSET(var, i) ((var)->var_part[(i)].aux.offset) |
| #define VAR_LOC_1PAUX(var) ((var)->var_part[0].aux.onepaux) |
| #endif |
| |
| /* These are accessor macros for the one-part auxiliary data. When |
| convenient for users, they're guarded by tests that the data was |
| allocated. */ |
| #define VAR_LOC_DEP_LST(var) (VAR_LOC_1PAUX (var) \ |
| ? VAR_LOC_1PAUX (var)->backlinks \ |
| : NULL) |
| #define VAR_LOC_DEP_LSTP(var) (VAR_LOC_1PAUX (var) \ |
| ? &VAR_LOC_1PAUX (var)->backlinks \ |
| : NULL) |
| #define VAR_LOC_FROM(var) (VAR_LOC_1PAUX (var)->from) |
| #define VAR_LOC_DEPTH(var) (VAR_LOC_1PAUX (var)->depth) |
| #define VAR_LOC_DEP_VEC(var) var_loc_dep_vec (var) |
| |
| /* Implements the VAR_LOC_DEP_VEC above as a function to work around |
| a bogus -Wnonnull (PR c/95554). */ |
| |
| static inline deps_vec* |
| var_loc_dep_vec (variable *var) |
| { |
| return VAR_LOC_1PAUX (var) ? &VAR_LOC_1PAUX (var)->deps : NULL; |
| } |
| |
| |
| typedef unsigned int dvuid; |
| |
| /* Return the uid of DV. */ |
| |
| static inline dvuid |
| dv_uid (decl_or_value dv) |
| { |
| if (dv_is_value_p (dv)) |
| return CSELIB_VAL_PTR (dv_as_value (dv))->uid; |
| else |
| return DECL_UID (dv_as_decl (dv)); |
| } |
| |
| /* Compute the hash from the uid. */ |
| |
| static inline hashval_t |
| dv_uid2hash (dvuid uid) |
| { |
| return uid; |
| } |
| |
| /* The hash function for a decl_or_value, used by the variable hash tables. */ |
| |
| static inline hashval_t |
| dv_htab_hash (decl_or_value dv) |
| { |
| return dv_uid2hash (dv_uid (dv)); |
| } |
| |
| static void variable_htab_free (void *); |
| |
| /* Variable hashtable helpers. */ |
| |
| struct variable_hasher : pointer_hash <variable> |
| { |
| typedef void *compare_type; |
| static inline hashval_t hash (const variable *); |
| static inline bool equal (const variable *, const void *); |
| static inline void remove (variable *); |
| }; |
| |
| /* The hash function for variable_htab; compute the hash value from |
| the declaration of variable V. */ |
| |
| inline hashval_t |
| variable_hasher::hash (const variable *v) |
| { |
| return dv_htab_hash (v->dv); |
| } |
| |
| /* Compare the declaration of variable V with declaration Y. */ |
| |
| inline bool |
| variable_hasher::equal (const variable *v, const void *y) |
| { |
| decl_or_value dv = CONST_CAST2 (decl_or_value, const void *, y); |
| |
| return (dv_as_opaque (v->dv) == dv_as_opaque (dv)); |
| } |
| |
| /* Free the element of VARIABLE_HTAB (its type is struct variable). */ |
| |
| inline void |
| variable_hasher::remove (variable *var) |
| { |
| variable_htab_free (var); |
| } |
| |
| typedef hash_table<variable_hasher> variable_table_type; |
| typedef variable_table_type::iterator variable_iterator_type; |
| |
| /* Structure for passing some other parameters to function |
| emit_note_insn_var_location. */ |
| struct emit_note_data |
| { |
| /* The instruction which the note will be emitted before/after. */ |
| rtx_insn *insn; |
| |
| /* Where the note will be emitted (before/after insn)? */ |
| enum emit_note_where where; |
| |
| /* The variables and values active at this point. */ |
| variable_table_type *vars; |
| }; |
| |
| /* Structure holding a refcounted hash table.  If refcount > 1, |
| it must first be unshared before being modified. */ |
| struct shared_hash |
| { |
| /* Reference count. */ |
| int refcount; |
| |
| /* Actual hash table. */ |
| variable_table_type *htab; |
| }; |
| |
| /* Structure holding the IN or OUT set for a basic block. */ |
| struct dataflow_set |
| { |
| /* Adjustment of stack offset. */ |
| HOST_WIDE_INT stack_adjust; |
| |
| /* Attributes for registers (lists of attrs). */ |
| attrs *regs[FIRST_PSEUDO_REGISTER]; |
| |
| /* Variable locations. */ |
| shared_hash *vars; |
| |
| /* The vars currently being traversed. */ |
| shared_hash *traversed_vars; |
| }; |
| |
| /* The structure (one for each basic block) containing the information |
| needed for variable tracking. */ |
| struct variable_tracking_info |
| { |
| /* The vector of micro operations. */ |
| vec<micro_operation> mos; |
| |
| /* The IN and OUT set for dataflow analysis. */ |
| dataflow_set in; |
| dataflow_set out; |
| |
| /* The permanent-in dataflow set for this block. This is used to |
| hold values for which we had to compute entry values. ??? This |
| should probably be dynamically allocated, to avoid using more |
| memory in non-debug builds. */ |
| dataflow_set *permp; |
| |
| /* Has the block been visited in DFS? */ |
| bool visited; |
| |
| /* Has the block been flooded in VTA? */ |
| bool flooded; |
| |
| }; |
| |
| /* Alloc pool for struct attrs. */ |
| object_allocator<attrs> attrs_pool ("attrs pool"); |
| |
| /* Alloc pool for struct variable with MAX_VAR_PARTS entries. */ |
| |
| static pool_allocator var_pool |
| ("variable_def pool", sizeof (variable) + |
| (MAX_VAR_PARTS - 1) * sizeof (((variable *)NULL)->var_part[0])); |
| |
| /* Alloc pool for struct variable with a single var_part entry. */ |
| static pool_allocator valvar_pool |
| ("small variable_def pool", sizeof (variable)); |
| |
| /* Alloc pool for struct location_chain. */ |
| static object_allocator<location_chain> location_chain_pool |
| ("location_chain pool"); |
| |
| /* Alloc pool for struct shared_hash. */ |
| static object_allocator<shared_hash> shared_hash_pool ("shared_hash pool"); |
| |
| /* Alloc pool for struct loc_exp_dep for NOT_ONEPART variables. */ |
| object_allocator<loc_exp_dep> loc_exp_dep_pool ("loc_exp_dep pool"); |
| |
| /* Changed variables for which notes will be emitted. */ |
| static variable_table_type *changed_variables; |
| |
| /* Shall notes be emitted? */ |
| static bool emit_notes; |
| |
| /* Values whose dynamic location lists have gone empty, but whose |
| cselib location lists are still usable. Use this to hold the |
| current location, the backlinks, etc, during emit_notes. */ |
| static variable_table_type *dropped_values; |
| |
| /* Empty shared hashtable. */ |
| static shared_hash *empty_shared_hash; |
| |
| /* Scratch register bitmap used by cselib_expand_value_rtx. */ |
| static bitmap scratch_regs = NULL; |
| |
| #ifdef HAVE_window_save |
| struct GTY(()) parm_reg { |
| rtx outgoing; |
| rtx incoming; |
| }; |
| |
| |
| /* Vector of windowed parameter registers, if any. */ |
| static vec<parm_reg, va_gc> *windowed_parm_regs = NULL; |
| #endif |
| |
| /* Variable used to tell whether cselib_process_insn called our hook. */ |
| static bool cselib_hook_called; |
| |
| /* Local function prototypes. */ |
| static void stack_adjust_offset_pre_post (rtx, HOST_WIDE_INT *, |
| HOST_WIDE_INT *); |
| static void insn_stack_adjust_offset_pre_post (rtx_insn *, HOST_WIDE_INT *, |
| HOST_WIDE_INT *); |
| static bool vt_stack_adjustments (void); |
| |
| static void init_attrs_list_set (attrs **); |
| static void attrs_list_clear (attrs **); |
| static attrs *attrs_list_member (attrs *, decl_or_value, HOST_WIDE_INT); |
| static void attrs_list_insert (attrs **, decl_or_value, HOST_WIDE_INT, rtx); |
| static void attrs_list_copy (attrs **, attrs *); |
| static void attrs_list_union (attrs **, attrs *); |
| |
| static variable **unshare_variable (dataflow_set *set, variable **slot, |
| variable *var, enum var_init_status); |
| static void vars_copy (variable_table_type *, variable_table_type *); |
| static tree var_debug_decl (tree); |
| static void var_reg_set (dataflow_set *, rtx, enum var_init_status, rtx); |
| static void var_reg_delete_and_set (dataflow_set *, rtx, bool, |
| enum var_init_status, rtx); |
| static void var_reg_delete (dataflow_set *, rtx, bool); |
| static void var_regno_delete (dataflow_set *, int); |
| static void var_mem_set (dataflow_set *, rtx, enum var_init_status, rtx); |
| static void var_mem_delete_and_set (dataflow_set *, rtx, bool, |
| enum var_init_status, rtx); |
| static void var_mem_delete (dataflow_set *, rtx, bool); |
| |
| static void dataflow_set_init (dataflow_set *); |
| static void dataflow_set_clear (dataflow_set *); |
| static void dataflow_set_copy (dataflow_set *, dataflow_set *); |
| static int variable_union_info_cmp_pos (const void *, const void *); |
| static void dataflow_set_union (dataflow_set *, dataflow_set *); |
| static location_chain *find_loc_in_1pdv (rtx, variable *, |
| variable_table_type *); |
| static bool canon_value_cmp (rtx, rtx); |
| static int loc_cmp (rtx, rtx); |
| static bool variable_part_different_p (variable_part *, variable_part *); |
| static bool onepart_variable_different_p (variable *, variable *); |
| static bool variable_different_p (variable *, variable *); |
| static bool dataflow_set_different (dataflow_set *, dataflow_set *); |
| static void dataflow_set_destroy (dataflow_set *); |
| |
| static bool track_expr_p (tree, bool); |
| static void add_uses_1 (rtx *, void *); |
| static void add_stores (rtx, const_rtx, void *); |
| static bool compute_bb_dataflow (basic_block); |
| static bool vt_find_locations (void); |
| |
| static void dump_attrs_list (attrs *); |
| static void dump_var (variable *); |
| static void dump_vars (variable_table_type *); |
| static void dump_dataflow_set (dataflow_set *); |
| static void dump_dataflow_sets (void); |
| |
| static void set_dv_changed (decl_or_value, bool); |
| static void variable_was_changed (variable *, dataflow_set *); |
| static variable **set_slot_part (dataflow_set *, rtx, variable **, |
| decl_or_value, HOST_WIDE_INT, |
| enum var_init_status, rtx); |
| static void set_variable_part (dataflow_set *, rtx, |
| decl_or_value, HOST_WIDE_INT, |
| enum var_init_status, rtx, enum insert_option); |
| static variable **clobber_slot_part (dataflow_set *, rtx, |
| variable **, HOST_WIDE_INT, rtx); |
| static void clobber_variable_part (dataflow_set *, rtx, |
| decl_or_value, HOST_WIDE_INT, rtx); |
| static variable **delete_slot_part (dataflow_set *, rtx, variable **, |
| HOST_WIDE_INT); |
| static void delete_variable_part (dataflow_set *, rtx, |
| decl_or_value, HOST_WIDE_INT); |
| static void emit_notes_in_bb (basic_block, dataflow_set *); |
| static void vt_emit_notes (void); |
| |
| static void vt_add_function_parameters (void); |
| static bool vt_initialize (void); |
| static void vt_finalize (void); |
| |
| /* Callback for stack_adjust_offset_pre_post, called via for_each_inc_dec. */ |
| |
| static int |
| stack_adjust_offset_pre_post_cb (rtx, rtx op, rtx dest, rtx src, rtx srcoff, |
| void *arg) |
| { |
| if (dest != stack_pointer_rtx) |
| return 0; |
| |
| switch (GET_CODE (op)) |
| { |
| case PRE_INC: |
| case PRE_DEC: |
| ((HOST_WIDE_INT *)arg)[0] -= INTVAL (srcoff); |
| return 0; |
| case POST_INC: |
| case POST_DEC: |
| ((HOST_WIDE_INT *)arg)[1] -= INTVAL (srcoff); |
| return 0; |
| case PRE_MODIFY: |
| case POST_MODIFY: |
| /* We only handle adjustments by a constant amount. */ |
| gcc_assert (GET_CODE (src) == PLUS |
| && CONST_INT_P (XEXP (src, 1)) |
| && XEXP (src, 0) == stack_pointer_rtx); |
| ((HOST_WIDE_INT *)arg)[GET_CODE (op) == POST_MODIFY] |
| -= INTVAL (XEXP (src, 1)); |
| return 0; |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| /* Given a SET pattern, calculate the amounts of stack adjustment it |
| contains, both PRE- and POST-modifying the stack pointer. |
| This function is similar to stack_adjust_offset. */ |
| |
| static void |
| stack_adjust_offset_pre_post (rtx pattern, HOST_WIDE_INT *pre, |
| HOST_WIDE_INT *post) |
| { |
| rtx src = SET_SRC (pattern); |
| rtx dest = SET_DEST (pattern); |
| enum rtx_code code; |
| |
| if (dest == stack_pointer_rtx) |
| { |
| /* (set (reg sp) (plus (reg sp) (const_int))) */ |
| code = GET_CODE (src); |
| if (! (code == PLUS || code == MINUS) |
| || XEXP (src, 0) != stack_pointer_rtx |
| || !CONST_INT_P (XEXP (src, 1))) |
| return; |
| |
| if (code == MINUS) |
| *post += INTVAL (XEXP (src, 1)); |
| else |
| *post -= INTVAL (XEXP (src, 1)); |
| return; |
| } |
| HOST_WIDE_INT res[2] = { 0, 0 }; |
| for_each_inc_dec (pattern, stack_adjust_offset_pre_post_cb, res); |
| *pre += res[0]; |
| *post += res[1]; |
| } |
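| |
| /* For example (an illustrative sketch; the register number is made up), |
| a typical stack allocation such as |
| |
| (set (reg/f:DI 7 sp) (plus:DI (reg/f:DI 7 sp) (const_int -16))) |
| |
| adds 16 to *POST, because the adjustment is recorded with the sign |
| flipped; auto-inc/dec addresses inside the pattern are handled by |
| stack_adjust_offset_pre_post_cb via for_each_inc_dec instead. */ |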
| |
| /* Given an INSN, calculate the amounts of stack adjustment it contains, |
| both PRE- and POST-modifying the stack pointer. */ |
| |
| static void |
| insn_stack_adjust_offset_pre_post (rtx_insn *insn, HOST_WIDE_INT *pre, |
| HOST_WIDE_INT *post) |
| { |
| rtx pattern; |
| |
| *pre = 0; |
| *post = 0; |
| |
| pattern = PATTERN (insn); |
| if (RTX_FRAME_RELATED_P (insn)) |
| { |
| rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX); |
| if (expr) |
| pattern = XEXP (expr, 0); |
| } |
| |
| if (GET_CODE (pattern) == SET) |
| stack_adjust_offset_pre_post (pattern, pre, post); |
| else if (GET_CODE (pattern) == PARALLEL |
| || GET_CODE (pattern) == SEQUENCE) |
| { |
| int i; |
| |
| /* There may be stack adjustments inside compound insns. Search |
| for them. */ |
| for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--) |
| if (GET_CODE (XVECEXP (pattern, 0, i)) == SET) |
| stack_adjust_offset_pre_post (XVECEXP (pattern, 0, i), pre, post); |
| } |
| } |
| |
| /* Compute stack adjustments for all blocks by traversing DFS tree. |
| Return true when the adjustments on all incoming edges are consistent. |
| Heavily borrowed from pre_and_rev_post_order_compute. */ |
| |
| static bool |
| vt_stack_adjustments (void) |
| { |
| edge_iterator *stack; |
| int sp; |
| |
| /* Initialize entry block. */ |
| VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->visited = true; |
| VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->in.stack_adjust |
| = INCOMING_FRAME_SP_OFFSET; |
| VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->out.stack_adjust |
| = INCOMING_FRAME_SP_OFFSET; |
| |
| /* Allocate stack for back-tracking up CFG. */ |
| stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1); |
| sp = 0; |
| |
| /* Push the first edge on to the stack. */ |
| stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs); |
| |
| while (sp) |
| { |
| edge_iterator ei; |
| basic_block src; |
| basic_block dest; |
| |
| /* Look at the edge on the top of the stack. */ |
| ei = stack[sp - 1]; |
| src = ei_edge (ei)->src; |
| dest = ei_edge (ei)->dest; |
| |
| /* Check if the edge destination has been visited yet. */ |
| if (!VTI (dest)->visited) |
| { |
| rtx_insn *insn; |
| HOST_WIDE_INT pre, post, offset; |
| VTI (dest)->visited = true; |
| VTI (dest)->in.stack_adjust = offset = VTI (src)->out.stack_adjust; |
| |
| if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
| for (insn = BB_HEAD (dest); |
| insn != NEXT_INSN (BB_END (dest)); |
| insn = NEXT_INSN (insn)) |
| if (INSN_P (insn)) |
| { |
| insn_stack_adjust_offset_pre_post (insn, &pre, &post); |
| offset += pre + post; |
| } |
| |
| VTI (dest)->out.stack_adjust = offset; |
| |
| if (EDGE_COUNT (dest->succs) > 0) |
| /* Since the DEST node has been visited for the first |
| time, check its successors. */ |
| stack[sp++] = ei_start (dest->succs); |
| } |
| else |
| { |
| /* We can end up with different stack adjustments for the exit block |
| of a shrink-wrapped function if stack_adjust_offset_pre_post |
| doesn't understand the rtx pattern used to restore the stack |
| pointer in the epilogue. For example, on s390(x), the stack |
| pointer is often restored via a load-multiple instruction |
| and so no stack_adjust offset is recorded for it. This means |
| that the stack offset at the end of the epilogue block is the |
| same as the offset before the epilogue, whereas other paths |
| to the exit block will have the correct stack_adjust. |
| |
| It is safe to ignore these differences because (a) we never |
| use the stack_adjust for the exit block in this pass and |
| (b) dwarf2cfi checks whether the CFA notes in a shrink-wrapped |
| function are correct. |
| |
| We must check whether the adjustments on other edges are |
| the same though. */ |
| if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun) |
| && VTI (dest)->in.stack_adjust != VTI (src)->out.stack_adjust) |
| { |
| free (stack); |
| return false; |
| } |
| |
| if (! ei_one_before_end_p (ei)) |
| /* Go to the next edge. */ |
| ei_next (&stack[sp - 1]); |
| else |
| /* Return to previous level if there are no more edges. */ |
| sp--; |
| } |
| } |
| |
| free (stack); |
| return true; |
| } |
| |
| /* The register that stack_pointer_rtx or hard_frame_pointer_rtx is being |
| mapped to (arg_pointer_rtx resp. frame_pointer_rtx), together with the |
| offset used for the mapping. */ |
| static rtx cfa_base_rtx; |
| static HOST_WIDE_INT cfa_base_offset; |
| |
| /* Compute a CFA-based value for an ADJUSTMENT made to stack_pointer_rtx |
| or hard_frame_pointer_rtx. */ |
| |
| static inline rtx |
| compute_cfa_pointer (poly_int64 adjustment) |
| { |
| return plus_constant (Pmode, cfa_base_rtx, adjustment + cfa_base_offset); |
| } |
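| |
| /* For example (an illustrative sketch with made-up numbers), if |
| cfa_base_rtx is the argument pointer and cfa_base_offset is -16, |
| then compute_cfa_pointer (8) yields |
| (plus:DI (reg argp) (const_int -8)) on a 64-bit target. */ |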
| |
| /* Adjustment for hard_frame_pointer_rtx to cfa base reg, |
| or -1 if the replacement shouldn't be done. */ |
| static poly_int64 hard_frame_pointer_adjustment = -1; |
| |
| /* Data for adjust_mems callback. */ |
| |
| class adjust_mem_data |
| { |
| public: |
| bool store; |
| machine_mode mem_mode; |
| HOST_WIDE_INT stack_adjust; |
| auto_vec<rtx> side_effects; |
| }; |
| |
| /* Helper for adjust_mems.  Return true if X is suitable for the |
| transformation of wider-mode arithmetic to a narrower mode. */ |
| |
| static bool |
| use_narrower_mode_test (rtx x, const_rtx subreg) |
| { |
| subrtx_var_iterator::array_type array; |
| FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST) |
| { |
| rtx x = *iter; |
| if (CONSTANT_P (x)) |
| iter.skip_subrtxes (); |
| else |
| switch (GET_CODE (x)) |
| { |
| case REG: |
| if (cselib_lookup (x, GET_MODE (SUBREG_REG (subreg)), 0, VOIDmode)) |
| return false; |
| if (!validate_subreg (GET_MODE (subreg), GET_MODE (x), x, |
| subreg_lowpart_offset (GET_MODE (subreg), |
| GET_MODE (x)))) |
| return false; |
| break; |
| case PLUS: |
| case MINUS: |
| case MULT: |
| break; |
| case ASHIFT: |
| if (GET_MODE (XEXP (x, 1)) != VOIDmode) |
| { |
| enum machine_mode mode = GET_MODE (subreg); |
| rtx op1 = XEXP (x, 1); |
| enum machine_mode op1_mode = GET_MODE (op1); |
| if (GET_MODE_PRECISION (as_a <scalar_int_mode> (mode)) |
| < GET_MODE_PRECISION (as_a <scalar_int_mode> (op1_mode))) |
| { |
| poly_uint64 byte = subreg_lowpart_offset (mode, op1_mode); |
| if (GET_CODE (op1) == SUBREG || GET_CODE (op1) == CONCAT) |
| { |
| if (!simplify_subreg (mode, op1, op1_mode, byte)) |
| return false; |
| } |
| else if (!validate_subreg (mode, op1_mode, op1, byte)) |
| return false; |
| } |
| } |
| iter.substitute (XEXP (x, 0)); |
| break; |
| default: |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| /* Transform X into narrower mode MODE from wider mode WMODE. */ |
| |
| static rtx |
| use_narrower_mode (rtx x, scalar_int_mode mode, scalar_int_mode wmode) |
| { |
| rtx op0, op1; |
| if (CONSTANT_P (x)) |
| return lowpart_subreg (mode, x, wmode); |
| switch (GET_CODE (x)) |
| { |
| case REG: |
| return lowpart_subreg (mode, x, wmode); |
| case PLUS: |
| case MINUS: |
| case MULT: |
| op0 = use_narrower_mode (XEXP (x, 0), mode, wmode); |
| op1 = use_narrower_mode (XEXP (x, 1), mode, wmode); |
| return simplify_gen_binary (GET_CODE (x), mode, op0, op1); |
| case ASHIFT: |
| op0 = use_narrower_mode (XEXP (x, 0), mode, wmode); |
| op1 = XEXP (x, 1); |
| /* Ensure shift amount is not wider than mode. */ |
| if (GET_MODE (op1) == VOIDmode) |
| op1 = lowpart_subreg (mode, op1, wmode); |
| else if (GET_MODE_PRECISION (mode) |
| < GET_MODE_PRECISION (as_a <scalar_int_mode> (GET_MODE (op1)))) |
| op1 = lowpart_subreg (mode, op1, GET_MODE (op1)); |
| return simplify_gen_binary (ASHIFT, mode, op0, op1); |
| default: |
| gcc_unreachable (); |
| } |
| } |
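| |
| /* For example (an illustrative sketch; the register number is made up), |
| narrowing from DImode to SImode rewrites |
| |
| (plus:DI (reg:DI 100) (const_int 1)) |
| |
| into |
| |
| (plus:SI (subreg:SI (reg:DI 100) 0) (const_int 1)) |
| |
| assuming a little-endian lowpart at byte offset 0. */ |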
| |
| /* Helper function for adjusting used MEMs. */ |
| |
| static rtx |
| adjust_mems (rtx loc, const_rtx old_rtx, void *data) |
| { |
| class adjust_mem_data *amd = (class adjust_mem_data *) data; |
| rtx mem, addr = loc, tem; |
| machine_mode mem_mode_save; |
| bool store_save; |
| scalar_int_mode tem_mode, tem_subreg_mode; |
| poly_int64 size; |
| switch (GET_CODE (loc)) |
| { |
| case REG: |
| /* Don't do any sp or fp replacements outside of MEM addresses |
| on the LHS. */ |
| if (amd->mem_mode == VOIDmode && amd->store) |
| return loc; |
| if (loc == stack_pointer_rtx |
| && !frame_pointer_needed |
| && cfa_base_rtx) |
| return compute_cfa_pointer (amd->stack_adjust); |
| else if (loc == hard_frame_pointer_rtx |
| && frame_pointer_needed |
| && maybe_ne (hard_frame_pointer_adjustment, -1) |
| && cfa_base_rtx) |
| return compute_cfa_pointer (hard_frame_pointer_adjustment); |
| gcc_checking_assert (loc != virtual_incoming_args_rtx); |
| return loc; |
| case MEM: |
| mem = loc; |
| if (!amd->store) |
| { |
| mem = targetm.delegitimize_address (mem); |
| if (mem != loc && !MEM_P (mem)) |
| return simplify_replace_fn_rtx (mem, old_rtx, adjust_mems, data); |
| } |
| |
| addr = XEXP (mem, 0); |
| mem_mode_save = amd->mem_mode; |
| amd->mem_mode = GET_MODE (mem); |
| store_save = amd->store; |
| amd->store = false; |
| addr = simplify_replace_fn_rtx (addr, old_rtx, adjust_mems, data); |
| amd->store = store_save; |
| amd->mem_mode = mem_mode_save; |
| if (mem == loc) |
| addr = targetm.delegitimize_address (addr); |
| if (addr != XEXP (mem, 0)) |
| mem = replace_equiv_address_nv (mem, addr); |
| if (!amd->store) |
| mem = avoid_constant_pool_reference (mem); |
| return mem; |
| case PRE_INC: |
| case PRE_DEC: |
| size = GET_MODE_SIZE (amd->mem_mode); |
| addr = plus_constant (GET_MODE (loc), XEXP (loc, 0), |
| GET_CODE (loc) == PRE_INC ? size : -size); |
| /* FALLTHRU */ |
| case POST_INC: |
| case POST_DEC: |
| if (addr == loc) |
| addr = XEXP (loc, 0); |
| gcc_assert (amd->mem_mode != VOIDmode && amd->mem_mode != BLKmode); |
| addr = simplify_replace_fn_rtx (addr, old_rtx, adjust_mems, data); |
| size = GET_MODE_SIZE (amd->mem_mode); |
| tem = plus_constant (GET_MODE (loc), XEXP (loc, 0), |
| (GET_CODE (loc) == PRE_INC |
| || GET_CODE (loc) == POST_INC) ? size : -size); |
| store_save = amd->store; |
| amd->store = false; |
| tem = simplify_replace_fn_rtx (tem, old_rtx, adjust_mems, data); |
| amd->store = store_save; |
| amd->side_effects.safe_push (gen_rtx_SET (XEXP (loc, 0), tem)); |
| return addr; |
| case PRE_MODIFY: |
| addr = XEXP (loc, 1); |
| /* FALLTHRU */ |
| case POST_MODIFY: |
| if (addr == loc) |
| addr = XEXP (loc, 0); |
| gcc_assert (amd->mem_mode != VOIDmode); |
| addr = simplify_replace_fn_rtx (addr, old_rtx, adjust_mems, data); |
| store_save = amd->store; |
| amd->store = false; |
| tem = simplify_replace_fn_rtx (XEXP (loc, 1), old_rtx, |
| adjust_mems, data); |
| amd->store = store_save; |
| amd->side_effects.safe_push (gen_rtx_SET (XEXP (loc, 0), tem)); |
| return addr; |
| case SUBREG: |
| /* First try without delegitimization of whole MEMs and |
| avoid_constant_pool_reference, which is more likely to succeed. */ |
| store_save = amd->store; |
| amd->store = true; |
| addr = simplify_replace_fn_rtx (SUBREG_REG (loc), old_rtx, adjust_mems, |
| data); |
| amd->store = store_save; |
| mem = simplify_replace_fn_rtx (addr, old_rtx, adjust_mems, data); |
| if (mem == SUBREG_REG (loc)) |
| { |
| tem = loc; |
| goto finish_subreg; |
| } |
| tem = simplify_gen_subreg (GET_MODE (loc), mem, |
| GET_MODE (SUBREG_REG (loc)), |
| SUBREG_BYTE (loc)); |
| if (tem) |
| goto finish_subreg; |
| tem = simplify_gen_subreg (GET_MODE (loc), addr, |
| GET_MODE (SUBREG_REG (loc)), |
| SUBREG_BYTE (loc)); |
| if (tem == NULL_RTX) |
| tem = gen_rtx_raw_SUBREG (GET_MODE (loc), addr, SUBREG_BYTE (loc)); |
| finish_subreg: |
| if (MAY_HAVE_DEBUG_BIND_INSNS |
| && GET_CODE (tem) == SUBREG |
| && (GET_CODE (SUBREG_REG (tem)) == PLUS |
| || GET_CODE (SUBREG_REG (tem)) == MINUS |
| || GET_CODE (SUBREG_REG (tem)) == MULT |
| || GET_CODE (SUBREG_REG (tem)) == ASHIFT) |
| && is_a <scalar_int_mode> (GET_MODE (tem), &tem_mode) |
| && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (tem)), |
| &tem_subreg_mode) |
| && (GET_MODE_PRECISION (tem_mode) |
| < GET_MODE_PRECISION (tem_subreg_mode)) |
| && subreg_lowpart_p (tem) |
| && use_narrower_mode_test (SUBREG_REG (tem), tem)) |
| return use_narrower_mode (SUBREG_REG (tem), tem_mode, tem_subreg_mode); |
| return tem; |
| case ASM_OPERANDS: |
| /* Don't do any replacements in the second and following |
| ASM_OPERANDS of an inline-asm with multiple sets. |
| ASM_OPERANDS_INPUT_VEC, ASM_OPERANDS_INPUT_CONSTRAINT_VEC |
| and ASM_OPERANDS_LABEL_VEC need to be equal between |
| all the ASM_OPERANDS in the insn, and adjust_insn will |
| fix this up. */ |
| if (ASM_OPERANDS_OUTPUT_IDX (loc) != 0) |
| return loc; |
| break; |
| default: |
| break; |
| } |
| return NULL_RTX; |
| } |
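| |
| /* For example (an illustrative sketch; the register number is made up), |
| a use such as |
| |
| (mem:SI (post_inc:DI (reg:DI 100))) |
| |
| is rewritten to (mem:SI (reg:DI 100)), and the side effect |
| (set (reg:DI 100) (plus:DI (reg:DI 100) (const_int 4))) |
| is queued in AMD->side_effects so that adjust_insn can later add it |
| to the insn as an extra set. */ |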
| |
| /* Helper function for replacement of uses. */ |
| |
| static void |
| adjust_mem_uses (rtx *x, void *data) |
| { |
| rtx new_x = simplify_replace_fn_rtx (*x, NULL_RTX, adjust_mems, data); |
| if (new_x != *x) |
| validate_change (NULL_RTX, x, new_x, true); |
| } |
| |
| /* Helper function for replacement of stores. */ |
| |
| static void |
| adjust_mem_stores (rtx loc, const_rtx expr, void *data) |
| { |
| if (MEM_P (loc)) |
| { |
| rtx new_dest = simplify_replace_fn_rtx (SET_DEST (expr), NULL_RTX, |
| adjust_mems, data); |
| if (new_dest != SET_DEST (expr)) |
| { |
| rtx xexpr = CONST_CAST_RTX (expr); |
| validate_change (NULL_RTX, &SET_DEST (xexpr), new_dest, true); |
| } |
| } |
| } |
| |
| /* Simplify INSN. Remove all {PRE,POST}_{INC,DEC,MODIFY} rtxes, |
| replace them with their value in the insn and add the side-effects |
| as other sets to the insn. */ |
| |
| static void |
| adjust_insn (basic_block bb, rtx_insn *insn) |
| { |
| rtx set; |
| |
| #ifdef HAVE_window_save |
| /* If the target machine has an explicit window save instruction, the |
| transformation OUTGOING_REGNO -> INCOMING_REGNO is done there. */ |
| if (RTX_FRAME_RELATED_P (insn) |
| && find_reg_note (insn, REG_CFA_WINDOW_SAVE, NULL_RTX)) |
| { |
| unsigned int i, nregs = vec_safe_length (windowed_parm_regs); |
| rtx rtl = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nregs * 2)); |
| parm_reg *p; |
| |
| FOR_EACH_VEC_SAFE_ELT (windowed_parm_regs, i, p) |
| { |
| XVECEXP (rtl, 0, i * 2) |
| = gen_rtx_SET (p->incoming, p->outgoing); |
| /* Do not clobber the attached DECL, but only the REG. */ |
| XVECEXP (rtl, 0, i * 2 + 1) |
| = gen_rtx_CLOBBER (GET_MODE (p->outgoing), |
| gen_raw_REG (GET_MODE (p->outgoing), |
| REGNO (p->outgoing))); |
| } |
| |
| validate_change (NULL_RTX, &PATTERN (insn), rtl, true); |
| return; |
| } |
| #endif |
| |
| adjust_mem_data amd; |
| amd.mem_mode = VOIDmode; |
| amd.stack_adjust = -VTI (bb)->out.stack_adjust; |
| |
| amd.store = true; |
| note_stores (insn, adjust_mem_stores, &amd); |
| |
| amd.store = false; |
| if (GET_CODE (PATTERN (insn)) == PARALLEL |
| && asm_noperands (PATTERN (insn)) > 0 |
| && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET) |
| { |
| rtx body, set0; |
| int i; |
| |
| /* inline-asm with multiple sets is a tiny bit more complicated, |
| because the 3 vectors in ASM_OPERANDS need to be shared between |
| all ASM_OPERANDS in the instruction.  adjust_mems will |
| not touch ASM_OPERANDS other than the first one, so the |
| asm_noperands test above needs to be done before that (otherwise |
| it would fail), and afterwards this code fixes things up. */ |
| note_uses (&PATTERN (insn), adjust_mem_uses, &amd); |
| body = PATTERN (insn); |
| set0 = XVECEXP (body, 0, 0); |
| gcc_checking_assert (GET_CODE (set0) == SET |
| && GET_CODE (SET_SRC (set0)) == ASM_OPERANDS |
| && ASM_OPERANDS_OUTPUT_IDX (SET_SRC (set0)) == 0); |
| for (i = 1; i < XVECLEN (body, 0); i++) |
| if (GET_CODE (XVECEXP (body, 0, i)) != SET) |
| break; |
| else |
| { |
| set = XVECEXP (body, 0, i); |
| gcc_checking_assert (GET_CODE (SET_SRC (set)) == ASM_OPERANDS |
| && ASM_OPERANDS_OUTPUT_IDX (SET_SRC (set)) |
| == i); |
| if (ASM_OPERANDS_INPUT_VEC (SET_SRC (set)) |
| != ASM_OPERANDS_INPUT_VEC (SET_SRC (set0)) |
| || ASM_OPERANDS_INPUT_CONSTRAINT_VEC (SET_SRC (set)) |
| != ASM_OPERANDS_INPUT_CONSTRAINT_VEC (SET_SRC (set0)) |
| || ASM_OPERANDS_LABEL_VEC (SET_SRC (set)) |
| != ASM_OPERANDS_LABEL_VEC (SET_SRC (set0))) |
| { |
| rtx newsrc = shallow_copy_rtx (SET_SRC (set)); |
| ASM_OPERANDS_INPUT_VEC (newsrc) |
| = ASM_OPERANDS_INPUT_VEC (SET_SRC (set0)); |
| ASM_OPERANDS_INPUT_CONSTRAINT_VEC (newsrc) |
| = ASM_OPERANDS_INPUT_CONSTRAINT_VEC (SET_SRC (set0)); |
| ASM_OPERANDS_LABEL_VEC (newsrc) |
| = ASM_OPERANDS_LABEL_VEC (SET_SRC (set0)); |
| validate_change (NULL_RTX, &SET_SRC (set), newsrc, true); |
| } |
| } |
| } |
| else |
| note_uses (&PATTERN (insn), adjust_mem_uses, &amd); |
| |
| /* For read-only MEMs containing some constant, prefer those |
| constants. */ |
| set = single_set (insn); |
| if (set && MEM_P (SET_SRC (set)) && MEM_READONLY_P (SET_SRC (set))) |
| { |
| rtx note = find_reg_equal_equiv_note (insn); |
| |
| if (note && CONSTANT_P (XEXP (note, 0))) |
| validate_change (NULL_RTX, &SET_SRC (set), XEXP (note, 0), true); |
| } |
| |
| if (!amd.side_effects.is_empty ()) |
| { |
| rtx *pat, new_pat; |
| int i, oldn; |
| |
| pat = &PATTERN (insn); |
| if (GET_CODE (*pat) == COND_EXEC) |
| pat = &COND_EXEC_CODE (*pat); |
| if (GET_CODE (*pat) == PARALLEL) |
| oldn = XVECLEN (*pat, 0); |
| else |
| oldn = 1; |
| unsigned int newn = amd.side_effects.length (); |
| new_pat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (oldn + newn)); |
| if (GET_CODE (*pat) == PARALLEL) |
| for (i = 0; i < oldn; i++) |
| XVECEXP (new_pat, 0, i) = XVECEXP (*pat, 0, i); |
| else |
| XVECEXP (new_pat, 0, 0) = *pat; |
| |
| rtx effect; |
| unsigned int j; |
| FOR_EACH_VEC_ELT_REVERSE (amd.side_effects, j, effect) |
| XVECEXP (new_pat, 0, j + oldn) = effect; |
| validate_change (NULL_RTX, pat, new_pat, true); |
| } |
| } |
| |
| /* Return the DEBUG_EXPR of a DEBUG_EXPR_DECL or the VALUE in DV. */ |
| static inline rtx |
| dv_as_rtx (decl_or_value dv) |
| { |
| tree decl; |
| |
| if (dv_is_value_p (dv)) |
| return dv_as_value (dv); |
| |
| decl = dv_as_decl (dv); |
| |
| gcc_checking_assert (TREE_CODE (decl) == DEBUG_EXPR_DECL); |
| return DECL_RTL_KNOWN_SET (decl); |
| } |
| |
| /* Return nonzero if a decl_or_value must not have more than one |
| variable part. The returned value discriminates among various |
| kinds of one-part DVs according to enum onepart_enum. */ |
| static inline onepart_enum |
| dv_onepart_p (decl_or_value dv) |
| { |
| tree decl; |
| |
| if (!MAY_HAVE_DEBUG_BIND_INSNS) |
| return NOT_ONEPART; |
| |
| if (dv_is_value_p (dv)) |
| return ONEPART_VALUE; |
| |
| decl = dv_as_decl (dv); |
| |
| if (TREE_CODE (decl) == DEBUG_EXPR_DECL) |
| return ONEPART_DEXPR; |
| |
| if (target_for_debug_bind (decl) != NULL_TREE) |
| return ONEPART_VDECL; |
| |
| return NOT_ONEPART; |
| } |
| |
| /* Return the variable pool to be used for a dv of type ONEPART. */ |
| static inline pool_allocator & |
| onepart_pool (onepart_enum onepart) |
| { |
| return onepart ? valvar_pool : var_pool; |
| } |
| |
| /* Allocate a variable from the corresponding variable pool. */ |
| static inline variable * |
| onepart_pool_allocate (onepart_enum onepart) |
| { |
| return (variable*) onepart_pool (onepart).allocate (); |
| } |
| |
| /* Build a decl_or_value out of a decl. */ |
| static inline decl_or_value |
| dv_from_decl (tree decl) |
| { |
| decl_or_value dv; |
| dv = decl; |
| gcc_checking_assert (dv_is_decl_p (dv)); |
| return dv; |
| } |
| |
| /* Build a decl_or_value out of a value. */ |
| static inline decl_or_value |
| dv_from_value (rtx value) |
| { |
| decl_or_value dv; |
| dv = value; |
| gcc_checking_assert (dv_is_value_p (dv)); |
| return dv; |
| } |
| |
| /* Return a value or the decl of a debug_expr as a decl_or_value. */ |
| static inline decl_or_value |
| dv_from_rtx (rtx x) |
| { |
| decl_or_value dv; |
| |
| switch (GET_CODE (x)) |
| { |
| case DEBUG_EXPR: |
| dv = dv_from_decl (DEBUG_EXPR_TREE_DECL (x)); |
| gcc_checking_assert (DECL_RTL_KNOWN_SET (DEBUG_EXPR_TREE_DECL (x)) == x); |
| break; |
| |
| case VALUE: |
| dv = dv_from_value (x); |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| |
| return dv; |
| } |
| |
| extern void debug_dv (decl_or_value dv); |
| |
| DEBUG_FUNCTION void |
| debug_dv (decl_or_value dv) |
| { |
| if (dv_is_value_p (dv)) |
| debug_rtx (dv_as_value (dv)); |
| else |
| debug_generic_stmt (dv_as_decl (dv)); |
| } |
| |
| static void loc_exp_dep_clear (variable *var); |
| |
| /* Free the element of VARIABLE_HTAB (its type is struct variable). */ |
| |
| static void |
| variable_htab_free (void *elem) |
| { |
| int i; |
| variable *var = (variable *) elem; |
| location_chain *node, *next; |
| |
| gcc_checking_assert (var->refcount > 0); |
| |
| var->refcount--; |
| if (var->refcount > 0) |
| return; |
| |
| for (i = 0; i < var->n_var_parts; i++) |
| { |
| for (node = var->var_part[i].loc_chain; node; node = next) |
| { |
| next = node->next; |
| delete node; |
| } |
| var->var_part[i].loc_chain = NULL; |
| } |
| if (var->onepart && VAR_LOC_1PAUX (var)) |
| { |
| loc_exp_dep_clear (var); |
| if (VAR_LOC_DEP_LST (var)) |
| VAR_LOC_DEP_LST (var)->pprev = NULL; |
| XDELETE (VAR_LOC_1PAUX (var)); |
| /* These may be reused across functions, so reset |
| e.g. NO_LOC_P. */ |
| if (var->onepart == ONEPART_DEXPR) |
| set_dv_changed (var->dv, true); |
| } |
| onepart_pool (var->onepart).remove (var); |
| } |
| |
| /* Initialize the set (array) SET of attrs to empty lists. */ |
| |
| static void |
| init_attrs_list_set (attrs **set) |
| { |
| int i; |
| |
| for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
| set[i] = NULL; |
| } |
| |
| /* Make the list *LISTP empty. */ |
| |
| static void |
| attrs_list_clear (attrs **listp) |
| { |
| attrs *list, *next; |
| |
| for (list = *listp; list; list = next) |
| { |
| next = list->next; |
| delete list; |
| } |
| *listp = NULL; |
| } |
| |
| /* Return the member of LIST matching DV and OFFSET, or NULL if none. */ |
| |
| static attrs * |
| attrs_list_member (attrs *list, decl_or_value dv, HOST_WIDE_INT offset) |
| { |
| for (; list; list = list->next) |
| if (dv_as_opaque (list->dv) == dv_as_opaque (dv) && list->offset == offset) |
| return list; |
| return NULL; |
| } |
| |
| /* Insert the triplet DV, OFFSET, LOC at the head of the list *LISTP. */ |
| |
| static void |
| attrs_list_insert (attrs **listp, decl_or_value dv, |
| HOST_WIDE_INT offset, rtx loc) |
| { |
| attrs *list = new attrs; |
| list->loc = loc; |
| list->dv = dv; |
| list->offset = offset; |
| list->next = *listp; |
| *listp = list; |
| } |
| |
| /* Copy all nodes from SRC and create a list *DSTP of the copies. */ |
| |
| static void |
| attrs_list_copy (attrs **dstp, attrs *src) |
| { |
| attrs_list_clear (dstp); |
| for (; src; src = src->next) |
| { |
| attrs *n = new attrs; |
| n->loc = src->loc; |
| n->dv = src->dv; |
| n->offset = src->offset; |
| n->next = *dstp; |
| *dstp = n; |
| } |
| } |
| |
| /* Add all nodes from SRC which are not in *DSTP to *DSTP. */ |
| |
| static void |
| attrs_list_union (attrs **dstp, attrs *src) |
| { |
| for (; src; src = src->next) |
| { |
| if (!attrs_list_member (*dstp, src->dv, src->offset)) |
| attrs_list_insert (dstp, src->dv, src->offset, src->loc); |
| } |
| } |
| |
| /* Combine nodes that are not onepart nodes from SRC and SRC2 into |
| *DSTP. */ |
| |
| static void |
| attrs_list_mpdv_union (attrs **dstp, attrs *src, attrs *src2) |
| { |
| gcc_assert (!*dstp); |
| for (; src; src = src->next) |
| { |
| if (!dv_onepart_p (src->dv)) |
| attrs_list_insert (dstp, src->dv, src->offset, src->loc); |
| } |
| for (src = src2; src; src = src->next) |
| { |
| if (!dv_onepart_p (src->dv) |
| && !attrs_list_member (*dstp, src->dv, src->offset)) |
| attrs_list_insert (dstp, src->dv, src->offset, src->loc); |
| } |
| } |
| |
| /* Shared hashtable support. */ |
| |
| /* Return true if VARS is shared. */ |
| |
| static inline bool |
| shared_hash_shared (shared_hash *vars) |
| { |
| return vars->refcount > 1; |
| } |
| |
| /* Return the hash table for VARS. */ |
| |
| static inline variable_table_type * |
| shared_hash_htab (shared_hash *vars) |
| { |
| return vars->htab; |
| } |
| |
| /* Return true if VAR may be shared, either because its own refcount is |
| greater than one or because the containing VARS table is shared. */ |
| |
| static inline bool |
| shared_var_p (variable *var, shared_hash *vars) |
| { |
| /* Don't count an entry in the changed_variables table as a duplicate. */ |
| return ((var->refcount > 1 + (int) var->in_changed_variables) |
| || shared_hash_shared (vars)); |
| } |
| |
| /* Copy variables into a new hash table. */ |
| |
| static shared_hash * |
| shared_hash_unshare (shared_hash *vars) |
| { |
| shared_hash *new_vars = new shared_hash; |
| gcc_assert (vars->refcount > 1); |
| new_vars->refcount = 1; |
| new_vars->htab = new variable_table_type (vars->htab->elements () + 3); |
| vars_copy (new_vars->htab, vars->htab); |
| vars->refcount--; |
| return new_vars; |
| } |
| |
| /* Increment reference counter on VARS and return it. */ |
| |
| static inline shared_hash * |
| shared_hash_copy (shared_hash *vars) |
| { |
| vars->refcount++; |
| return vars; |
| } |
| |
| /* Decrement reference counter and destroy hash table if not shared |
| anymore. */ |
| |
| static void |
| shared_hash_destroy (shared_hash *vars) |
| { |
| gcc_checking_assert (vars->refcount > 0); |
| if (--vars->refcount == 0) |
| { |
| delete vars->htab; |
| delete vars; |
| } |
| } |
| |
| /* Unshare *PVARS if shared and return slot for DV. If INS is |
| INSERT, insert it if not already present. */ |
| |
| static inline variable ** |
| shared_hash_find_slot_unshare_1 (shared_hash **pvars, decl_or_value dv, |
| hashval_t dvhash, enum insert_option ins) |
| { |
| if (shared_hash_shared (*pvars)) |
| *pvars = shared_hash_unshare (*pvars); |
| return shared_hash_htab (*pvars)->find_slot_with_hash (dv, dvhash, ins); |
| } |
| |
| static inline variable ** |
| shared_hash_find_slot_unshare (shared_hash **pvars, decl_or_value dv, |
| enum insert_option ins) |
| { |
| return shared_hash_find_slot_unshare_1 (pvars, dv, dv_htab_hash (dv), ins); |
| } |
| |
| /* Return slot for DV, if it is already present in the hash table. |
| If it is not present, insert it only if VARS is not shared, otherwise |
| return NULL. */ |
| |
| static inline variable ** |
| shared_hash_find_slot_1 (shared_hash *vars, decl_or_value dv, hashval_t dvhash) |
| { |
| return shared_hash_htab (vars)->find_slot_with_hash (dv, dvhash, |
| shared_hash_shared (vars) |
| ? NO_INSERT : INSERT); |
| } |
| |
| static inline variable ** |
| shared_hash_find_slot (shared_hash *vars, decl_or_value dv) |
| { |
| return shared_hash_find_slot_1 (vars, dv, dv_htab_hash (dv)); |
| } |
| |
| /* Return slot for DV only if it is already present in the hash table. */ |
| |
| static inline variable ** |
| shared_hash_find_slot_noinsert_1 (shared_hash *vars, decl_or_value dv, |
| hashval_t dvhash) |
| { |
| return shared_hash_htab (vars)->find_slot_with_hash (dv, dvhash, NO_INSERT); |
| } |
| |
| static inline variable ** |
| shared_hash_find_slot_noinsert (shared_hash *vars, decl_or_value dv) |
| { |
| return shared_hash_find_slot_noinsert_1 (vars, dv, dv_htab_hash (dv)); |
| } |
| |
| /* Return variable for DV or NULL if not already present in the hash |
| table. */ |
| |
| static inline variable * |
| shared_hash_find_1 (shared_hash *vars, decl_or_value dv, hashval_t dvhash) |
| { |
| return shared_hash_htab (vars)->find_with_hash (dv, dvhash); |
| } |
| |
| static inline variable * |
| shared_hash_find (shared_hash *vars, decl_or_value dv) |
| { |
| return shared_hash_find_1 (vars, dv, dv_htab_hash (dv)); |
| } |
| |
| /* Return true if TVAL is better than CVAL as a canonical value.  We |
| choose lowest-numbered VALUEs, using the RTX address as a |
| tie-breaker. The idea is to arrange them into a star topology, |
| such that all of them are at most one step away from the canonical |
| value, and the canonical value has backlinks to all of them, in |
| addition to all the actual locations. We don't enforce this |
| topology throughout the entire dataflow analysis, though. |
| */ |
| |
| static inline bool |
| canon_value_cmp (rtx tval, rtx cval) |
| { |
| return !cval |
| || CSELIB_VAL_PTR (tval)->uid < CSELIB_VAL_PTR (cval)->uid; |
| } |
| |
| static bool dst_can_be_shared; |
| |
| /* Return a copy of a variable VAR and insert it into dataflow set SET. */ |
| |
| static variable ** |
| unshare_variable (dataflow_set *set, variable **slot, variable *var, |
| enum var_init_status initialized) |
| { |
| variable *new_var; |
| int i; |
| |
| new_var = onepart_pool_allocate (var->onepart); |
| new_var->dv = var->dv; |
| new_var->refcount = 1; |
| var->refcount--; |
| new_var->n_var_parts = var->n_var_parts; |
| new_var->onepart = var->onepart; |
| new_var->in_changed_variables = false; |
| |
| if (! flag_var_tracking_uninit) |
| initialized = VAR_INIT_STATUS_INITIALIZED; |
| |
| for (i = 0; i < var->n_var_parts; i++) |
| { |
| location_chain *node; |
| location_chain **nextp; |
| |
| if (i == 0 && var->onepart) |
| { |
| /* One-part auxiliary data is only used while emitting |
| notes, so propagate it to the new variable in the active |
| dataflow set. If we're not emitting notes, this will be |
| a no-op. */ |
| gcc_checking_assert (!VAR_LOC_1PAUX (var) || emit_notes); |
| VAR_LOC_1PAUX (new_var) = VAR_LOC_1PAUX (var); |
| VAR_LOC_1PAUX (var) = NULL; |
| } |
| else |
| VAR_PART_OFFSET (new_var, i) = VAR_PART_OFFSET (var, i); |
| nextp = &new_var->var_part[i].loc_chain; |
| for (node = var->var_part[i].loc_chain; node; node = node->next) |
| { |
| location_chain *new_lc; |
| |
| new_lc = new location_chain; |
| new_lc->next = NULL; |
| if (node->init > initialized) |
| new_lc->init = node->init; |
| else |
| new_lc->init = initialized; |
| if (node->set_src && !(MEM_P (node->set_src))) |
| new_lc->set_src = node->set_src; |
| else |
| new_lc->set_src = NULL; |
| new_lc->loc = node->loc; |
| |
| *nextp = new_lc; |
| nextp = &new_lc->next; |
| } |
| |
| new_var->var_part[i].cur_loc = var->var_part[i].cur_loc; |
| } |
| |
| dst_can_be_shared = false; |
| if (shared_hash_shared (set->vars)) |
| slot = shared_hash_find_slot_unshare (&set->vars, var->dv, NO_INSERT); |
| else if (set->traversed_vars && set->vars != set->traversed_vars) |
| slot = shared_hash_find_slot_noinsert (set->vars, var->dv); |
| *slot = new_var; |
| if (var->in_changed_variables) |
| { |
| variable **cslot |
| = changed_variables->find_slot_with_hash (var->dv, |
| dv_htab_hash (var->dv), |
| NO_INSERT); |
| gcc_assert (*cslot == (void *) var); |
| var->in_changed_variables = false; |
| variable_htab_free (var); |
| *cslot = new_var; |
| new_var->in_changed_variables = true; |
| } |
| return slot; |
| } |
| |
| /* Copy all variables from hash table SRC to hash table DST. */ |
| |
| static void |
| vars_copy (variable_table_type *dst, variable_table_type *src) |
| { |
| variable_iterator_type hi; |
| variable *var; |
| |
| FOR_EACH_HASH_TABLE_ELEMENT (*src, var, variable, hi) |
| { |
| variable **dstp; |
| var->refcount++; |
| dstp = dst->find_slot_with_hash (var->dv, dv_htab_hash (var->dv), |
| INSERT); |
| *dstp = var; |
| } |
| } |
| |
| /* Map a decl to its main debug decl. */ |
| |
| static inline tree |
| var_debug_decl (tree decl) |
| { |
| if (decl && VAR_P (decl) && DECL_HAS_DEBUG_EXPR_P (decl)) |
| { |
| tree debugdecl = DECL_DEBUG_EXPR (decl); |
| if (DECL_P (debugdecl)) |
| decl = debugdecl; |
| } |
| |
| return decl; |
| } |
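| |
| /* For example, an optimization-generated temporary whose |
| DECL_DEBUG_EXPR is the user variable it stands in for is mapped |
| back to that user variable, so its locations are attributed where |
| the user expects them; a non-DECL debug expression (e.g. a |
| COMPONENT_REF) leaves DECL unchanged. */ |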
| |
| /* Set the register LOC to contain DV, OFFSET. */ |
| |
| static void |
| var_reg_decl_set (dataflow_set *set, rtx loc, enum var_init_status initialized, |
| decl_or_value dv, HOST_WIDE_INT offset, rtx set_src, |
| enum insert_option iopt) |
| { |
| attrs *node; |
| bool decl_p = dv_is_decl_p (dv); |
| |
| if (decl_p) |
| dv = dv_from_decl (var_debug_decl (dv_as_decl (dv))); |
| |
| for (node = set->regs[REGNO (loc)]; node; node = node->next) |
| if (dv_as_opaque (node->dv) == dv_as_opaque (dv) |
| && node->offset == offset) |
| break; |
| if (!node) |
| attrs_list_insert (&set->regs[REGNO (loc)], dv, offset, loc); |
| set_variable_part (set, loc, dv, offset, initialized, set_src, iopt); |
| } |
| |
| /* Return true if we should track a location that is OFFSET bytes from |
| a variable. Store the constant offset in *OFFSET_OUT if so. */ |
| |
| static bool |
| track_offset_p (poly_int64 offset, HOST_WIDE_INT *offset_out) |
| { |
| HOST_WIDE_INT const_offset; |
| if (!offset.is_constant (&const_offset) |
| || !IN_RANGE (const_offset, 0, MAX_VAR_PARTS - 1)) |
| return false; |
| *offset_out = const_offset; |
| return true; |
| } |
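| |
| /* E.g. a register holding bytes 8..11 of a wider variable has |
| REG_OFFSET 8 and is tracked, whereas a non-constant poly_int |
| offset or one outside [0, MAX_VAR_PARTS - 1] is rejected. */ |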
| |
| /* Return the offset of a register that track_offset_p says we |
| should track. */ |
| |
| static HOST_WIDE_INT |
| get_tracked_reg_offset (rtx loc) |
| { |
| HOST_WIDE_INT offset; |
| if (!track_offset_p (REG_OFFSET (loc), &offset)) |
| gcc_unreachable (); |
| return offset; |
| } |
| |
| /* Set the register to contain REG_EXPR (LOC), REG_OFFSET (LOC). */ |
| |
| static void |
| var_reg_set (dataflow_set *set, rtx loc, enum var_init_status initialized, |
| rtx set_src) |
| { |
| tree decl = REG_EXPR (loc); |
| HOST_WIDE_INT offset = get_tracked_reg_offset (loc); |
| |
| var_reg_decl_set (set, loc, initialized, |
| dv_from_decl (decl), offset, set_src, INSERT); |
| } |
| |
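| /* Return the initialization status recorded for location LOC of |
| variable DV in dataflow set SET, VAR_INIT_STATUS_UNKNOWN if none |
| is recorded, or VAR_INIT_STATUS_INITIALIZED when |
| flag_var_tracking_uninit is not set. */ |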
| static enum var_init_status |
| get_init_value (dataflow_set *set, rtx loc, decl_or_value dv) |
| { |
| variable *var; |
| int i; |
| enum var_init_status ret_val = VAR_INIT_STATUS_UNKNOWN; |
| |
| if (! flag_var_tracking_uninit) |
| return VAR_INIT_STATUS_INITIALIZED; |
| |
| var = shared_hash_find (set->vars, dv); |
| if (var) |
| { |
| for (i = 0; i < var->n_var_parts && ret_val == VAR_INIT_STATUS_UNKNOWN; i++) |
| { |
| location_chain *nextp; |
| for (nextp = var->var_part[i].loc_chain; nextp; nextp = nextp->next) |
| if (rtx_equal_p (nextp->loc, loc)) |
| { |
| ret_val = nextp->init; |
| break; |
| } |
| } |
| } |
| |
| return ret_val; |
| } |
| |
| /* Delete current content of register LOC in dataflow set SET and set |
| the register to contain REG_EXPR (LOC), REG_OFFSET (LOC). If |
| MODIFY is true, any other live copies of the same variable part are |
| also deleted from the dataflow set, otherwise the variable part is |
| assumed to be copied from another location holding the same |
| part. */ |
| |
| static void |
| var_reg_delete_and_set (dataflow_set *set, rtx loc, bool modify, |
| enum var_init_status initialized, rtx set_src) |
| { |
| tree decl = REG_EXPR (loc); |
| HOST_WIDE_INT offset = get_tracked_reg_offset (loc); |
| attrs *node, *next; |
| attrs **nextp; |
| |
| decl = var_debug_decl (decl); |
| |
| if (initialized == VAR_INIT_STATUS_UNKNOWN) |
| initialized = get_init_value (set, loc, dv_from_decl (decl)); |
| |
| nextp = &set->regs[REGNO (loc)]; |
| for (node = *nextp; node; node = next) |
| { |
| next = node->next; |
| if (dv_as_opaque (node->dv) != decl || node->offset != offset) |
| { |
| delete_variable_part (set, node->loc, node->dv, node->offset); |
| delete node; |
| *nextp = next; |
| } |
| else |
| { |
| node->loc = loc; |
| nextp = &node->next; |
| } |
| } |
| if (modify) |
| clobber_variable_part (set, loc, dv_from_decl (decl), offset, set_src); |
| var_reg_set (set, loc, initialized, set_src); |
| } |
| |
| /* Delete the association of register LOC in dataflow set SET with any |
| variables that aren't onepart. If CLOBBER is true, also delete any |
| other live copies of the same variable part, and delete the |
| association with onepart dvs too. */ |
| |
| static void |
| var_reg_delete (dataflow_set *set, rtx loc, bool clobber) |
| { |
| attrs **nextp = &set->regs[REGNO (loc)]; |
| attrs *node, *next; |
| |
| HOST_WIDE_INT offset; |
| if (clobber && track_offset_p (REG_OFFSET (loc), &offset)) |
| { |
| tree decl = REG_EXPR (loc); |
| |
| decl = var_debug_decl (decl); |
| |
| clobber_variable_part (set, NULL, dv_from_decl (decl), offset, NULL); |
| } |
| |
| for (node = *nextp; node; node = next) |
| { |
| next = node->next; |
| if (clobber || !dv_onepart_p (node->dv)) |
| { |
| delete_variable_part (set, node->loc, node->dv, node->offset); |
| delete node; |
| *nextp = next; |
| } |
| else |
| nextp = &node->next; |
| } |
| } |
| |
| /* Delete content of register with number REGNO in dataflow set SET. */ |
| |
| static void |
| var_regno_delete (dataflow_set *set, int regno) |
| { |
| attrs **reg = &set->regs[regno]; |
| attrs *node, *next; |
| |
| for (node = *reg; node; node = next) |
| { |
| next = node->next; |
| delete_variable_part (set, node->loc, node->dv, node->offset); |
| delete node; |
| } |
| *reg = NULL; |
| } |
| |
| /* Return true if I is the negated value of a power of two. */ |
| static bool |
| negative_power_of_two_p (HOST_WIDE_INT i) |
| { |
| unsigned HOST_WIDE_INT x = -(unsigned HOST_WIDE_INT)i; |
| return pow2_or_zerop (x); |
| } |
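| |
| /* E.g. -16 (~0xf) qualifies; it is the typical mask in |
| stack-alignment RTL such as (and (reg sp) (const_int -16)), |
| which vt_get_canonicalize_base below strips off. */ |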
| |
| /* Strip constant offsets and alignments from LOC. Return the base |
| expression. */ |
| |
| static rtx |
| vt_get_canonicalize_base (rtx loc) |
| { |
| while ((GET_CODE (loc) == PLUS |
| || GET_CODE (loc) == AND) |
| && GET_CODE (XEXP (loc, 1)) == CONST_INT |
| && (GET_CODE (loc) != AND |
| || negative_power_of_two_p (INTVAL (XEXP (loc, 1))))) |
| loc = XEXP (loc, 0); |
| |
| return loc; |
| } |
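| |
| /* For example, the base of |
| (plus (and (plus (value V) (const_int 8)) (const_int -16)) |
| (const_int 4)) |
| is (value V): the outer offset, the alignment mask and the inner |
| offset are stripped in turn. */ |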
| |
| /* This caches canonicalized addresses for VALUEs, computed using |
| information in the global cselib table. */ |
| static hash_map<rtx, rtx> *global_get_addr_cache; |
| |
| /* This caches canonicalized addresses for VALUEs, computed using |
| information from the global cache and information pertaining to a |
| basic block being analyzed. */ |
| static hash_map<rtx, rtx> *local_get_addr_cache; |
| |
| static rtx vt_canonicalize_addr (dataflow_set *, rtx); |
| |
| /* Return the canonical address for LOC, which must be a VALUE, using a |
| cached global equivalence or computing it and storing it in the |
| global cache. */ |
| |
| static rtx |
| get_addr_from_global_cache (rtx const loc) |
| { |
| rtx x; |
| |
| gcc_checking_assert (GET_CODE (loc) == VALUE); |
| |
| bool existed; |
| rtx *slot = &global_get_addr_cache->get_or_insert (loc, &existed); |
| if (existed) |
| return *slot; |
| |
| x = canon_rtx (get_addr (loc)); |
| |
| /* Tentative, avoiding infinite recursion. */ |
| *slot = x; |
| |
| if (x != loc) |
| { |
| rtx nx = vt_canonicalize_addr (NULL, x); |
| if (nx != x) |
| { |
| /* The table may have moved during recursion; recompute |
| SLOT. */ |
| *global_get_addr_cache->get (loc) = x = nx; |
| } |
| } |
| |
| return x; |
| } |
| |
| /* Return the canonical address for LOC, which must be a VALUE, using a |
| cached local equivalence or computing it and storing it in the |
| local cache. */ |
| |
| static rtx |
| get_addr_from_local_cache (dataflow_set *set, rtx const loc) |
| { |
| rtx x; |
| decl_or_value dv; |
| variable *var; |
| location_chain *l; |
| |
| gcc_checking_assert (GET_CODE (loc) == VALUE); |
| |
| bool existed; |
| rtx *slot = &local_get_addr_cache->get_or_insert (loc, &existed); |
| if (existed) |
| return *slot; |
| |
| x = get_addr_from_global_cache (loc); |
| |
| /* Tentative, avoiding infinite recursion. */ |
| *slot = x; |
| |
| /* Recurse to cache the local expansion of X, or to search for a |
| VALUE in the expansion. */ |
| if (x != loc) |
| { |
| rtx nx = vt_canonicalize_addr (set, x); |
| if (nx != x) |
| { |
| slot = local_get_addr_cache->get (loc); |
| *slot = x = nx; |
| } |
| return x; |
| } |
| |
| dv = dv_from_rtx (x); |
| var = shared_hash_find (set->vars, dv); |
| if (!var) |
| return x; |
| |
| /* Look for an improved equivalent expression. */ |
| for (l = var->var_part[0].loc_chain; l; l = l->next) |
| { |
| rtx base = vt_get_canonicalize_base (l->loc); |
| if (GET_CODE (base) == VALUE |
| && canon_value_cmp (base, loc)) |
| { |
| rtx nx = vt_canonicalize_addr (set, l->loc); |
| if (x != nx) |
| { |
| slot = local_get_addr_cache->get (loc); |
| *slot = x = nx; |
| } |
| break; |
| } |
| } |
| |
| return x; |
| } |
| |
| /* Canonicalize LOC using equivalences from SET in addition to those |
| in the cselib static table. It expects a VALUE-based expression, |
| and it will only substitute VALUEs with other VALUEs or |
| function-global equivalences, so that, if two addresses have base |
| VALUEs that are locally or globally related in ways that |
| memrefs_conflict_p cares about, they will both canonicalize to |
| expressions that have the same base VALUE. |
| |
| The use of VALUEs as canonical base addresses enables the canonical |
| RTXs to remain unchanged globally, if they resolve to a constant, |
| or throughout a basic block otherwise, so that they can be cached |
| and the cache need not be invalidated when REGs, MEMs or such |
| change. */ |
| |
| static rtx |
| vt_canonicalize_addr (dataflow_set *set, rtx oloc) |
| { |
| poly_int64 ofst = 0, term; |
| machine_mode mode = GET_MODE (oloc); |
| rtx loc = oloc; |
| rtx x; |
| bool retry = true; |
| |
| while (retry) |
| { |
| while (GET_CODE (loc) == PLUS |
| && poly_int_rtx_p (XEXP (loc, 1), &term)) |
| { |
| ofst += term; |
| loc = XEXP (loc, 0); |
| } |
| |
| /* Alignment operations can't normally be combined, so just |
| canonicalize the base and we're done. We'll normally have |
| only one stack alignment anyway. */ |
| if (GET_CODE (loc) == AND |
| && GET_CODE (XEXP (loc, 1)) == CONST_INT |
| && negative_power_of_two_p (INTVAL (XEXP (loc, 1)))) |
| { |
| x = vt_canonicalize_addr (set, XEXP (loc, 0)); |
| if (x != XEXP (loc, 0)) |
| loc = gen_rtx_AND (mode, x, XEXP (loc, 1)); |
| retry = false; |
| } |
| |
| if (GET_CODE (loc) == VALUE) |
| { |
| if (set) |
| loc = get_addr_from_local_cache (set, loc); |
| else |
| loc = get_addr_from_global_cache (loc); |
| |
| /* Consolidate plus_constants. */ |
| while (maybe_ne (ofst, 0) |
| && GET_CODE (loc) == PLUS |
| && poly_int_rtx_p (XEXP (loc, 1), &term)) |
| { |
| ofst += term; |
| loc = XEXP (loc, 0); |
| } |
| |
| retry = false; |
| } |
| else |
| { |
| x = canon_rtx (loc); |
| if (retry) |
| retry = (x != loc); |
| loc = x; |
| } |
| } |
| |
| /* Add OFST back in. */ |
| if (maybe_ne (ofst, 0)) |
| { |
| /* Don't build new RTL if we can help it. */ |
| if (strip_offset (oloc, &term) == loc && known_eq (term, ofst)) |
| return oloc; |
| |
| loc = plus_constant (mode, loc, ofst); |
| } |
| |
| return loc; |
| } |
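| |
| /* As an illustration, suppose (value V1) is known to equal |
| (plus (value V0) (const_int 8)). Canonicalizing |
| (plus (value V1) (const_int -8)) then accumulates OFST = -8, |
| resolves V1 through the caches to (plus (value V0) (const_int 8)), |
| consolidates OFST to 0, and returns (value V0), the same result |
| as canonicalizing (value V0) itself. */ |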
| |
| /* Return true iff there's a true dependence between MLOC and LOC. |
| MADDR must be a canonicalized version of MLOC's address. */ |
| |
| static inline bool |
| vt_canon_true_dep (dataflow_set *set, rtx mloc, rtx maddr, rtx loc) |
| { |
| if (GET_CODE (loc) != MEM) |
| return false; |
| |
| rtx addr = vt_canonicalize_addr (set, XEXP (loc, 0)); |
| if (!canon_true_dependence (mloc, GET_MODE (mloc), maddr, loc, addr)) |
| return false; |
| |
| return true; |
| } |
| |
| /* Hold parameters for the hashtab traversal function |
| drop_overlapping_mem_locs, see below. */ |
| |
| struct overlapping_mems |
| { |
| dataflow_set *set; |
| rtx loc, addr; |
| }; |
| |
| /* Remove all MEMs that overlap with COMS->LOC from the location list |
| of a hash table entry for a onepart variable. COMS->ADDR must be a |
| canonicalized form of COMS->LOC's address, and COMS->LOC must be |
| canonicalized itself. */ |
| |
| int |
| drop_overlapping_mem_locs (variable **slot, overlapping_mems *coms) |
| { |
| dataflow_set *set = coms->set; |
| rtx mloc = coms->loc, addr = coms->addr; |
| variable *var = *slot; |
| |
| if (var->onepart != NOT_ONEPART) |
| { |
| location_chain *loc, **locp; |
| bool changed = false; |
| rtx cur_loc; |
| |
| gcc_assert (var->n_var_parts == 1); |
| |
| if (shared_var_p (var, set->vars)) |
| { |
| for (loc = var->var_part[0].loc_chain; loc; loc = loc->next) |
| if (vt_canon_true_dep (set, mloc, addr, loc->loc)) |
| break; |
| |
| if (!loc) |
| return 1; |
| |
| slot = unshare_variable (set, slot, var, VAR_INIT_STATUS_UNKNOWN); |
| var = *slot; |
| gcc_assert (var->n_var_parts == 1); |
| } |
| |
| if (VAR_LOC_1PAUX (var)) |
| cur_loc = VAR_LOC_FROM (var); |
| else |
| cur_loc = var->var_part[0].cur_loc; |
| |
| for (locp = &var->var_part[0].loc_chain, loc = *locp; |
| loc; loc = *locp) |
| { |
| if (!vt_canon_true_dep (set, mloc, addr, loc->loc)) |
| { |
| locp = &loc->next; |
| continue; |
| } |
| |
| *locp = loc->next; |
| /* If we have deleted the location which was last emitted, |
| we have to emit a new location, so add the variable to the |
| set of changed variables. */ |
| if (cur_loc == loc->loc) |
| { |
| changed = true; |
| var->var_part[0].cur_loc = NULL; |
| if (VAR_LOC_1PAUX (var)) |
| VAR_LOC_FROM (var) = NULL; |
| } |
| delete loc; |
| } |
| |
| if (!var->var_part[0].loc_chain) |
| { |
| var->n_var_parts--; |
| changed = true; |
| } |
| if (changed) |
| variable_was_changed (var, set); |
| } |
| |
| return 1; |
| } |
| |
| /* Remove from SET all VALUE bindings to MEMs that overlap with LOC. */ |
| |
| static void |
| clobber_overlapping_mems (dataflow_set *set, rtx loc) |
| { |
| struct overlapping_mems coms; |
| |
| gcc_checking_assert (GET_CODE (loc) == MEM); |
| |
| coms.set = set; |
| coms.loc = canon_rtx (loc); |
| coms.addr = vt_canonicalize_addr (set, XEXP (loc, 0)); |
| |
| set->traversed_vars = set->vars; |
| shared_hash_htab (set->vars) |
| ->traverse <overlapping_mems*, drop_overlapping_mem_locs> (&coms); |
| set->traversed_vars = NULL; |
| } |
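| |
| /* E.g. a store through (mem (value V)) must drop every onepart |
| binding whose location list contains a MEM that |
| canon_true_dependence says may overlap it; var_mem_delete_and_set |
| and val_bind below call this before recording the new binding. */ |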
| |
| /* Set the location of DV, OFFSET as the MEM LOC. */ |
| |
| static void |
| var_mem_decl_set (dataflow_set *set, rtx loc, enum var_init_status initialized, |
| decl_or_value dv, HOST_WIDE_INT offset, rtx set_src, |
| enum insert_option iopt) |
| { |
| if (dv_is_decl_p (dv)) |
| dv = dv_from_decl (var_debug_decl (dv_as_decl (dv))); |
| |
| set_variable_part (set, loc, dv, offset, initialized, set_src, iopt); |
| } |
| |
| /* Set the location part of variable MEM_EXPR (LOC) in dataflow set |
| SET to LOC. |
| Adjust the address first if it is stack pointer based. */ |
| |
| static void |
| var_mem_set (dataflow_set *set, rtx loc, enum var_init_status initialized, |
| rtx set_src) |
| { |
| tree decl = MEM_EXPR (loc); |
| HOST_WIDE_INT offset = int_mem_offset (loc); |
| |
| var_mem_decl_set (set, loc, initialized, |
| dv_from_decl (decl), offset, set_src, INSERT); |
| } |
| |
| /* Delete and set the location part of variable MEM_EXPR (LOC) in |
| dataflow set SET to LOC. If MODIFY is true, any other live copies |
| of the same variable part are also deleted from the dataflow set, |
| otherwise the variable part is assumed to be copied from another |
| location holding the same part. |
| Adjust the address first if it is stack pointer based. */ |
| |
| static void |
| var_mem_delete_and_set (dataflow_set *set, rtx loc, bool modify, |
| enum var_init_status initialized, rtx set_src) |
| { |
| tree decl = MEM_EXPR (loc); |
| HOST_WIDE_INT offset = int_mem_offset (loc); |
| |
| clobber_overlapping_mems (set, loc); |
| decl = var_debug_decl (decl); |
| |
| if (initialized == VAR_INIT_STATUS_UNKNOWN) |
| initialized = get_init_value (set, loc, dv_from_decl (decl)); |
| |
| if (modify) |
| clobber_variable_part (set, NULL, dv_from_decl (decl), offset, set_src); |
| var_mem_set (set, loc, initialized, set_src); |
| } |
| |
| /* Delete the location part LOC from dataflow set SET. If CLOBBER is |
| true, also delete any other live copies of the same variable part. |
| Adjust the address first if it is stack pointer based. */ |
| |
| static void |
| var_mem_delete (dataflow_set *set, rtx loc, bool clobber) |
| { |
| tree decl = MEM_EXPR (loc); |
| HOST_WIDE_INT offset = int_mem_offset (loc); |
| |
| clobber_overlapping_mems (set, loc); |
| decl = var_debug_decl (decl); |
| if (clobber) |
| clobber_variable_part (set, NULL, dv_from_decl (decl), offset, NULL); |
| delete_variable_part (set, loc, dv_from_decl (decl), offset); |
| } |
| |
| /* Return true if LOC should not be expanded for location expressions, |
| or used in them. */ |
| |
| static inline bool |
| unsuitable_loc (rtx loc) |
| { |
| switch (GET_CODE (loc)) |
| { |
| case PC: |
| case SCRATCH: |
| case ASM_INPUT: |
| case ASM_OPERANDS: |
| return true; |
| |
| default: |
| return false; |
| } |
| } |
| |
| /* Bind VAL to LOC in SET. If MODIFIED, detach LOC from any values |
| bound to it. */ |
| |
| static inline void |
| val_bind (dataflow_set *set, rtx val, rtx loc, bool modified) |
| { |
| if (REG_P (loc)) |
| { |
| if (modified) |
| var_regno_delete (set, REGNO (loc)); |
| var_reg_decl_set (set, loc, VAR_INIT_STATUS_INITIALIZED, |
| dv_from_value (val), 0, NULL_RTX, INSERT); |
| } |
| else if (MEM_P (loc)) |
| { |
| struct elt_loc_list *l = CSELIB_VAL_PTR (val)->locs; |
| |
| if (modified) |
| clobber_overlapping_mems (set, loc); |
| |
| if (l && GET_CODE (l->loc) == VALUE) |
| l = canonical_cselib_val (CSELIB_VAL_PTR (l->loc))->locs; |
| |
| /* If this MEM is a global constant, we don't need it in the |
| dynamic tables. ??? We should test this before emitting the |
| micro-op in the first place. */ |
| while (l) |
| if (GET_CODE (l->loc) == MEM && XEXP (l->loc, 0) == XEXP (loc, 0)) |
| break; |
| else |
| l = l->next; |
| |
| if (!l) |
| var_mem_decl_set (set, loc, VAR_INIT_STATUS_INITIALIZED, |
| dv_from_value (val), 0, NULL_RTX, INSERT); |
| } |
| else |
| { |
| /* Other kinds of equivalences are necessarily static, at least |
| so long as we do not perform substitutions while merging |
| expressions. */ |
| gcc_unreachable (); |
| set_variable_part (set, loc, dv_from_value (val), 0, |
| VAR_INIT_STATUS_INITIALIZED, NULL_RTX, INSERT); |
| } |
| } |
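| |
| /* For instance, after (set (reg R) (value V)) with MODIFIED set, |
| any values previously bound to R are detached via |
| var_regno_delete, and V is then recorded at offset 0 both in R's |
| attribute list and in the variable table. */ |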
| |
| /* Bind a value to a location it was just stored in. If MODIFIED |
| holds, assume the location was modified, detaching it from any |
| values bound to it. */ |
| |
| static void |
| val_store (dataflow_set *set, rtx val, rtx loc, rtx_insn *insn, |
| bool modified) |
| { |
| cselib_val *v = CSELIB_VAL_PTR (val); |
| |
| gcc_assert (cselib_preserved_value_p (v)); |
| |
| if (dump_file) |
| { |
| fprintf (dump_file, "%i: ", insn ? INSN_UID (insn) : 0); |
| print_inline_rtx (dump_file, loc, 0); |
| fprintf (dump_file, " evaluates to "); |
| print_inline_rtx (dump_file, val, 0); |
| if (v->locs) |
| { |
| struct elt_loc_list *l; |
| for (l = v->locs; l; l = l->next) |
| { |
| fprintf (dump_file, "\n%i: ", INSN_UID (l->setting_insn)); |
| print_inline_rtx (dump_file, l->loc, 0); |
| } |
| } |
| fprintf (dump_file, "\n"); |
| } |
| |
| gcc_checking_assert (!unsuitable_loc (loc)); |
| |
| val_bind (set, val, loc, modified); |
| } |
| |
| /* Clear (canonical address) slots that reference X. */ |
| |
| bool |
| local_get_addr_clear_given_value (rtx const &, rtx *slot, rtx x) |
| { |
| if (vt_get_canonicalize_base (*slot) == x) |
| *slot = NULL; |
| return true; |
| } |
| |
| /* Reset this node, detaching all its equivalences. */ |
| |
| static void |
| val_reset (dataflow_set *set, decl_or_value dv) |
| { |
| variable *var = shared_hash_find (set->vars, dv); |
| location_chain *node; |
| rtx cval; |
| |
| if (!var || !var->n_var_parts) |
| return; |
| |
| gcc_assert (var->n_var_parts == 1); |
| |
| if (var->onepart == ONEPART_VALUE) |
| { |
| rtx x = dv_as_value (dv); |
| |
| /* Relationships in the global cache don't change, so reset the |
| local cache entry only. */ |
| rtx *slot = local_get_addr_cache->get (x); |
| if (slot) |
| { |
| /* If the value resolved back to itself, odds are that other |
| values may have cached it too. These entries now refer |
| to the old X, so detach them too. Entries that used the |
| old X but resolved to something else remain ok as long as |
| that something else isn't also reset. */ |
| if (*slot == x) |
| local_get_addr_cache |
| ->traverse<rtx, local_get_addr_clear_given_value> (x); |
| *slot = NULL; |
| } |
| } |
| |
| cval = NULL; |
| for (node = var->var_part[0].loc_chain; node; node = node->next) |
| if (GET_CODE (node->loc) == VALUE |
| && canon_value_cmp (node->loc, cval)) |
| cval = node->loc; |
| |
| for (node = var->var_part[0].loc_chain; node; node = node->next) |
| if (GET_CODE (node->loc) == VALUE && cval != node->loc) |
| { |
| /* Redirect the equivalence link to the new canonical |
| value, or simply remove it if it would point at |
| itself. */ |
| if (cval) |
| set_variable_part (set, cval, dv_from_value (node->loc), |
| 0, node->init, node->set_src, NO_INSERT); |
| delete_variable_part (set, dv_as_value (dv), |
| dv_from_value (node->loc), 0); |
| } |
| |
| if (cval) |
| { |
| decl_or_value cdv = dv_from_value (cval); |
| |
| /* Keep the remaining values connected, accumulating links |
| in the canonical value. */ |
| for (node = var->var_part[0].loc_chain; node; node = node->next) |
| { |
| if (node->loc == cval) |
| continue; |
| else if (GET_CODE (node->loc) == REG) |
| var_reg_decl_set (set, node->loc, node->init, cdv, 0, |
| node->set_src, NO_INSERT); |
| else if (GET_CODE (node->loc) == MEM) |
| var_mem_decl_set (set, node->loc, node->init, cdv, 0, |
| node->set_src, NO_INSERT); |
| else |
| set_variable_part (set, node->loc, cdv, 0, |
| node->init, node->set_src, NO_INSERT); |
| } |
| } |
| |
| /* We remove this last, to make sure that the canonical value is not |
| removed to the point of requiring reinsertion. */ |
| if (cval) |
| delete_variable_part (set, dv_as_value (dv), dv_from_value (cval), 0); |
| |
| clobber_variable_part (set, NULL, dv, 0, NULL); |
| } |
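| |
| /* A small example: if the chain of V holds (value V1), (value V2) |
| and (reg R), with V1 canonical, then V2's equivalence link is |
| redirected to V1, R is reattached to V1, and finally V's own link |
| to V1 is deleted, leaving V fully detached. */ |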
| |
| /* Find the values in location LOC and record equivalences between |
| them and VAL; if no such value is found, add LOC as a location |
| holding VAL. */ |
| |
| static void |
| val_resolve (dataflow_set *set, rtx val, rtx loc, rtx_insn *insn) |
| { |
| decl_or_value dv = dv_from_value (val); |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| if (insn) |
| fprintf (dump_file, "%i: ", INSN_UID (insn)); |
| else |
| fprintf (dump_file, "head: "); |
| print_inline_rtx (dump_file, val, 0); |
| fputs (" is at ", dump_file); |
| print_inline_rtx (dump_file, loc, 0); |
| fputc ('\n', dump_file); |
| } |
| |
| val_reset (set, dv); |
| |
| gcc_checking_assert (!unsuitable_loc (loc)); |
| |
| if (REG_P (loc)) |
| { |
| attrs *node, *found = NULL; |
| |
| for (node = set->regs[REGNO (loc)]; node; node = node->next) |
| if (dv_is_value_p (node->dv) |
| && GET_MODE (dv_as_value (node->dv)) == GET_MODE (loc)) |
| { |
| found = node; |
| |
| /* Map incoming equivalences. ??? Wouldn't it be nice if |
| we just started sharing the location lists? Maybe a |
| circular list ending at the value itself or some |
| such. */ |
| set_variable_part (set, dv_as_value (node->dv), |
| dv_from_value (val), node->offset, |
| VAR_INIT_STATUS_INITIALIZED, NULL_RTX, INSERT); |
| set_variable_part (set, val, node->dv, node->offset, |
| VAR_INIT_STATUS_INITIALIZED, NULL_RTX, INSERT); |
| } |
| |
| /* If we didn't find any equivalence, we need to remember that |
| this value is held in the named register. */ |
| if (found) |
| return; |
| } |
| /* ??? Attempt to find and merge equivalent MEMs or other |
| expressions too. */ |
| |
| val_bind (set, val, loc, false); |
| } |
| |
| /* Initialize dataflow set SET to be empty. */ |
| |
| static void |
| dataflow_set_init (dataflow_set *set) |
| { |
| init_attrs_list_set (set->regs); |
| set->vars = shared_hash_copy (empty_shared_hash); |
| set->stack_adjust = 0; |
| set->traversed_vars = NULL; |
| } |
| |
| /* Delete the contents of dataflow set SET. */ |
| |
| static void |
| dataflow_set_clear (dataflow_set *set) |
| { |
| int i; |
| |
| for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
| attrs_list_clear (&set->regs[i]); |
| |
| shared_hash_destroy (set->vars); |
| set->vars = shared_hash_copy (empty_shared_hash); |
| } |
| |
| /* Copy the contents of dataflow set SRC to DST. */ |
| |
| static void |
| dataflow_set_copy (dataflow_set *dst, dataflow_set *src) |
| { |
| int i; |
| |
| for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
| attrs_list_copy (&dst->regs[i], src->regs[i]); |
| |
| shared_hash_destroy (dst->vars); |
| dst->vars = shared_hash_copy (src->vars); |
| dst->stack_adjust = src->stack_adjust; |
| } |
| |
| /* Information for merging lists of locations for a given offset of a |
| variable. */ |
| struct variable_union_info |
| { |
| /* Node of the location chain. */ |
| location_chain *lc; |
| |
| /* The sum of positions in the input chains. */ |
| int pos; |
| |
| /* The position in the chain of DST dataflow set. */ |
| int pos_dst; |
| }; |
| |
| /* Buffer for location list sorting and its allocated size. */ |
| static struct variable_union_info *vui_vec; |
| static int vui_allocated; |
| |
| /* Compare function for qsort, order the structures by POS element. */ |
| |
| static int |
| variable_union_info_cmp_pos (const void *n1, const void *n2) |
| { |
| const struct variable_union_info *const i1 |
| = (const struct variable_union_info *) n1; |
| const struct variable_union_info *const i2 |
| = (const struct variable_union_info *) n2; |
| |
| if (i1->pos != i2->pos) |
| return i1->pos - i2->pos; |
| |
| return (i1->pos_dst - i2->pos_dst); |
| } |
| |
| /* Compute the union of the location parts of variable SRC and of the |
| same variable from dataflow set SET. Compute the "sorted" union of |
| the location chains for common offsets, i.e. the locations of a |
| variable part are sorted by a priority, where the priority is the |
| sum of the positions in the 2 chains (if a location is only in one |
| chain, its position in the other is defined to be larger than the |
| length of the chains). |
| When we are updating the location parts, the newest location is at |
| the beginning of the chain, so the described "sorted" union keeps |
| the newest locations at the beginning. */ |
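| |
| /* For example, unioning the DST chain [X, Y] with the SRC chain |
| [Y, Z] assigns Y the priority 1 + 0 = 1 (the sum of its positions |
| in the two chains), while X gets 0 + 4 and Z gets 1 + 4 (the |
| missing position being defined as src_l + dst_l = 4), so the |
| merged chain is [Y, X, Z]. */ |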
| |
| static int |
| variable_union (variable *src, dataflow_set *set) |
| { |
| variable *dst; |
| variable **dstp; |
| int i, j, k; |
| |
| dstp = shared_hash_find_slot (set->vars, src->dv); |
| if (!dstp || !*dstp) |
| { |
| src->refcount++; |
| |
| dst_can_be_shared = false; |
| if (!dstp) |
| dstp = shared_hash_find_slot_unshare (&set->vars, src->dv, INSERT); |
| |
| *dstp = src; |
| |
| /* Continue traversing the hash table. */ |
| return 1; |
| } |
| else |
| dst = *dstp; |
| |
| gcc_assert (src->n_var_parts); |
| gcc_checking_assert (src->onepart == dst->onepart); |
| |
| /* We can combine one-part variables very efficiently, because their |
| entries are in canonical order. */ |
| if (src->onepart) |
| { |
| location_chain **nodep, *dnode, *snode; |
| |
| gcc_assert (src->n_var_parts == 1 |
| && dst->n_var_parts == 1); |
| |
| snode = src->var_part[0].loc_chain; |
| gcc_assert (snode); |
| |
| restart_onepart_unshared: |
| nodep = &dst->var_part[0].loc_chain; |
| dnode = *nodep; |
| gcc_assert (dnode); |
| |
| while (snode) |
| { |
| int r = dnode ? loc_cmp (dnode->loc, snode->loc) : 1; |
| |
| if (r > 0) |
| { |
| location_chain *nnode; |
| |
| if (shared_var_p (dst, set->vars)) |
| { |
| dstp = unshare_variable (set, dstp, dst, |
| VAR_INIT_STATUS_INITIALIZED); |
| dst = *dstp; |
| goto restart_onepart_unshared; |
| } |
| |
| *nodep = nnode = new location_chain; |
| nnode->loc = snode->loc; |
| nnode->init = snode->init; |
| if (!snode->set_src || MEM_P (snode->set_src)) |
| nnode->set_src = NULL; |
| else |
| nnode->set_src = snode->set_src; |
| nnode->next = dnode; |
| dnode = nnode; |
| } |
| else if (r == 0) |
| gcc_checking_assert (rtx_equal_p (dnode->loc, snode->loc)); |
| |
| if (r >= 0) |
| snode = snode->next; |
| |
| nodep = &dnode->next; |
| dnode = *nodep; |
| } |
| |
| return 1; |
| } |
| |
| gcc_checking_assert (!src->onepart); |
| |
| /* Count the number of location parts, result is K. */ |
| for (i = 0, j = 0, k = 0; |
| i < src->n_var_parts && j < dst->n_var_parts; k++) |
| { |
| if (VAR_PART_OFFSET (src, i) == VAR_PART_OFFSET (dst, j)) |
| { |
| i++; |
| j++; |
| } |
| else if (VAR_PART_OFFSET (src, i) < VAR_PART_OFFSET (dst, j)) |
| i++; |
| else |
| j++; |
| } |
| k += src->n_var_parts - i; |
| k += dst->n_var_parts - j; |
| |
| /* We track only variables whose size is <= MAX_VAR_PARTS bytes, |
| thus there are at most MAX_VAR_PARTS different offsets. */ |
| gcc_checking_assert (dst->onepart ? k == 1 : k <= MAX_VAR_PARTS); |
| |
| if (dst->n_var_parts != k && shared_var_p (dst, set->vars)) |
| { |
| dstp = unshare_variable (set, dstp, dst, VAR_INIT_STATUS_UNKNOWN); |
| dst = *dstp; |
| } |
| |
| i = src->n_var_parts - 1; |
| j = dst->n_var_parts - 1; |
| dst->n_var_parts = k; |
| |
| for (k--; k >= 0; k--) |
| { |
| location_chain *node, *node2; |
| |
| if (i >= 0 && j >= 0 |
| && VAR_PART_OFFSET (src, i) == VAR_PART_OFFSET (dst, j)) |
| { |
| /* Compute the "sorted" union of the chains, i.e. the locations which |
| are in both chains go first, they are sorted by the sum of |
| positions in the chains. */ |
| int dst_l, src_l; |
| int ii, jj, n; |
| struct variable_union_info *vui; |
| |
| /* If DST is shared, compare the location chains. |
| If they are different we will most likely modify the chain in |
| DST, so make a copy of DST. */ |
| if (shared_var_p (dst, set->vars)) |
| { |
| for (node = src->var_part[i].loc_chain, |
| node2 = dst->var_part[j].loc_chain; node && node2; |
| node = node->next, node2 = node2->next) |
| { |
| if (!((REG_P (node2->loc) |
| && REG_P (node->loc) |
| && REGNO (node2->loc) == REGNO (node->loc)) |
| || rtx_equal_p (node2->loc, node->loc))) |
| { |
| if (node2->init < node->init) |
| node2->init = node->init; |
| break; |
| } |
| } |
| if (node || node2) |
| { |
| dstp = unshare_variable (set, dstp, dst, |
| VAR_INIT_STATUS_UNKNOWN); |
| dst = *dstp; |
| } |
| } |
| |
| src_l = 0; |
| for (node = src->var_part[i].loc_chain; node; node = node->next) |
| src_l++; |
| dst_l = 0; |
| for (node = dst->var_part[j].loc_chain; node; node = node->next) |
| dst_l++; |
| |
| if (dst_l == 1) |
| { |
| /* The most common case, and much simpler: no qsort is needed. */ |
| location_chain *dstnode = dst->var_part[j].loc_chain; |
| dst->var_part[k].loc_chain = dstnode; |
| VAR_PART_OFFSET (dst, k) = VAR_PART_OFFSET (dst, j); |
| node2 = dstnode; |
| for (node = src->var_part[i].loc_chain; node; node = node->next) |
| if (!((REG_P (dstnode->loc) |
| && REG_P (node->loc) |
| && REGNO (dstnode->loc) == REGNO (node->loc)) |
| || rtx_equal_p (dstnode->loc, node->loc))) |
| { |
| location_chain *new_node; |
| |
| /* Copy the location from SRC. */ |
| new_node = new location_chain; |
| new_node->loc = node->loc; |
| new_node->init = node->init; |
| if (!node->set_src || MEM_P (node->set_src)) |
| new_node->set_src = NULL; |
| else |
| new_node->set_src = node->set_src; |
| node2->next = new_node; |
| node2 = new_node; |
| } |
| node2->next = NULL; |
| } |
| else |
| { |
| if (src_l + dst_l > vui_allocated) |
| { |
| vui_allocated = MAX (vui_allocated * 2, src_l + dst_l); |
| vui_vec = XRESIZEVEC (struct variable_union_info, vui_vec, |
| vui_allocated); |
| } |
| vui = vui_vec; |
| |
| /* Fill in the locations from DST. */ |
| for (node = dst->var_part[j].loc_chain, jj = 0; node; |
| node = node->next, jj++) |
| { |
| vui[jj].lc = node; |
| vui[jj].pos_dst = jj; |
| |
| /* Initialize POS to a value larger than the sum of any two valid |
| positions. */ |
| vui[jj].pos = jj + src_l + dst_l; |
| } |
| |
| /* Fill in the locations from SRC. */ |
| n = dst_l; |
| for (node = src->var_part[i].loc_chain, ii = 0; node; |
| node = node->next, ii++) |
| { |
| /* Find location from NODE. */ |
| for (jj = 0; jj < dst_l; jj++) |
| { |
| if ((REG_P (vui[jj].lc->loc) |
| && REG_P (node->loc) |
| && REGNO (vui[jj].lc->loc) == REGNO (node->loc)) |
| || rtx_equal_p (vui[jj].lc->loc, node->loc)) |
| { |
| vui[jj].pos = jj + ii; |
| break; |
| } |
| } |
| if (jj >= dst_l) /* The location has not been found. */ |
| { |
| location_chain *new_node; |
| |
| /* Copy the location from SRC. */ |
| new_node = new location_chain; |
| new_node->loc = node->loc; |
| new_node->init = node->init; |
| if (!node->set_src || MEM_P (node->set_src)) |
| new_node->set_src = NULL; |
| else |
| new_node->set_src = node->set_src; |
| vui[n].lc = new_node; |
| vui[n].pos_dst = src_l + dst_l; |
| vui[n].pos = ii + src_l + dst_l; |
| n++; |
| } |
| } |
| |
| if (dst_l == 2) |
| { |
| /* Special-case the still very common case of dst_l == 2: |
| all entries dst_l ... n-1 are already sorted, since for |
| i >= dst_l, vui[i].pos == i + src_l + dst_l. */ |
| if (vui[0].pos > vui[1].pos) |
| { |
| /* Order should be 1, 0, 2... */ |
| dst->var_part[k].loc_chain = vui[1].lc; |
| vui[1].lc->next = vui[0].lc; |
| if (n >= 3) |
| { |
| vui[0].lc->next = vui[2].lc; |
| vui[n - 1].lc->next = NULL; |
| } |
| else |
| vui[0].lc->next = NULL; |
| ii = 3; |
| } |
| else |
| { |
| dst->var_part[k].loc_chain = vui[0].lc; |
| if (n >= 3 && vui[2].pos < vui[1].pos) |
| { |
| /* Order should be 0, 2, 1, 3... */ |
| vui[0].lc->next = vui[2].lc; |
| vui[2].lc->next = vui[1].lc; |
| if (n >= 4) |
| { |
| vui[1].lc->next = vui[3].lc; |
| vui[n - 1].lc->next = NULL; |
| } |
| else |
| vui[1].lc->next = NULL; |
| ii = 4; |
| } |
| else |
| { |
| /* Order should be 0, 1, 2... */ |
| ii = 1; |
| vui[n - 1].lc->next = NULL; |
| } |
| } |
| for (; ii < n; ii++) |
| vui[ii - 1].lc->next = vui[ii].lc; |
| } |
| else |
| { |
| qsort (vui, n, sizeof (struct variable_union_info), |
| variable_union_info_cmp_pos); |
| |
| /* Reconnect the nodes in sorted order. */ |
| for (ii = 1; ii < n; ii++) |
| vui[ii - 1].lc->next = vui[ii].lc; |
| vui[n - 1].lc->next = NULL; |
| dst->var_part[k].loc_chain = vui[0].lc; |
| } |
| |
| VAR_PART_OFFSET (dst, k) = VAR_PART_OFFSET (dst, j); |
| } |
| i--; |
| j--; |
| } |
| else if ((i >= 0 && j >= 0 |
| && VAR_PART_OFFSET (src, i) < VAR_PART_OFFSET (dst, j)) |
| || i < 0) |
| { |
| dst->var_part[k] = dst->var_part[j]; |
| j--; |
| } |
| else if ((i >= 0 && j >= 0 |
| && VAR_PART_OFFSET (src, i) > VAR_PART_OFFSET (dst, j)) |
| || j < 0) |
| { |
| location_chain **nextp; |
| |
| /* Copy the chain from SRC. */ |
| nextp = &dst->var_part[k].loc_chain; |
| for (node = src->var_part[i].loc_chain; node; node = node->next) |
| { |
| location_chain *new_lc; |
| |
| new_lc = new location_chain; |
| new_lc->next = NULL; |
| new_lc->init = node->init; |
| if (!node->set_src || MEM_P (node->set_src)) |
| new_lc->set_src = NULL; |
| else |
| new_lc->set_src = node->set_src; |
| new_lc->loc = node->loc; |
| |
| *nextp = new_lc; |
| nextp = &new_lc->next; |
| } |
| |
| VAR_PART_OFFSET (dst, k) = VAR_PART_OFFSET (src, i); |
| i--; |
| } |
| dst->var_part[k].cur_loc = NULL; |
| } |
| |
| if (flag_var_tracking_uninit) |
| for (i = 0; i < src->n_var_parts && i < dst->n_var_parts; i++) |
| { |
| location_chain *node, *node2; |
| for (node = src->var_part[i].loc_chain; node; node = node->next) |
| for (node2 = dst->var_part[i].loc_chain; node2; node2 = node2->next) |
| if (rtx_equal_p (node->loc, node2->loc)) |
| { |
| if (node->init > node2->init) |
| node2->init = node->init; |
| } |
| } |
| |
| /* Continue traversing the hash table. */ |
| return 1; |
| } |
| |
| /* Compute union of dataflow sets SRC and DST and store it to DST. */ |
| |
| static void |
| dataflow_set_union (dataflow_set *dst, dataflow_set *src) |
| { |
| int i; |
| |
| for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
| attrs_list_union (&dst->regs[i], src->regs[i]); |
| |
| if (dst->vars == empty_shared_hash) |
| { |
| shared_hash_destroy (dst->vars); |
| dst->vars = shared_hash_copy (src->vars); |
| } |
| else |
| { |
| variable_iterator_type hi; |
| variable *
|