| /* Scalar Replacement of Aggregates (SRA) converts some structure |
| references into scalar references, exposing them to the scalar |
| optimizers. |
| Copyright (C) 2008-2018 Free Software Foundation, Inc. |
| Contributed by Martin Jambor <mjambor@suse.cz> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
/* This file implements Scalar Replacement of Aggregates (SRA).  SRA is run
| twice, once in the early stages of compilation (early SRA) and once in the |
| late stages (late SRA). The aim of both is to turn references to scalar |
| parts of aggregates into uses of independent scalar variables. |
| |
| The two passes are nearly identical, the only difference is that early SRA |
| does not scalarize unions which are used as the result in a GIMPLE_RETURN |
| statement because together with inlining this can lead to weird type |
| conversions. |
| |
| Both passes operate in four stages: |
| |
| 1. The declarations that have properties which make them candidates for |
| scalarization are identified in function find_var_candidates(). The |
| candidates are stored in candidate_bitmap. |
| |
| 2. The function body is scanned. In the process, declarations which are |
| used in a manner that prevent their scalarization are removed from the |
| candidate bitmap. More importantly, for every access into an aggregate, |
| an access structure (struct access) is created by create_access() and |
| stored in a vector associated with the aggregate. Among other |
| information, the aggregate declaration, the offset and size of the access |
| and its type are stored in the structure. |
| |
| On a related note, assign_link structures are created for every assign |
| statement between candidate aggregates and attached to the related |
| accesses. |
| |
| 3. The vectors of accesses are analyzed. They are first sorted according to |
| their offset and size and then scanned for partially overlapping accesses |
| (i.e. those which overlap but one is not entirely within another). Such |
| an access disqualifies the whole aggregate from being scalarized. |
| |
| If there is no such inhibiting overlap, a representative access structure |
| is chosen for every unique combination of offset and size. Afterwards, |
| the pass builds a set of trees from these structures, in which children |
| of an access are within their parent (in terms of offset and size). |
| |
| Then accesses are propagated whenever possible (i.e. in cases when it |
| does not create a partially overlapping access) across assign_links from |
| the right hand side to the left hand side. |
| |
| Then the set of trees for each declaration is traversed again and those |
| accesses which should be replaced by a scalar are identified. |
| |
| 4. The function is traversed again, and for every reference into an |
| aggregate that has some component which is about to be scalarized, |
| statements are amended and new statements are created as necessary. |
| Finally, if a parameter got scalarized, the scalar replacements are |
| initialized with values from respective parameter aggregates. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "predict.h" |
| #include "alloc-pool.h" |
| #include "tree-pass.h" |
| #include "ssa.h" |
| #include "cgraph.h" |
| #include "gimple-pretty-print.h" |
| #include "alias.h" |
| #include "fold-const.h" |
| #include "tree-eh.h" |
| #include "stor-layout.h" |
| #include "gimplify.h" |
| #include "gimple-iterator.h" |
| #include "gimplify-me.h" |
| #include "gimple-walk.h" |
| #include "tree-cfg.h" |
| #include "tree-dfa.h" |
| #include "tree-ssa.h" |
| #include "symbol-summary.h" |
| #include "ipa-param-manipulation.h" |
| #include "ipa-prop.h" |
| #include "params.h" |
| #include "dbgcnt.h" |
| #include "tree-inline.h" |
| #include "ipa-fnsummary.h" |
| #include "ipa-utils.h" |
| #include "builtins.h" |
| |
/* Enumeration of all aggregate reductions we can do.  */
enum sra_mode { SRA_MODE_EARLY_IPA,   /* early call regularization */
		SRA_MODE_EARLY_INTRA, /* early intraprocedural SRA */
		SRA_MODE_INTRA };     /* late intraprocedural SRA */

/* Global variable describing which aggregate reduction we are performing at
   the moment.  */
static enum sra_mode sra_mode;

/* Defined below; links the two sides of an aggregate assignment.  */
struct assign_link;
| |
/* ACCESS represents each access to an aggregate variable (as a whole or a
   part).  It can also represent a group of accesses that refer to exactly the
   same fragment of an aggregate (i.e. those that have exactly the same offset
   and size).  Such representatives for a single aggregate, once determined,
   are linked in a linked list and have the group fields set.

   Moreover, when doing intraprocedural SRA, a tree is built from those
   representatives (by the means of first_child and next_sibling pointers), in
   which all items in a subtree are "within" the root, i.e. their offset is
   greater or equal to offset of the root and offset+size is smaller or equal
   to offset+size of the root.  Children of an access are sorted by offset.

   Note that accesses to parts of vector and complex number types are always
   represented by an access to the whole complex number or a vector.  It is a
   duty of the modifying functions to replace them appropriately.  */

struct access
{
  /* Values returned by `get_ref_base_and_extent' for each component reference
     If EXPR isn't a component reference just set `BASE = EXPR', `OFFSET = 0',
     `SIZE = TREE_SIZE (TREE_TYPE (expr))'.  */
  HOST_WIDE_INT offset;
  HOST_WIDE_INT size;
  tree base;

  /* Expression.  It is context dependent so do not use it to create new
     expressions to access the original aggregate.  See PR 42154 for a
     testcase.  */
  tree expr;
  /* Type.  */
  tree type;

  /* The statement this access belongs to.  */
  gimple *stmt;

  /* Next group representative for this aggregate.  */
  struct access *next_grp;

  /* Pointer to the group representative.  Pointer to itself if the struct is
     the representative.  */
  struct access *group_representative;

  /* After access tree has been constructed, this points to the parent of the
     current access, if there is one.  NULL for roots.  */
  struct access *parent;

  /* If this access has any children (in terms of the definition above), this
     points to the first one.  */
  struct access *first_child;

  /* In intraprocedural SRA, pointer to the next sibling in the access tree as
     described above.  In IPA-SRA this is a pointer to the next access
     belonging to the same group (having the same representative).  */
  struct access *next_sibling;

  /* Pointers to the first and last element in the linked list of assign
     links.  */
  struct assign_link *first_link, *last_link;

  /* Pointer to the next access in the work queue.  */
  struct access *next_queued;

  /* Replacement variable for this access "region."  Never to be accessed
     directly, always only by the means of get_access_replacement() and only
     when grp_to_be_replaced flag is set.  */
  tree replacement_decl;

  /* Is this access an access to a non-addressable field? */
  unsigned non_addressable : 1;

  /* Is this access made in reverse storage order? */
  unsigned reverse : 1;

  /* Is this particular access write access? */
  unsigned write : 1;

  /* Is this access currently in the work queue?  */
  unsigned grp_queued : 1;

  /* Does this group contain a write access?  This flag is propagated down the
     access tree.  */
  unsigned grp_write : 1;

  /* Does this group contain a read access?  This flag is propagated down the
     access tree.  */
  unsigned grp_read : 1;

  /* Does this group contain a read access that comes from an assignment
     statement?  This flag is propagated down the access tree.  */
  unsigned grp_assignment_read : 1;

  /* Does this group contain a write access that comes from an assignment
     statement?  This flag is propagated down the access tree.  */
  unsigned grp_assignment_write : 1;

  /* Does this group contain a read access through a scalar type?  This flag is
     not propagated in the access tree in any direction.  */
  unsigned grp_scalar_read : 1;

  /* Does this group contain a write access through a scalar type?  This flag
     is not propagated in the access tree in any direction.  */
  unsigned grp_scalar_write : 1;

  /* Is this access an artificial one created to scalarize some record
     entirely? */
  unsigned grp_total_scalarization : 1;

  /* Other passes of the analysis use this bit to make function
     analyze_access_subtree create scalar replacements for this group if
     possible.  */
  unsigned grp_hint : 1;

  /* Is the subtree rooted in this access fully covered by scalar
     replacements?  */
  unsigned grp_covered : 1;

  /* If set to true, this access and all below it in an access tree must not be
     scalarized.  */
  unsigned grp_unscalarizable_region : 1;

  /* Whether data have been written to parts of the aggregate covered by this
     access which is not to be scalarized.  This flag is propagated up in the
     access tree.  */
  unsigned grp_unscalarized_data : 1;

  /* Does this access and/or group contain a write access through a
     BIT_FIELD_REF?  */
  unsigned grp_partial_lhs : 1;

  /* Set when a scalar replacement should be created for this variable.  */
  unsigned grp_to_be_replaced : 1;

  /* Set when we want a replacement for the sole purpose of having it in
     generated debug statements.  */
  unsigned grp_to_be_debug_replaced : 1;

  /* Should TREE_NO_WARNING of a replacement be set?  */
  unsigned grp_no_warning : 1;

  /* Is it possible that the group refers to data which might be (directly or
     otherwise) modified?  */
  unsigned grp_maybe_modified : 1;

  /* Set when this is a representative of a pointer to scalar (i.e. by
     reference) parameter which we consider for turning into a plain scalar
     (i.e. a by value parameter).  */
  unsigned grp_scalar_ptr : 1;

  /* Set when we discover that this pointer is not safe to dereference in the
     caller.  */
  unsigned grp_not_necessarilly_dereferenced : 1;
};
| |
typedef struct access *access_p;


/* Alloc pool for allocating access structures.  */
static object_allocator<struct access> access_pool ("SRA accesses");

/* A structure linking lhs and rhs accesses from an aggregate assignment.  They
   are used to propagate subaccesses from rhs to lhs as long as they don't
   conflict with what is already there.  */
struct assign_link
{
  /* The access on the left and on the right hand side of the assignment,
     respectively.  */
  struct access *lacc, *racc;
  /* Next link in the singly-linked list hanging off RACC.  */
  struct assign_link *next;
};

/* Alloc pool for allocating assign link structures.  */
static object_allocator<assign_link> assign_link_pool ("SRA links");

/* Base (tree) -> Vector (vec<access_p> *) map.  */
static hash_map<tree, auto_vec<access_p> > *base_access_vec;
| |
/* Candidate hash table helpers.  Declarations are hashed and compared purely
   by their DECL_UID, which allows lookup by UID alone (see candidate ()).  */

struct uid_decl_hasher : nofree_ptr_hash <tree_node>
{
  static inline hashval_t hash (const tree_node *);
  static inline bool equal (const tree_node *, const tree_node *);
};
| |
| /* Hash a tree in a uid_decl_map. */ |
| |
| inline hashval_t |
| uid_decl_hasher::hash (const tree_node *item) |
| { |
| return item->decl_minimal.uid; |
| } |
| |
| /* Return true if the DECL_UID in both trees are equal. */ |
| |
| inline bool |
| uid_decl_hasher::equal (const tree_node *a, const tree_node *b) |
| { |
| return (a->decl_minimal.uid == b->decl_minimal.uid); |
| } |
| |
/* Set of candidates, as a bitmap of DECL_UIDs.  */
static bitmap candidate_bitmap;
/* Hash table mapping DECL_UIDs back to the candidate declarations.  */
static hash_table<uid_decl_hasher> *candidates;
| |
| /* For a candidate UID return the candidates decl. */ |
| |
| static inline tree |
| candidate (unsigned uid) |
| { |
| tree_node t; |
| t.decl_minimal.uid = uid; |
| return candidates->find_with_hash (&t, static_cast <hashval_t> (uid)); |
| } |
| |
/* Bitmap of candidates which we should try to entirely scalarize away and
   those which cannot be (because they are and need be used as a whole).  */
static bitmap should_scalarize_away_bitmap, cannot_scalarize_away_bitmap;

/* Bitmap of candidates in the constant pool, which cannot be scalarized
   because this would produce non-constant expressions (e.g. Ada).  */
static bitmap disqualified_constants;

/* Obstack for creation of fancy names.  */
static struct obstack name_obstack;

/* Head of a linked list of accesses that need to have their subaccesses
   propagated to their assignment counterparts.  */
static struct access *work_queue_head;

/* Number of parameters of the analyzed function when doing early ipa SRA.  */
static int func_param_count;

/* scan_function sets the following to true if it encounters a call to
   __builtin_apply_args.  */
static bool encountered_apply_args;

/* Set by scan_function when it finds a recursive call.  */
static bool encountered_recursive_call;

/* Set by scan_function when it finds a recursive call with fewer actual
   arguments than formal parameters.  */
static bool encountered_unchangable_recursive_call;

/* This is a table in which for each basic block and parameter there is a
   distance (offset + size) in that parameter which is dereferenced and
   accessed in that BB.  */
static HOST_WIDE_INT *bb_dereferences;
/* Bitmap of BBs that can cause the function to "stop" progressing by
   returning, throwing externally, looping infinitely or calling a function
   which might abort etc.  */
static bitmap final_bbs;

/* Representative of no accesses at all.  */
static struct access no_accesses_representant;
| |
| /* Predicate to test the special value. */ |
| |
| static inline bool |
| no_accesses_p (struct access *access) |
| { |
| return access == &no_accesses_representant; |
| } |
| |
/* Dump contents of ACCESS to file F in a human friendly way.  If GRP is true,
   representative fields are dumped, otherwise those which only describe the
   individual access are.  (This describes dump_access, which is defined below
   after the statistics structure.)  */
| |
/* Statistics gathered over a run of the pass, dumped at its end.  */
static struct
{
  /* Number of processed aggregates is readily available in
     analyze_all_variable_accesses and so is not stored here.  */

  /* Number of created scalar replacements.  */
  int replacements;

  /* Number of times sra_modify_expr or sra_modify_assign themselves changed an
     expression.  */
  int exprs;

  /* Number of statements created by generate_subtree_copies.  */
  int subtree_copies;

  /* Number of statements created by load_assign_lhs_subreplacements.  */
  int subreplacements;

  /* Number of times sra_modify_assign has deleted a statement.  */
  int deleted;

  /* Number of times sra_modify_assign has to deal with subaccesses of LHS and
     RHS separately due to type conversions or nonexistent matching
     references.  */
  int separate_lhs_rhs_handling;

  /* Number of parameters that were removed because they were unused.  */
  int deleted_unused_parameters;

  /* Number of scalars passed as parameters by reference that have been
     converted to be passed by value.  */
  int scalar_by_ref_to_by_val;

  /* Number of aggregate parameters that were replaced by one or more of their
     components.  */
  int aggregate_params_reduced;

  /* Number of components created when splitting aggregate parameters.  */
  int param_reductions_created;
} sra_stats;
| |
/* Dump contents of ACCESS to file F in a human friendly way.  If GRP is true,
   the group-representative flags are printed in addition to the per-access
   ones.  */

static void
dump_access (FILE *f, struct access *access, bool grp)
{
  fprintf (f, "access { ");
  fprintf (f, "base = (%d)'", DECL_UID (access->base));
  print_generic_expr (f, access->base);
  fprintf (f, "', offset = " HOST_WIDE_INT_PRINT_DEC, access->offset);
  fprintf (f, ", size = " HOST_WIDE_INT_PRINT_DEC, access->size);
  fprintf (f, ", expr = ");
  print_generic_expr (f, access->expr);
  fprintf (f, ", type = ");
  print_generic_expr (f, access->type);
  fprintf (f, ", non_addressable = %d, reverse = %d",
	   access->non_addressable, access->reverse);
  if (grp)
    fprintf (f, ", grp_read = %d, grp_write = %d, grp_assignment_read = %d, "
	     "grp_assignment_write = %d, grp_scalar_read = %d, "
	     "grp_scalar_write = %d, grp_total_scalarization = %d, "
	     "grp_hint = %d, grp_covered = %d, "
	     "grp_unscalarizable_region = %d, grp_unscalarized_data = %d, "
	     "grp_partial_lhs = %d, grp_to_be_replaced = %d, "
	     "grp_to_be_debug_replaced = %d, grp_maybe_modified = %d, "
	     "grp_not_necessarilly_dereferenced = %d\n",
	     access->grp_read, access->grp_write, access->grp_assignment_read,
	     access->grp_assignment_write, access->grp_scalar_read,
	     access->grp_scalar_write, access->grp_total_scalarization,
	     access->grp_hint, access->grp_covered,
	     access->grp_unscalarizable_region, access->grp_unscalarized_data,
	     access->grp_partial_lhs, access->grp_to_be_replaced,
	     access->grp_to_be_debug_replaced, access->grp_maybe_modified,
	     access->grp_not_necessarilly_dereferenced);
  else
    fprintf (f, ", write = %d, grp_total_scalarization = %d, "
	     "grp_partial_lhs = %d\n",
	     access->write, access->grp_total_scalarization,
	     access->grp_partial_lhs);
}
| |
| /* Dump a subtree rooted in ACCESS to file F, indent by LEVEL. */ |
| |
| static void |
| dump_access_tree_1 (FILE *f, struct access *access, int level) |
| { |
| do |
| { |
| int i; |
| |
| for (i = 0; i < level; i++) |
| fputs ("* ", f); |
| |
| dump_access (f, access, true); |
| |
| if (access->first_child) |
| dump_access_tree_1 (f, access->first_child, level + 1); |
| |
| access = access->next_sibling; |
| } |
| while (access); |
| } |
| |
| /* Dump all access trees for a variable, given the pointer to the first root in |
| ACCESS. */ |
| |
| static void |
| dump_access_tree (FILE *f, struct access *access) |
| { |
| for (; access; access = access->next_grp) |
| dump_access_tree_1 (f, access, 0); |
| } |
| |
| /* Return true iff ACC is non-NULL and has subaccesses. */ |
| |
| static inline bool |
| access_has_children_p (struct access *acc) |
| { |
| return acc && acc->first_child; |
| } |
| |
| /* Return true iff ACC is (partly) covered by at least one replacement. */ |
| |
| static bool |
| access_has_replacements_p (struct access *acc) |
| { |
| struct access *child; |
| if (acc->grp_to_be_replaced) |
| return true; |
| for (child = acc->first_child; child; child = child->next_sibling) |
| if (access_has_replacements_p (child)) |
| return true; |
| return false; |
| } |
| |
| /* Return a vector of pointers to accesses for the variable given in BASE or |
| NULL if there is none. */ |
| |
| static vec<access_p> * |
| get_base_access_vector (tree base) |
| { |
| return base_access_vec->get (base); |
| } |
| |
| /* Find an access with required OFFSET and SIZE in a subtree of accesses rooted |
| in ACCESS. Return NULL if it cannot be found. */ |
| |
| static struct access * |
| find_access_in_subtree (struct access *access, HOST_WIDE_INT offset, |
| HOST_WIDE_INT size) |
| { |
| while (access && (access->offset != offset || access->size != size)) |
| { |
| struct access *child = access->first_child; |
| |
| while (child && (child->offset + child->size <= offset)) |
| child = child->next_sibling; |
| access = child; |
| } |
| |
| return access; |
| } |
| |
| /* Return the first group representative for DECL or NULL if none exists. */ |
| |
| static struct access * |
| get_first_repr_for_decl (tree base) |
| { |
| vec<access_p> *access_vec; |
| |
| access_vec = get_base_access_vector (base); |
| if (!access_vec) |
| return NULL; |
| |
| return (*access_vec)[0]; |
| } |
| |
| /* Find an access representative for the variable BASE and given OFFSET and |
| SIZE. Requires that access trees have already been built. Return NULL if |
| it cannot be found. */ |
| |
| static struct access * |
| get_var_base_offset_size_access (tree base, HOST_WIDE_INT offset, |
| HOST_WIDE_INT size) |
| { |
| struct access *access; |
| |
| access = get_first_repr_for_decl (base); |
| while (access && (access->offset + access->size <= offset)) |
| access = access->next_grp; |
| if (!access) |
| return NULL; |
| |
| return find_access_in_subtree (access, offset, size); |
| } |
| |
| /* Add LINK to the linked list of assign links of RACC. */ |
| static void |
| add_link_to_rhs (struct access *racc, struct assign_link *link) |
| { |
| gcc_assert (link->racc == racc); |
| |
| if (!racc->first_link) |
| { |
| gcc_assert (!racc->last_link); |
| racc->first_link = link; |
| } |
| else |
| racc->last_link->next = link; |
| |
| racc->last_link = link; |
| link->next = NULL; |
| } |
| |
| /* Move all link structures in their linked list in OLD_RACC to the linked list |
| in NEW_RACC. */ |
| static void |
| relink_to_new_repr (struct access *new_racc, struct access *old_racc) |
| { |
| if (!old_racc->first_link) |
| { |
| gcc_assert (!old_racc->last_link); |
| return; |
| } |
| |
| if (new_racc->first_link) |
| { |
| gcc_assert (!new_racc->last_link->next); |
| gcc_assert (!old_racc->last_link || !old_racc->last_link->next); |
| |
| new_racc->last_link->next = old_racc->first_link; |
| new_racc->last_link = old_racc->last_link; |
| } |
| else |
| { |
| gcc_assert (!new_racc->last_link); |
| |
| new_racc->first_link = old_racc->first_link; |
| new_racc->last_link = old_racc->last_link; |
| } |
| old_racc->first_link = old_racc->last_link = NULL; |
| } |
| |
| /* Add ACCESS to the work queue (which is actually a stack). */ |
| |
| static void |
| add_access_to_work_queue (struct access *access) |
| { |
| if (access->first_link && !access->grp_queued) |
| { |
| gcc_assert (!access->next_queued); |
| access->next_queued = work_queue_head; |
| access->grp_queued = 1; |
| work_queue_head = access; |
| } |
| } |
| |
| /* Pop an access from the work queue, and return it, assuming there is one. */ |
| |
| static struct access * |
| pop_access_from_work_queue (void) |
| { |
| struct access *access = work_queue_head; |
| |
| work_queue_head = access->next_queued; |
| access->next_queued = NULL; |
| access->grp_queued = 0; |
| return access; |
| } |
| |

/* Allocate necessary structures.  */

static void
sra_initialize (void)
{
  candidate_bitmap = BITMAP_ALLOC (NULL);
  /* Size the candidate table to roughly half the number of local
     declarations; the table grows on demand.  */
  candidates = new hash_table<uid_decl_hasher>
    (vec_safe_length (cfun->local_decls) / 2);
  should_scalarize_away_bitmap = BITMAP_ALLOC (NULL);
  cannot_scalarize_away_bitmap = BITMAP_ALLOC (NULL);
  disqualified_constants = BITMAP_ALLOC (NULL);
  gcc_obstack_init (&name_obstack);
  base_access_vec = new hash_map<tree, auto_vec<access_p> >;
  memset (&sra_stats, 0, sizeof (sra_stats));
  encountered_apply_args = false;
  encountered_recursive_call = false;
  encountered_unchangable_recursive_call = false;
}
| |
/* Deallocate all general structures.  */

static void
sra_deinitialize (void)
{
  BITMAP_FREE (candidate_bitmap);
  delete candidates;
  candidates = NULL;
  BITMAP_FREE (should_scalarize_away_bitmap);
  BITMAP_FREE (cannot_scalarize_away_bitmap);
  BITMAP_FREE (disqualified_constants);
  access_pool.release ();
  assign_link_pool.release ();
  /* Freeing the obstack releases all fancy names created on it.  */
  obstack_free (&name_obstack, NULL);

  delete base_access_vec;
}
| |
| /* Return true if DECL is a VAR_DECL in the constant pool, false otherwise. */ |
| |
| static bool constant_decl_p (tree decl) |
| { |
| return VAR_P (decl) && DECL_IN_CONSTANT_POOL (decl); |
| } |
| |
/* Remove DECL from candidates for SRA and write REASON to the dump file if
   there is one.  */

static void
disqualify_candidate (tree decl, const char *reason)
{
  /* Only drop the hash table entry if the bit was actually set; the decl may
     already have been disqualified earlier.  */
  if (bitmap_clear_bit (candidate_bitmap, DECL_UID (decl)))
    candidates->remove_elt_with_hash (decl, DECL_UID (decl));
  /* Remember disqualified constant-pool entries so they are never
     re-considered (see create_access).  */
  if (constant_decl_p (decl))
    bitmap_set_bit (disqualified_constants, DECL_UID (decl));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "! Disqualifying ");
      print_generic_expr (dump_file, decl);
      fprintf (dump_file, " - %s\n", reason);
    }
}
| |
/* Return true iff the type contains a field or an element which does not allow
   scalarization.  On a positive answer, a human-readable explanation is
   stored in *MSG.  */

static bool
type_internals_preclude_sra_p (tree type, const char **msg)
{
  tree fld;
  tree et;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      for (fld = TYPE_FIELDS (type); fld; fld = DECL_CHAIN (fld))
	if (TREE_CODE (fld) == FIELD_DECL)
	  {
	    tree ft = TREE_TYPE (fld);

	    if (TREE_THIS_VOLATILE (fld))
	      {
		*msg = "volatile structure field";
		return true;
	      }
	    if (!DECL_FIELD_OFFSET (fld))
	      {
		*msg = "no structure field offset";
		return true;
	      }
	    if (!DECL_SIZE (fld))
	      {
		*msg = "zero structure field size";
		return true;
	      }
	    /* Offset and size must be known compile-time constants that fit
	       a host integer, otherwise we cannot model the access.  */
	    if (!tree_fits_uhwi_p (DECL_FIELD_OFFSET (fld)))
	      {
		*msg = "structure field offset not fixed";
		return true;
	      }
	    if (!tree_fits_uhwi_p (DECL_SIZE (fld)))
	      {
		*msg = "structure field size not fixed";
		return true;
	      }
	    if (!tree_fits_shwi_p (bit_position (fld)))
	      {
		*msg = "structure field size too big";
		return true;
	      }
	    /* Aggregate fields must start on a byte boundary.  */
	    if (AGGREGATE_TYPE_P (ft)
		&& int_bit_position (fld) % BITS_PER_UNIT != 0)
	      {
		*msg = "structure field is bit field";
		return true;
	      }

	    /* Recurse into aggregate members.  */
	    if (AGGREGATE_TYPE_P (ft) && type_internals_preclude_sra_p (ft, msg))
	      return true;
	  }

      return false;

    case ARRAY_TYPE:
      et = TREE_TYPE (type);

      if (TYPE_VOLATILE (et))
	{
	  *msg = "element type is volatile";
	  return true;
	}

      if (AGGREGATE_TYPE_P (et) && type_internals_preclude_sra_p (et, msg))
	return true;

      return false;

    default:
      return false;
    }
}
| |
| /* If T is an SSA_NAME, return NULL if it is not a default def or return its |
| base variable if it is. Return T if it is not an SSA_NAME. */ |
| |
| static tree |
| get_ssa_base_param (tree t) |
| { |
| if (TREE_CODE (t) == SSA_NAME) |
| { |
| if (SSA_NAME_IS_DEFAULT_DEF (t)) |
| return SSA_NAME_VAR (t); |
| else |
| return NULL_TREE; |
| } |
| return t; |
| } |
| |
/* Mark a dereference of BASE of distance DIST in a basic block that STMT
   belongs to, unless the BB has already been marked as a potentially
   final one.  */

static void
mark_parm_dereference (tree base, HOST_WIDE_INT dist, gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  int idx, parm_index = 0;
  tree parm;

  /* In a "final" BB the function may stop before the dereference is reached,
     so the dereference cannot be relied upon.  */
  if (bitmap_bit_p (final_bbs, bb->index))
    return;

  /* Determine the index of BASE in the parameter list.  */
  for (parm = DECL_ARGUMENTS (current_function_decl);
       parm && parm != base;
       parm = DECL_CHAIN (parm))
    parm_index++;

  gcc_assert (parm_index < func_param_count);

  /* Keep the maximum dereferenced distance per (BB, parameter) pair.  */
  idx = bb->index * func_param_count + parm_index;
  if (bb_dereferences[idx] < dist)
    bb_dereferences[idx] = dist;
}
| |
| /* Allocate an access structure for BASE, OFFSET and SIZE, clear it, fill in |
| the three fields. Also add it to the vector of accesses corresponding to |
| the base. Finally, return the new access. */ |
| |
| static struct access * |
| create_access_1 (tree base, HOST_WIDE_INT offset, HOST_WIDE_INT size) |
| { |
| struct access *access = access_pool.allocate (); |
| |
| memset (access, 0, sizeof (struct access)); |
| access->base = base; |
| access->offset = offset; |
| access->size = size; |
| |
| base_access_vec->get_or_insert (base).safe_push (access); |
| |
| return access; |
| } |
| |
static bool maybe_add_sra_candidate (tree);

/* Create and insert access for EXPR. Return created access, or NULL if it is
   not possible.  Also scan for uses of constant pool as we go along and add
   to candidates.  */

static struct access *
create_access (tree expr, gimple *stmt, bool write)
{
  struct access *access;
  poly_int64 poffset, psize, pmax_size;
  HOST_WIDE_INT offset, size, max_size;
  tree base = expr;
  bool reverse, ptr, unscalarizable_region = false;

  base = get_ref_base_and_extent (expr, &poffset, &psize, &pmax_size,
				  &reverse);
  /* SRA only models constant offsets and sizes; a polynomial extent
     (e.g. from scalable vector types) disqualifies the base entirely.  */
  if (!poffset.is_constant (&offset)
      || !psize.is_constant (&size)
      || !pmax_size.is_constant (&max_size))
    {
      disqualify_candidate (base, "Encountered a polynomial-sized access.");
      return NULL;
    }

  /* In early IPA mode accesses may be through a pointer parameter; strip the
     MEM_REF and use the underlying default-definition parameter, if any.  */
  if (sra_mode == SRA_MODE_EARLY_IPA
      && TREE_CODE (base) == MEM_REF)
    {
      base = get_ssa_base_param (TREE_OPERAND (base, 0));
      if (!base)
	return NULL;
      ptr = true;
    }
  else
    ptr = false;

  /* For constant-pool entries, check we can substitute the constant value.  */
  if (constant_decl_p (base)
      && (sra_mode == SRA_MODE_EARLY_INTRA || sra_mode == SRA_MODE_INTRA))
    {
      gcc_assert (!bitmap_bit_p (disqualified_constants, DECL_UID (base)));
      if (expr != base
	  && !is_gimple_reg_type (TREE_TYPE (expr))
	  && dump_file && (dump_flags & TDF_DETAILS))
	{
	  /* This occurs in Ada with accesses to ARRAY_RANGE_REFs,
	     and elements of multidimensional arrays (which are
	     multi-element arrays in their own right).  */
	  fprintf (dump_file, "Allowing non-reg-type load of part"
			      " of constant-pool entry: ");
	  print_generic_expr (dump_file, expr);
	}
      maybe_add_sra_candidate (base);
    }

  /* Only accesses to declarations still on the candidate list matter.  */
  if (!DECL_P (base) || !bitmap_bit_p (candidate_bitmap, DECL_UID (base)))
    return NULL;

  if (sra_mode == SRA_MODE_EARLY_IPA)
    {
      if (size < 0 || size != max_size)
	{
	  disqualify_candidate (base, "Encountered a variable sized access.");
	  return NULL;
	}
      if (TREE_CODE (expr) == COMPONENT_REF
	  && DECL_BIT_FIELD (TREE_OPERAND (expr, 1)))
	{
	  disqualify_candidate (base, "Encountered a bit-field access.");
	  return NULL;
	}
      gcc_checking_assert ((offset % BITS_PER_UNIT) == 0);

      if (ptr)
	mark_parm_dereference (base, offset + size, stmt);
    }
  else
    {
      /* A size differing from max_size means a trailing variable part; keep
	 the access but mark the region as unscalarizable.  */
      if (size != max_size)
	{
	  size = max_size;
	  unscalarizable_region = true;
	}
      if (size < 0)
	{
	  disqualify_candidate (base, "Encountered an unconstrained access.");
	  return NULL;
	}
    }

  access = create_access_1 (base, offset, size);
  access->expr = expr;
  access->type = TREE_TYPE (expr);
  access->write = write;
  access->grp_unscalarizable_region = unscalarizable_region;
  access->stmt = stmt;
  access->reverse = reverse;

  if (TREE_CODE (expr) == COMPONENT_REF
      && DECL_NONADDRESSABLE_P (TREE_OPERAND (expr, 1)))
    access->non_addressable = 1;

  return access;
}
| |

/* Return true iff TYPE is scalarizable - i.e. a RECORD_TYPE or fixed-length
   ARRAY_TYPE with fields that are either of gimple register types (excluding
   bit-fields) or (recursively) scalarizable types.  CONST_DECL must be true if
   we are considering a decl from constant pool.  If it is false, char arrays
   will be refused.  */

static bool
scalarizable_type_p (tree type, bool const_decl)
{
  gcc_assert (!is_gimple_reg_type (type));
  if (type_contains_placeholder_p (type))
    return false;

  switch (TREE_CODE (type))
  {
  case RECORD_TYPE:
    for (tree fld = TYPE_FIELDS (type); fld; fld = DECL_CHAIN (fld))
      if (TREE_CODE (fld) == FIELD_DECL)
	{
	  tree ft = TREE_TYPE (fld);

	  if (DECL_BIT_FIELD (fld))
	    return false;

	  if (!is_gimple_reg_type (ft)
	      && !scalarizable_type_p (ft, const_decl))
	    return false;
	}

    return true;

  case ARRAY_TYPE:
    {
      /* For non-constant-pool decls refuse elements smaller than a byte
	 (e.g. bit-packed char arrays).  */
      HOST_WIDE_INT min_elem_size;
      if (const_decl)
	min_elem_size = 0;
      else
	min_elem_size = BITS_PER_UNIT;

      if (TYPE_DOMAIN (type) == NULL_TREE
	  || !tree_fits_shwi_p (TYPE_SIZE (type))
	  || !tree_fits_shwi_p (TYPE_SIZE (TREE_TYPE (type)))
	  || (tree_to_shwi (TYPE_SIZE (TREE_TYPE (type))) <= min_elem_size)
	  || !tree_fits_shwi_p (TYPE_MIN_VALUE (TYPE_DOMAIN (type))))
	return false;
      if (tree_to_shwi (TYPE_SIZE (type)) == 0
	  && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE)
	/* Zero-element array, should not prevent scalarization.  */
	;
      else if ((tree_to_shwi (TYPE_SIZE (type)) <= 0)
	       || !tree_fits_shwi_p (TYPE_MAX_VALUE (TYPE_DOMAIN (type))))
	/* Variable-length array, do not allow scalarization.  */
	return false;

      /* Finally the element type itself must be scalarizable.  */
      tree elem = TREE_TYPE (type);
      if (!is_gimple_reg_type (elem)
	  && !scalarizable_type_p (elem, const_decl))
	return false;
      return true;
    }
  default:
    return false;
  }
}
| |
| static void scalarize_elem (tree, HOST_WIDE_INT, HOST_WIDE_INT, bool, tree, tree); |
| |
| /* Create total_scalarization accesses for all scalar fields of a member |
| of type DECL_TYPE conforming to scalarizable_type_p. BASE |
| must be the top-most VAR_DECL representing the variable; within that, |
| OFFSET locates the member and REF must be the memory reference expression for |
| the member. */ |
| |
| static void |
| completely_scalarize (tree base, tree decl_type, HOST_WIDE_INT offset, tree ref) |
| { |
| switch (TREE_CODE (decl_type)) |
| { |
| case RECORD_TYPE: |
| for (tree fld = TYPE_FIELDS (decl_type); fld; fld = DECL_CHAIN (fld)) |
| if (TREE_CODE (fld) == FIELD_DECL) |
| { |
| HOST_WIDE_INT pos = offset + int_bit_position (fld); |
| tree ft = TREE_TYPE (fld); |
| tree nref = build3 (COMPONENT_REF, ft, ref, fld, NULL_TREE); |
| |
| scalarize_elem (base, pos, tree_to_uhwi (DECL_SIZE (fld)), |
| TYPE_REVERSE_STORAGE_ORDER (decl_type), |
| nref, ft); |
| } |
| break; |
| case ARRAY_TYPE: |
| { |
| tree elemtype = TREE_TYPE (decl_type); |
| tree elem_size = TYPE_SIZE (elemtype); |
| gcc_assert (elem_size && tree_fits_shwi_p (elem_size)); |
| HOST_WIDE_INT el_size = tree_to_shwi (elem_size); |
| gcc_assert (el_size > 0); |
| |
| tree minidx = TYPE_MIN_VALUE (TYPE_DOMAIN (decl_type)); |
| gcc_assert (TREE_CODE (minidx) == INTEGER_CST); |
| tree maxidx = TYPE_MAX_VALUE (TYPE_DOMAIN (decl_type)); |
| /* Skip (some) zero-length arrays; others have MAXIDX == MINIDX - 1. */ |
| if (maxidx) |
| { |
| gcc_assert (TREE_CODE (maxidx) == INTEGER_CST); |
| tree domain = TYPE_DOMAIN (decl_type); |
| /* MINIDX and MAXIDX are inclusive, and must be interpreted in |
| DOMAIN (e.g. signed int, whereas min/max may be size_int). */ |
| offset_int idx = wi::to_offset (minidx); |
| offset_int max = wi::to_offset (maxidx); |
| if (!TYPE_UNSIGNED (domain)) |
| { |
| idx = wi::sext (idx, TYPE_PRECISION (domain)); |
| max = wi::sext (max, TYPE_PRECISION (domain)); |
| } |
| for (int el_off = offset; idx <= max; ++idx) |
| { |
| tree nref = build4 (ARRAY_REF, elemtype, |
| ref, |
| wide_int_to_tree (domain, idx), |
| NULL_TREE, NULL_TREE); |
| scalarize_elem (base, el_off, el_size, |
| TYPE_REVERSE_STORAGE_ORDER (decl_type), |
| nref, elemtype); |
| el_off += el_size; |
| } |
| } |
| } |
| break; |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
/* Create total_scalarization accesses for a member of type TYPE, which must
   satisfy either is_gimple_reg_type or scalarizable_type_p.  BASE must be the
   top-most VAR_DECL representing the variable; within that, POS and SIZE locate
   the member, REVERSE gives its storage order, and REF must be the reference
   expression for it.  */
| |
| static void |
| scalarize_elem (tree base, HOST_WIDE_INT pos, HOST_WIDE_INT size, bool reverse, |
| tree ref, tree type) |
| { |
| if (is_gimple_reg_type (type)) |
| { |
| struct access *access = create_access_1 (base, pos, size); |
| access->expr = ref; |
| access->type = type; |
| access->grp_total_scalarization = 1; |
| access->reverse = reverse; |
| /* Accesses for intraprocedural SRA can have their stmt NULL. */ |
| } |
| else |
| completely_scalarize (base, type, pos, ref); |
| } |
| |
| /* Create a total_scalarization access for VAR as a whole. VAR must be of a |
| RECORD_TYPE or ARRAY_TYPE conforming to scalarizable_type_p. */ |
| |
| static void |
| create_total_scalarization_access (tree var) |
| { |
| HOST_WIDE_INT size = tree_to_uhwi (DECL_SIZE (var)); |
| struct access *access; |
| |
| access = create_access_1 (var, 0, size); |
| access->expr = var; |
| access->type = TREE_TYPE (var); |
| access->grp_total_scalarization = 1; |
| } |
| |
/* Return true if REF has a VIEW_CONVERT_EXPR somewhere in it.  */
| |
| static inline bool |
| contains_view_convert_expr_p (const_tree ref) |
| { |
| while (handled_component_p (ref)) |
| { |
| if (TREE_CODE (ref) == VIEW_CONVERT_EXPR) |
| return true; |
| ref = TREE_OPERAND (ref, 0); |
| } |
| |
| return false; |
| } |
| |
| /* Return true if REF contains a VIEW_CONVERT_EXPR or a MEM_REF that performs |
| type conversion or a COMPONENT_REF with a bit-field field declaration. */ |
| |
| static bool |
| contains_vce_or_bfcref_p (const_tree ref) |
| { |
| while (handled_component_p (ref)) |
| { |
| if (TREE_CODE (ref) == VIEW_CONVERT_EXPR |
| || (TREE_CODE (ref) == COMPONENT_REF |
| && DECL_BIT_FIELD (TREE_OPERAND (ref, 1)))) |
| return true; |
| ref = TREE_OPERAND (ref, 0); |
| } |
| |
| if (TREE_CODE (ref) != MEM_REF |
| || TREE_CODE (TREE_OPERAND (ref, 0)) != ADDR_EXPR) |
| return false; |
| |
| tree mem = TREE_OPERAND (TREE_OPERAND (ref, 0), 0); |
| if (TYPE_MAIN_VARIANT (TREE_TYPE (ref)) |
| != TYPE_MAIN_VARIANT (TREE_TYPE (mem))) |
| return true; |
| |
| return false; |
| } |
| |
| /* Search the given tree for a declaration by skipping handled components and |
| exclude it from the candidates. */ |
| |
| static void |
| disqualify_base_of_expr (tree t, const char *reason) |
| { |
| t = get_base_address (t); |
| if (sra_mode == SRA_MODE_EARLY_IPA |
| && TREE_CODE (t) == MEM_REF) |
| t = get_ssa_base_param (TREE_OPERAND (t, 0)); |
| |
| if (t && DECL_P (t)) |
| disqualify_candidate (t, reason); |
| } |
| |
| /* Scan expression EXPR and create access structures for all accesses to |
| candidates for scalarization. Return the created access or NULL if none is |
| created. */ |
| |
static struct access *
build_access_from_expr_1 (tree expr, gimple *stmt, bool write)
{
  struct access *ret = NULL;
  bool partial_ref;

  /* Strip wrappers that touch only a part of the underlying object; the
     access we record is for the whole operand underneath, and a store is
     then flagged as a partial-LHS write below.  */
  if (TREE_CODE (expr) == BIT_FIELD_REF
      || TREE_CODE (expr) == IMAGPART_EXPR
      || TREE_CODE (expr) == REALPART_EXPR)
    {
      expr = TREE_OPERAND (expr, 0);
      partial_ref = true;
    }
  else
    partial_ref = false;

  if (storage_order_barrier_p (expr))
    {
      disqualify_base_of_expr (expr, "storage order barrier.");
      return NULL;
    }

  /* We need to dive through V_C_Es in order to get the size of its parameter
     and not the result type.  Ada produces such statements.  We are also
     capable of handling the topmost V_C_E but not any of those buried in other
     handled components.  */
  if (TREE_CODE (expr) == VIEW_CONVERT_EXPR)
    expr = TREE_OPERAND (expr, 0);

  if (contains_view_convert_expr_p (expr))
    {
      disqualify_base_of_expr (expr, "V_C_E under a different handled "
			       "component.");
      return NULL;
    }
  if (TREE_THIS_VOLATILE (expr))
    {
      disqualify_base_of_expr (expr, "part of a volatile reference.");
      return NULL;
    }

  switch (TREE_CODE (expr))
    {
    case MEM_REF:
      /* Only IPA-SRA handles MEM_REFs whose address is not a constant
	 ADDR_EXPR (i.e. dereferences of pointer parameters).  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) != ADDR_EXPR
	  && sra_mode != SRA_MODE_EARLY_IPA)
	return NULL;
      /* fall through */
    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
    case COMPONENT_REF:
    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      ret = create_access (expr, stmt, write);
      break;

    default:
      break;
    }

  /* A write through BIT_FIELD_REF/REALPART/IMAGPART modifies only part of
     the replacement, which the materialization phase must preserve.  */
  if (write && partial_ref && ret)
    ret->grp_partial_lhs = 1;

  return ret;
}
| |
| /* Scan expression EXPR and create access structures for all accesses to |
| candidates for scalarization. Return true if any access has been inserted. |
| STMT must be the statement from which the expression is taken, WRITE must be |
| true if the expression is a store and false otherwise. */ |
| |
| static bool |
| build_access_from_expr (tree expr, gimple *stmt, bool write) |
| { |
| struct access *access; |
| |
| access = build_access_from_expr_1 (expr, stmt, write); |
| if (access) |
| { |
| /* This means the aggregate is accesses as a whole in a way other than an |
| assign statement and thus cannot be removed even if we had a scalar |
| replacement for everything. */ |
| if (cannot_scalarize_away_bitmap) |
| bitmap_set_bit (cannot_scalarize_away_bitmap, DECL_UID (access->base)); |
| return true; |
| } |
| return false; |
| } |
| |
| /* Return the single non-EH successor edge of BB or NULL if there is none or |
| more than one. */ |
| |
| static edge |
| single_non_eh_succ (basic_block bb) |
| { |
| edge e, res = NULL; |
| edge_iterator ei; |
| |
| FOR_EACH_EDGE (e, ei, bb->succs) |
| if (!(e->flags & EDGE_EH)) |
| { |
| if (res) |
| return NULL; |
| res = e; |
| } |
| |
| return res; |
| } |
| |
| /* Disqualify LHS and RHS for scalarization if STMT has to terminate its BB and |
| there is no alternative spot where to put statements SRA might need to |
| generate after it. The spot we are looking for is an edge leading to a |
| single non-EH successor, if it exists and is indeed single. RHS may be |
| NULL, in that case ignore it. */ |
| |
| static bool |
| disqualify_if_bad_bb_terminating_stmt (gimple *stmt, tree lhs, tree rhs) |
| { |
| if ((sra_mode == SRA_MODE_EARLY_INTRA || sra_mode == SRA_MODE_INTRA) |
| && stmt_ends_bb_p (stmt)) |
| { |
| if (single_non_eh_succ (gimple_bb (stmt))) |
| return false; |
| |
| disqualify_base_of_expr (lhs, "LHS of a throwing stmt."); |
| if (rhs) |
| disqualify_base_of_expr (rhs, "RHS of a throwing stmt."); |
| return true; |
| } |
| return false; |
| } |
| |
| /* Return true if the nature of BASE is such that it contains data even if |
| there is no write to it in the function. */ |
| |
| static bool |
| comes_initialized_p (tree base) |
| { |
| return TREE_CODE (base) == PARM_DECL || constant_decl_p (base); |
| } |
| |
| /* Scan expressions occurring in STMT, create access structures for all accesses |
| to candidates for scalarization and remove those candidates which occur in |
| statements or expressions that prevent them from being split apart. Return |
| true if any access has been inserted. */ |
| |
static bool
build_accesses_from_assign (gimple *stmt)
{
  tree lhs, rhs;
  struct access *lacc, *racc;

  /* Only single-rhs assignments move aggregate data around.  */
  if (!gimple_assign_single_p (stmt)
      /* Scope clobbers don't influence scalarization.  */
      || gimple_clobber_p (stmt))
    return false;

  lhs = gimple_assign_lhs (stmt);
  rhs = gimple_assign_rhs1 (stmt);

  if (disqualify_if_bad_bb_terminating_stmt (stmt, lhs, rhs))
    return false;

  racc = build_access_from_expr_1 (rhs, stmt, false);
  lacc = build_access_from_expr_1 (lhs, stmt, true);

  if (lacc)
    {
      lacc->grp_assignment_write = 1;
      /* A storage-order barrier on the other side makes this side
	 unsafe to split apart.  */
      if (storage_order_barrier_p (rhs))
	lacc->grp_unscalarizable_region = 1;
    }

  if (racc)
    {
      racc->grp_assignment_read = 1;
      if (should_scalarize_away_bitmap && !gimple_has_volatile_ops (stmt)
	  && !is_gimple_reg_type (racc->type))
	{
	  /* Reads through type-punning or bit-field components cannot be
	     represented by replacements; anything else makes the base a
	     candidate for being scalarized away entirely.  */
	  if (contains_vce_or_bfcref_p (rhs))
	    bitmap_set_bit (cannot_scalarize_away_bitmap,
			    DECL_UID (racc->base));
	  else
	    bitmap_set_bit (should_scalarize_away_bitmap,
			    DECL_UID (racc->base));
	}
      if (storage_order_barrier_p (lhs))
	racc->grp_unscalarizable_region = 1;
    }

  /* For compatible intra-SRA aggregate copies, link the two accesses so
     that subaccesses can later be propagated across the assignment.  */
  if (lacc && racc
      && (sra_mode == SRA_MODE_EARLY_INTRA || sra_mode == SRA_MODE_INTRA)
      && !lacc->grp_unscalarizable_region
      && !racc->grp_unscalarizable_region
      && AGGREGATE_TYPE_P (TREE_TYPE (lhs))
      && lacc->size == racc->size
      && useless_type_conversion_p (lacc->type, racc->type))
    {
      struct assign_link *link;

      link = assign_link_pool.allocate ();
      memset (link, 0, sizeof (struct assign_link));

      link->lacc = lacc;
      link->racc = racc;
      add_link_to_rhs (racc, link);
      add_access_to_work_queue (racc);

      /* Let's delay marking the areas as written until propagation of accesses
	 across link, unless the nature of rhs tells us that its data comes
	 from elsewhere.  */
      if (!comes_initialized_p (racc->base))
	lacc->write = false;
    }

  return lacc || racc;
}
| |
/* Callback of walk_stmt_load_store_addr_ops visit_addr used to determine
   GIMPLE_ASM operands with memory constraints which cannot be scalarized.  */
| |
| static bool |
| asm_visit_addr (gimple *, tree op, tree, void *) |
| { |
| op = get_base_address (op); |
| if (op |
| && DECL_P (op)) |
| disqualify_candidate (op, "Non-scalarizable GIMPLE_ASM operand."); |
| |
| return false; |
| } |
| |
| /* Return true iff callsite CALL has at least as many actual arguments as there |
| are formal parameters of the function currently processed by IPA-SRA and |
| that their types match. */ |
| |
| static inline bool |
| callsite_arguments_match_p (gimple *call) |
| { |
| if (gimple_call_num_args (call) < (unsigned) func_param_count) |
| return false; |
| |
| tree parm; |
| int i; |
| for (parm = DECL_ARGUMENTS (current_function_decl), i = 0; |
| parm; |
| parm = DECL_CHAIN (parm), i++) |
| { |
| tree arg = gimple_call_arg (call, i); |
| if (!useless_type_conversion_p (TREE_TYPE (parm), TREE_TYPE (arg))) |
| return false; |
| } |
| return true; |
| } |
| |
| /* Scan function and look for interesting expressions and create access |
| structures for them. Return true iff any access is created. */ |
| |
static bool
scan_function (void)
{
  basic_block bb;
  bool ret = false;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  tree t;
	  unsigned i;

	  /* For IPA-SRA, blocks from which control may leave the function
	     abnormally are recorded as "final" blocks.  */
	  if (final_bbs && stmt_can_throw_external (stmt))
	    bitmap_set_bit (final_bbs, bb->index);
	  switch (gimple_code (stmt))
	    {
	    case GIMPLE_RETURN:
	      /* The returned value is a read of the aggregate, if any.  */
	      t = gimple_return_retval (as_a <greturn *> (stmt));
	      if (t != NULL_TREE)
		ret |= build_access_from_expr (t, stmt, false);
	      if (final_bbs)
		bitmap_set_bit (final_bbs, bb->index);
	      break;

	    case GIMPLE_ASSIGN:
	      ret |= build_accesses_from_assign (stmt);
	      break;

	    case GIMPLE_CALL:
	      /* Arguments are reads; the lhs, if any, is a write.  */
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		ret |= build_access_from_expr (gimple_call_arg (stmt, i),
					       stmt, false);

	      if (sra_mode == SRA_MODE_EARLY_IPA)
		{
		  tree dest = gimple_call_fndecl (stmt);
		  int flags = gimple_call_flags (stmt);

		  if (dest)
		    {
		      /* __builtin_apply_args and recursive calls with
			 mismatched arguments constrain what IPA-SRA may
			 do to the signature; note their presence.  */
		      if (DECL_BUILT_IN_CLASS (dest) == BUILT_IN_NORMAL
			  && DECL_FUNCTION_CODE (dest) == BUILT_IN_APPLY_ARGS)
			encountered_apply_args = true;
		      if (recursive_call_p (current_function_decl, dest))
			{
			  encountered_recursive_call = true;
			  if (!callsite_arguments_match_p (stmt))
			    encountered_unchangable_recursive_call = true;
			}
		    }

		  if (final_bbs
		      && (flags & (ECF_CONST | ECF_PURE)) == 0)
		    bitmap_set_bit (final_bbs, bb->index);
		}

	      t = gimple_call_lhs (stmt);
	      if (t && !disqualify_if_bad_bb_terminating_stmt (stmt, t, NULL))
		ret |= build_access_from_expr (t, stmt, true);
	      break;

	    case GIMPLE_ASM:
	      {
		gasm *asm_stmt = as_a <gasm *> (stmt);
		/* Taking an operand's address in an asm disqualifies it.  */
		walk_stmt_load_store_addr_ops (asm_stmt, NULL, NULL, NULL,
					       asm_visit_addr);
		if (final_bbs)
		  bitmap_set_bit (final_bbs, bb->index);

		/* Inputs are reads, outputs are writes.  */
		for (i = 0; i < gimple_asm_ninputs (asm_stmt); i++)
		  {
		    t = TREE_VALUE (gimple_asm_input_op (asm_stmt, i));
		    ret |= build_access_from_expr (t, asm_stmt, false);
		  }
		for (i = 0; i < gimple_asm_noutputs (asm_stmt); i++)
		  {
		    t = TREE_VALUE (gimple_asm_output_op (asm_stmt, i));
		    ret |= build_access_from_expr (t, asm_stmt, true);
		  }
	      }
	      break;

	    default:
	      break;
	    }
	}
    }

  return ret;
}
| |
/* Helper of QSORT function.  There are pointers to accesses in the array.  An
   access is considered smaller than another if it has smaller offset or if the
   offsets are the same but its size is bigger.  */
| |
static int
compare_access_positions (const void *a, const void *b)
{
  const access_p *fp1 = (const access_p *) a;
  const access_p *fp2 = (const access_p *) b;
  const access_p f1 = *fp1;
  const access_p f2 = *fp2;

  /* Primary key: offset, ascending.  */
  if (f1->offset != f2->offset)
    return f1->offset < f2->offset ? -1 : 1;

  /* Equal offset and size: order by type so that the most useful
     representative (scalar, then complex/vector, then widest integral)
     sorts first within the group.  */
  if (f1->size == f2->size)
    {
      if (f1->type == f2->type)
	return 0;
      /* Put any non-aggregate type before any aggregate type.  */
      else if (!is_gimple_reg_type (f1->type)
	       && is_gimple_reg_type (f2->type))
	return 1;
      else if (is_gimple_reg_type (f1->type)
	       && !is_gimple_reg_type (f2->type))
	return -1;
      /* Put any complex or vector type before any other scalar type.  */
      else if (TREE_CODE (f1->type) != COMPLEX_TYPE
	       && TREE_CODE (f1->type) != VECTOR_TYPE
	       && (TREE_CODE (f2->type) == COMPLEX_TYPE
		   || TREE_CODE (f2->type) == VECTOR_TYPE))
	return 1;
      else if ((TREE_CODE (f1->type) == COMPLEX_TYPE
		|| TREE_CODE (f1->type) == VECTOR_TYPE)
	       && TREE_CODE (f2->type) != COMPLEX_TYPE
	       && TREE_CODE (f2->type) != VECTOR_TYPE)
	return -1;
      /* Put any integral type before any non-integral type.  When splicing, we
	 make sure that those with insufficient precision and occupying the
	 same space are not scalarized.  */
      else if (INTEGRAL_TYPE_P (f1->type)
	       && !INTEGRAL_TYPE_P (f2->type))
	return -1;
      else if (!INTEGRAL_TYPE_P (f1->type)
	       && INTEGRAL_TYPE_P (f2->type))
	return 1;
      /* Put the integral type with the bigger precision first.  */
      else if (INTEGRAL_TYPE_P (f1->type)
	       && INTEGRAL_TYPE_P (f2->type)
	       && (TYPE_PRECISION (f2->type) != TYPE_PRECISION (f1->type)))
	return TYPE_PRECISION (f2->type) - TYPE_PRECISION (f1->type);
      /* Stabilize the sort.  */
      return TYPE_UID (f1->type) - TYPE_UID (f2->type);
    }

  /* We want the bigger accesses first, thus the opposite operator in the next
     line: */
  return f1->size > f2->size ? -1 : 1;
}
| |
| |
| /* Append a name of the declaration to the name obstack. A helper function for |
| make_fancy_name. */ |
| |
| static void |
| make_fancy_decl_name (tree decl) |
| { |
| char buffer[32]; |
| |
| tree name = DECL_NAME (decl); |
| if (name) |
| obstack_grow (&name_obstack, IDENTIFIER_POINTER (name), |
| IDENTIFIER_LENGTH (name)); |
| else |
| { |
| sprintf (buffer, "D%u", DECL_UID (decl)); |
| obstack_grow (&name_obstack, buffer, strlen (buffer)); |
| } |
| } |
| |
| /* Helper for make_fancy_name. */ |
| |
static void
make_fancy_name_1 (tree expr)
{
  char buffer[32];
  tree index;

  /* A plain declaration terminates the recursion.  */
  if (DECL_P (expr))
    {
      make_fancy_decl_name (expr);
      return;
    }

  switch (TREE_CODE (expr))
    {
    case COMPONENT_REF:
      /* base$field  */
      make_fancy_name_1 (TREE_OPERAND (expr, 0));
      obstack_1grow (&name_obstack, '$');
      make_fancy_decl_name (TREE_OPERAND (expr, 1));
      break;

    case ARRAY_REF:
      /* base$index  */
      make_fancy_name_1 (TREE_OPERAND (expr, 0));
      obstack_1grow (&name_obstack, '$');
      /* Arrays with only one element may not have a constant as their
	 index.  */
      index = TREE_OPERAND (expr, 1);
      if (TREE_CODE (index) != INTEGER_CST)
	break;
      sprintf (buffer, HOST_WIDE_INT_PRINT_DEC, TREE_INT_CST_LOW (index));
      obstack_grow (&name_obstack, buffer, strlen (buffer));
      break;

    case ADDR_EXPR:
      make_fancy_name_1 (TREE_OPERAND (expr, 0));
      break;

    case MEM_REF:
      /* base or base$byteoffset for a nonzero offset.  */
      make_fancy_name_1 (TREE_OPERAND (expr, 0));
      if (!integer_zerop (TREE_OPERAND (expr, 1)))
	{
	  obstack_1grow (&name_obstack, '$');
	  sprintf (buffer, HOST_WIDE_INT_PRINT_DEC,
		   TREE_INT_CST_LOW (TREE_OPERAND (expr, 1)));
	  obstack_grow (&name_obstack, buffer, strlen (buffer));
	}
      break;

    case BIT_FIELD_REF:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
      gcc_unreachable (); 	/* we treat these as scalars.  */
      break;
    default:
      break;
    }
}
| |
| /* Create a human readable name for replacement variable of ACCESS. */ |
| |
| static char * |
| make_fancy_name (tree expr) |
| { |
| make_fancy_name_1 (expr); |
| obstack_1grow (&name_obstack, '\0'); |
| return XOBFINISH (&name_obstack, char *); |
| } |
| |
| /* Construct a MEM_REF that would reference a part of aggregate BASE of type |
| EXP_TYPE at the given OFFSET and with storage order REVERSE. If BASE is |
| something for which get_addr_base_and_unit_offset returns NULL, gsi must |
| be non-NULL and is used to insert new statements either before or below |
| the current one as specified by INSERT_AFTER. This function is not capable |
| of handling bitfields. */ |
| |
tree
build_ref_for_offset (location_t loc, tree base, poly_int64 offset,
		      bool reverse, tree exp_type, gimple_stmt_iterator *gsi,
		      bool insert_after)
{
  tree prev_base = base;
  tree off;
  tree mem_ref;
  poly_int64 base_offset;
  unsigned HOST_WIDE_INT misalign;
  unsigned int align;

  /* Preserve address-space information.  */
  addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (base));
  if (as != TYPE_ADDR_SPACE (exp_type))
    exp_type = build_qualified_type (exp_type,
				     TYPE_QUALS (exp_type)
				     | ENCODE_QUAL_ADDR_SPACE (as));

  /* OFFSET is in bits; the built MEM_REF operates on bytes.  */
  poly_int64 byte_offset = exact_div (offset, BITS_PER_UNIT);
  get_object_alignment_1 (base, &align, &misalign);
  base = get_addr_base_and_unit_offset (base, &base_offset);

  /* get_addr_base_and_unit_offset returns NULL for references with a variable
     offset such as array[var_index].  */
  if (!base)
    {
      /* Compute the address into a fresh SSA name with an explicit
	 statement inserted at GSI, then use that as the MEM_REF base.  */
      gassign *stmt;
      tree tmp, addr;

      gcc_checking_assert (gsi);
      tmp = make_ssa_name (build_pointer_type (TREE_TYPE (prev_base)));
      addr = build_fold_addr_expr (unshare_expr (prev_base));
      STRIP_USELESS_TYPE_CONVERSION (addr);
      stmt = gimple_build_assign (tmp, addr);
      gimple_set_location (stmt, loc);
      if (insert_after)
	gsi_insert_after (gsi, stmt, GSI_NEW_STMT);
      else
	gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

      off = build_int_cst (reference_alias_ptr_type (prev_base), byte_offset);
      base = tmp;
    }
  else if (TREE_CODE (base) == MEM_REF)
    {
      /* Fold the new byte offset into the existing MEM_REF offset.  */
      off = build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)),
			   base_offset + byte_offset);
      off = int_const_binop (PLUS_EXPR, TREE_OPERAND (base, 1), off);
      base = unshare_expr (TREE_OPERAND (base, 0));
    }
  else
    {
      /* A plain decl base: take its address and use the combined offset.  */
      off = build_int_cst (reference_alias_ptr_type (prev_base),
			   base_offset + byte_offset);
      base = build_fold_addr_expr (unshare_expr (base));
    }

  /* Shrink the type's alignment to what the offset position can actually
     guarantee.  */
  unsigned int align_bound = known_alignment (misalign + offset);
  if (align_bound != 0)
    align = MIN (align, align_bound);
  if (align != TYPE_ALIGN (exp_type))
    exp_type = build_aligned_type (exp_type, align);

  mem_ref = fold_build2_loc (loc, MEM_REF, exp_type, base, off);
  REF_REVERSE_STORAGE_ORDER (mem_ref) = reverse;
  /* Carry over volatility and side effects from the replaced reference.  */
  if (TREE_THIS_VOLATILE (prev_base))
    TREE_THIS_VOLATILE (mem_ref) = 1;
  if (TREE_SIDE_EFFECTS (prev_base))
    TREE_SIDE_EFFECTS (mem_ref) = 1;
  return mem_ref;
}
| |
| /* Construct a memory reference to a part of an aggregate BASE at the given |
| OFFSET and of the same type as MODEL. In case this is a reference to a |
| bit-field, the function will replicate the last component_ref of model's |
| expr to access it. GSI and INSERT_AFTER have the same meaning as in |
| build_ref_for_offset. */ |
| |
static tree
build_ref_for_model (location_t loc, tree base, HOST_WIDE_INT offset,
		     struct access *model, gimple_stmt_iterator *gsi,
		     bool insert_after)
{
  if (TREE_CODE (model->expr) == COMPONENT_REF
      && DECL_BIT_FIELD (TREE_OPERAND (model->expr, 1)))
    {
      /* This access represents a bit-field.  */
      tree t, exp_type, fld = TREE_OPERAND (model->expr, 1);

      /* Build a reference to the containing record first, then apply the
	 bit-field COMPONENT_REF on top of it.  */
      offset -= int_bit_position (fld);
      exp_type = TREE_TYPE (TREE_OPERAND (model->expr, 0));
      t = build_ref_for_offset (loc, base, offset, model->reverse, exp_type,
				gsi, insert_after);
      /* The flag will be set on the record type.  */
      REF_REVERSE_STORAGE_ORDER (t) = 0;
      return fold_build3_loc (loc, COMPONENT_REF, TREE_TYPE (fld), t, fld,
			      NULL_TREE);
    }
  else
    return
      build_ref_for_offset (loc, base, offset, model->reverse, model->type,
			    gsi, insert_after);
}
| |
/* Attempt to build a memory reference that we could put into a gimple
   debug_bind statement.  Similar to build_ref_for_model but punts if it has to
   create statements and returns NULL instead.  This function also ignores
   alignment issues and so its results should never end up in non-debug
   statements.  */
| |
static tree
build_debug_ref_for_model (location_t loc, tree base, HOST_WIDE_INT offset,
			   struct access *model)
{
  poly_int64 base_offset;
  tree off;

  /* Bit-field references cannot be expressed without extra statements;
     punt on them.  */
  if (TREE_CODE (model->expr) == COMPONENT_REF
      && DECL_BIT_FIELD (TREE_OPERAND (model->expr, 1)))
    return NULL_TREE;

  /* Likewise punt on variable-offset bases, for which
     get_addr_base_and_unit_offset returns NULL.  */
  base = get_addr_base_and_unit_offset (base, &base_offset);
  if (!base)
    return NULL_TREE;
  if (TREE_CODE (base) == MEM_REF)
    {
      /* Fold the extra byte offset into the existing MEM_REF offset.  */
      off = build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)),
			   base_offset + offset / BITS_PER_UNIT);
      off = int_const_binop (PLUS_EXPR, TREE_OPERAND (base, 1), off);
      base = unshare_expr (TREE_OPERAND (base, 0));
    }
  else
    {
      off = build_int_cst (reference_alias_ptr_type (base),
			   base_offset + offset / BITS_PER_UNIT);
      base = build_fold_addr_expr (unshare_expr (base));
    }

  return fold_build2_loc (loc, MEM_REF, model->type, base, off);
}
| |
/* Construct a memory reference consisting of component_refs and array_refs to
   a part of an aggregate *RES (which is of type TYPE).  The requested part
   should have type EXP_TYPE at the given OFFSET.  This function might not
   succeed, it returns true when it does and only then *RES points to something
   meaningful.  This function should be used only to build expressions that we
   might need to present to user (e.g. in warnings).  In all other situations,
   build_ref_for_model or build_ref_for_offset should be used instead.  */
| |
static bool
build_user_friendly_ref_for_offset (tree *res, tree type, HOST_WIDE_INT offset,
				    tree exp_type)
{
  /* Iteratively descend into TYPE, narrowing *RES one component or array
     element at a time until OFFSET is consumed and the type matches.  */
  while (1)
    {
      tree fld;
      tree tr_size, index, minidx;
      HOST_WIDE_INT el_size;

      /* Success: the remaining offset is zero and the types agree.  */
      if (offset == 0 && exp_type
	  && types_compatible_p (exp_type, type))
	return true;

      switch (TREE_CODE (type))
	{
	case UNION_TYPE:
	case QUAL_UNION_TYPE:
	case RECORD_TYPE:
	  /* Try each field that covers OFFSET; recurse into it and accept
	     the first one for which the rest of the descent succeeds.  */
	  for (fld = TYPE_FIELDS (type); fld; fld = DECL_CHAIN (fld))
	    {
	      HOST_WIDE_INT pos, size;
	      tree tr_pos, expr, *expr_ptr;

	      if (TREE_CODE (fld) != FIELD_DECL)
		continue;

	      tr_pos = bit_position (fld);
	      if (!tr_pos || !tree_fits_uhwi_p (tr_pos))
		continue;
	      pos = tree_to_uhwi (tr_pos);
	      /* Union members all sit at position zero.  */
	      gcc_assert (TREE_CODE (type) == RECORD_TYPE || pos == 0);
	      tr_size = DECL_SIZE (fld);
	      if (!tr_size || !tree_fits_uhwi_p (tr_size))
		continue;
	      size = tree_to_uhwi (tr_size);
	      if (size == 0)
		{
		  /* A zero-sized field only matches an exact offset.  */
		  if (pos != offset)
		    continue;
		}
	      else if (pos > offset || (pos + size) <= offset)
		continue;

	      expr = build3 (COMPONENT_REF, TREE_TYPE (fld), *res, fld,
			     NULL_TREE);
	      expr_ptr = &expr;
	      if (build_user_friendly_ref_for_offset (expr_ptr, TREE_TYPE (fld),
						      offset - pos, exp_type))
		{
		  *res = expr;
		  return true;
		}
	    }
	  return false;

	case ARRAY_TYPE:
	  /* Select the element containing OFFSET and loop on its type.  */
	  tr_size = TYPE_SIZE (TREE_TYPE (type));
	  if (!tr_size || !tree_fits_uhwi_p (tr_size))
	    return false;
	  el_size = tree_to_uhwi (tr_size);

	  minidx = TYPE_MIN_VALUE (TYPE_DOMAIN (type));
	  if (TREE_CODE (minidx) != INTEGER_CST || el_size == 0)
	    return false;
	  /* Indices are relative to the domain's lower bound.  */
	  index = build_int_cst (TYPE_DOMAIN (type), offset / el_size);
	  if (!integer_zerop (minidx))
	    index = int_const_binop (PLUS_EXPR, index, minidx);
	  *res = build4 (ARRAY_REF, TREE_TYPE (type), *res, index,
			 NULL_TREE, NULL_TREE);
	  offset = offset % el_size;
	  type = TREE_TYPE (type);
	  break;

	default:
	  /* A scalar: only an exact, unconstrained match succeeds.  */
	  if (offset != 0)
	    return false;

	  if (exp_type)
	    return false;
	  else
	    return true;
	}
    }
}
| |
| /* Return true iff TYPE is stdarg va_list type. */ |
| |
| static inline bool |
| is_va_list_type (tree type) |
| { |
| return TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (va_list_type_node); |
| } |
| |
| /* Print message to dump file why a variable was rejected. */ |
| |
| static void |
| reject (tree var, const char *msg) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Rejected (%d): %s: ", DECL_UID (var), msg); |
| print_generic_expr (dump_file, var); |
| fprintf (dump_file, "\n"); |
| } |
| } |
| |
| /* Return true if VAR is a candidate for SRA. */ |
| |
static bool
maybe_add_sra_candidate (tree var)
{
  tree type = TREE_TYPE (var);
  const char *msg;
  tree_node **slot;

  /* Only aggregates can be split into scalar parts.  */
  if (!AGGREGATE_TYPE_P (type))
    {
      reject (var, "not aggregate");
      return false;
    }
  /* Allow constant-pool entries (that "need to live in memory")
     unless we are doing IPA SRA.  */
  if (needs_to_live_in_memory (var)
      && (sra_mode == SRA_MODE_EARLY_IPA || !constant_decl_p (var)))
    {
      reject (var, "needs to live in memory");
      return false;
    }
  if (TREE_THIS_VOLATILE (var))
    {
      reject (var, "is volatile");
      return false;
    }
  if (!COMPLETE_TYPE_P (type))
    {
      reject (var, "has incomplete type");
      return false;
    }
  if (!tree_fits_uhwi_p (TYPE_SIZE (type)))
    {
      reject (var, "type size not fixed");
      return false;
    }
  if (tree_to_uhwi (TYPE_SIZE (type)) == 0)
    {
      reject (var, "type size is zero");
      return false;
    }
  if (type_internals_preclude_sra_p (type, &msg))
    {
      reject (var, msg);
      return false;
    }
  if (/* Fix for PR 41089.  tree-stdarg.c needs to have va_lists intact but
	 we also want to schedule it rather late.  Thus we ignore it in
	 the early pass. */
      (sra_mode == SRA_MODE_EARLY_INTRA
       && is_va_list_type (type)))
    {
      reject (var, "is va_list");
      return false;
    }

  /* VAR passed all checks; register it in the bitmap and the hash table.  */
  bitmap_set_bit (candidate_bitmap, DECL_UID (var));
  slot = candidates->find_slot_with_hash (var, DECL_UID (var), INSERT);
  *slot = var;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Candidate (%d): ", DECL_UID (var));
      print_generic_expr (dump_file, var);
      fprintf (dump_file, "\n");
    }

  return true;
}
| |
| /* The very first phase of intraprocedural SRA. It marks in candidate_bitmap |
| those with type which is suitable for scalarization. */ |
| |
| static bool |
| find_var_candidates (void) |
| { |
| tree var, parm; |
| unsigned int i; |
| bool ret = false; |
| |
| for (parm = DECL_ARGUMENTS (current_function_decl); |
| parm; |
| parm = DECL_CHAIN (parm)) |
| ret |= maybe_add_sra_candidate (parm); |
| |
| FOR_EACH_LOCAL_DECL (cfun, i, var) |
| { |
| if (!VAR_P (var)) |
| continue; |
| |
| ret |= maybe_add_sra_candidate (var); |
| } |
| |
| return ret; |
| } |
| |
| /* Sort all accesses for the given variable, check for partial overlaps and |
| return NULL if there are any. If there are none, pick a representative for |
| each combination of offset and size and create a linked list out of them. |
| Return the pointer to the first representative and make sure it is the first |
| one in the vector of accesses. */ |
| |
static struct access *
sort_and_splice_var_accesses (tree var)
{
  int i, j, access_count;
  struct access *res, **prev_acc_ptr = &res;
  vec<access_p> *access_vec;
  bool first = true;
  HOST_WIDE_INT low = -1, high = 0;

  access_vec = get_base_access_vector (var);
  if (!access_vec)
    return NULL;
  access_count = access_vec->length ();

  /* Sort by <OFFSET, SIZE>.  */
  access_vec->qsort (compare_access_positions);

  /* Outer loop: I is the index of the first access of a group sharing the
     same offset and size; the inner loop advances J over the rest of the
     group, merging their flags into the local grp_* accumulators.  */
  i = 0;
  while (i < access_count)
    {
      struct access *access = (*access_vec)[i];
      bool grp_write = access->write;
      bool grp_read = !access->write;
      bool grp_scalar_write = access->write
	&& is_gimple_reg_type (access->type);
      bool grp_scalar_read = !access->write
	&& is_gimple_reg_type (access->type);
      bool grp_assignment_read = access->grp_assignment_read;
      bool grp_assignment_write = access->grp_assignment_write;
      bool multiple_scalar_reads = false;
      bool total_scalarization = access->grp_total_scalarization;
      bool grp_partial_lhs = access->grp_partial_lhs;
      bool first_scalar = is_gimple_reg_type (access->type);
      bool unscalarizable_region = access->grp_unscalarizable_region;
      /* True when the representative is an integral bit-field access whose
	 type precision does not cover the whole accessed area.  */
      bool bf_non_full_precision
	= (INTEGRAL_TYPE_P (access->type)
	   && TYPE_PRECISION (access->type) != access->size
	   && TREE_CODE (access->expr) == COMPONENT_REF
	   && DECL_BIT_FIELD (TREE_OPERAND (access->expr, 1)));

      /* Track the extent [LOW, HIGH) of the last access that opened a new
	 area; anything that pokes out of it only partially overlaps and
	 makes the whole variable unscalarizable (return NULL).  */
      if (first || access->offset >= high)
	{
	  first = false;
	  low = access->offset;
	  high = access->offset + access->size;
	}
      else if (access->offset > low && access->offset + access->size > high)
	return NULL;
      else
	gcc_assert (access->offset >= low
		    && access->offset + access->size <= high);

      j = i + 1;
      while (j < access_count)
	{
	  struct access *ac2 = (*access_vec)[j];
	  if (ac2->offset != access->offset || ac2->size != access->size)
	    break;
	  if (ac2->write)
	    {
	      grp_write = true;
	      grp_scalar_write = (grp_scalar_write
				  || is_gimple_reg_type (ac2->type));
	    }
	  else
	    {
	      grp_read = true;
	      if (is_gimple_reg_type (ac2->type))
		{
		  /* A second scalar read makes the replacement worthwhile,
		     see the grp_hint computation below.  */
		  if (grp_scalar_read)
		    multiple_scalar_reads = true;
		  else
		    grp_scalar_read = true;
		}
	    }
	  grp_assignment_read |= ac2->grp_assignment_read;
	  grp_assignment_write |= ac2->grp_assignment_write;
	  grp_partial_lhs |= ac2->grp_partial_lhs;
	  unscalarizable_region |= ac2->grp_unscalarizable_region;
	  total_scalarization |= ac2->grp_total_scalarization;
	  relink_to_new_repr (access, ac2);

	  /* If there are both aggregate-type and scalar-type accesses with
	     this combination of size and offset, the comparison function
	     should have put the scalars first.  */
	  gcc_assert (first_scalar || !is_gimple_reg_type (ac2->type));
	  /* It also prefers integral types to non-integral.  However, when the
	     precision of the selected type does not span the entire area and
	     should also be used for a non-integer (i.e. float), we must not
	     let that happen.  Normally analyze_access_subtree expands the type
	     to cover the entire area but for bit-fields it doesn't.  */
	  if (bf_non_full_precision && !INTEGRAL_TYPE_P (ac2->type))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "Cannot scalarize the following access "
			   "because insufficient precision integer type was "
			   "selected.\n ");
		  dump_access (dump_file, access, false);
		}
	      unscalarizable_region = true;
	    }
	  ac2->group_representative = access;
	  j++;
	}

      i = j;

      /* Store the merged flags into the representative and append it to the
	 linked list of representatives being built.  */
      access->group_representative = access;
      access->grp_write = grp_write;
      access->grp_read = grp_read;
      access->grp_scalar_read = grp_scalar_read;
      access->grp_scalar_write = grp_scalar_write;
      access->grp_assignment_read = grp_assignment_read;
      access->grp_assignment_write = grp_assignment_write;
      access->grp_hint = total_scalarization
	|| (multiple_scalar_reads && !constant_decl_p (var));
      access->grp_total_scalarization = total_scalarization;
      access->grp_partial_lhs = grp_partial_lhs;
      access->grp_unscalarizable_region = unscalarizable_region;

      *prev_acc_ptr = access;
      prev_acc_ptr = &access->next_grp;
    }

  /* The first representative must be the first access in the sorted vector
     (NOTE(review): this assumes ACCESS_VEC is non-empty here, which the
     bookkeeping elsewhere appears to guarantee).  */
  gcc_assert (res == (*access_vec)[0]);
  return res;
}
| |
| /* Create a variable for the given ACCESS which determines the type, name and a |
| few other properties. Return the variable declaration and store it also to |
| ACCESS->replacement. */ |
| |
static tree
create_access_replacement (struct access *access)
{
  tree repl;

  if (access->grp_to_be_debug_replaced)
    {
      /* Debug-only replacements never become real operands, so a raw
	 temporary with the right context is enough.  */
      repl = create_tmp_var_raw (access->type);
      DECL_CONTEXT (repl) = current_function_decl;
    }
  else
    /* Drop any special alignment on the type if it's not on the main
       variant.  This avoids issues with weirdo ABIs like AAPCS.  */
    repl = create_tmp_var (build_qualified_type
			     (TYPE_MAIN_VARIANT (access->type),
			      TYPE_QUALS (access->type)), "SR");
  if (TREE_CODE (access->type) == COMPLEX_TYPE
      || TREE_CODE (access->type) == VECTOR_TYPE)
    {
      /* Complex and vector temporaries can be gimple registers unless the
	 access appears on a partially modified LHS.  */
      if (!access->grp_partial_lhs)
	DECL_GIMPLE_REG_P (repl) = 1;
    }
  else if (access->grp_partial_lhs
	   && is_gimple_reg_type (access->type))
    TREE_ADDRESSABLE (repl) = 1;

  DECL_SOURCE_LOCATION (repl) = DECL_SOURCE_LOCATION (access->base);
  DECL_ARTIFICIAL (repl) = 1;
  DECL_IGNORED_P (repl) = DECL_IGNORED_P (access->base);

  /* For user-visible bases, give the replacement a fancy name and a
     DECL_DEBUG_EXPR so that debug info can still refer to the original.  */
  if (DECL_NAME (access->base)
      && !DECL_IGNORED_P (access->base)
      && !DECL_ARTIFICIAL (access->base))
    {
      char *pretty_name = make_fancy_name (access->expr);
      tree debug_expr = unshare_expr_without_location (access->expr), d;
      bool fail = false;

      DECL_NAME (repl) = get_identifier (pretty_name);
      DECL_NAMELESS (repl) = 1;
      obstack_free (&name_obstack, pretty_name);

      /* Get rid of any SSA_NAMEs embedded in debug_expr,
	 as DECL_DEBUG_EXPR isn't considered when looking for still
	 used SSA_NAMEs and thus they could be freed.  All debug info
	 generation cares is whether something is constant or variable
	 and that get_ref_base_and_extent works properly on the
	 expression.  It cannot handle accesses at a non-constant offset
	 though, so just give up in those cases.  */
      for (d = debug_expr;
	   !fail && (handled_component_p (d) || TREE_CODE (d) == MEM_REF);
	   d = TREE_OPERAND (d, 0))
	switch (TREE_CODE (d))
	  {
	  case ARRAY_REF:
	  case ARRAY_RANGE_REF:
	    /* Operand 1 is the index, operand 3 the element size; both must
	       be compile-time constants for the debug expr to be usable.  */
	    if (TREE_OPERAND (d, 1)
		&& TREE_CODE (TREE_OPERAND (d, 1)) != INTEGER_CST)
	      fail = true;
	    if (TREE_OPERAND (d, 3)
		&& TREE_CODE (TREE_OPERAND (d, 3)) != INTEGER_CST)
	      fail = true;
	    /* FALLTHRU */
	  case COMPONENT_REF:
	    /* Operand 2 is the field offset override; a variable one also
	       makes the expression unusable for debug info.  */
	    if (TREE_OPERAND (d, 2)
		&& TREE_CODE (TREE_OPERAND (d, 2)) != INTEGER_CST)
	      fail = true;
	    break;
	  case MEM_REF:
	    if (TREE_CODE (TREE_OPERAND (d, 0)) != ADDR_EXPR)
	      fail = true;
	    else
	      d = TREE_OPERAND (d, 0);
	    break;
	  default:
	    break;
	  }
      if (!fail)
	{
	  SET_DECL_DEBUG_EXPR (repl, debug_expr);
	  DECL_HAS_DEBUG_EXPR_P (repl) = 1;
	}
      if (access->grp_no_warning)
	TREE_NO_WARNING (repl) = 1;
      else
	TREE_NO_WARNING (repl) = TREE_NO_WARNING (access->base);
    }
  else
    TREE_NO_WARNING (repl) = 1;

  if (dump_file)
    {
      if (access->grp_to_be_debug_replaced)
	{
	  fprintf (dump_file, "Created a debug-only replacement for ");
	  print_generic_expr (dump_file, access->base);
	  fprintf (dump_file, " offset: %u, size: %u\n",
		   (unsigned) access->offset, (unsigned) access->size);
	}
      else
	{
	  fprintf (dump_file, "Created a replacement for ");
	  print_generic_expr (dump_file, access->base);
	  fprintf (dump_file, " offset: %u, size: %u: ",
		   (unsigned) access->offset, (unsigned) access->size);
	  print_generic_expr (dump_file, repl);
	  fprintf (dump_file, "\n");
	}
    }
  sra_stats.replacements++;

  return repl;
}
| |
| /* Return ACCESS scalar replacement, which must exist. */ |
| |
| static inline tree |
| get_access_replacement (struct access *access) |
| { |
| gcc_checking_assert (access->replacement_decl); |
| return access->replacement_decl; |
| } |
| |
| |
| /* Build a subtree of accesses rooted in *ACCESS, and move the pointer in the |
| linked list along the way. Stop when *ACCESS is NULL or the access pointed |
| to it is not "within" the root. Return false iff some accesses partially |
| overlap. */ |
| |
| static bool |
| build_access_subtree (struct access **access) |
| { |
| struct access *root = *access, *last_child = NULL; |
| HOST_WIDE_INT limit = root->offset + root->size; |
| |
| *access = (*access)->next_grp; |
| while (*access && (*access)->offset + (*access)->size <= limit) |
| { |
| if (!last_child) |
| root->first_child = *access; |
| else |
| last_child->next_sibling = *access; |
| last_child = *access; |
| (*access)->parent = root; |
| (*access)->grp_write |= root->grp_write; |
| |
| if (!build_access_subtree (access)) |
| return false; |
| } |
| |
| if (*access && (*access)->offset < limit) |
| return false; |
| |
| return true; |
| } |
| |
| /* Build a tree of access representatives, ACCESS is the pointer to the first |
| one, others are linked in a list by the next_grp field. Return false iff |
| some accesses partially overlap. */ |
| |
| static bool |
| build_access_trees (struct access *access) |
| { |
| while (access) |
| { |
| struct access *root = access; |
| |
| if (!build_access_subtree (&access)) |
| return false; |
| root->next_grp = access; |
| } |
| return true; |
| } |
| |
| /* Return true if expr contains some ARRAY_REFs into a variable bounded |
| array. */ |
| |
| static bool |
| expr_with_var_bounded_array_refs_p (tree expr) |
| { |
| while (handled_component_p (expr)) |
| { |
| if (TREE_CODE (expr) == ARRAY_REF |
| && !tree_fits_shwi_p (array_ref_low_bound (expr))) |
| return true; |
| expr = TREE_OPERAND (expr, 0); |
| } |
| return false; |
| } |
| |
| /* Analyze the subtree of accesses rooted in ROOT, scheduling replacements when |
| both seeming beneficial and when ALLOW_REPLACEMENTS allows it. Also set all |
| sorts of access flags appropriately along the way, notably always set |
| grp_read and grp_assign_read according to MARK_READ and grp_write when |
| MARK_WRITE is true. |
| |
| Creating a replacement for a scalar access is considered beneficial if its |
| grp_hint is set (this means we are either attempting total scalarization or |
| there is more than one direct read access) or according to the following |
| table: |
| |
| Access written to through a scalar type (once or more times) |
| | |
| | Written to in an assignment statement |
| | | |
| | | Access read as scalar _once_ |
| | | | |
| | | | Read in an assignment statement |
| | | | | |
| | | | | Scalarize Comment |
| ----------------------------------------------------------------------------- |
| 0 0 0 0 No access for the scalar |
| 0 0 0 1 No access for the scalar |
| 0 0 1 0 No Single read - won't help |
| 0 0 1 1 No The same case |
| 0 1 0 0 No access for the scalar |
| 0 1 0 1 No access for the scalar |
| 0 1 1 0 Yes s = *g; return s.i; |
| 0 1 1 1 Yes The same case as above |
| 1 0 0 0 No Won't help |
| 1 0 0 1 Yes s.i = 1; *g = s; |
| 1 0 1 0 Yes s.i = 5; g = s.i; |
| 1 0 1 1 Yes The same case as above |
| 1 1 0 0 No Won't help. |
| 1 1 0 1 Yes s.i = 1; *g = s; |
| 1 1 1 0 Yes s = *g; return s.i; |
| 1 1 1 1 Yes Any of the above yeses */ |
| |
static bool
analyze_access_subtree (struct access *root, struct access *parent,
			bool allow_replacements)
{
  struct access *child;
  HOST_WIDE_INT limit = root->offset + root->size;
  HOST_WIDE_INT covered_to = root->offset;
  bool scalar = is_gimple_reg_type (root->type);
  bool hole = false, sth_created = false;

  /* Inherit read/write/total-scalarization flags from the parent access.  */
  if (parent)
    {
      if (parent->grp_read)
	root->grp_read = 1;
      if (parent->grp_assignment_read)
	root->grp_assignment_read = 1;
      if (parent->grp_write)
	root->grp_write = 1;
      if (parent->grp_assignment_write)
	root->grp_assignment_write = 1;
      if (parent->grp_total_scalarization)
	root->grp_total_scalarization = 1;
    }

  if (root->grp_unscalarizable_region)
    allow_replacements = false;

  /* Variable array bounds make the reference unsuitable for replacement.  */
  if (allow_replacements && expr_with_var_bounded_array_refs_p (root->expr))
    allow_replacements = false;

  /* Recurse into children, tracking whether they leave holes in ROOT's
     extent; children of a scalar root never get replacements.  */
  for (child = root->first_child; child; child = child->next_sibling)
    {
      hole |= covered_to < child->offset;
      sth_created |= analyze_access_subtree (child, root,
					     allow_replacements && !scalar);

      root->grp_unscalarized_data |= child->grp_unscalarized_data;
      root->grp_total_scalarization &= child->grp_total_scalarization;
      if (child->grp_covered)
	covered_to += child->size;
      else
	hole = true;
    }

  /* A childless scalar access gets a real replacement when hinted or when
     it is both read and written (see the decision table above).  */
  if (allow_replacements && scalar && !root->first_child
      && (root->grp_hint
	  || ((root->grp_scalar_read || root->grp_assignment_read)
	      && (root->grp_scalar_write || root->grp_assignment_write))))
    {
      /* Always create access replacements that cover the whole access.
	 For integral types this means the precision has to match.
	 Avoid assumptions based on the integral type kind, too.  */
      if (INTEGRAL_TYPE_P (root->type)
	  && (TREE_CODE (root->type) != INTEGER_TYPE
	      || TYPE_PRECISION (root->type) != root->size)
	  /* But leave bitfield accesses alone.  */
	  && (TREE_CODE (root->expr) != COMPONENT_REF
	      || !DECL_BIT_FIELD (TREE_OPERAND (root->expr, 1))))
	{
	  tree rt = root->type;
	  gcc_assert ((root->offset % BITS_PER_UNIT) == 0
		      && (root->size % BITS_PER_UNIT) == 0);
	  root->type = build_nonstandard_integer_type (root->size,
						       TYPE_UNSIGNED (rt));
	  root->expr = build_ref_for_offset (UNKNOWN_LOCATION, root->base,
					     root->offset, root->reverse,
					     root->type, NULL, false);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Changing the type of a replacement for ");
	      print_generic_expr (dump_file, root->base);
	      fprintf (dump_file, " offset: %u, size: %u ",
		       (unsigned) root->offset, (unsigned) root->size);
	      fprintf (dump_file, " to an integer.\n");
	    }
	}

      root->grp_to_be_replaced = 1;
      root->replacement_decl = create_access_replacement (root);
      sth_created = true;
      hole = false;
    }
  else
    {
      /* A write-only scalar that cannot be scalarized away still gets a
	 debug-only replacement so that debug info stays useful.  */
      if (allow_replacements
	  && scalar && !root->first_child
	  && (root->grp_scalar_write || root->grp_assignment_write)
	  && !bitmap_bit_p (cannot_scalarize_away_bitmap,
			    DECL_UID (root->base)))
	{
	  gcc_checking_assert (!root->grp_scalar_read
			       && !root->grp_assignment_read);
	  sth_created = true;
	  if (MAY_HAVE_DEBUG_BIND_STMTS)
	    {
	      root->grp_to_be_debug_replaced = 1;
	      root->replacement_decl = create_access_replacement (root);
	    }
	}

      if (covered_to < limit)
	hole = true;
      if (scalar || !allow_replacements)
	root->grp_total_scalarization = 0;
    }

  if (!hole || root->grp_total_scalarization)
    root->grp_covered = 1;
  else if (root->grp_write || comes_initialized_p (root->base))
    root->grp_unscalarized_data = 1; /* not covered and written to */
  return sth_created;
}
| |
| /* Analyze all access trees linked by next_grp by the means of |
| analyze_access_subtree. */ |
| static bool |
| analyze_access_trees (struct access *access) |
| { |
| bool ret = false; |
| |
| while (access) |
| { |
| if (analyze_access_subtree (access, NULL, true)) |
| ret = true; |
| access = access->next_grp; |
| } |
| |
| return ret; |
| } |
| |
| /* Return true iff a potential new child of LACC at offset OFFSET and with size |
| SIZE would conflict with an already existing one. If exactly such a child |
| already exists in LACC, store a pointer to it in EXACT_MATCH. */ |
| |
| static bool |
| child_would_conflict_in_lacc (struct access *lacc, HOST_WIDE_INT norm_offset, |
| HOST_WIDE_INT size, struct access **exact_match) |
| { |
| struct access *child; |
| |
| for (child = lacc->first_child; child; child = child->next_sibling) |
| { |
| if (child->offset == norm_offset && child->size == size) |
| { |
| *exact_match = child; |
| return true; |
| } |
| |
| if (child->offset < norm_offset + size |
| && child->offset + child->size > norm_offset) |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Create a new child access of PARENT, with all properties just like MODEL |
| except for its offset and with its grp_write false and grp_read true. |
| Return the new access or NULL if it cannot be created. Note that this |
| access is created long after all splicing and sorting, it's not located in |
| any access vector and is automatically a representative of its group. Set |
| the gpr_write flag of the new accesss if SET_GRP_WRITE is true. */ |
| |
| static struct access * |
| create_artificial_child_access (struct access *parent, struct access *model, |
| HOST_WIDE_INT new_offset, |
| bool set_grp_write) |
| { |
| struct access **child; |
| tree expr = parent->base; |
| |
| gcc_assert (!model->grp_unscalarizable_region); |
| |
| struct access *access = access_pool.allocate (); |
| memset (access, 0, sizeof (struct access)); |
| if (!build_user_friendly_ref_for_offset (&expr, TREE_TYPE (expr), new_offset, |
| model->type)) |
| { |
| access->grp_no_warning = true; |
| expr = build_ref_for_model (EXPR_LOCATION (parent->base), parent->base, |
| new_offset, model, NULL, false); |
| } |
| |
| access->base = parent->base; |
| access->expr = expr; |
| access->offset = new_offset; |
| access->size = model->size; |
| access->type = model->type; |
| access->grp_write = set_grp_write; |
| access->grp_read = false; |
| access->reverse = model->reverse; |
| |
| child = &parent->first_child; |
| while (*child && (*child)->offset < new_offset) |
| child = &(*child)->next_sibling; |
| |
| access->next_sibling = *child; |
| *child = access; |
| |
| return access; |
| } |
| |
| |
| /* Beginning with ACCESS, traverse its whole access subtree and mark all |
| sub-trees as written to. If any of them has not been marked so previously |
| and has assignment links leading from it, re-enqueue it. */ |
| |
| static void |
| subtree_mark_written_and_enqueue (struct access *access) |
| { |
| if (access->grp_write) |
| return; |
| access->grp_write = true; |
| add_access_to_work_queue (access); |
| |
| struct access *child; |
| for (child = access->first_child; child; child = child->next_sibling) |
| subtree_mark_written_and_enqueue (child); |
| } |
| |
| /* Propagate subaccesses and grp_write flags of RACC across an assignment link |
| to LACC. Enqueue sub-accesses as necessary so that the write flag is |
| propagated transitively. Return true if anything changed. Additionally, if |
| RACC is a scalar access but LACC is not, change the type of the latter, if |
| possible. */ |
| |
static bool
propagate_subaccesses_across_link (struct access *lacc, struct access *racc)
{
  struct access *rchild;
  HOST_WIDE_INT norm_delta = lacc->offset - racc->offset;
  bool ret = false;

  /* IF the LHS is still not marked as being written to, we only need to do so
     if the RHS at this level actually was.  */
  if (!lacc->grp_write)
    {
      gcc_checking_assert (!comes_initialized_p (racc->base));
      if (racc->grp_write)
	{
	  subtree_mark_written_and_enqueue (lacc);
	  ret = true;
	}
    }

  /* No subaccesses can be propagated into a scalar or unscalarizable LHS,
     or out of an unscalarizable RHS; only the write flag matters then.  */
  if (is_gimple_reg_type (lacc->type)
      || lacc->grp_unscalarizable_region
      || racc->grp_unscalarizable_region)
    {
      if (!lacc->grp_write)
	{
	  ret = true;
	  subtree_mark_written_and_enqueue (lacc);
	}
      return ret;
    }

  /* If the RHS is scalar but the LHS is not, try to give the LHS the scalar
     type (and a matching expression) when neither side has children.  */
  if (is_gimple_reg_type (racc->type))
    {
      if (!lacc->grp_write)
	{
	  ret = true;
	  subtree_mark_written_and_enqueue (lacc);
	}
      if (!lacc->first_child && !racc->first_child)
	{
	  tree t = lacc->base;

	  lacc->type = racc->type;
	  if (build_user_friendly_ref_for_offset (&t, TREE_TYPE (t),
						  lacc->offset, racc->type))
	    lacc->expr = t;
	  else
	    {
	      lacc->expr = build_ref_for_model (EXPR_LOCATION (lacc->base),
						lacc->base, lacc->offset,
						racc, NULL, false);
	      lacc->grp_no_warning = true;
	    }
	}
      return ret;
    }

  /* Mirror each child of RACC in LACC (shifted by NORM_DELTA), unless a
     conflicting child already exists there.  */
  for (rchild = racc->first_child; rchild; rchild = rchild->next_sibling)
    {
      struct access *new_acc = NULL;
      HOST_WIDE_INT norm_offset = rchild->offset + norm_delta;

      if (child_would_conflict_in_lacc (lacc, norm_offset, rchild->size,
					&new_acc))
	{
	  if (new_acc)
	    {
	      /* Exact match found: merge write flag and hints and recurse
		 into the pair of matching children.  */
	      if (!new_acc->grp_write && rchild->grp_write)
		{
		  gcc_assert (!lacc->grp_write);
		  subtree_mark_written_and_enqueue (new_acc);
		  ret = true;
		}

	      rchild->grp_hint = 1;
	      new_acc->grp_hint |= new_acc->grp_read;
	      if (rchild->first_child)
		ret |= propagate_subaccesses_across_link (new_acc, rchild);
	    }
	  else
	    {
	      /* A partial overlap: cannot mirror the child, so consider the
		 whole LHS subtree written.  */
	      if (!lacc->grp_write)
		{
		  ret = true;
		  subtree_mark_written_and_enqueue (lacc);
		}
	    }
	  continue;
	}

      if (rchild->grp_unscalarizable_region)
	{
	  if (rchild->grp_write && !lacc->grp_write)
	    {
	      ret = true;
	      subtree_mark_written_and_enqueue (lacc);
	    }
	  continue;
	}

      /* No conflict: create the mirrored child in LACC.  */
      rchild->grp_hint = 1;
      new_acc = create_artificial_child_access (lacc, rchild, norm_offset,
						lacc->grp_write
						|| rchild->grp_write);
      gcc_checking_assert (new_acc);
      if (racc->first_child)
	propagate_subaccesses_across_link (new_acc, rchild);

      add_access_to_work_queue (lacc);
      ret = true;
    }

  return ret;
}
| |
| /* Propagate all subaccesses across assignment links. */ |
| |
| static void |
| propagate_all_subaccesses (void) |
| { |
| while (work_queue_head) |
| { |
| struct access *racc = pop_access_from_work_queue (); |
| struct assign_link *link; |
| |
| if (racc->group_representative) |
| racc= racc->group_representative; |
| gcc_assert (racc->first_link); |
| |
| for (link = racc->first_link; link; link = link->next) |
| { |
| struct access *lacc = link->lacc; |
| |
| if (!bitmap_bit_p (candidate_bitmap, DECL_UID (lacc->base))) |
| continue; |
| lacc = lacc->group_representative; |
| |
| bool reque_parents = false; |
| if (!bitmap_bit_p (candidate_bitmap, DECL_UID (racc->base))) |
| { |
| if (!lacc->grp_write) |
| { |
| subtree_mark_written_and_enqueue (lacc); |
| reque_parents = true; |
| } |
| } |
| else if (propagate_subaccesses_across_link (lacc, racc)) |
| reque_parents = true; |
| |
| if (reque_parents) |
| do |
| { |
| add_access_to_work_queue (lacc); |
| lacc = lacc->parent; |
| } |
| while (lacc); |
| } |
| } |
| } |
| |
| /* Go through all accesses collected throughout the (intraprocedural) analysis |
| stage, exclude overlapping ones, identify representatives and build trees |
| out of them, making decisions about scalarization on the way. Return true |
| iff there are any to-be-scalarized variables after this stage. */ |
| |
static bool
analyze_all_variable_accesses (void)
{
  int res = 0;
  bitmap tmp = BITMAP_ALLOC (NULL);
  bitmap_iterator bi;
  unsigned i;
  bool optimize_speed_p = !optimize_function_for_size_p (cfun);

  enum compiler_param param = optimize_speed_p
			? PARAM_SRA_MAX_SCALARIZATION_SIZE_SPEED
			: PARAM_SRA_MAX_SCALARIZATION_SIZE_SIZE;

  /* If the user didn't set PARAM_SRA_MAX_SCALARIZATION_SIZE_<...>,
     fall back to a target default.  */
  unsigned HOST_WIDE_INT max_scalarization_size
    = global_options_set.x_param_values[param]
      ? PARAM_VALUE (param)
      : get_move_ratio (optimize_speed_p) * UNITS_PER_WORD;

  max_scalarization_size *= BITS_PER_UNIT;

  /* Phase 1: attempt total scalarization of every candidate that should and
     can be scalarized away and whose type and size permit it.  */
  EXECUTE_IF_SET_IN_BITMAP (candidate_bitmap, 0, i, bi)
    if (bitmap_bit_p (should_scalarize_away_bitmap, i)
	&& !bitmap_bit_p (cannot_scalarize_away_bitmap, i))
      {
	tree var = candidate (i);

	if (VAR_P (var) && scalarizable_type_p (TREE_TYPE (var),
						constant_decl_p (var)))
	  {
	    if (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (var)))
		<= max_scalarization_size)
	      {
		create_total_scalarization_access (var);
		completely_scalarize (var, TREE_TYPE (var), 0, var);
		statistics_counter_event (cfun,
					  "Totally-scalarized aggregates", 1);
		if (dump_file && (dump_flags & TDF_DETAILS))
		  {
		    fprintf (dump_file, "Will attempt to totally scalarize ");
		    print_generic_expr (dump_file, var);
		    fprintf (dump_file, " (UID: %u): \n", DECL_UID (var));
		  }
	      }
	    else if (dump_file && (dump_flags & TDF_DETAILS))
	      {
		fprintf (dump_file, "Too big to totally scalarize: ");
		print_generic_expr (dump_file, var);
		fprintf (dump_file, " (UID: %u)\n", DECL_UID (var));
	      }
	  }
      }

  /* Phase 2: group accesses of each candidate and build access trees,
     disqualifying candidates with partial overlaps.  Iterate over a copy of
     the bitmap because disqualify_candidate modifies candidate_bitmap.  */
  bitmap_copy (tmp, candidate_bitmap);
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
    {
      tree var = candidate (i);
      struct access *access;

      access = sort_and_splice_var_accesses (var);
      if (!access || !build_access_trees (access))
	disqualify_candidate (var,
			      "No or inhibitingly overlapping accesses.");
    }

  /* Phase 3: propagate subaccesses across assignment links.  */
  propagate_all_subaccesses ();

  /* Phase 4: decide which accesses get replacements; candidates producing
     none are disqualified.  Again iterate over a copy of the bitmap.  */
  bitmap_copy (tmp, candidate_bitmap);
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
    {
      tree var = candidate (i);
      struct access *access = get_first_repr_for_decl (var);

      if (analyze_access_trees (access))
	{
	  res++;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\nAccess trees for ");
	      print_generic_expr (dump_file, var);
	      fprintf (dump_file, " (UID: %u): \n", DECL_UID (var));
	      dump_access_tree (dump_file, access);
	      fprintf (dump_file, "\n");
	    }
	}
      else
	disqualify_candidate (var, "No scalar replacements to be created.");
    }

  BITMAP_FREE (tmp);

  if (res)
    {
      statistics_counter_event (cfun, "Scalarized aggregates", res);
      return true;
    }
  else
    return false;
}
| |
| /* Generate statements copying scalar replacements of accesses within a subtree |
| into or out of AGG. ACCESS, all its children, siblings and their children |
| are to be processed. AGG is an aggregate type expression (can be a |
| declaration but does not have to be, it can for example also be a mem_ref or |
| a series of handled components). TOP_OFFSET is the offset of the processed |
| subtree which has to be subtracted from offsets of individual accesses to |
| get corresponding offsets for AGG. If CHUNK_SIZE is non-null, copy only |
| replacements in the interval <start_offset, start_offset + chunk_size>, |
| otherwise copy all. GSI is a statement iterator used to place the new |
| statements. WRITE should be true when the statements should write from AGG |
| to the replacement and false if vice versa. if INSERT_AFTER is true, new |
| statements will be added after the current statement in GSI, they will be |
| added before the statement otherwise. */ |
| |
static void
generate_subtree_copies (struct access *access, tree agg,
			 HOST_WIDE_INT top_offset,
			 HOST_WIDE_INT start_offset, HOST_WIDE_INT chunk_size,
			 gimple_stmt_iterator *gsi, bool write,
			 bool insert_after, location_t loc)
{
  /* Never write anything into constant pool decls.  See PR70602.  */
  if (!write && constant_decl_p (agg))
    return;
  do
    {
      /* Siblings are sorted by offset; once past the chunk, we are done.  */
      if (chunk_size && access->offset >= start_offset + chunk_size)
	return;

      if (access->grp_to_be_replaced
	  && (chunk_size == 0
	      || access->offset + access->size > start_offset))
	{
	  tree expr, repl = get_access_replacement (access);
	  gassign *stmt;

	  expr = build_ref_for_model (loc, agg, access->offset - top_offset,
				      access, gsi, insert_after);

	  if (write)
	    {
	      /* Copy AGG's part into the replacement.  A partial LHS must
		 first be gimplified into a proper operand.  */
	      if (access->grp_partial_lhs)
		expr = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE,
						 !insert_after,
						 insert_after ? GSI_NEW_STMT
						 : GSI_SAME_STMT);
	      stmt = gimple_build_assign (repl, expr);
	    }
	  else
	    {
	      /* Copy the replacement back into AGG's part.  */
	      TREE_NO_WARNING (repl) = 1;
	      if (access->grp_partial_lhs)
		repl = force_gimple_operand_gsi (gsi, repl, true, NULL_TREE,
						 !insert_after,
						 insert_after ? GSI_NEW_STMT
						 : GSI_SAME_STMT);
	      stmt = gimple_build_assign (expr, repl);
	    }
	  gimple_set_location (stmt, loc);

	  if (insert_after)
	    gsi_insert_after (gsi, stmt, GSI_NEW_STMT);
	  else
	    gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
	  update_stmt (stmt);
	  sra_stats.subtree_copies++;
	}
      else if (write
	       && access->grp_to_be_debug_replaced
	       && (chunk_size == 0
		   || access->offset + access->size > start_offset))
	{
	  /* Only a debug replacement exists; emit a debug bind instead of
	     a real copy.  */
	  gdebug *ds;
	  tree drhs = build_debug_ref_for_model (loc, agg,
						 access->offset - top_offset,
						 access);
	  ds = gimple_build_debug_bind (get_access_replacement (access),
					drhs, gsi_stmt (*gsi));
	  if (insert_after)
	    gsi_insert_after (gsi, ds, GSI_NEW_STMT);
	  else
	    gsi_insert_before (gsi, ds, GSI_SAME_STMT);
	}

      if (access->first_child)
	generate_subtree_copies (access->first_child, agg, top_offset,
				 start_offset, chunk_size, gsi,
				 write, insert_after, loc);

      access = access->next_sibling;
    }
  while (access);
}
| |
| /* Assign zero to all scalar replacements in an access subtree. ACCESS is the |
| root of the subtree to be processed. GSI is the statement iterator used |
| for inserting statements which are added after the current statement if |
| INSERT_AFTER is true or before it otherwise. */ |
| |
| static void |
| init_subtree_with_zero (struct access *access, gimple_stmt_iterator *gsi, |
| bool insert_after, location_t loc) |
| |
| { |
| struct access *child; |
| |
| if (access->grp_to_be_replaced) |
| { |
| gassign *stmt; |
| |
| stmt = gimple_build_assign (get_access_replacement (access), |
| build_zero_cst (access->type)); |
| if (insert_after) |
| gsi_insert_after (gsi, stmt, GSI_NEW_STMT); |
| else |
| gsi_insert_before (gsi, stmt, GSI_SAME_STMT); |
| update_stmt (stmt); |
| gimple_set_location (stmt, loc); |
| } |
| else if (access->grp_to_be_debug_replaced) |
| { |
| gdebug *ds |
| = gimple_build_debug_bind (get_access_replacement (access), |
| build_zero_cst (access->type), |
| gsi_stmt (*gsi)); |
| if (insert_after) |
| gsi_insert_after (gsi, ds, GSI_NEW_STMT); |
| else |
| gsi_insert_before (gsi, ds, GSI_SAME_STMT); |
| } |
| |
| for (child = access->first_child; child; child = child->next_sibling) |
| init_subtree_with_zero (child, gsi, insert_after, loc); |
| } |
| |
| /* Clobber all scalar replacements in an access subtree. ACCESS is the |
| root of the subtree to be processed. GSI is the statement iterator used |
| for inserting statements which are added after the current statement if |
| INSERT_AFTER is true or before it otherwise. */ |
| |
| static void |
| clobber_subtree (struct access *access, gimple_stmt_iterator *gsi, |
| bool insert_after, location_t loc) |
| |
| { |
| struct access *child; |
| |
| if (access->grp_to_be_replaced) |
| { |
| tree rep = get_access_replacement (access); |
| tree clobber = build_constructor (access->type, NULL); |
| TREE_THIS_VOLATILE (clobber) = 1; |
| gimple *stmt = gimple_build_assign (rep, clobber); |
| |
| if (insert_after) |
| gsi_insert_after (gsi, stmt, GSI_NEW_STMT); |
| else |
| gsi_insert_before (gsi, stmt, GSI_SAME_STMT); |
| update_stmt (stmt); |
| gimple_set_location (stmt, loc); |
| } |
| |
| for (child = access->first_child; child; child = child->next_sibling) |
| clobber_subtree (child, gsi, insert_after, loc); |
| } |
| |
| /* Search for an access representative for the given expression EXPR and |
| return it or NULL if it cannot be found. */ |
| |
| static struct access * |
| get_access_for_expr (tree expr) |
| { |
| poly_int64 poffset, psize, pmax_size; |
| HOST_WIDE_INT offset, max_size; |
| tree base; |
| bool reverse; |
| |
| /* FIXME: This should not be necessary but Ada produces V_C_Es with a type of |
| a different size than the size of its argument and we need the latter |
| one. */ |
| if (TREE_CODE (expr) == VIEW_CONVERT_EXPR) |
| expr = TREE_OPERAND (expr, 0); |
| |
| base = get_ref_base_and_extent (expr, &poffset, &psize, &pmax_size, |
| &reverse); |
| if (!known_size_p (pmax_size) |
| || !pmax_size.is_constant (&max_size) |
| || !poffset.is_constant (&offset) |
| || !DECL_P (base)) |
| return NULL; |
| |
| if (!bitmap_bit_p (candidate_bitmap, DECL_UID (base))) |
| return NULL; |
| |
| return get_var_base_offset_size_access (base, offset, max_size); |
| } |
| |
| /* Replace the expression EXPR with a scalar replacement if there is one and |
| generate other statements to do type conversion or subtree copying if |
| necessary. GSI is used to place newly created statements, WRITE is true if |
| the expression is being written to (it is on a LHS of a statement or output |
| in an assembly statement). */ |
| |
| static bool |
| sra_modify_expr (tree *expr, gimple_stmt_iterator *gsi, bool write) |
| { |
| location_t loc; |
| struct access *access; |
| tree type, bfr, orig_expr; |
| |
| if (TREE_CODE (*expr) == BIT_FIELD_REF) |
| { |
| bfr = *expr; |
| expr = &TREE_OPERAND (*expr, 0); |
| } |
| else |
| bfr = NULL_TREE; |
| |
| if (TREE_CODE (*expr) == REALPART_EXPR || TREE_CODE (*expr) == IMAGPART_EXPR) |
| expr = &TREE_OPERAND (*expr, 0); |
| access = get_access_for_expr (*expr); |
| if (!access) |
| return false; |
| type = TREE_TYPE (*expr); |
| orig_expr = *expr; |
| |
| loc = gimple_location (gsi_stmt (*gsi)); |
| gimple_stmt_iterator alt_gsi = gsi_none (); |
| if (write && stmt_ends_bb_p (gsi_stmt (*gsi))) |
| { |
| alt_gsi = gsi_start_edge (single_non_eh_succ (gsi_bb (*gsi))); |
| gsi = &alt_gsi; |
| } |
| |
| if (access->grp_to_be_replaced) |
| { |
| tree repl = get_access_replacement (access); |
| /* If we replace a non-register typed access simply use the original |
| access expression to extract the scalar component afterwards. |
| This happens if scalarizing a function return value or parameter |
| like in gcc.c-torture/execute/20041124-1.c, 20050316-1.c and |
| gcc.c-torture/compile/20011217-1.c. |
| |
| We also want to use this when accessing a complex or vector which can |
| be accessed as a different type too, potentially creating a need for |
| type conversion (see PR42196) and when scalarized unions are involved |
| in assembler statements (see PR42398). */ |
| if (!useless_type_conversion_p (type, access->type)) |
| { |
| tree ref; |
| |
| ref = build_ref_for_model (loc, orig_expr, 0, access, gsi, false); |
| |
| if (write) |
| { |
| gassign *stmt; |
| |
| if (access->grp_partial_lhs) |
| ref = force_gimple_operand_gsi (gsi, ref, true, NULL_TREE, |
| false, GSI_NEW_STMT); |
| stmt = gimple_build_assign (repl, ref); |
| gimple_set_location (stmt, loc); |
| gsi_insert_after (gsi, stmt, GSI_NEW_STMT); |
| } |
| else |
| { |
| gassign *stmt; |
| |
| if (access->grp_partial_lhs) |
| repl = force_gimple_operand_gsi (gsi, repl, true, NULL_TREE, |
| true, GSI_SAME_STMT); |
| stmt = gimple_build_assign (ref, repl); |
| gimple_set_location (stmt, loc); |
| gsi_insert_before (gsi, stmt, GSI_SAME_STMT); |
|