| /* SCC value numbering for trees |
| Copyright (C) 2006-2021 Free Software Foundation, Inc. |
| Contributed by Daniel Berlin <dan@dberlin.org> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "splay-tree.h" |
| #include "backend.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "ssa.h" |
| #include "expmed.h" |
| #include "insn-config.h" |
| #include "memmodel.h" |
| #include "emit-rtl.h" |
| #include "cgraph.h" |
| #include "gimple-pretty-print.h" |
| #include "alias.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "cfganal.h" |
| #include "tree-inline.h" |
| #include "internal-fn.h" |
| #include "gimple-fold.h" |
| #include "tree-eh.h" |
| #include "gimplify.h" |
| #include "flags.h" |
| #include "dojump.h" |
| #include "explow.h" |
| #include "calls.h" |
| #include "varasm.h" |
| #include "stmt.h" |
| #include "expr.h" |
| #include "tree-dfa.h" |
| #include "tree-ssa.h" |
| #include "dumpfile.h" |
| #include "cfgloop.h" |
| #include "tree-ssa-propagate.h" |
| #include "tree-cfg.h" |
| #include "domwalk.h" |
| #include "gimple-iterator.h" |
| #include "gimple-match.h" |
| #include "stringpool.h" |
| #include "attribs.h" |
| #include "tree-pass.h" |
| #include "statistics.h" |
| #include "langhooks.h" |
| #include "ipa-utils.h" |
| #include "dbgcnt.h" |
| #include "tree-cfgcleanup.h" |
| #include "tree-ssa-loop.h" |
| #include "tree-scalar-evolution.h" |
| #include "tree-ssa-loop-niter.h" |
| #include "builtins.h" |
| #include "fold-const-call.h" |
| #include "tree-ssa-sccvn.h" |
| |
| /* This algorithm is based on the SCC algorithm presented by Keith |
| Cooper and L. Taylor Simpson in "SCC-Based Value Numbering" |
| (http://citeseer.ist.psu.edu/41805.html). In |
| straight-line code, it is equivalent to a regular hash-based value |
| numbering that is performed in reverse postorder. |
| |
| For code with cycles, there are two alternatives, both of which |
| require keeping the hashtables separate from the actual list of |
| value numbers for SSA names. |
| |
| 1. Iterate value numbering in an RPO walk of the blocks, removing |
| all the entries from the hashtable after each iteration (but |
| keeping the SSA name->value number mapping between iterations). |
| Iterate until the SSA name -> value number mapping does not change. |
| |
| 2. Perform value numbering as part of an SCC walk on the SSA graph, |
| iterating only the cycles in the SSA graph until they do not change |
| (using a separate, optimistic hashtable for value numbering the SCC |
| operands). |
| |
| The second is not just faster in practice (because most SSA graph |
| cycles do not involve all the variables in the graph), it also has |
| some nice properties. |
| |
| One of these nice properties is that when we pop an SCC off the |
| stack, we are guaranteed to have processed all the operands coming from |
| *outside of that SCC*, so we do not need to do anything special to |
| ensure they have value numbers. |
| |
| Another nice property is that the SCC walk is done as part of a DFS |
| of the SSA graph, which makes it easy to perform combining and |
| simplifying operations at the same time. |
| |
| The code below is deliberately written in a way that makes it easy |
| to separate the SCC walk from the other work it does. |
| |
| In order to propagate constants through the code, we track which |
| expressions contain constants, and use those while folding. In |
| theory, we could also track expressions whose value numbers are |
| replaced, in case we end up folding based on expression |
| identities. |
| |
| In order to value number memory, we assign value numbers to vuses. |
| This enables us to note that, for example, stores to the same |
| address of the same value from the same starting memory states are |
| equivalent. |
| TODO: |
| |
| 1. We can iterate only the changing portions of the SCCs, but |
| I have not seen an SCC big enough for this to be a win. |
| 2. If you differentiate between phi nodes for loops and phi nodes |
| for if-then-else, you can properly consider phi nodes in different |
| blocks for equivalence. |
| 3. We could value number vuses in more cases, particularly, whole |
| structure copies. |
| */ |
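| |
| /* As a small illustration of the hash-based numbering described above |
| (this is only an example, not something the pass itself contains), |
| consider |
| |
| x_1 = a_2 + b_3; |
| y_4 = a_2 + b_3; |
| |
| Both right-hand sides valueize to the same nary operation |
| (PLUS_EXPR, VN(a_2), VN(b_3)), so x_1 and y_4 receive the same value |
| number and a later x_1 == y_4 can be folded to true.  Memory is |
| handled analogously by value numbering the VUSE, so two loads from |
| the same address with the same value-numbered VUSE are also given |
| the same value number. */ |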
| |
| /* There's no BB_EXECUTABLE but we can use BB_VISITED. */ |
| #define BB_EXECUTABLE BB_VISITED |
| |
| static vn_lookup_kind default_vn_walk_kind; |
| |
| /* vn_nary_op hashtable helpers. */ |
| |
| struct vn_nary_op_hasher : nofree_ptr_hash <vn_nary_op_s> |
| { |
| typedef vn_nary_op_s *compare_type; |
| static inline hashval_t hash (const vn_nary_op_s *); |
| static inline bool equal (const vn_nary_op_s *, const vn_nary_op_s *); |
| }; |
| |
| /* Return the computed hashcode for nary operation VNO1. */ |
| |
| inline hashval_t |
| vn_nary_op_hasher::hash (const vn_nary_op_s *vno1) |
| { |
| return vno1->hashcode; |
| } |
| |
| /* Compare nary operations VNO1 and VNO2 and return true if they are |
| equivalent. */ |
| |
| inline bool |
| vn_nary_op_hasher::equal (const vn_nary_op_s *vno1, const vn_nary_op_s *vno2) |
| { |
| return vno1 == vno2 || vn_nary_op_eq (vno1, vno2); |
| } |
| |
| typedef hash_table<vn_nary_op_hasher> vn_nary_op_table_type; |
| typedef vn_nary_op_table_type::iterator vn_nary_op_iterator_type; |
| |
| |
| /* vn_phi hashtable helpers. */ |
| |
| static int |
| vn_phi_eq (const_vn_phi_t const vp1, const_vn_phi_t const vp2); |
| |
| struct vn_phi_hasher : nofree_ptr_hash <vn_phi_s> |
| { |
| static inline hashval_t hash (const vn_phi_s *); |
| static inline bool equal (const vn_phi_s *, const vn_phi_s *); |
| }; |
| |
| /* Return the computed hashcode for phi operation VP1. */ |
| |
| inline hashval_t |
| vn_phi_hasher::hash (const vn_phi_s *vp1) |
| { |
| return vp1->hashcode; |
| } |
| |
| /* Compare two phi entries for equality, ignoring VN_TOP arguments. */ |
| |
| inline bool |
| vn_phi_hasher::equal (const vn_phi_s *vp1, const vn_phi_s *vp2) |
| { |
| return vp1 == vp2 || vn_phi_eq (vp1, vp2); |
| } |
| |
| typedef hash_table<vn_phi_hasher> vn_phi_table_type; |
| typedef vn_phi_table_type::iterator vn_phi_iterator_type; |
| |
| |
| /* Compare two reference operands P1 and P2 for equality. Return true if |
| they are equal, and false otherwise. */ |
| |
| static int |
| vn_reference_op_eq (const void *p1, const void *p2) |
| { |
| const_vn_reference_op_t const vro1 = (const_vn_reference_op_t) p1; |
| const_vn_reference_op_t const vro2 = (const_vn_reference_op_t) p2; |
| |
| return (vro1->opcode == vro2->opcode |
| /* We do not care for differences in type qualification. */ |
| && (vro1->type == vro2->type |
| || (vro1->type && vro2->type |
| && types_compatible_p (TYPE_MAIN_VARIANT (vro1->type), |
| TYPE_MAIN_VARIANT (vro2->type)))) |
| && expressions_equal_p (vro1->op0, vro2->op0) |
| && expressions_equal_p (vro1->op1, vro2->op1) |
| && expressions_equal_p (vro1->op2, vro2->op2) |
| && (vro1->opcode != CALL_EXPR || vro1->clique == vro2->clique)); |
| } |
| |
| /* Free a reference operation structure VR. */ |
| |
| static inline void |
| free_reference (vn_reference_s *vr) |
| { |
| vr->operands.release (); |
| } |
| |
| |
| /* vn_reference hashtable helpers. */ |
| |
| struct vn_reference_hasher : nofree_ptr_hash <vn_reference_s> |
| { |
| static inline hashval_t hash (const vn_reference_s *); |
| static inline bool equal (const vn_reference_s *, const vn_reference_s *); |
| }; |
| |
| /* Return the hashcode for a given reference operation VR1. */ |
| |
| inline hashval_t |
| vn_reference_hasher::hash (const vn_reference_s *vr1) |
| { |
| return vr1->hashcode; |
| } |
| |
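| /* Return true if reference operations V and C are equivalent. */ |
| |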
| inline bool |
| vn_reference_hasher::equal (const vn_reference_s *v, const vn_reference_s *c) |
| { |
| return v == c || vn_reference_eq (v, c); |
| } |
| |
| typedef hash_table<vn_reference_hasher> vn_reference_table_type; |
| typedef vn_reference_table_type::iterator vn_reference_iterator_type; |
| |
| /* Pretty-print OPS to OUTFILE. */ |
| |
| void |
| print_vn_reference_ops (FILE *outfile, const vec<vn_reference_op_s> ops) |
| { |
| vn_reference_op_t vro; |
| unsigned int i; |
| fprintf (outfile, "{"); |
| for (i = 0; ops.iterate (i, &vro); i++) |
| { |
| bool closebrace = false; |
| if (vro->opcode != SSA_NAME |
| && TREE_CODE_CLASS (vro->opcode) != tcc_declaration) |
| { |
| fprintf (outfile, "%s", get_tree_code_name (vro->opcode)); |
| if (vro->op0 || vro->opcode == CALL_EXPR) |
| { |
| fprintf (outfile, "<"); |
| closebrace = true; |
| } |
| } |
| if (vro->op0 || vro->opcode == CALL_EXPR) |
| { |
| if (!vro->op0) |
| fprintf (outfile, internal_fn_name ((internal_fn)vro->clique)); |
| else |
| print_generic_expr (outfile, vro->op0); |
| if (vro->op1) |
| { |
| fprintf (outfile, ","); |
| print_generic_expr (outfile, vro->op1); |
| } |
| if (vro->op2) |
| { |
| fprintf (outfile, ","); |
| print_generic_expr (outfile, vro->op2); |
| } |
| } |
| if (closebrace) |
| fprintf (outfile, ">"); |
| if (i != ops.length () - 1) |
| fprintf (outfile, ","); |
| } |
| fprintf (outfile, "}"); |
| } |
| |
| DEBUG_FUNCTION void |
| debug_vn_reference_ops (const vec<vn_reference_op_s> ops) |
| { |
| print_vn_reference_ops (stderr, ops); |
| fputc ('\n', stderr); |
| } |
| |
| /* The set of VN hashtables. */ |
| |
| typedef struct vn_tables_s |
| { |
| vn_nary_op_table_type *nary; |
| vn_phi_table_type *phis; |
| vn_reference_table_type *references; |
| } *vn_tables_t; |
| |
| |
| /* vn_constant hashtable helpers. */ |
| |
| struct vn_constant_hasher : free_ptr_hash <vn_constant_s> |
| { |
| static inline hashval_t hash (const vn_constant_s *); |
| static inline bool equal (const vn_constant_s *, const vn_constant_s *); |
| }; |
| |
| /* Hash table hash function for vn_constant_t. */ |
| |
| inline hashval_t |
| vn_constant_hasher::hash (const vn_constant_s *vc1) |
| { |
| return vc1->hashcode; |
| } |
| |
| /* Hash table equality function for vn_constant_t. */ |
| |
| inline bool |
| vn_constant_hasher::equal (const vn_constant_s *vc1, const vn_constant_s *vc2) |
| { |
| if (vc1->hashcode != vc2->hashcode) |
| return false; |
| |
| return vn_constant_eq_with_type (vc1->constant, vc2->constant); |
| } |
| |
| static hash_table<vn_constant_hasher> *constant_to_value_id; |
| |
| |
| /* Obstack we allocate the vn-tables elements from. */ |
| static obstack vn_tables_obstack; |
| /* Special obstack we never unwind. */ |
| static obstack vn_tables_insert_obstack; |
| |
| static vn_reference_t last_inserted_ref; |
| static vn_phi_t last_inserted_phi; |
| static vn_nary_op_t last_inserted_nary; |
| static vn_ssa_aux_t last_pushed_avail; |
| |
| /* Valid hashtables storing information we have proven to be |
| correct. */ |
| static vn_tables_t valid_info; |
| |
| |
| /* Valueization hook for simplify_replace_tree. Valueize NAME if it is |
| an SSA name, otherwise just return it. */ |
| tree (*vn_valueize) (tree); |
| static tree |
| vn_valueize_for_srt (tree t, void* context ATTRIBUTE_UNUSED) |
| { |
| basic_block saved_vn_context_bb = vn_context_bb; |
| /* Look for something available at the definition block of the argument. |
| This avoids inconsistencies between availability there which |
| decides if the stmt can be removed and availability at the |
| use site. The SSA property ensures that things available |
| at the definition are also available at uses. */ |
| if (!SSA_NAME_IS_DEFAULT_DEF (t)) |
| vn_context_bb = gimple_bb (SSA_NAME_DEF_STMT (t)); |
| tree res = vn_valueize (t); |
| vn_context_bb = saved_vn_context_bb; |
| return res; |
| } |
| |
| |
| /* This represents the top of the VN lattice, which is the universal |
| value. */ |
| |
| tree VN_TOP; |
| |
| /* Unique counter for our value ids. */ |
| |
| static unsigned int next_value_id; |
| static int next_constant_value_id; |
| |
| |
| /* Table of vn_ssa_aux_t's, one per SSA name.  The vn_ssa_aux_t objects |
| are allocated on an obstack for locality reasons, and so that they can |
| all be freed at once without looping over the table. */ |
| |
| struct vn_ssa_aux_hasher : typed_noop_remove <vn_ssa_aux_t> |
| { |
| typedef vn_ssa_aux_t value_type; |
| typedef tree compare_type; |
| static inline hashval_t hash (const value_type &); |
| static inline bool equal (const value_type &, const compare_type &); |
| static inline void mark_deleted (value_type &) {} |
| static const bool empty_zero_p = true; |
| static inline void mark_empty (value_type &e) { e = NULL; } |
| static inline bool is_deleted (value_type &) { return false; } |
| static inline bool is_empty (value_type &e) { return e == NULL; } |
| }; |
| |
| hashval_t |
| vn_ssa_aux_hasher::hash (const value_type &entry) |
| { |
| return SSA_NAME_VERSION (entry->name); |
| } |
| |
| bool |
| vn_ssa_aux_hasher::equal (const value_type &entry, const compare_type &name) |
| { |
| return name == entry->name; |
| } |
| |
| static hash_table<vn_ssa_aux_hasher> *vn_ssa_aux_hash; |
| typedef hash_table<vn_ssa_aux_hasher>::iterator vn_ssa_aux_iterator_type; |
| static struct obstack vn_ssa_aux_obstack; |
| |
| static vn_nary_op_t vn_nary_op_insert_stmt (gimple *, tree); |
| static unsigned int vn_nary_length_from_stmt (gimple *); |
| static vn_nary_op_t alloc_vn_nary_op_noinit (unsigned int, obstack *); |
| static vn_nary_op_t vn_nary_op_insert_into (vn_nary_op_t, |
| vn_nary_op_table_type *, bool); |
| static void init_vn_nary_op_from_stmt (vn_nary_op_t, gassign *); |
| static void init_vn_nary_op_from_pieces (vn_nary_op_t, unsigned int, |
| enum tree_code, tree, tree *); |
| static tree vn_lookup_simplify_result (gimple_match_op *); |
| static vn_reference_t vn_reference_lookup_or_insert_for_pieces |
| (tree, alias_set_type, alias_set_type, tree, |
| vec<vn_reference_op_s, va_heap>, tree); |
| |
| /* Return whether there is value numbering information for a given SSA name. */ |
| |
| bool |
| has_VN_INFO (tree name) |
| { |
| return vn_ssa_aux_hash->find_with_hash (name, SSA_NAME_VERSION (name)); |
| } |
| |
| vn_ssa_aux_t |
| VN_INFO (tree name) |
| { |
| vn_ssa_aux_t *res |
| = vn_ssa_aux_hash->find_slot_with_hash (name, SSA_NAME_VERSION (name), |
| INSERT); |
| if (*res != NULL) |
| return *res; |
| |
| vn_ssa_aux_t newinfo = *res = XOBNEW (&vn_ssa_aux_obstack, struct vn_ssa_aux); |
| memset (newinfo, 0, sizeof (struct vn_ssa_aux)); |
| newinfo->name = name; |
| newinfo->valnum = VN_TOP; |
| /* We are using the visited flag to handle uses with defs not within the |
| region being value-numbered. */ |
| newinfo->visited = false; |
| |
| /* Since we create the VN_INFOs on-demand now, default definitions have to |
| be initialized to something other than VN_TOP here. */ |
| if (SSA_NAME_IS_DEFAULT_DEF (name)) |
| switch (TREE_CODE (SSA_NAME_VAR (name))) |
| { |
| case VAR_DECL: |
| /* All undefined vars are VARYING. */ |
| newinfo->valnum = name; |
| newinfo->visited = true; |
| break; |
| |
| case PARM_DECL: |
| /* Parameters are VARYING but we can record a condition |
| if we know it is a non-NULL pointer. */ |
| newinfo->visited = true; |
| newinfo->valnum = name; |
| if (POINTER_TYPE_P (TREE_TYPE (name)) |
| && nonnull_arg_p (SSA_NAME_VAR (name))) |
| { |
| tree ops[2]; |
| ops[0] = name; |
| ops[1] = build_int_cst (TREE_TYPE (name), 0); |
| vn_nary_op_t nary; |
| /* Allocate from non-unwinding stack. */ |
| nary = alloc_vn_nary_op_noinit (2, &vn_tables_insert_obstack); |
| init_vn_nary_op_from_pieces (nary, 2, NE_EXPR, |
| boolean_type_node, ops); |
| nary->predicated_values = 0; |
| nary->u.result = boolean_true_node; |
| vn_nary_op_insert_into (nary, valid_info->nary, true); |
| gcc_assert (nary->unwind_to == NULL); |
| /* Also do not link it into the undo chain. */ |
| last_inserted_nary = nary->next; |
| nary->next = (vn_nary_op_t)(void *)-1; |
| nary = alloc_vn_nary_op_noinit (2, &vn_tables_insert_obstack); |
| init_vn_nary_op_from_pieces (nary, 2, EQ_EXPR, |
| boolean_type_node, ops); |
| nary->predicated_values = 0; |
| nary->u.result = boolean_false_node; |
| vn_nary_op_insert_into (nary, valid_info->nary, true); |
| gcc_assert (nary->unwind_to == NULL); |
| last_inserted_nary = nary->next; |
| nary->next = (vn_nary_op_t)(void *)-1; |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Recording "); |
| print_generic_expr (dump_file, name, TDF_SLIM); |
| fprintf (dump_file, " != 0\n"); |
| } |
| } |
| break; |
| |
| case RESULT_DECL: |
| /* If the result is passed by invisible reference the default |
| def is initialized, otherwise it's uninitialized.  Either |
| way it is VARYING. */ |
| newinfo->visited = true; |
| newinfo->valnum = name; |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| return newinfo; |
| } |
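| |
| /* As an illustration of the PARM_DECL handling above (names made up): |
| if P is a pointer parameter that nonnull_arg_p reports as non-null, |
| its default definition p_1(D) gets the nary table pre-seeded with |
| p_1(D) != 0 -> true and p_1(D) == 0 -> false, so later comparisons |
| of the parameter against NULL can be simplified. */ |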
| |
| /* Return the SSA value of X. */ |
| |
| inline tree |
| SSA_VAL (tree x, bool *visited = NULL) |
| { |
| vn_ssa_aux_t tem = vn_ssa_aux_hash->find_with_hash (x, SSA_NAME_VERSION (x)); |
| if (visited) |
| *visited = tem && tem->visited; |
| return tem && tem->visited ? tem->valnum : x; |
| } |
| |
| /* Return the SSA value of the VUSE X, supporting released VDEFs |
| during elimination which will value-number the VDEF to the |
| associated VUSE (but not substitute in the whole lattice). */ |
| |
| static inline tree |
| vuse_ssa_val (tree x) |
| { |
| if (!x) |
| return NULL_TREE; |
| |
| do |
| { |
| x = SSA_VAL (x); |
| gcc_assert (x != VN_TOP); |
| } |
| while (SSA_NAME_IN_FREE_LIST (x)); |
| |
| return x; |
| } |
| |
| /* Similar to the above but used as callback for walk_non_aliased_vuses |
| and thus should stop at an unvisited VUSE so as not to walk across |
| region boundaries. */ |
| |
| static tree |
| vuse_valueize (tree vuse) |
| { |
| do |
| { |
| bool visited; |
| vuse = SSA_VAL (vuse, &visited); |
| if (!visited) |
| return NULL_TREE; |
| gcc_assert (vuse != VN_TOP); |
| } |
| while (SSA_NAME_IN_FREE_LIST (vuse)); |
| return vuse; |
| } |
| |
| |
| /* Return the vn_kind the expression computed by STMT should be |
| associated with. */ |
| |
| enum vn_kind |
| vn_get_stmt_kind (gimple *stmt) |
| { |
| switch (gimple_code (stmt)) |
| { |
| case GIMPLE_CALL: |
| return VN_REFERENCE; |
| case GIMPLE_PHI: |
| return VN_PHI; |
| case GIMPLE_ASSIGN: |
| { |
| enum tree_code code = gimple_assign_rhs_code (stmt); |
| tree rhs1 = gimple_assign_rhs1 (stmt); |
| switch (get_gimple_rhs_class (code)) |
| { |
| case GIMPLE_UNARY_RHS: |
| case GIMPLE_BINARY_RHS: |
| case GIMPLE_TERNARY_RHS: |
| return VN_NARY; |
| case GIMPLE_SINGLE_RHS: |
| switch (TREE_CODE_CLASS (code)) |
| { |
| case tcc_reference: |
| /* VOP-less references can go through the unary case. */ |
| if ((code == REALPART_EXPR |
| || code == IMAGPART_EXPR |
| || code == VIEW_CONVERT_EXPR |
| || code == BIT_FIELD_REF) |
| && (TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME |
| || is_gimple_min_invariant (TREE_OPERAND (rhs1, 0)))) |
| return VN_NARY; |
| |
| /* Fallthrough. */ |
| case tcc_declaration: |
| return VN_REFERENCE; |
| |
| case tcc_constant: |
| return VN_CONSTANT; |
| |
| default: |
| if (code == ADDR_EXPR) |
| return (is_gimple_min_invariant (rhs1) |
| ? VN_CONSTANT : VN_REFERENCE); |
| else if (code == CONSTRUCTOR) |
| return VN_NARY; |
| return VN_NONE; |
| } |
| default: |
| return VN_NONE; |
| } |
| } |
| default: |
| return VN_NONE; |
| } |
| } |
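| |
| /* For instance (illustrative summary of the switch above), |
| _1 = a_2 + 3 yields VN_NARY, _1 = *p_2 and calls yield VN_REFERENCE, |
| PHI nodes yield VN_PHI, _1 = 5 yields VN_CONSTANT, and statements we |
| cannot handle yield VN_NONE. */ |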
| |
| /* Look up a value id for CONSTANT and return it.  If it does not |
| exist, return 0. */ |
| |
| unsigned int |
| get_constant_value_id (tree constant) |
| { |
| vn_constant_s **slot; |
| struct vn_constant_s vc; |
| |
| vc.hashcode = vn_hash_constant_with_type (constant); |
| vc.constant = constant; |
| slot = constant_to_value_id->find_slot (&vc, NO_INSERT); |
| if (slot) |
| return (*slot)->value_id; |
| return 0; |
| } |
| |
| /* Look up a value id for CONSTANT; if one does not exist, create a |
| new one.  Either way, return the value id. */ |
| |
| unsigned int |
| get_or_alloc_constant_value_id (tree constant) |
| { |
| vn_constant_s **slot; |
| struct vn_constant_s vc; |
| vn_constant_t vcp; |
| |
| /* If the hashtable isn't initialized we're not running from PRE and thus |
| do not need value-ids. */ |
| if (!constant_to_value_id) |
| return 0; |
| |
| vc.hashcode = vn_hash_constant_with_type (constant); |
| vc.constant = constant; |
| slot = constant_to_value_id->find_slot (&vc, INSERT); |
| if (*slot) |
| return (*slot)->value_id; |
| |
| vcp = XNEW (struct vn_constant_s); |
| vcp->hashcode = vc.hashcode; |
| vcp->constant = constant; |
| vcp->value_id = get_next_constant_value_id (); |
| *slot = vcp; |
| return vcp->value_id; |
| } |
| |
| /* Compute the hash for a reference operand VRO1. */ |
| |
| static void |
| vn_reference_op_compute_hash (const vn_reference_op_t vro1, inchash::hash &hstate) |
| { |
| hstate.add_int (vro1->opcode); |
| if (vro1->opcode == CALL_EXPR && !vro1->op0) |
| hstate.add_int (vro1->clique); |
| if (vro1->op0) |
| inchash::add_expr (vro1->op0, hstate); |
| if (vro1->op1) |
| inchash::add_expr (vro1->op1, hstate); |
| if (vro1->op2) |
| inchash::add_expr (vro1->op2, hstate); |
| } |
| |
| /* Compute a hash for the reference operation VR1 and return it. */ |
| |
| static hashval_t |
| vn_reference_compute_hash (const vn_reference_t vr1) |
| { |
| inchash::hash hstate; |
| hashval_t result; |
| int i; |
| vn_reference_op_t vro; |
| poly_int64 off = -1; |
| bool deref = false; |
| |
| FOR_EACH_VEC_ELT (vr1->operands, i, vro) |
| { |
| if (vro->opcode == MEM_REF) |
| deref = true; |
| else if (vro->opcode != ADDR_EXPR) |
| deref = false; |
| if (maybe_ne (vro->off, -1)) |
| { |
| if (known_eq (off, -1)) |
| off = 0; |
| off += vro->off; |
| } |
| else |
| { |
| if (maybe_ne (off, -1) |
| && maybe_ne (off, 0)) |
| hstate.add_poly_int (off); |
| off = -1; |
| if (deref |
| && vro->opcode == ADDR_EXPR) |
| { |
| if (vro->op0) |
| { |
| tree op = TREE_OPERAND (vro->op0, 0); |
| hstate.add_int (TREE_CODE (op)); |
| inchash::add_expr (op, hstate); |
| } |
| } |
| else |
| vn_reference_op_compute_hash (vro, hstate); |
| } |
| } |
| result = hstate.end (); |
| /* ??? We would ICE later if we hash instead of adding that in. */ |
| if (vr1->vuse) |
| result += SSA_NAME_VERSION (vr1->vuse); |
| |
| return result; |
| } |
| |
| /* Return true if reference operations VR1 and VR2 are equivalent. This |
| means they have the same set of operands and vuses. */ |
| |
| bool |
| vn_reference_eq (const_vn_reference_t const vr1, const_vn_reference_t const vr2) |
| { |
| unsigned i, j; |
| |
| /* Early out if this is not a hash collision. */ |
| if (vr1->hashcode != vr2->hashcode) |
| return false; |
| |
| /* The VOP needs to be the same. */ |
| if (vr1->vuse != vr2->vuse) |
| return false; |
| |
| /* If the operands are the same we are done. */ |
| if (vr1->operands == vr2->operands) |
| return true; |
| |
| if (!vr1->type || !vr2->type) |
| { |
| if (vr1->type != vr2->type) |
| return false; |
| } |
| else if (vr1->type == vr2->type) |
| ; |
| else if (COMPLETE_TYPE_P (vr1->type) != COMPLETE_TYPE_P (vr2->type) |
| || (COMPLETE_TYPE_P (vr1->type) |
| && !expressions_equal_p (TYPE_SIZE (vr1->type), |
| TYPE_SIZE (vr2->type)))) |
| return false; |
| else if (vr1->operands[0].opcode == CALL_EXPR |
| && !types_compatible_p (vr1->type, vr2->type)) |
| return false; |
| else if (INTEGRAL_TYPE_P (vr1->type) |
| && INTEGRAL_TYPE_P (vr2->type)) |
| { |
| if (TYPE_PRECISION (vr1->type) != TYPE_PRECISION (vr2->type)) |
| return false; |
| } |
| else if (INTEGRAL_TYPE_P (vr1->type) |
| && (TYPE_PRECISION (vr1->type) |
| != TREE_INT_CST_LOW (TYPE_SIZE (vr1->type)))) |
| return false; |
| else if (INTEGRAL_TYPE_P (vr2->type) |
| && (TYPE_PRECISION (vr2->type) |
| != TREE_INT_CST_LOW (TYPE_SIZE (vr2->type)))) |
| return false; |
| |
| i = 0; |
| j = 0; |
| do |
| { |
| poly_int64 off1 = 0, off2 = 0; |
| vn_reference_op_t vro1, vro2; |
| vn_reference_op_s tem1, tem2; |
| bool deref1 = false, deref2 = false; |
| bool reverse1 = false, reverse2 = false; |
| for (; vr1->operands.iterate (i, &vro1); i++) |
| { |
| if (vro1->opcode == MEM_REF) |
| deref1 = true; |
| /* Do not look through a storage order barrier. */ |
| else if (vro1->opcode == VIEW_CONVERT_EXPR && vro1->reverse) |
| return false; |
| reverse1 |= vro1->reverse; |
| if (known_eq (vro1->off, -1)) |
| break; |
| off1 += vro1->off; |
| } |
| for (; vr2->operands.iterate (j, &vro2); j++) |
| { |
| if (vro2->opcode == MEM_REF) |
| deref2 = true; |
| /* Do not look through a storage order barrier. */ |
| else if (vro2->opcode == VIEW_CONVERT_EXPR && vro2->reverse) |
| return false; |
| reverse2 |= vro2->reverse; |
| if (known_eq (vro2->off, -1)) |
| break; |
| off2 += vro2->off; |
| } |
| if (maybe_ne (off1, off2) || reverse1 != reverse2) |
| return false; |
| if (deref1 && vro1->opcode == ADDR_EXPR) |
| { |
| memset (&tem1, 0, sizeof (tem1)); |
| tem1.op0 = TREE_OPERAND (vro1->op0, 0); |
| tem1.type = TREE_TYPE (tem1.op0); |
| tem1.opcode = TREE_CODE (tem1.op0); |
| vro1 = &tem1; |
| deref1 = false; |
| } |
| if (deref2 && vro2->opcode == ADDR_EXPR) |
| { |
| memset (&tem2, 0, sizeof (tem2)); |
| tem2.op0 = TREE_OPERAND (vro2->op0, 0); |
| tem2.type = TREE_TYPE (tem2.op0); |
| tem2.opcode = TREE_CODE (tem2.op0); |
| vro2 = &tem2; |
| deref2 = false; |
| } |
| if (deref1 != deref2) |
| return false; |
| if (!vn_reference_op_eq (vro1, vro2)) |
| return false; |
| ++j; |
| ++i; |
| } |
| while (vr1->operands.length () != i |
| || vr2->operands.length () != j); |
| |
| return true; |
| } |
| |
| /* Copy the operations present in load/store REF into RESULT, a vector of |
| vn_reference_op_s's. */ |
| |
| static void |
| copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result) |
| { |
| /* For non-calls, store the information that makes up the address. */ |
| tree orig = ref; |
| while (ref) |
| { |
| vn_reference_op_s temp; |
| |
| memset (&temp, 0, sizeof (temp)); |
| temp.type = TREE_TYPE (ref); |
| temp.opcode = TREE_CODE (ref); |
| temp.off = -1; |
| |
| switch (temp.opcode) |
| { |
| case MODIFY_EXPR: |
| temp.op0 = TREE_OPERAND (ref, 1); |
| break; |
| case WITH_SIZE_EXPR: |
| temp.op0 = TREE_OPERAND (ref, 1); |
| temp.off = 0; |
| break; |
| case MEM_REF: |
| /* The base address gets its own vn_reference_op_s structure. */ |
| temp.op0 = TREE_OPERAND (ref, 1); |
| if (!mem_ref_offset (ref).to_shwi (&temp.off)) |
| temp.off = -1; |
| temp.clique = MR_DEPENDENCE_CLIQUE (ref); |
| temp.base = MR_DEPENDENCE_BASE (ref); |
| temp.reverse = REF_REVERSE_STORAGE_ORDER (ref); |
| break; |
| case TARGET_MEM_REF: |
| /* The base address gets its own vn_reference_op_s structure. */ |
| temp.op0 = TMR_INDEX (ref); |
| temp.op1 = TMR_STEP (ref); |
| temp.op2 = TMR_OFFSET (ref); |
| temp.clique = MR_DEPENDENCE_CLIQUE (ref); |
| temp.base = MR_DEPENDENCE_BASE (ref); |
| result->safe_push (temp); |
| memset (&temp, 0, sizeof (temp)); |
| temp.type = NULL_TREE; |
| temp.opcode = ERROR_MARK; |
| temp.op0 = TMR_INDEX2 (ref); |
| temp.off = -1; |
| break; |
| case BIT_FIELD_REF: |
| /* Record bits, position and storage order. */ |
| temp.op0 = TREE_OPERAND (ref, 1); |
| temp.op1 = TREE_OPERAND (ref, 2); |
| if (!multiple_p (bit_field_offset (ref), BITS_PER_UNIT, &temp.off)) |
| temp.off = -1; |
| temp.reverse = REF_REVERSE_STORAGE_ORDER (ref); |
| break; |
| case COMPONENT_REF: |
| /* The field decl is enough to unambiguously specify the field, |
| so use its type here. */ |
| temp.type = TREE_TYPE (TREE_OPERAND (ref, 1)); |
| temp.op0 = TREE_OPERAND (ref, 1); |
| temp.op1 = TREE_OPERAND (ref, 2); |
| temp.reverse = (AGGREGATE_TYPE_P (TREE_TYPE (TREE_OPERAND (ref, 0))) |
| && TYPE_REVERSE_STORAGE_ORDER |
| (TREE_TYPE (TREE_OPERAND (ref, 0)))); |
| { |
| tree this_offset = component_ref_field_offset (ref); |
| if (this_offset |
| && poly_int_tree_p (this_offset)) |
| { |
| tree bit_offset = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)); |
| if (TREE_INT_CST_LOW (bit_offset) % BITS_PER_UNIT == 0) |
| { |
| poly_offset_int off |
| = (wi::to_poly_offset (this_offset) |
| + (wi::to_offset (bit_offset) >> LOG2_BITS_PER_UNIT)); |
| /* Prohibit value-numbering zero offset components |
| of addresses the same before the pass folding |
| __builtin_object_size had a chance to run. */ |
| if (TREE_CODE (orig) != ADDR_EXPR |
| || maybe_ne (off, 0) |
| || (cfun->curr_properties & PROP_objsz)) |
| off.to_shwi (&temp.off); |
| } |
| } |
| } |
| break; |
| case ARRAY_RANGE_REF: |
| case ARRAY_REF: |
| { |
| tree eltype = TREE_TYPE (TREE_TYPE (TREE_OPERAND (ref, 0))); |
| /* Record index as operand. */ |
| temp.op0 = TREE_OPERAND (ref, 1); |
| /* Always record lower bounds and element size. */ |
| temp.op1 = array_ref_low_bound (ref); |
| /* But record element size in units of the type alignment. */ |
| temp.op2 = TREE_OPERAND (ref, 3); |
| temp.align = eltype->type_common.align; |
| if (! temp.op2) |
| temp.op2 = size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (eltype), |
| size_int (TYPE_ALIGN_UNIT (eltype))); |
| if (poly_int_tree_p (temp.op0) |
| && poly_int_tree_p (temp.op1) |
| && TREE_CODE (temp.op2) == INTEGER_CST) |
| { |
| poly_offset_int off = ((wi::to_poly_offset (temp.op0) |
| - wi::to_poly_offset (temp.op1)) |
| * wi::to_offset (temp.op2) |
| * vn_ref_op_align_unit (&temp)); |
| off.to_shwi (&temp.off); |
| } |
| temp.reverse = (AGGREGATE_TYPE_P (TREE_TYPE (TREE_OPERAND (ref, 0))) |
| && TYPE_REVERSE_STORAGE_ORDER |
| (TREE_TYPE (TREE_OPERAND (ref, 0)))); |
| } |
| break; |
| case VAR_DECL: |
| if (DECL_HARD_REGISTER (ref)) |
| { |
| temp.op0 = ref; |
| break; |
| } |
| /* Fallthru. */ |
| case PARM_DECL: |
| case CONST_DECL: |
| case RESULT_DECL: |
| /* Canonicalize decls to MEM[&decl] which is what we end up with |
| when valueizing MEM[ptr] with ptr = &decl. */ |
| temp.opcode = MEM_REF; |
| temp.op0 = build_int_cst (build_pointer_type (TREE_TYPE (ref)), 0); |
| temp.off = 0; |
| result->safe_push (temp); |
| temp.opcode = ADDR_EXPR; |
| temp.op0 = build1 (ADDR_EXPR, TREE_TYPE (temp.op0), ref); |
| temp.type = TREE_TYPE (temp.op0); |
| temp.off = -1; |
| break; |
| case STRING_CST: |
| case INTEGER_CST: |
| case POLY_INT_CST: |
| case COMPLEX_CST: |
| case VECTOR_CST: |
| case REAL_CST: |
| case FIXED_CST: |
| case CONSTRUCTOR: |
| case SSA_NAME: |
| temp.op0 = ref; |
| break; |
| case ADDR_EXPR: |
| if (is_gimple_min_invariant (ref)) |
| { |
| temp.op0 = ref; |
| break; |
| } |
| break; |
| /* These are only interesting for their operands, their |
| existence, and their type.  They will never be the last |
| ref in the chain of references (i.e. they require an |
| operand), so we don't have to put anything |
| for op* as it will be handled by the iteration. */ |
| case REALPART_EXPR: |
| temp.off = 0; |
| break; |
| case VIEW_CONVERT_EXPR: |
| temp.off = 0; |
| temp.reverse = storage_order_barrier_p (ref); |
| break; |
| case IMAGPART_EXPR: |
| /* This is only interesting for its constant offset. */ |
| temp.off = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (ref))); |
| break; |
| default: |
| gcc_unreachable (); |
| } |
| result->safe_push (temp); |
| |
| if (REFERENCE_CLASS_P (ref) |
| || TREE_CODE (ref) == MODIFY_EXPR |
| || TREE_CODE (ref) == WITH_SIZE_EXPR |
| || (TREE_CODE (ref) == ADDR_EXPR |
| && !is_gimple_min_invariant (ref))) |
| ref = TREE_OPERAND (ref, 0); |
| else |
| ref = NULL_TREE; |
| } |
| } |
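| |
| /* As an illustration (not part of the implementation), a reference like |
| a.b[i_1], with a a variable of structure type containing an array |
| member b, is decomposed outermost-first into roughly |
| |
| { ARRAY_REF<i_1, low-bound, scaled-elt-size>, |
| COMPONENT_REF<FIELD_DECL b>, |
| MEM_REF<0>, |
| ADDR_EXPR<&a> } |
| |
| where the trailing MEM_REF/ADDR_EXPR pair comes from canonicalizing |
| the decl as described in the VAR_DECL case above. */ |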
| |
| /* Build an alias-oracle reference abstraction in *REF from the vn_reference |
| operands in OPS, the reference alias set SET, the base alias set BASE_SET |
| and the reference type TYPE.  Return true if something useful was produced. */ |
| |
| bool |
| ao_ref_init_from_vn_reference (ao_ref *ref, |
| alias_set_type set, alias_set_type base_set, |
| tree type, const vec<vn_reference_op_s> &ops) |
| { |
| unsigned i; |
| tree base = NULL_TREE; |
| tree *op0_p = &base; |
| poly_offset_int offset = 0; |
| poly_offset_int max_size; |
| poly_offset_int size = -1; |
| tree size_tree = NULL_TREE; |
| |
| /* We don't handle calls. */ |
| if (!type) |
| return false; |
| |
| machine_mode mode = TYPE_MODE (type); |
| if (mode == BLKmode) |
| size_tree = TYPE_SIZE (type); |
| else |
| size = GET_MODE_BITSIZE (mode); |
| if (size_tree != NULL_TREE |
| && poly_int_tree_p (size_tree)) |
| size = wi::to_poly_offset (size_tree); |
| |
| /* Lower the final access size from the outermost expression. */ |
| const_vn_reference_op_t cst_op = &ops[0]; |
| /* Cast away constness for the sake of the const-unsafe |
| FOR_EACH_VEC_ELT(). */ |
| vn_reference_op_t op = const_cast<vn_reference_op_t>(cst_op); |
| size_tree = NULL_TREE; |
| if (op->opcode == COMPONENT_REF) |
| size_tree = DECL_SIZE (op->op0); |
| else if (op->opcode == BIT_FIELD_REF) |
| size_tree = op->op0; |
| if (size_tree != NULL_TREE |
| && poly_int_tree_p (size_tree) |
| && (!known_size_p (size) |
| || known_lt (wi::to_poly_offset (size_tree), size))) |
| size = wi::to_poly_offset (size_tree); |
| |
| /* Initially, maxsize is the same as the accessed element size. |
| In the following it will only grow (or become -1). */ |
| max_size = size; |
| |
| /* Compute cumulative bit-offset for nested component-refs and array-refs, |
| and find the ultimate containing object. */ |
| FOR_EACH_VEC_ELT (ops, i, op) |
| { |
| switch (op->opcode) |
| { |
| /* These may be in the reference ops, but we cannot do anything |
| sensible with them here. */ |
| case ADDR_EXPR: |
| /* Apart from ADDR_EXPR arguments to MEM_REF. */ |
| if (base != NULL_TREE |
| && TREE_CODE (base) == MEM_REF |
| && op->op0 |
| && DECL_P (TREE_OPERAND (op->op0, 0))) |
| { |
| const_vn_reference_op_t pop = &ops[i-1]; |
| base = TREE_OPERAND (op->op0, 0); |
| if (known_eq (pop->off, -1)) |
| { |
| max_size = -1; |
| offset = 0; |
| } |
| else |
| offset += pop->off * BITS_PER_UNIT; |
| op0_p = NULL; |
| break; |
| } |
| /* Fallthru. */ |
| case CALL_EXPR: |
| return false; |
| |
| /* Record the base objects. */ |
| case MEM_REF: |
| *op0_p = build2 (MEM_REF, op->type, |
| NULL_TREE, op->op0); |
| MR_DEPENDENCE_CLIQUE (*op0_p) = op->clique; |
| MR_DEPENDENCE_BASE (*op0_p) = op->base; |
| op0_p = &TREE_OPERAND (*op0_p, 0); |
| break; |
| |
| case VAR_DECL: |
| case PARM_DECL: |
| case RESULT_DECL: |
| case SSA_NAME: |
| *op0_p = op->op0; |
| op0_p = NULL; |
| break; |
| |
| /* And now the usual component-reference style ops. */ |
| case BIT_FIELD_REF: |
| offset += wi::to_poly_offset (op->op1); |
| break; |
| |
| case COMPONENT_REF: |
| { |
| tree field = op->op0; |
| /* We do not have a complete COMPONENT_REF tree here so we |
| cannot use component_ref_field_offset. Do the interesting |
| parts manually. */ |
| tree this_offset = DECL_FIELD_OFFSET (field); |
| |
| if (op->op1 || !poly_int_tree_p (this_offset)) |
| max_size = -1; |
| else |
| { |
| poly_offset_int woffset = (wi::to_poly_offset (this_offset) |
| << LOG2_BITS_PER_UNIT); |
| woffset += wi::to_offset (DECL_FIELD_BIT_OFFSET (field)); |
| offset += woffset; |
| } |
| break; |
| } |
| |
| case ARRAY_RANGE_REF: |
| case ARRAY_REF: |
| /* We recorded the lower bound and the element size. */ |
| if (!poly_int_tree_p (op->op0) |
| || !poly_int_tree_p (op->op1) |
| || TREE_CODE (op->op2) != INTEGER_CST) |
| max_size = -1; |
| else |
| { |
| poly_offset_int woffset |
| = wi::sext (wi::to_poly_offset (op->op0) |
| - wi::to_poly_offset (op->op1), |
| TYPE_PRECISION (sizetype)); |
| woffset *= wi::to_offset (op->op2) * vn_ref_op_align_unit (op); |
| woffset <<= LOG2_BITS_PER_UNIT; |
| offset += woffset; |
| } |
| break; |
| |
| case REALPART_EXPR: |
| break; |
| |
| case IMAGPART_EXPR: |
| offset += size; |
| break; |
| |
| case VIEW_CONVERT_EXPR: |
| break; |
| |
| case STRING_CST: |
| case INTEGER_CST: |
| case COMPLEX_CST: |
| case VECTOR_CST: |
| case REAL_CST: |
| case CONSTRUCTOR: |
| case CONST_DECL: |
| return false; |
| |
| default: |
| return false; |
| } |
| } |
| |
| if (base == NULL_TREE) |
| return false; |
| |
| ref->ref = NULL_TREE; |
| ref->base = base; |
| ref->ref_alias_set = set; |
| ref->base_alias_set = base_set; |
| /* We discount volatiles from value-numbering elsewhere. */ |
| ref->volatile_p = false; |
| |
| if (!size.to_shwi (&ref->size) || maybe_lt (ref->size, 0)) |
| { |
| ref->offset = 0; |
| ref->size = -1; |
| ref->max_size = -1; |
| return true; |
| } |
| |
| if (!offset.to_shwi (&ref->offset)) |
| { |
| ref->offset = 0; |
| ref->max_size = -1; |
| return true; |
| } |
| |
| if (!max_size.to_shwi (&ref->max_size) || maybe_lt (ref->max_size, 0)) |
| ref->max_size = -1; |
| |
| return true; |
| } |
| |
| /* Copy the operations present in call statement CALL into RESULT, a vector |
| of vn_reference_op_s's. */ |
| |
| static void |
| copy_reference_ops_from_call (gcall *call, |
| vec<vn_reference_op_s> *result) |
| { |
| vn_reference_op_s temp; |
| unsigned i; |
| tree lhs = gimple_call_lhs (call); |
| int lr; |
| |
| /* If two calls have a different non-SSA LHS, their VDEF value numbers |
| should be different.  By adding the LHS here to the vector, we ensure that |
| the hashcode is different, guaranteeing a different value number. */ |
| if (lhs && TREE_CODE (lhs) != SSA_NAME) |
| { |
| memset (&temp, 0, sizeof (temp)); |
| temp.opcode = MODIFY_EXPR; |
| temp.type = TREE_TYPE (lhs); |
| temp.op0 = lhs; |
| temp.off = -1; |
| result->safe_push (temp); |
| } |
| |
| /* Copy the type, opcode, function, static chain and EH region, if any. */ |
| memset (&temp, 0, sizeof (temp)); |
| temp.type = gimple_call_fntype (call); |
| temp.opcode = CALL_EXPR; |
| temp.op0 = gimple_call_fn (call); |
| if (gimple_call_internal_p (call)) |
| temp.clique = gimple_call_internal_fn (call); |
| temp.op1 = gimple_call_chain (call); |
| if (stmt_could_throw_p (cfun, call) && (lr = lookup_stmt_eh_lp (call)) > 0) |
| temp.op2 = size_int (lr); |
| temp.off = -1; |
| result->safe_push (temp); |
| |
| /* Copy the call arguments. As they can be references as well, |
| just chain them together. */ |
| for (i = 0; i < gimple_call_num_args (call); ++i) |
| { |
| tree callarg = gimple_call_arg (call, i); |
| copy_reference_ops_from_ref (callarg, result); |
| } |
| } |
| |
| /* Fold *& at position *I_P in a vn_reference_op_s vector *OPS. Updates |
| *I_P to point to the last element of the replacement. */ |
| static bool |
| vn_reference_fold_indirect (vec<vn_reference_op_s> *ops, |
| unsigned int *i_p) |
| { |
| unsigned int i = *i_p; |
| vn_reference_op_t op = &(*ops)[i]; |
| vn_reference_op_t mem_op = &(*ops)[i - 1]; |
| tree addr_base; |
| poly_int64 addr_offset = 0; |
| |
| /* The only thing we have to do is, from &OBJ.foo.bar, add the offset |
| of .foo.bar to the preceding MEM_REF offset and replace the |
| address with &OBJ. */ |
| addr_base = get_addr_base_and_unit_offset_1 (TREE_OPERAND (op->op0, 0), |
| &addr_offset, vn_valueize); |
| gcc_checking_assert (addr_base && TREE_CODE (addr_base) != MEM_REF); |
| if (addr_base != TREE_OPERAND (op->op0, 0)) |
| { |
| poly_offset_int off |
| = (poly_offset_int::from (wi::to_poly_wide (mem_op->op0), |
| SIGNED) |
| + addr_offset); |
| mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0), off); |
| op->op0 = build_fold_addr_expr (addr_base); |
| if (tree_fits_shwi_p (mem_op->op0)) |
| mem_op->off = tree_to_shwi (mem_op->op0); |
| else |
| mem_op->off = -1; |
| return true; |
| } |
| return false; |
| } |
| |
| /* Fold the address at position *I_P in a vn_reference_op_s vector *OPS |
| by forward-propagating its defining ADDR_EXPR or POINTER_PLUS_EXPR into |
| the preceding MEM_REF.  Updates *I_P to point to the last element of the |
| replacement. */ |
| static bool |
| vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops, |
| unsigned int *i_p) |
| { |
| bool changed = false; |
| vn_reference_op_t op; |
| |
| do |
| { |
| unsigned int i = *i_p; |
| op = &(*ops)[i]; |
| vn_reference_op_t mem_op = &(*ops)[i - 1]; |
| gimple *def_stmt; |
| enum tree_code code; |
| poly_offset_int off; |
| |
| def_stmt = SSA_NAME_DEF_STMT (op->op0); |
| if (!is_gimple_assign (def_stmt)) |
| return changed; |
| |
| code = gimple_assign_rhs_code (def_stmt); |
| if (code != ADDR_EXPR |
| && code != POINTER_PLUS_EXPR) |
| return changed; |
| |
| off = poly_offset_int::from (wi::to_poly_wide (mem_op->op0), SIGNED); |
| |
| /* The only thing we have to do is, from &OBJ.foo.bar, add the offset |
| of .foo.bar to the preceding MEM_REF offset and replace the |
| address with &OBJ. */ |
| if (code == ADDR_EXPR) |
| { |
| tree addr, addr_base; |
| poly_int64 addr_offset; |
| |
| addr = gimple_assign_rhs1 (def_stmt); |
| addr_base = get_addr_base_and_unit_offset_1 (TREE_OPERAND (addr, 0), |
| &addr_offset, |
| vn_valueize); |
| /* If that didn't work because the address isn't invariant, propagate |
| the reference tree from the address operation in case the current |
| dereference has no offset. */ |
| if (!addr_base |
| && *i_p == ops->length () - 1 |
| && known_eq (off, 0) |
| /* This makes us disable this transform for PRE where the |
| reference ops might be also used for code insertion which |
| is invalid. */ |
| && default_vn_walk_kind == VN_WALKREWRITE) |
| { |
| auto_vec<vn_reference_op_s, 32> tem; |
| copy_reference_ops_from_ref (TREE_OPERAND (addr, 0), &tem); |
| /* Make sure to preserve TBAA info. The only objects not |
| wrapped in MEM_REFs that can have their address taken are |
| STRING_CSTs. */ |
| if (tem.length () >= 2 |
| && tem[tem.length () - 2].opcode == MEM_REF) |
| { |
| vn_reference_op_t new_mem_op = &tem[tem.length () - 2]; |
| new_mem_op->op0 |
| = wide_int_to_tree (TREE_TYPE (mem_op->op0), |
| wi::to_poly_wide (new_mem_op->op0)); |
| } |
| else |
| gcc_assert (tem.last ().opcode == STRING_CST); |
| ops->pop (); |
| ops->pop (); |
| ops->safe_splice (tem); |
| --*i_p; |
| return true; |
| } |
| if (!addr_base |
| || TREE_CODE (addr_base) != MEM_REF |
| || (TREE_CODE (TREE_OPERAND (addr_base, 0)) == SSA_NAME |
| && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (TREE_OPERAND (addr_base, |
| 0)))) |
| return changed; |
| |
| off += addr_offset; |
| off += mem_ref_offset (addr_base); |
| op->op0 = TREE_OPERAND (addr_base, 0); |
| } |
| else |
| { |
| tree ptr, ptroff; |
| ptr = gimple_assign_rhs1 (def_stmt); |
| ptroff = gimple_assign_rhs2 (def_stmt); |
| if (TREE_CODE (ptr) != SSA_NAME |
| || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (ptr) |
| /* Make sure to not endlessly recurse. |
| See gcc.dg/tree-ssa/20040408-1.c for an example. Can easily |
| happen when we value-number a PHI to its backedge value. */ |
| || SSA_VAL (ptr) == op->op0 |
| || !poly_int_tree_p (ptroff)) |
| return changed; |
| |
| off += wi::to_poly_offset (ptroff); |
| op->op0 = ptr; |
| } |
| |
| mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0), off); |
| if (tree_fits_shwi_p (mem_op->op0)) |
| mem_op->off = tree_to_shwi (mem_op->op0); |
| else |
| mem_op->off = -1; |
| /* ??? Can end up with endless recursion here!? |
| gcc.c-torture/execute/strcmp-1.c */ |
| if (TREE_CODE (op->op0) == SSA_NAME) |
| op->op0 = SSA_VAL (op->op0); |
| if (TREE_CODE (op->op0) != SSA_NAME) |
| op->opcode = TREE_CODE (op->op0); |
| |
| changed = true; |
| } |
| /* Tail-recurse. */ |
| while (TREE_CODE (op->op0) == SSA_NAME); |
| |
| /* Fold a remaining *&. */ |
| if (TREE_CODE (op->op0) == ADDR_EXPR) |
| vn_reference_fold_indirect (ops, i_p); |
| |
| return changed; |
| } |
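| |
| /* Example of the transform above (illustrative only): for |
| |
| q_2 = p_1 + 4; |
| ... = MEM[q_2]; |
| |
| the POINTER_PLUS_EXPR case folds the constant 4 into the preceding |
| MEM_REF offset and replaces the address operand q_2 by p_1, so the |
| lookup effectively sees MEM[p_1 + 4] and can match other accesses |
| based on p_1. */ |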
| |
| /* Optimize the reference REF to a constant if possible or return |
| NULL_TREE if not. */ |
| |
| tree |
| fully_constant_vn_reference_p (vn_reference_t ref) |
| { |
| vec<vn_reference_op_s> operands = ref->operands; |
| vn_reference_op_t op; |
| |
| /* Try to simplify the translated expression if it is |
| a call to a builtin function with at most two arguments. */ |
| op = &operands[0]; |
| if (op->opcode == CALL_EXPR |
| && (!op->op0 |
| || (TREE_CODE (op->op0) == ADDR_EXPR |
| && TREE_CODE (TREE_OPERAND (op->op0, 0)) == FUNCTION_DECL |
| && fndecl_built_in_p (TREE_OPERAND (op->op0, 0), |
| BUILT_IN_NORMAL))) |
| && operands.length () >= 2 |
| && operands.length () <= 3) |
| { |
| vn_reference_op_t arg0, arg1 = NULL; |
| bool anyconst = false; |
| arg0 = &operands[1]; |
| if (operands.length () > 2) |
| arg1 = &operands[2]; |
| if (TREE_CODE_CLASS (arg0->opcode) == tcc_constant |
| || (arg0->opcode == ADDR_EXPR |
| && is_gimple_min_invariant (arg0->op0))) |
| anyconst = true; |
| if (arg1 |
| && (TREE_CODE_CLASS (arg1->opcode) == tcc_constant |
| || (arg1->opcode == ADDR_EXPR |
| && is_gimple_min_invariant (arg1->op0)))) |
| anyconst = true; |
| if (anyconst) |
| { |
| combined_fn fn; |
| if (op->op0) |
| fn = as_combined_fn (DECL_FUNCTION_CODE |
| (TREE_OPERAND (op->op0, 0))); |
| else |
| fn = as_combined_fn ((internal_fn) op->clique); |
| tree folded; |
| if (arg1) |
| folded = fold_const_call (fn, ref->type, arg0->op0, arg1->op0); |
| else |
| folded = fold_const_call (fn, ref->type, arg0->op0); |
| if (folded |
| && is_gimple_min_invariant (folded)) |
| return folded; |
| } |
| } |
| |
| /* Simplify reads from constants or constant initializers. */ |
| else if (BITS_PER_UNIT == 8 |
| && ref->type |
| && COMPLETE_TYPE_P (ref->type) |
| && is_gimple_reg_type (ref->type)) |
| { |
| poly_int64 off = 0; |
| HOST_WIDE_INT size; |
| if (INTEGRAL_TYPE_P (ref->type)) |
| size = TYPE_PRECISION (ref->type); |
| else if (tree_fits_shwi_p (TYPE_SIZE (ref->type))) |
| size = tree_to_shwi (TYPE_SIZE (ref->type)); |
| else |
| return NULL_TREE; |
| if (size % BITS_PER_UNIT != 0 |
| || size > MAX_BITSIZE_MODE_ANY_MODE) |
| return NULL_TREE; |
| size /= BITS_PER_UNIT; |
| unsigned i; |
| for (i = 0; i < operands.length (); ++i) |
| { |
| if (TREE_CODE_CLASS (operands[i].opcode) == tcc_constant) |
| { |
| ++i; |
| break; |
| } |
| if (known_eq (operands[i].off, -1)) |
| return NULL_TREE; |
| off += operands[i].off; |
| if (operands[i].opcode == MEM_REF) |
| { |
| ++i; |
| break; |
| } |
| } |
| vn_reference_op_t base = &operands[--i]; |
| tree ctor = error_mark_node; |
| tree decl = NULL_TREE; |
| if (TREE_CODE_CLASS (base->opcode) == tcc_constant) |
| ctor = base->op0; |
| else if (base->opcode == MEM_REF |
| && base[1].opcode == ADDR_EXPR |
| && (TREE_CODE (TREE_OPERAND (base[1].op0, 0)) == VAR_DECL |
| || TREE_CODE (TREE_OPERAND (base[1].op0, 0)) == CONST_DECL |
| || TREE_CODE (TREE_OPERAND (base[1].op0, 0)) == STRING_CST)) |
| { |
| decl = TREE_OPERAND (base[1].op0, 0); |
| if (TREE_CODE (decl) == STRING_CST) |
| ctor = decl; |
| else |
| ctor = ctor_for_folding (decl); |
| } |
| if (ctor == NULL_TREE) |
| return build_zero_cst (ref->type); |
| else if (ctor != error_mark_node) |
| { |
| HOST_WIDE_INT const_off; |
| if (decl) |
| { |
| tree res = fold_ctor_reference (ref->type, ctor, |
| off * BITS_PER_UNIT, |
| size * BITS_PER_UNIT, decl); |
| if (res) |
| { |
| STRIP_USELESS_TYPE_CONVERSION (res); |
| if (is_gimple_min_invariant (res)) |
| return res; |
| } |
| } |
| else if (off.is_constant (&const_off)) |
| { |
| unsigned char buf[MAX_BITSIZE_MODE_ANY_MODE / BITS_PER_UNIT]; |
| int len = native_encode_expr (ctor, buf, size, const_off); |
| if (len > 0) |
| return native_interpret_expr (ref->type, buf, len); |
| } |
| } |
| } |
| |
| return NULL_TREE; |
| } |
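| |
| /* For example (illustration only, names made up), a read of tab[2] |
| from |
| |
| static const int tab[4] = { 1, 2, 3, 4 }; |
| |
| is decomposed into operands with a constant offset from MEM[&tab]; |
| the code above then uses ctor_for_folding/fold_ctor_reference (or |
| native_encode_expr/native_interpret_expr for the byte-wise path) to |
| return the constant 3. */ |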
| |
| /* Return true if OPS contain a storage order barrier. */ |
| |
| static bool |
| contains_storage_order_barrier_p (vec<vn_reference_op_s> ops) |
| { |
| vn_reference_op_t op; |
| unsigned i; |
| |
| FOR_EACH_VEC_ELT (ops, i, op) |
| if (op->opcode == VIEW_CONVERT_EXPR && op->reverse) |
| return true; |
| |
| return false; |
| } |
| |
| /* Return true if OPS represent an access with reverse storage order. */ |
| |
| static bool |
| reverse_storage_order_for_component_p (vec<vn_reference_op_s> ops) |
| { |
| unsigned i = 0; |
| if (ops[i].opcode == REALPART_EXPR || ops[i].opcode == IMAGPART_EXPR) |
| ++i; |
| switch (ops[i].opcode) |
| { |
| case ARRAY_REF: |
| case COMPONENT_REF: |
| case BIT_FIELD_REF: |
| case MEM_REF: |
| return ops[i].reverse; |
| default: |
| return false; |
| } |
| } |
| |
| /* Transform any SSA_NAMEs in the vector of vn_reference_op_s |
| structures *ORIG into their value numbers.  This is done in-place. |
| *VALUEIZED_ANYTHING will specify whether any operands were |
| valueized. */ |
| |
| static void |
| valueize_refs_1 (vec<vn_reference_op_s> *orig, bool *valueized_anything, |
| bool with_avail = false) |
| { |
| vn_reference_op_t vro; |
| unsigned int i; |
| |
| *valueized_anything = false; |
| |
| FOR_EACH_VEC_ELT (*orig, i, vro) |
| { |
| if (vro->opcode == SSA_NAME |
| || (vro->op0 && TREE_CODE (vro->op0) == SSA_NAME)) |
| { |
| tree tem = with_avail ? vn_valueize (vro->op0) : SSA_VAL (vro->op0); |
| if (tem != vro->op0) |
| { |
| *valueized_anything = true; |
| vro->op0 = tem; |
| } |
| /* If it transforms from an SSA_NAME to a constant, update |
| the opcode. */ |
| if (TREE_CODE (vro->op0) != SSA_NAME && vro->opcode == SSA_NAME) |
| vro->opcode = TREE_CODE (vro->op0); |
| } |
| if (vro->op1 && TREE_CODE (vro->op1) == SSA_NAME) |
| { |
| tree tem = with_avail ? vn_valueize (vro->op1) : SSA_VAL (vro->op1); |
| if (tem != vro->op1) |
| { |
| *valueized_anything = true; |
| vro->op1 = tem; |
| } |
| } |
| if (vro->op2 && TREE_CODE (vro->op2) == SSA_NAME) |
| { |
| tree tem = with_avail ? vn_valueize (vro->op2) : SSA_VAL (vro->op2); |
| if (tem != vro->op2) |
| { |
| *valueized_anything = true; |
| vro->op2 = tem; |
| } |
| } |
| /* If it transforms from an SSA_NAME to an address, fold with |
| a preceding indirect reference. */ |
| if (i > 0 |
| && vro->op0 |
| && TREE_CODE (vro->op0) == ADDR_EXPR |
| && (*orig)[i - 1].opcode == MEM_REF) |
| { |
| if (vn_reference_fold_indirect (orig, &i)) |
| *valueized_anything = true; |
| } |
| else if (i > 0 |
| && vro->opcode == SSA_NAME |
| && (*orig)[i - 1].opcode == MEM_REF) |
| { |
| if (vn_reference_maybe_forwprop_address (orig, &i)) |
| *valueized_anything = true; |
| } |
| /* If it transforms a non-constant ARRAY_REF into a constant |
| one, adjust the constant offset. */ |
| else if (vro->opcode == ARRAY_REF |
| && known_eq (vro->off, -1) |
| && poly_int_tree_p (vro->op0) |
| && poly_int_tree_p (vro->op1) |
| && TREE_CODE (vro->op2) == INTEGER_CST) |
| { |
| poly_offset_int off = ((wi::to_poly_offset (vro->op0) |
| - wi::to_poly_offset (vro->op1)) |
| * wi::to_offset (vro->op2) |
| * vn_ref_op_align_unit (vro)); |
| off.to_shwi (&vro->off); |
| } |
| } |
| } |
| |
| static void |
| valueize_refs (vec<vn_reference_op_s> *orig) |
| { |
| bool tem; |
| valueize_refs_1 (orig, &tem); |
| } |
| |
| static vec<vn_reference_op_s> shared_lookup_references; |
| |
| /* Create a vector of vn_reference_op_s structures from REF, a |
| REFERENCE_CLASS_P tree. The vector is shared among all callers of |
| this function. *VALUEIZED_ANYTHING will specify whether any |
| operands were valueized. */ |
| |
| static vec<vn_reference_op_s> |
| valueize_shared_reference_ops_from_ref (tree ref, bool *valueized_anything) |
| { |
| if (!ref) |
| return vNULL; |
| shared_lookup_references.truncate (0); |
| copy_reference_ops_from_ref (ref, &shared_lookup_references); |
| valueize_refs_1 (&shared_lookup_references, valueized_anything); |
| return shared_lookup_references; |
| } |
| |
| /* Create a vector of vn_reference_op_s structures from CALL, a |
| call statement. The vector is shared among all callers of |
| this function. */ |
| |
| static vec<vn_reference_op_s> |
| valueize_shared_reference_ops_from_call (gcall *call) |
| { |
| if (!call) |
| return vNULL; |
| shared_lookup_references.truncate (0); |
| copy_reference_ops_from_call (call, &shared_lookup_references); |
| valueize_refs (&shared_lookup_references); |
| return shared_lookup_references; |
| } |
| |
| /* Look up an SCCVN reference operation VR in the current hash table. |
| Returns the resulting value number if it exists in the hash table, |
| NULL_TREE otherwise. VNRESULT will be filled in with the actual |
| vn_reference_t stored in the hashtable if something is found. */ |
| |
| static tree |
| vn_reference_lookup_1 (vn_reference_t vr, vn_reference_t *vnresult) |
| { |
| vn_reference_s **slot; |
| hashval_t hash; |
| |
| hash = vr->hashcode; |
| slot = valid_info->references->find_slot_with_hash (vr, hash, NO_INSERT); |
| if (slot) |
| { |
| if (vnresult) |
| *vnresult = (vn_reference_t)*slot; |
| return ((vn_reference_t)*slot)->result; |
| } |
| |
| return NULL_TREE; |
| } |
| |
| |
| /* Partial definition tracking support. */ |
| |
| struct pd_range |
| { |
| HOST_WIDE_INT offset; |
| HOST_WIDE_INT size; |
| }; |
| |
| struct pd_data |
| { |
| tree rhs; |
| HOST_WIDE_INT offset; |
| HOST_WIDE_INT size; |
| }; |
| |
| /* Context for alias walking. */ |
| |
| struct vn_walk_cb_data |
| { |
| vn_walk_cb_data (vn_reference_t vr_, tree orig_ref_, tree *last_vuse_ptr_, |
| vn_lookup_kind vn_walk_kind_, bool tbaa_p_, tree mask_) |
| : vr (vr_), last_vuse_ptr (last_vuse_ptr_), last_vuse (NULL_TREE), |
| mask (mask_), masked_result (NULL_TREE), vn_walk_kind (vn_walk_kind_), |
| tbaa_p (tbaa_p_), saved_operands (vNULL), first_set (-2), |
| first_base_set (-2), known_ranges (NULL) |
| { |
| if (!last_vuse_ptr) |
| last_vuse_ptr = &last_vuse; |
| ao_ref_init (&orig_ref, orig_ref_); |
| if (mask) |
| { |
| wide_int w = wi::to_wide (mask); |
| unsigned int pos = 0, prec = w.get_precision (); |
| pd_data pd; |
| pd.rhs = build_constructor (NULL_TREE, NULL); |
| /* When a bitwise AND with a constant is done on a memory load, |
| we don't really need all the bits to be defined or defined |
| to constants; we don't care what is in the positions |
| corresponding to zero bits in the mask. |
| So, push the ranges of those zero bits in the mask as artificial |
| zero stores and let the partial def handling code do the |
| rest. */ |
| while (pos < prec) |
| { |
| int tz = wi::ctz (w); |
| if (pos + tz > prec) |
| tz = prec - pos; |
| if (tz) |
| { |
| if (BYTES_BIG_ENDIAN) |
| pd.offset = prec - pos - tz; |
| else |
| pd.offset = pos; |
| pd.size = tz; |
| void *r = push_partial_def (pd, 0, 0, 0, prec); |
| gcc_assert (r == NULL_TREE); |
| } |
| pos += tz; |
| if (pos == prec) |
| break; |
| w = wi::lrshift (w, tz); |
| tz = wi::ctz (wi::bit_not (w)); |
| if (pos + tz > prec) |
| tz = prec - pos; |
| pos += tz; |
| w = wi::lrshift (w, tz); |
| } |
| } |
| } |
| ~vn_walk_cb_data (); |
| void *finish (alias_set_type, alias_set_type, tree); |
| void *push_partial_def (pd_data pd, |
| alias_set_type, alias_set_type, HOST_WIDE_INT, |
| HOST_WIDE_INT); |
| |
| vn_reference_t vr; |
| ao_ref orig_ref; |
| tree *last_vuse_ptr; |
| tree last_vuse; |
| tree mask; |
| tree masked_result; |
| vn_lookup_kind vn_walk_kind; |
| bool tbaa_p; |
| vec<vn_reference_op_s> saved_operands; |
| |
| /* The partial defs we have collected along the walk. */ |
| auto_vec<pd_data, 2> partial_defs; |
| /* The first def's range, to avoid splay tree setup in most cases. */ |
| pd_range first_range; |
| alias_set_type first_set; |
| alias_set_type first_base_set; |
| splay_tree known_ranges; |
| obstack ranges_obstack; |
| }; |
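| |
| /* For instance (illustration only), for a lookup of _1 & 0xff00 on a |
| little-endian target the constructor above pushes artificial zero |
| partial defs for the zero bits of the mask (bits [0, 8) and |
| [16, precision)), so only the byte selected by the mask has to be |
| determined from actual stores. */ |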
| |
| vn_walk_cb_data::~vn_walk_cb_data () |
| { |
| if (known_ranges) |
| { |
| splay_tree_delete (known_ranges); |
| obstack_free (&ranges_obstack, NULL); |
| } |
| saved_operands.release (); |
| } |
| |
| void * |
| vn_walk_cb_data::finish (alias_set_type set, alias_set_type base_set, tree val) |
| { |
| if (first_set != -2) |
| { |
| set = first_set; |
| base_set = first_base_set; |
| } |
| if (mask) |
| { |
| masked_result = val; |
| return (void *) -1; |
| } |
| vec<vn_reference_op_s> &operands |
| = saved_operands.exists () ? saved_operands : vr->operands; |
| return vn_reference_lookup_or_insert_for_pieces (last_vuse, set, base_set, |
| vr->type, operands, val); |
| } |
| |
| /* pd_range splay-tree helpers. */ |
| |
| static int |
| pd_range_compare (splay_tree_key offset1p, splay_tree_key offset2p) |
| { |
| HOST_WIDE_INT offset1 = *(HOST_WIDE_INT *)offset1p; |
| HOST_WIDE_INT offset2 = *(HOST_WIDE_INT *)offset2p; |
| if (offset1 < offset2) |
| return -1; |
| else if (offset1 > offset2) |
| return 1; |
| return 0; |
| } |
| |
| static void * |
| pd_tree_alloc (int size, void *data_) |
| { |
| vn_walk_cb_data *data = (vn_walk_cb_data *)data_; |
| return obstack_alloc (&data->ranges_obstack, size); |
| } |
| |
| static void |
| pd_tree_dealloc (void *, void *) |
| { |
| } |
| |
| /* Push PD to the vector of partial definitions, returning a |
| value when we are ready to combine things with the SET, BASE_SET, |
| OFFSETI and MAXSIZEI of the lookup, NULL when we want to continue |
| looking for partial defs or -1 on failure. */ |
| |
| void * |
| vn_walk_cb_data::push_partial_def (pd_data pd, |
| alias_set_type set, alias_set_type base_set, |
| HOST_WIDE_INT offseti, |
| HOST_WIDE_INT maxsizei) |
| { |
| const HOST_WIDE_INT bufsize = 64; |
| /* We're using a fixed buffer for encoding so fail early if the object |
| we want to interpret is bigger. */ |
| if (maxsizei > bufsize * BITS_PER_UNIT |
| || CHAR_BIT != 8 |
| || BITS_PER_UNIT != 8 |
| /* Not prepared to handle PDP endian. */ |
| || BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN) |
| return (void *)-1; |
| |
| /* Turn too large constant stores into non-constant stores. */ |
| if (CONSTANT_CLASS_P (pd.rhs) && pd.size > bufsize * BITS_PER_UNIT) |
| pd.rhs = error_mark_node; |
| |
| /* And for non-constant or CONSTRUCTOR stores shrink them to only keep at |
| most a partial byte before and/or after the region. */ |
| if (!CONSTANT_CLASS_P (pd.rhs)) |
| { |
| if (pd.offset < offseti) |
| { |
| HOST_WIDE_INT o = ROUND_DOWN (offseti - pd.offset, BITS_PER_UNIT); |
| gcc_assert (pd.size > o); |
| pd.size -= o; |
| pd.offset += o; |
| } |
| if (pd.size > maxsizei) |
| pd.size = maxsizei + ((pd.size - maxsizei) % BITS_PER_UNIT); |
| } |
| |
| pd.offset -= offseti; |
| |
| bool pd_constant_p = (TREE_CODE (pd.rhs) == CONSTRUCTOR |
| || CONSTANT_CLASS_P (pd.rhs)); |
| if (partial_defs.is_empty ()) |
| { |
| /* If we get a clobber upfront, fail. */ |
| if (TREE_CLOBBER_P (pd.rhs)) |
| return (void *)-1; |
| if (!pd_constant_p) |
| return (void *)-1; |
| partial_defs.safe_push (pd); |
| first_range.offset = pd.offset; |
| first_range.size = pd.size; |
| first_set = set; |
| first_base_set = base_set; |
| last_vuse_ptr = NULL; |
| /* Continue looking for partial defs. */ |
| return NULL; |
| } |
| |
| if (!known_ranges) |
| { |
| /* ??? Optimize the case where the 2nd partial def completes things. */ |
| gcc_obstack_init (&ranges_obstack); |
| known_ranges = splay_tree_new_with_allocator (pd_range_compare, 0, 0, |
| pd_tree_alloc, |
| pd_tree_dealloc, this); |
| splay_tree_insert (known_ranges, |
| (splay_tree_key)&first_range.offset, |
| (splay_tree_value)&first_range); |
| } |
| |
| pd_range newr = { pd.offset, pd.size }; |
| splay_tree_node n; |
| pd_range *r; |
| /* Lookup the predecessor of offset + 1 and see if we need to merge. */ |
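| /* Looking up the predecessor of offset + 1 and testing with size + 1
| below treats adjacent ranges as overlapping, so e.g. an existing
| range covering bits [0, 32) and a new range starting at bit 32 get
| merged into one. */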
| HOST_WIDE_INT loffset = newr.offset + 1; |
| if ((n = splay_tree_predecessor (known_ranges, (splay_tree_key)&loffset)) |
| && ((r = (pd_range *)n->value), true) |
| && ranges_known_overlap_p (r->offset, r->size + 1, |
| newr.offset, newr.size)) |
| { |
| /* Ignore partial defs already covered. This also drops clobbers
| that are completely shadowed by already recorded defs instead of
| failing on them. */
| if (known_subrange_p (newr.offset, newr.size, r->offset, r->size)) |
| return NULL; |
| r->size = MAX (r->offset + r->size, newr.offset + newr.size) - r->offset; |
| } |
| else |
| { |
| /* newr.offset wasn't covered yet, insert the range. */ |
| r = XOBNEW (&ranges_obstack, pd_range); |
| *r = newr; |
| splay_tree_insert (known_ranges, (splay_tree_key)&r->offset, |
| (splay_tree_value)r); |
| } |
| /* Merge r, which now contains newr and is a member of the splay tree,
| with adjacent overlapping ranges. */
| pd_range *rafter; |
| while ((n = splay_tree_successor (known_ranges, (splay_tree_key)&r->offset)) |
| && ((rafter = (pd_range *)n->value), true) |
| && ranges_known_overlap_p (r->offset, r->size + 1, |
| rafter->offset, rafter->size)) |
| { |
| r->size = MAX (r->offset + r->size, |
| rafter->offset + rafter->size) - r->offset; |
| splay_tree_remove (known_ranges, (splay_tree_key)&rafter->offset); |
| } |
| /* If we get a clobber, fail. */ |
| if (TREE_CLOBBER_P (pd.rhs)) |
| return (void *)-1; |
| /* Non-constants are OK as long as they are shadowed by a constant. */ |
| if (!pd_constant_p) |
| return (void *)-1; |
| partial_defs.safe_push (pd); |
| |
| /* Now we have merged newr into the range tree. When we have covered
| [0, maxsizei] (the extent of the read relative to OFFSETI) the tree
| will contain exactly one node which has the desired properties and
| it will be 'r'. */
| if (!known_subrange_p (0, maxsizei, r->offset, r->size)) |
| /* Continue looking for partial defs. */ |
| return NULL; |
| |
| /* Now simply native encode all partial defs in reverse order. */ |
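| /* Popping the vector applies the defs in reverse discovery order, so
| defs closer to the read are encoded last and overwrite bytes written
| for defs found further away. */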
| unsigned ndefs = partial_defs.length (); |
| /* We support up to 512-bit values (for V8DFmode). */ |
| unsigned char buffer[bufsize + 1]; |
| unsigned char this_buffer[bufsize + 1]; |
| int len; |
| |
| memset (buffer, 0, bufsize + 1); |
| unsigned needed_len = ROUND_UP (maxsizei, BITS_PER_UNIT) / BITS_PER_UNIT; |
| while (!partial_defs.is_empty ()) |
| { |
| pd_data pd = partial_defs.pop (); |
| unsigned int amnt; |
| if (TREE_CODE (pd.rhs) == CONSTRUCTOR) |
| { |
| /* Empty CONSTRUCTOR. */ |
| if (pd.size >= needed_len * BITS_PER_UNIT) |
| len = needed_len; |
| else |
| len = ROUND_UP (pd.size, BITS_PER_UNIT) / BITS_PER_UNIT; |
| memset (this_buffer, 0, len); |
| } |
| else |
| { |
| len = native_encode_expr (pd.rhs, this_buffer, bufsize, |
| MAX (0, -pd.offset) / BITS_PER_UNIT); |
| if (len <= 0 |
| || len < (ROUND_UP (pd.size, BITS_PER_UNIT) / BITS_PER_UNIT |
| - MAX (0, -pd.offset) / BITS_PER_UNIT)) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Failed to encode %u " |
| "partial definitions\n", ndefs); |
| return (void *)-1; |
| } |
| } |
| |
| unsigned char *p = buffer; |
| HOST_WIDE_INT size = pd.size; |
| if (pd.offset < 0) |
| size -= ROUND_DOWN (-pd.offset, BITS_PER_UNIT); |
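| /* Clear the scratch byte after the encoded bytes; the sub-byte shifts
| below can carry bits into it and it is read when merging partial
| bytes. */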
| this_buffer[len] = 0; |
| if (BYTES_BIG_ENDIAN) |
| { |
| /* LSB of this_buffer[len - 1] byte should be at |
| pd.offset + pd.size - 1 bits in buffer. */ |
| amnt = ((unsigned HOST_WIDE_INT) pd.offset |
| + pd.size) % BITS_PER_UNIT; |
| if (amnt) |
| shift_bytes_in_array_right (this_buffer, len + 1, amnt); |
| unsigned char *q = this_buffer; |
| unsigned int off = 0; |
| if (pd.offset >= 0) |
| { |
| unsigned int msk; |
| off = pd.offset / BITS_PER_UNIT; |
| gcc_assert (off < needed_len); |
| p = buffer + off; |
| if (size <= amnt) |
| { |
| msk = ((1 << size) - 1) << (BITS_PER_UNIT - amnt); |
| *p = (*p & ~msk) | (this_buffer[len] & msk); |
| size = 0; |
| } |
| else |
| { |
| if (TREE_CODE (pd.rhs) != CONSTRUCTOR) |
| q = (this_buffer + len |
| - (ROUND_UP (size - amnt, BITS_PER_UNIT) |
| / BITS_PER_UNIT)); |
| if (pd.offset % BITS_PER_UNIT) |
| { |
| msk = -1U << (BITS_PER_UNIT |
| - (pd.offset % BITS_PER_UNIT)); |
| *p = (*p & msk) | (*q & ~msk); |
| p++; |
| q++; |
| off++; |
| size -= BITS_PER_UNIT - (pd.offset % BITS_PER_UNIT); |
| gcc_assert (size >= 0); |
| } |
| } |
| } |
| else if (TREE_CODE (pd.rhs) != CONSTRUCTOR) |
| { |
| q = (this_buffer + len |
| - (ROUND_UP (size - amnt, BITS_PER_UNIT) |
| / BITS_PER_UNIT)); |
| if (pd.offset % BITS_PER_UNIT) |
| { |
| q++; |
| size -= BITS_PER_UNIT - ((unsigned HOST_WIDE_INT) pd.offset |
| % BITS_PER_UNIT); |
| gcc_assert (size >= 0); |
| } |
| } |
| if ((unsigned HOST_WIDE_INT) size / BITS_PER_UNIT + off |
| > needed_len) |
| size = (needed_len - off) * BITS_PER_UNIT; |
| memcpy (p, q, size / BITS_PER_UNIT); |
| if (size % BITS_PER_UNIT) |
| { |
| unsigned int msk |
| = -1U << (BITS_PER_UNIT - (size % BITS_PER_UNIT)); |
| p += size / BITS_PER_UNIT; |
| q += size / BITS_PER_UNIT; |
| *p = (*q & msk) | (*p & ~msk); |
| } |
| } |
| else |
| { |
| if (pd.offset >= 0) |
| { |
| /* LSB of this_buffer[0] byte should be at pd.offset bits |
| in buffer. */ |
| unsigned int msk; |
| size = MIN (size, (HOST_WIDE_INT) needed_len * BITS_PER_UNIT); |
| amnt = pd.offset % BITS_PER_UNIT; |
| if (amnt) |
| shift_bytes_in_array_left (this_buffer, len + 1, amnt); |
| unsigned int off = pd.offset / BITS_PER_UNIT; |
| gcc_assert (off < needed_len); |
| size = MIN (size, |
| (HOST_WIDE_INT) (needed_len - off) * BITS_PER_UNIT); |
| p = buffer + off; |
| if (amnt + size < BITS_PER_UNIT) |
| { |
| /* Low amnt bits come from *p, then size bits |
| from this_buffer[0] and the remaining again from |
| *p. */ |
| msk = ((1 << size) - 1) << amnt; |
| *p = (*p & ~msk) | (this_buffer[0] & msk); |
| size = 0; |
| } |
| else if (amnt) |
| { |
| msk = -1U << amnt; |
| *p = (*p & ~msk) | (this_buffer[0] & msk); |
| p++; |
| size -= (BITS_PER_UNIT - amnt); |
| } |
| } |
| else |
| { |
| amnt = (unsigned HOST_WIDE_INT) pd.offset % BITS_PER_UNIT; |
| if (amnt) |
| size -= BITS_PER_UNIT - amnt; |
| size = MIN (size, (HOST_WIDE_INT) needed_len * BITS_PER_UNIT); |
| if (amnt) |
| shift_bytes_in_array_left (this_buffer, len + 1, amnt); |
| } |
| memcpy (p, this_buffer + (amnt != 0), size / BITS_PER_UNIT); |
| p += size / BITS_PER_UNIT; |
| if (size % BITS_PER_UNIT) |
| { |
| unsigned int msk = -1U << (size % BITS_PER_UNIT); |
| *p = (this_buffer[(amnt != 0) + size / BITS_PER_UNIT] |
| & ~msk) | (*p & msk); |
| } |
| } |
| } |
| |
| tree type = vr->type; |
| /* Make sure to interpret in a type that has a range covering the whole |
| access size. */ |
| if (INTEGRAL_TYPE_P (vr->type) && maxsizei != TYPE_PRECISION (vr->type)) |
| type = build_nonstandard_integer_type (maxsizei, TYPE_UNSIGNED (type)); |
| tree val; |
| if (BYTES_BIG_ENDIAN) |
| { |
| unsigned sz = needed_len; |
| if (maxsizei % BITS_PER_UNIT) |
| shift_bytes_in_array_right (buffer, needed_len, |
| BITS_PER_UNIT |
| - (maxsizei % BITS_PER_UNIT)); |
| if (INTEGRAL_TYPE_P (type)) |
| sz = GET_MODE_SIZE (SCALAR_INT_TYPE_MODE (type)); |
| if (sz > needed_len) |
| { |
| memcpy (this_buffer + (sz - needed_len), buffer, needed_len); |
| val = native_interpret_expr (type, this_buffer, sz); |
| } |
| else |
| val = native_interpret_expr (type, buffer, needed_len); |
| } |
| else |
| val = native_interpret_expr (type, buffer, bufsize); |
| /* If we chop off bits because the type's precision doesn't match the
| memory access size, this is ok when optimizing reads but not when
| called from the DSE code during elimination. */
| if (val && type != vr->type) |
| { |
| if (! int_fits_type_p (val, vr->type)) |
| val = NULL_TREE; |
| else |
| val = fold_convert (vr->type, val); |
| } |
| |
| if (val) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, |
| "Successfully combined %u partial definitions\n", ndefs); |
| /* We are using the alias-set of the first store we encounter which |
| should be appropriate here. */ |
| return finish (first_set, first_base_set, val); |
| } |
| else |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, |
| "Failed to interpret %u encoded partial definitions\n", ndefs); |
| return (void *)-1; |
| } |
| } |
| |
| /* Callback for walk_non_aliased_vuses. Adjusts the vn_reference_t
| in DATA_ with the current VUSE and performs the expression lookup. */
| |
| static void * |
| vn_reference_lookup_2 (ao_ref *op ATTRIBUTE_UNUSED, tree vuse, void *data_) |
| { |
| vn_walk_cb_data *data = (vn_walk_cb_data *)data_; |
| vn_reference_t vr = data->vr; |
| vn_reference_s **slot; |
| hashval_t hash; |
| |
| /* If we have partial definitions recorded we have to go through |
| vn_reference_lookup_3. */ |
| if (!data->partial_defs.is_empty ()) |
| return NULL; |
| |
| if (data->last_vuse_ptr) |
| { |
| *data->last_vuse_ptr = vuse; |
| data->last_vuse = vuse; |
| } |
| |
| /* Fixup vuse and hash. */ |
| if (vr->vuse) |
| vr->hashcode = vr->hashcode - SSA_NAME_VERSION (vr->vuse); |
| vr->vuse = vuse_ssa_val (vuse); |
| if (vr->vuse) |
| vr->hashcode = vr->hashcode + SSA_NAME_VERSION (vr->vuse); |
| |
| hash = vr->hashcode; |
| slot = valid_info->references->find_slot_with_hash (vr, hash, NO_INSERT); |
| if (slot) |
| { |
| if ((*slot)->result && data->saved_operands.exists ()) |
| return data->finish (vr->set, vr->base_set, (*slot)->result); |
| return *slot; |
| } |
| |
| return NULL; |
| } |
| |
| /* Look up an existing or insert a new vn_reference entry into the
| value table for the VUSE, SET, BASE_SET, TYPE, OPERANDS reference
| which has the value VALUE, where VALUE is either a constant or an
| SSA name. */
| |
| static vn_reference_t |
| vn_reference_lookup_or_insert_for_pieces (tree vuse, |
| alias_set_type set, |
| alias_set_type base_set, |
| tree type, |
| vec<vn_reference_op_s, |
| va_heap> operands, |
| tree value) |
| { |
| vn_reference_s vr1; |
| vn_reference_t result; |
| unsigned value_id; |
| vr1.vuse = vuse ? SSA_VAL (vuse) : NULL_TREE; |
| vr1.operands = operands; |
| vr1.type = type; |
| vr1.set = set; |
| vr1.base_set = base_set; |
| vr1.hashcode = vn_reference_compute_hash (&vr1); |
| if (vn_reference_lookup_1 (&vr1, &result)) |
| return result; |
| if (TREE_CODE (value) == SSA_NAME) |
| value_id = VN_INFO (value)->value_id; |
| else |
| value_id = get_or_alloc_constant_value_id (value); |
| return vn_reference_insert_pieces (vuse, set, base_set, type, |
| operands.copy (), value, value_id); |
| } |
| |
| /* Return a value-number for the expression described by RES_OP, either
| by looking up an existing value-number for the possibly simplified
| result or by inserting the operation if INSERT is true. If SIMPLIFY
| is false, return a value number for the unsimplified expression. */
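| /* For example, a request for x_1 + 0 simplifies to x_1 and yields its
| value number directly, while an expression that does not simplify
| and is not in the tables gets a new SSA name inserted for it when
| INSERT is true. */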
| |
| static tree |
| vn_nary_build_or_lookup_1 (gimple_match_op *res_op, bool insert, |
| bool simplify) |
| { |
| tree result = NULL_TREE; |
| /* We will be creating a value number for the expression RES_OP
| describes. So first simplify and look up this expression to see if
| it is already available. */
| /* For simplification valueize. */ |
| unsigned i = 0; |
| if (simplify) |
| for (i = 0; i < res_op->num_ops; ++i) |
| if (TREE_CODE (res_op->ops[i]) == SSA_NAME) |
| { |
| tree tem = vn_valueize (res_op->ops[i]); |
| if (!tem) |
| break; |
| res_op->ops[i] = tem; |
| } |
| /* If valueization of an operand fails (it is not available), skip |
| simplification. */ |
| bool res = false; |
| if (i == res_op->num_ops) |
| { |
| mprts_hook = vn_lookup_simplify_result; |
| res = res_op->resimplify (NULL, vn_valueize); |
| mprts_hook = NULL; |
| } |
| gimple *new_stmt = NULL; |
| if (res |
| && gimple_simplified_result_is_gimple_val (res_op)) |
| { |
| /* The expression is already available. */ |
| result = res_op->ops[0]; |
| /* Valueize it, simplification returns something in AVAIL only. */
| if (TREE_CODE (result) == SSA_NAME) |
| result = SSA_VAL (result); |
| } |
| else |
| { |
| tree val = vn_lookup_simplify_result (res_op); |
| if (!val && insert) |
| { |
| gimple_seq stmts = NULL; |
| result = maybe_push_res_to_seq (res_op, &stmts); |
| if (result) |
| { |
| gcc_assert (gimple_seq_singleton_p (stmts)); |
| new_stmt = gimple_seq_first_stmt (stmts); |
| } |
| } |
| else |
| /* The expression is already available. */ |
| result = val; |
| } |
| if (new_stmt) |
| { |
| /* The expression is not yet available, value-number lhs to |
| the new SSA_NAME we created. */ |
| /* Initialize value-number information properly. */ |
| vn_ssa_aux_t result_info = VN_INFO (result); |
| result_info->valnum = result; |
| result_info->value_id = get_next_value_id (); |
| result_info->visited = 1; |
| gimple_seq_add_stmt_without_update (&VN_INFO (result)->expr, |
| new_stmt); |
| result_info->needs_insertion = true; |
| /* ??? PRE phi-translation inserts NARYs without corresponding |
| SSA name result. Re-use those but set their result according |
| to the stmt we just built. */ |
| vn_nary_op_t nary = NULL; |
| vn_nary_op_lookup_stmt (new_stmt, &nary); |
| if (nary) |
| { |
| gcc_assert (! nary->predicated_values && nary->u.result == NULL_TREE); |
| nary->u.result = gimple_assign_lhs (new_stmt); |
| } |
| /* As all "inserted" statements are singleton SCCs, insert
| them into the valid table. This is strictly needed to
| avoid re-generating new value SSA_NAMEs for the same |
| expression during SCC iteration over and over (the |
| optimistic table gets cleared after each iteration). |
| We do not need to insert into the optimistic table, as |
| lookups there will fall back to the valid table. */ |
| else |
| { |
| unsigned int length = vn_nary_length_from_stmt (new_stmt); |
| vn_nary_op_t vno1 |
| = alloc_vn_nary_op_noinit (length, &vn_tables_insert_obstack); |
| vno1->value_id = result_info->value_id; |
| vno1->length = length; |
| vno1->predicated_values = 0; |
| vno1->u.result = result; |
| init_vn_nary_op_from_stmt (vno1, as_a <gassign *> (new_stmt)); |
| vn_nary_op_insert_into (vno1, valid_info->nary, true); |
| /* Also do not link it into the undo chain. */ |
| last_inserted_nary = vno1->next; |
| vno1->next = (vn_nary_op_t)(void *)-1; |
| } |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Inserting name "); |
| print_generic_expr (dump_file, result); |
| fprintf (dump_file, " for expression "); |
| print_gimple_expr (dump_file, new_stmt, 0, TDF_SLIM); |
| fprintf (dump_file, "\n"); |
| } |
| } |
| return result; |
| } |
| |
| /* Return a value-number for the expression described by RES_OP, either
| by looking up an existing value-number for the simplified result or
| by inserting the operation. */
| |
| static tree |
| vn_nary_build_or_lookup (gimple_match_op *res_op) |
| { |
| return vn_nary_build_or_lookup_1 (res_op, true, true); |
| } |
| |
| /* Try to simplify the n-ary operation described by NARY and return
| its value if present. */
| |
| tree |
| vn_nary_simplify (vn_nary_op_t nary) |
| { |
| if (nary->length > gimple_match_op::MAX_NUM_OPS) |
| return NULL_TREE; |
| gimple_match_op op (gimple_match_cond::UNCOND, nary->opcode, |
| nary->type, nary->length); |
| memcpy (op.ops, nary->op, sizeof (tree) * nary->length); |
| return vn_nary_build_or_lookup_1 (&op, false, true); |
| } |
| |
| /* Elimination engine. */ |
| |
| class eliminate_dom_walker : public dom_walker |
| { |
| public: |
| eliminate_dom_walker (cdi_direction, bitmap); |
| ~eliminate_dom_walker (); |
| |
| virtual edge before_dom_children (basic_block); |
| virtual void after_dom_children (basic_block); |
| |
| virtual tree eliminate_avail (basic_block, tree op); |
| virtual void eliminate_push_avail (basic_block, tree op); |
| tree eliminate_insert (basic_block, gimple_stmt_iterator *gsi, tree val); |
| |
| void eliminate_stmt (basic_block, gimple_stmt_iterator *); |
| |
| unsigned eliminate_cleanup (bool region_p = false); |
| |
| bool do_pre; |
| unsigned int el_todo; |
| unsigned int eliminations; |
| unsigned int insertions; |
| |
| /* SSA names that had their defs inserted by PRE if do_pre. */ |
| bitmap inserted_exprs; |
| |
| /* Blocks with statements that have had their EH properties changed. */ |
| bitmap need_eh_cleanup; |
| |
| /* Blocks with statements that have had their AB properties changed. */ |
| bitmap need_ab_cleanup; |
| |
| /* Local state for the eliminate domwalk. */ |
| auto_vec<gimple *> to_remove; |
| auto_vec<gimple *> to_fixup; |
| auto_vec<tree> avail; |
| auto_vec<tree> avail_stack; |
| }; |
| |
| /* Adaptor to the elimination engine using RPO availability. */ |
| |
| class rpo_elim : public eliminate_dom_walker |
| { |
| public: |
| rpo_elim(basic_block entry_) |
| : eliminate_dom_walker (CDI_DOMINATORS, NULL), entry (entry_), |
| m_avail_freelist (NULL) {} |
| |
| virtual tree eliminate_avail (basic_block, tree op); |
| |
| virtual void eliminate_push_avail (basic_block, tree); |
| |
| basic_block entry; |
| /* Freelist of avail entries which are allocated from the vn_ssa_aux |
| obstack. */ |
| vn_avail *m_avail_freelist; |
| }; |
| |
| /* Global RPO state for access from hooks. */ |
| static eliminate_dom_walker *rpo_avail; |
| basic_block vn_context_bb; |
| |
| /* Return true if BASE1 and BASE2 can be adjusted so they have the |
| same address and adjust *OFFSET1 and *OFFSET2 accordingly. |
| Otherwise return false. */ |
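| /* For example, MEM[p_1 + 4] and MEM[p_1 + 8] both strip down to the
| base p_1, with 4 * BITS_PER_UNIT resp. 8 * BITS_PER_UNIT added to
| the corresponding offset. */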
| |
| static bool |
| adjust_offsets_for_equal_base_address (tree base1, poly_int64 *offset1, |
| tree base2, poly_int64 *offset2) |
| { |
| poly_int64 soff; |
| if (TREE_CODE (base1) == MEM_REF |
| && TREE_CODE (base2) == MEM_REF) |
| { |
| if (mem_ref_offset (base1).to_shwi (&soff)) |
| { |
| base1 = TREE_OPERAND (base1, 0); |
| *offset1 += soff * BITS_PER_UNIT; |
| } |
| if (mem_ref_offset (base2).to_shwi (&soff)) |
| { |
| base2 = TREE_OPERAND (base2, 0); |
| *offset2 += soff * BITS_PER_UNIT; |
| } |
| return operand_equal_p (base1, base2, 0); |
| } |
| return operand_equal_p (base1, base2, OEP_ADDRESS_OF); |
| } |
| |
| /* Callback for walk_non_aliased_vuses. Tries to perform a lookup
| from the statement defining VUSE and if not successful tries to
| translate *REF and the reference in DATA_ through an aggregate copy
| at the definition of VUSE. If *DISAMBIGUATE_ONLY requests
| disambiguation only then do not perform the translation. If only
| disambiguation was performed then *DISAMBIGUATE_ONLY is updated
| accordingly. */
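| /* Returns NULL to continue the walk, (void *)-1 to terminate it as
| failed, or a non-NULL result when a value for the reference was
| derived (with a mask the value is instead stashed in DATA_ and
| (void *)-1 returned, see vn_walk_cb_data::finish). */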
| |
| static void * |
| vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *data_, |
| translate_flags *disambiguate_only) |
| { |
| vn_walk_cb_data *data = (vn_walk_cb_data *)data_; |
| vn_reference_t vr = data->vr; |
| gimple *def_stmt = SSA_NAME_DEF_STMT (vuse); |
| tree base = ao_ref_base (ref); |
| HOST_WIDE_INT offseti = 0, maxsizei, sizei = 0; |
| static vec<vn_reference_op_s> lhs_ops; |
| ao_ref lhs_ref; |
| bool lhs_ref_ok = false; |
| poly_int64 copy_size; |
| |
| /* First try to disambiguate after value-replacing in the definition's LHS. */
| if (is_gimple_assign (def_stmt)) |
| { |
| tree lhs = gimple_assign_lhs (def_stmt); |
| bool valueized_anything = false; |
| /* Avoid re-allocation overhead. */ |
| lhs_ops.truncate (0); |
| basic_block saved_rpo_bb = vn_context_bb; |
| vn_context_bb = gimple_bb (def_stmt); |
| if (*disambiguate_only <= TR_VALUEIZE_AND_DISAMBIGUATE) |
| { |
| copy_reference_ops_from_ref (lhs, &lhs_ops); |
| valueize_refs_1 (&lhs_ops, &valueized_anything, true); |
| } |
| vn_context_bb = saved_rpo_bb; |
| ao_ref_init (&lhs_ref, lhs); |
| lhs_ref_ok = true; |
| if (valueized_anything |
| && ao_ref_init_from_vn_reference |
| (&lhs_ref, ao_ref_alias_set (&lhs_ref), |
| ao_ref_base_alias_set (&lhs_ref), TREE_TYPE (lhs), lhs_ops) |
| && !refs_may_alias_p_1 (ref, &lhs_ref, data->tbaa_p)) |
| { |
| *disambiguate_only = TR_VALUEIZE_AND_DISAMBIGUATE; |
| return NULL; |
| } |
| |
| /* Besides valueizing the LHS we can also use access-path based |
| disambiguation on the original non-valueized ref. */ |
| if (!ref->ref |
| && lhs_ref_ok |
| && data->orig_ref.ref) |
| { |
| /* We want to use the non-valueized LHS for this, but avoid redundant |
| work. */ |
| ao_ref *lref = &lhs_ref; |
| ao_ref lref_alt; |
| if (valueized_anything) |
| { |
| ao_ref_init (&lref_alt, lhs); |
| lref = &lref_alt; |
| } |
| if (!refs_may_alias_p_1 (&data->orig_ref, lref, data->tbaa_p)) |
| { |
| *disambiguate_only = (valueized_anything |
| ? TR_VALUEIZE_AND_DISAMBIGUATE |
| : TR_DISAMBIGUATE); |
| return NULL; |
| } |
| } |
| |
| /* If we reach a clobbering statement try to skip it and see if |
| we find a VN result with exactly the same value as the |
| possible clobber. In this case we can ignore the clobber |
| and return the found value. */ |
| if (is_gimple_reg_type (TREE_TYPE (lhs)) |
| && types_compatible_p (TREE_TYPE (lhs), vr->type) |
| && (ref->ref || data->orig_ref.ref)) |
| { |
| tree *saved_last_vuse_ptr = data->last_vuse_ptr; |
| /* Do not update last_vuse_ptr in vn_reference_lookup_2. */ |
| data->last_vuse_ptr = NULL; |
| tree saved_vuse = vr->vuse; |
| hashval_t saved_hashcode = vr->hashcode; |
| void *res = vn_reference_lookup_2 (ref, gimple_vuse (def_stmt), data); |
| /* Need to restore vr->vuse and vr->hashcode. */ |
| vr->vuse = saved_vuse; |
| vr->hashcode = saved_hashcode; |
| data->last_vuse_ptr = saved_last_vuse_ptr; |
| if (res && res != (void *)-1) |
| { |
| vn_reference_t vnresult = (vn_reference_t) res; |
| tree rhs = gimple_assign_rhs1 (def_stmt); |
| if (TREE_CODE (rhs) == SSA_NAME) |
| rhs = SSA_VAL (rhs); |
| if (vnresult->result |
| && operand_equal_p (vnresult->result, rhs, 0) |
| /* We have to honor our promise about union type punning |
| and also support arbitrary overlaps with |
| -fno-strict-aliasing. So simply resort to alignment to |
| rule out overlaps. Do this check last because it is |
| quite expensive compared to the hash-lookup above. */ |
| && multiple_p (get_object_alignment |
| (ref->ref ? ref->ref : data->orig_ref.ref), |
| ref->size) |
| && multiple_p (get_object_alignment (lhs), ref->size)) |
| return res; |
| } |
| } |
| } |
| else if (*disambiguate_only <= TR_VALUEIZE_AND_DISAMBIGUATE |
| && gimple_call_builtin_p (def_stmt, BUILT_IN_NORMAL) |
| && gimple_call_num_args (def_stmt) <= 4) |
| { |
| /* For builtin calls valueize the arguments and call the
| alias oracle again. Valueization may improve points-to
| info of pointers and constify size and position arguments.
| Originally this was motivated by PR61034 which has
| conditional calls to free falsely clobbering ref because
| of imprecise points-to info of the argument. */
| tree oldargs[4]; |
| bool valueized_anything = false; |
| for (unsigned i = 0; i < gimple_call_num_args (def_stmt); ++i) |
| { |
| oldargs[i] = gimple_call_arg (def_stmt, i); |
| tree val = vn_valueize (oldargs[i]); |
| if (val != oldargs[i]) |
| { |
| gimple_call_set_arg (def_stmt, i, val); |
| valueized_anything = true; |
| } |
| } |
| if (valueized_anything) |
| { |
| bool res = call_may_clobber_ref_p_1 (as_a <gcall *> (def_stmt), |
| ref, data->tbaa_p); |
| for (unsigned i = 0; i < gimple_call_num_args (def_stmt); ++i) |
| gimple_call_set_arg (def_stmt, i, oldargs[i]); |
| if (!res) |
| { |
| *disambiguate_only = TR_VALUEIZE_AND_DISAMBIGUATE; |
| return NULL; |
| } |
| } |
| } |
| |
| if (*disambiguate_only > TR_TRANSLATE) |
| return (void *)-1; |
| |
| /* If we cannot constrain the size of the reference we cannot |
| test if anything kills it. */ |
| if (!ref->max_size_known_p ()) |
| return (void *)-1; |
| |
| poly_int64 offset = ref->offset; |
| poly_int64 maxsize = ref->max_size; |
| |
| /* def_stmt may-defs *ref. See if we can derive a value for *ref |
| from that definition. |
| 1) Memset. */ |
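| /* E.g. after memset (&a, 0, sizeof (a)) a read from within a can be
| resolved to zero; a nonzero fill byte is handled by converting it or
| by native-interpreting a buffer filled with that byte. */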
| if (is_gimple_reg_type (vr->type) |
| && (gimple_call_builtin_p (def_stmt, BUILT_IN_MEMSET) |
| || gimple_call_builtin_p (def_stmt, BUILT_IN_MEMSET_CHK)) |
| && (integer_zerop (gimple_call_arg (def_stmt, 1)) |
| || ((TREE_CODE (gimple_call_arg (def_stmt, 1)) == INTEGER_CST |
| || (INTEGRAL_TYPE_P (vr->type) && known_eq (ref->size, 8))) |
| && CHAR_BIT == 8 |
| && BITS_PER_UNIT == 8 |
| && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN |
| && offset.is_constant (&offseti) |
| && ref->size.is_constant (&sizei) |
| && (offseti % BITS_PER_UNIT == 0 |
| || TREE_CODE (gimple_call_arg (def_stmt, 1)) == INTEGER_CST))) |
| && (poly_int_tree_p (gimple_call_arg (def_stmt, 2)) |
| || (TREE_CODE (gimple_call_arg (def_stmt, 2)) == SSA_NAME |
| && poly_int_tree_p (SSA_VAL (gimple_call_arg (def_stmt, 2))))) |
| && (TREE_CODE (gimple_call_arg (def_stmt, 0)) == ADDR_EXPR |
| || TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME)) |
| { |
| tree base2; |
| poly_int64 offset2, size2, maxsize2; |
| bool reverse; |
| tree ref2 = gimple_call_arg (def_stmt, 0); |
| if (TREE_CODE (ref2) == SSA_NAME) |
| { |
| ref2 = SSA_VAL (ref2); |
| if (TREE_CODE (ref2) == SSA_NAME |
| && (TREE_CODE (base) != MEM_REF |
| || TREE_OPERAND (base, 0) != ref2)) |
| { |
| gimple *def_stmt = SSA_NAME_DEF_STMT (ref2); |
| if (gimple_assign_single_p (def_stmt) |
| && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR) |
| ref2 = gimple_assign_rhs1 (def_stmt); |
| } |
| } |
| if (TREE_CODE (ref2) == ADDR_EXPR) |
| { |
| ref2 = TREE_OPERAND (ref2, 0); |
| base2 = get_ref_base_and_extent (ref2, &offset2, &size2, &maxsize2, |
| &reverse); |
| if (!known_size_p (maxsize2) |
| || !known_eq (maxsize2, size2) |
| || !operand_equal_p (base, base2, OEP_ADDRESS_OF)) |
| return (void *)-1; |
| } |
| else if (TREE_CODE (ref2) == SSA_NAME) |
| { |
| poly_int64 soff; |
| if (TREE_CODE (base) != MEM_REF |
| || !(mem_ref_offset (base) |
| << LOG2_BITS_PER_UNIT).to_shwi (&soff)) |
| return (void *)-1; |
| offset += soff; |
| offset2 = 0; |
| if (TREE_OPERAND (base, 0) != ref2) |
| { |
| gimple *def = SSA_NAME_DEF_STMT (ref2); |
| if (is_gimple_assign (def) |
| && gimple_assign_rhs_code (def) == POINTER_PLUS_EXPR |
| && gimple_assign_rhs1 (def) == TREE_OPERAND (base, 0) |
| && poly_int_tree_p (gimple_assign_rhs2 (def))) |
| { |
| tree rhs2 = gimple_assign_rhs2 (def); |
| if (!(poly_offset_int::from (wi::to_poly_wide (rhs2), |
| SIGNED) |
| << LOG2_BITS_PER_UNIT).to_shwi (&offset2)) |
| return (void *)-1; |
| ref2 = gimple_assign_rhs1 (def); |
| if (TREE_CODE (ref2) == SSA_NAME) |
| ref2 = SSA_VAL (ref2); |
| } |
| else |
| return (void *)-1; |
| } |
| } |
| else |
| return (void *)-1; |
| tree len = gimple_call_arg (def_stmt, 2); |
| HOST_WIDE_INT leni, offset2i; |
| if (TREE_CODE (len) == SSA_NAME) |
| len = SSA_VAL (len); |
| /* Sometimes the above trickery is smarter than alias analysis. Take |
| advantage of that. */ |
| if (!ranges_maybe_overlap_p (offset, maxsize, offset2, |
| (wi::to_poly_offset (len) |
| << LOG2_BITS_PER_UNIT))) |
| return NULL; |
| if (data->partial_defs.is_empty () |
| && known_subrange_p (offset, maxsize, offset2, |
| wi::to_poly_offset (len) << LOG2_BITS_PER_UNIT)) |
| { |
| tree val; |
| if (integer_zerop (gimple_call_arg (def_stmt, 1))) |
| val = build_zero_cst (vr->type); |
| else if (INTEGRAL_TYPE_P (vr->type) |
| && known_eq (ref->size, 8) |
| && offseti % BITS_PER_UNIT == 0) |
| { |
| gimple_match_op res_op (gimple_match_cond::UNCOND, NOP_EXPR, |
| vr->type, gimple_call_arg (def_stmt, 1)); |
| val = vn_nary_build_or_lookup (&res_op); |
| if (!val |
| || (TREE_CODE (val) == SSA_NAME |
| && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val))) |
| return (void *)-1; |
| } |
| else |
| { |
| unsigned buflen = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (vr->type)) + 1; |
| if (INTEGRAL_TYPE_P (vr->type)) |
| buflen = GET_MODE_SIZE (SCALAR_INT_TYPE_MODE (vr->type)) + 1; |
| unsigned char *buf = XALLOCAVEC (unsigned char, buflen); |
| memset (buf, TREE_INT_CST_LOW (gimple_call_arg (def_stmt, 1)), |
| buflen); |
| if (BYTES_BIG_ENDIAN) |
| { |
| unsigned int amnt |
| = (((unsigned HOST_WIDE_INT) offseti + sizei) |
| % BITS_PER_UNIT); |
| if (amnt) |
| { |
| shift_bytes_in_array_right (buf, buflen, |
| BITS_PER_UNIT - amnt); |
| buf++; |
| buflen--; |
| } |
| } |
| else if (offseti % BITS_PER_UNIT != 0) |
| { |
| unsigned int amnt |
| = BITS_PER_UNIT - ((unsigned HOST_WIDE_INT) offseti |
| % BITS_PER_UNIT); |
| shift_bytes_in_array_left (buf, buflen, amnt); |
| buf++; |
| buflen--; |
| } |
| val = native_interpret_expr (vr->type, buf, buflen); |
| if (!val) |
| return (void *)-1; |
| } |
| return data->finish (0, 0, val); |
| } |
| /* For now handle clearing memory with partial defs. */ |
| else if (known_eq (ref->size, maxsize) |
| && integer_zerop (gimple_call_arg (def_stmt, 1)) |
| && tree_fits_poly_int64_p (len) |
| && tree_to_poly_int64 (len).is_constant (&leni) |
| && leni <= INTTYPE_MAXIMUM (HOST_WIDE_INT) / BITS_PER_UNIT |
| && offset.is_constant (&offseti) |
| && offset2.is_constant (&offset2i) |
| && maxsize.is_constant (&maxsizei) |
| && ranges_known_overlap_p (offseti, maxsizei, offset2i, |
| leni << LOG2_BITS_PER_UNIT)) |
| { |
| pd_data pd; |
| pd.rhs = build_constructor (NULL_TREE, NULL); |
| pd.offset = offset2i; |
| pd.size = leni << LOG2_BITS_PER_UNIT; |
| return data->push_partial_def (pd, 0, 0, offseti, maxsizei); |
| } |
| } |
| |
| /* 2) Assignment from an empty CONSTRUCTOR. */ |
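| /* E.g. after a = {} a read from within a resolves to zero; if the
| assignment only partially covers the read it is fed to the
| partial-def tracker instead. */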
| else if (is_gimple_reg_type (vr->type) |
| && gimple_assign_single_p (def_stmt) |
| && gimple_assign_rhs_code (def_stmt) == CONSTRUCTOR |
| && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (def_stmt)) == 0) |
| { |
| tree base2; |
| poly_int64 offset2, size2, maxsize2; |
| HOST_WIDE_INT offset2i, size2i; |
| gcc_assert (lhs_ref_ok); |
| base2 = ao_ref_base (&lhs_ref); |
| offset2 = lhs_ref.offset; |
| size2 = lhs_ref.size; |
| maxsize2 = lhs_ref.max_size; |
| if (known_size_p (maxsize2) |
| && known_eq (maxsize2, size2) |
| && adjust_offsets_for_equal_base_address (base, &offset, |
| base2, &offset2)) |
| { |
| if (data->partial_defs.is_empty () |
| && known_subrange_p (offset, maxsize, offset2, size2)) |
| { |
| /* While technically undefined behavior, do not optimize
| a full read from a clobber. */
| if (gimple_clobber_p (def_stmt)) |
| return (void *)-1; |
| tree val = build_zero_cst (vr->type); |
| return data->finish (ao_ref_alias_set (&lhs_ref), |
| ao_ref_base_alias_set (&lhs_ref), val); |
| } |
| else if (known_eq (ref->size, maxsize) |
| && maxsize.is_constant (&maxsizei) |
| && offset.is_constant (&offseti) |
| && offset2.is_constant (&offset2i) |
| && size2.is_constant (&size2i) |
| && ranges_known_overlap_p (offseti, maxsizei, |
| offset2i, size2i)) |
| { |
| /* Let clobbers be consumed by the partial-def tracker |
| which can choose to ignore them if they are shadowed |
| by a later def. */ |
| pd_data pd; |
| pd.rhs = gimple_assign_rhs1 (def_stmt); |
| pd.offset = offset2i; |
| pd.size = size2i; |
| return data->push_partial_def (pd, ao_ref_alias_set (&lhs_ref), |
| ao_ref_base_alias_set (&lhs_ref), |
| offseti, maxsizei); |
| } |
| } |
| } |
| |
| /* 3) Assignment from a constant. We can use fold's native encode/interpret
| routines to extract the assigned bits. */
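| /* E.g. for a.i = 0x11223344 followed by a load of a char within a.i
| the constant is native-encoded into bytes and the bytes covering the
| load are re-interpreted in the type of the load. */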
| else if (known_eq (ref->size, maxsize) |
| && is_gimple_reg_type (vr->type) |
| && !reverse_storage_order_for_component_p (vr->operands) |
| && !contains_storage_order_barrier_p (vr->operands) |
| && gimple_assign_single_p (def_stmt) |
| && CHAR_BIT == 8 |
| && BITS_PER_UNIT == 8 |
| && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN |
| /* native_encode and native_decode operate on arrays of bytes |
| and so fundamentally need a compile-time size and offset. */ |
| && maxsize.is_constant (&maxsizei) |
| && offset.is_constant (&offseti) |
| && (is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)) |
| || (TREE_CODE (gimple_assign_rhs1 (def_stmt)) == SSA_NAME |
| && is_gimple_min_invariant (SSA_VAL (gimple_assign_rhs1 (def_stmt)))))) |
| { |
| tree lhs = gimple_assign_lhs (def_stmt); |
| tree base2; |
| poly_int64 offset2, size2, maxsize2; |
| HOST_WIDE_INT offset2i, size2i; |
| bool reverse; |
| gcc_assert (lhs_ref_ok); |
| base2 = ao_ref_base (&lhs_ref); |
| offset2 = lhs_ref.offset; |
| size2 = lhs_ref.size; |
| maxsize2 = lhs_ref.max_size; |
| reverse = reverse_storage_order_for_component_p (lhs); |
| if (base2 |
| && !reverse |
| && !storage_order_barrier_p (lhs) |
| && known_eq (maxsize2, size2) |
| && adjust_offsets_for_equal_base_address (base, &offset, |
| base2, &offset2) |
| && offset.is_constant (&offseti) |
| && offset2.is_constant (&offset2i) |
| && size2.is_constant (&size2i)) |
| { |
| if (data->partial_defs.is_empty () |
| && known_subrange_p (offseti, maxsizei, offset2, size2)) |
| { |
| /* We support up to 512-bit values (for V8DFmode). */ |
| unsigned char buffer[65]; |
| int len; |
| |
| tree rhs = gimple_assign_rhs1 (def_stmt); |
| if (TREE_CODE (rhs) == SSA_NAME) |
| rhs = SSA_VAL (rhs); |
| len = native_encode_expr (rhs, |
| buffer, sizeof (buffer) - 1, |
| (offseti - offset2i) / BITS_PER_UNIT); |
| if (len > 0 && len * BITS_PER_UNIT >= maxsizei) |
| { |
| tree type = vr->type; |
| unsigned char *buf = buffer; |
| unsigned int amnt = |