| /* Reassociation for trees. |
| Copyright (C) 2005-2021 Free Software Foundation, Inc. |
| Contributed by Daniel Berlin <dan@dberlin.org> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "cfghooks.h" |
| #include "alloc-pool.h" |
| #include "tree-pass.h" |
| #include "memmodel.h" |
| #include "tm_p.h" |
| #include "ssa.h" |
| #include "optabs-tree.h" |
| #include "gimple-pretty-print.h" |
| #include "diagnostic-core.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "cfganal.h" |
| #include "gimple-fold.h" |
| #include "tree-eh.h" |
| #include "gimple-iterator.h" |
| #include "gimplify-me.h" |
| #include "tree-cfg.h" |
| #include "tree-ssa-loop.h" |
| #include "flags.h" |
| #include "tree-ssa.h" |
| #include "langhooks.h" |
| #include "cfgloop.h" |
| #include "builtins.h" |
| #include "gimplify.h" |
| #include "case-cfn-macros.h" |
| #include "tree-ssa-reassoc.h" |
| #include "tree-ssa-math-opts.h" |
| #include "gimple-range.h" |
| |
| /* This is a simple global reassociation pass. It is, in part, based |
| on the LLVM pass of the same name (They do some things more/less |
| than we do, in different orders, etc). |
| |
| It consists of five steps: |
| |
| 1. Breaking up subtract operations into addition + negate, where |
| it would promote the reassociation of adds. |
| |
| 2. Left linearization of the expression trees, so that (A+B)+(C+D) |
| becomes (((A+B)+C)+D), which is easier for us to rewrite later. |
| During linearization, we place the operands of the binary |
| expressions into a vector of operand_entry_* |
| |
| 3. Optimization of the operand lists, eliminating things like a + |
| -a, a & a, etc. |
| |
| 3a. Combine repeated factors with the same occurrence counts |
| into a __builtin_powi call that will later be optimized into |
| an optimal number of multiplies. |
| |
| 4. Rewrite the expression trees we linearized and optimized so |
| they are in proper rank order. |
| |
| 5. Repropagate negates, as nothing else will clean them up ATM. |
| |
| A bit of theory on #4, since nobody seems to write anything down |
| about why it makes sense to do it the way they do it: |
| |
| We could do this much nicer theoretically, but don't (for reasons |
| explained after how to do it theoretically nice :P). |
| |
| In order to promote the most redundancy elimination, you want |
| binary expressions whose operands are the same rank (or |
| preferably, the same value) exposed to the redundancy eliminator, |
| for possible elimination. |
| |
| So the way to do this if we really cared, is to build the new op |
| tree from the leaves to the roots, merging as you go, and putting the |
| new op on the end of the worklist, until you are left with one |
| thing on the worklist. |
| |
| IE if you have to rewrite the following set of operands (listed with |
| rank in parentheses), with opcode PLUS_EXPR: |
| |
| a (1), b (1), c (1), d (2), e (2) |
| |
| |
| We start with our merge worklist empty, and the ops list with all of |
| those on it. |
| |
| You want to first merge all leaves of the same rank, as much as |
| possible. |
| |
| So first build a binary op of |
| |
| mergetmp = a + b, and put "mergetmp" on the merge worklist. |
| |
| Because there is no three operand form of PLUS_EXPR, c is not going to |
| be exposed to redundancy elimination as a rank 1 operand. |
| |
| So you might as well throw it on the merge worklist (you could also |
| consider it to now be a rank two operand, and merge it with d and e, |
| but in this case, you then have evicted e from a binary op. So at |
| least in this situation, you can't win.) |
| |
| Then build a binary op of d + e |
| mergetmp2 = d + e |
| |
| and put mergetmp2 on the merge worklist. |
| |
| so merge worklist = {mergetmp, c, mergetmp2} |
| |
| Continue building binary ops of these operations until you have only |
| one operation left on the worklist. |
| |
| So we have |
| |
| build binary op |
| mergetmp3 = mergetmp + c |
| |
| worklist = {mergetmp2, mergetmp3} |
| |
| mergetmp4 = mergetmp2 + mergetmp3 |
| |
| worklist = {mergetmp4} |
| |
| because we have one operation left, we can now just set the original |
| statement equal to the result of that operation. |
| |
| This will at least expose a + b and d + e to redundancy elimination |
| as binary operations. |
| |
| For extra points, you can reuse the old statements to build the |
| mergetmps, since you shouldn't run out. |
| |
| So why don't we do this? |
| |
| Because it's expensive and rarely helps. Most trees we are |
| reassociating have 3 or fewer ops. If they have 2 ops, they already |
| will be written into a nice single binary op. If you have 3 ops, a |
| single simple check suffices to tell you whether the first two are of the |
| same rank. If so, you know to order it |
| |
| mergetmp = op1 + op2 |
| newstmt = mergetmp + op3 |
| |
| instead of |
| mergetmp = op2 + op3 |
| newstmt = mergetmp + op1 |
| |
| If all three are of the same rank, you can't expose them all in a |
| single binary operator anyway, so the above is *still* the best you |
| can do. |
| |
| Thus, this is what we do. When we have three ops left, we check to see |
| what order to put them in, and call it a day. As a nod to vector sum |
| reduction, we check if any of the ops are really a phi node that is a |
| destructive update for the associating op, and keep the destructive |
| update together for vector sum reduction recognition. */ |
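| |
| /* For a small illustration of steps 1 through 3a: a - b + a is broken up |
| as a + (-b) + a (step 1) and linearized into the operand list |
| {a, -b, a} (step 2); a list like {a, -a, b} simplifies to {b, 0} |
| (step 3); and a repeated factor as in a * a * b is combined into |
| __builtin_powi (a, 2) * b (step 3a). */ |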
| |
| /* Enable insertion of __builtin_powi calls during execute_reassoc. See |
| point 3a in the pass header comment. */ |
| static bool reassoc_insert_powi_p; |
| |
| /* Enable biasing ranks of loop accumulators. We don't want this before |
| vectorization, since it interferes with reduction chains. */ |
| static bool reassoc_bias_loop_carried_phi_ranks_p; |
| |
| /* Statistics */ |
| static struct |
| { |
| int linearized; |
| int constants_eliminated; |
| int ops_eliminated; |
| int rewritten; |
| int pows_encountered; |
| int pows_created; |
| } reassociate_stats; |
| |
| |
| static object_allocator<operand_entry> operand_entry_pool |
| ("operand entry pool"); |
| |
| /* This is used to assign a unique ID to each struct operand_entry |
| so that qsort results are identical on different hosts. */ |
| static unsigned int next_operand_entry_id; |
| |
| /* Starting rank number for a given basic block, so that we can rank |
| operations using unmovable instructions in that BB based on the bb |
| depth. */ |
| static int64_t *bb_rank; |
| |
| /* Operand->rank hashtable. */ |
| static hash_map<tree, int64_t> *operand_rank; |
| |
| /* SSA_NAMEs that are forms of loop accumulators and whose ranks need to be |
| biased. */ |
| static auto_bitmap biased_names; |
| |
| /* Vector of SSA_NAMEs for which, after reassociate_bb is done with |
| all basic blocks, the CFG should be adjusted - the basic block is |
| split right after that SSA_NAME's definition statement and before |
| its only use, which must be a bit ior. */ |
| static vec<tree> reassoc_branch_fixups; |
| |
| /* Forward decls. */ |
| static int64_t get_rank (tree); |
| static bool reassoc_stmt_dominates_stmt_p (gimple *, gimple *); |
| |
| /* Wrapper around gsi_remove, which adjusts gimple_uid of debug stmts |
| possibly added by gsi_remove. */ |
| |
| static bool |
| reassoc_remove_stmt (gimple_stmt_iterator *gsi) |
| { |
| gimple *stmt = gsi_stmt (*gsi); |
| |
| if (!MAY_HAVE_DEBUG_BIND_STMTS || gimple_code (stmt) == GIMPLE_PHI) |
| return gsi_remove (gsi, true); |
| |
| gimple_stmt_iterator prev = *gsi; |
| gsi_prev (&prev); |
| unsigned uid = gimple_uid (stmt); |
| basic_block bb = gimple_bb (stmt); |
| bool ret = gsi_remove (gsi, true); |
| if (!gsi_end_p (prev)) |
| gsi_next (&prev); |
| else |
| prev = gsi_start_bb (bb); |
| gimple *end_stmt = gsi_stmt (*gsi); |
| while ((stmt = gsi_stmt (prev)) != end_stmt) |
| { |
| gcc_assert (stmt && is_gimple_debug (stmt) && gimple_uid (stmt) == 0); |
| gimple_set_uid (stmt, uid); |
| gsi_next (&prev); |
| } |
| return ret; |
| } |
| |
| /* Bias amount for loop-carried phis. We want this to be larger than |
| the depth of any reassociation tree we can see, but not larger than |
| the rank difference between two blocks. */ |
| #define PHI_LOOP_BIAS (1 << 15) |
| |
| /* Return TRUE iff PHI_LOOP_BIAS should be propagated from one of the STMT's |
| operands to the STMT's left-hand side. The goal is to preserve bias in code |
| like this: |
| |
| x_1 = phi(x_0, x_2) |
| a = x_1 | 1 |
| b = a ^ 2 |
| .MEM = b |
| c = b + d |
| x_2 = c + e |
| |
| That is, we need to preserve bias along single-use chains originating from |
| loop-carried phis. Only GIMPLE_ASSIGNs to SSA_NAMEs are considered to be |
| uses, because only they participate in rank propagation. */ |
| static bool |
| propagate_bias_p (gimple *stmt) |
| { |
| use_operand_p use; |
| imm_use_iterator use_iter; |
| gimple *single_use_stmt = NULL; |
| |
| if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_reference) |
| return false; |
| |
| FOR_EACH_IMM_USE_FAST (use, use_iter, gimple_assign_lhs (stmt)) |
| { |
| gimple *current_use_stmt = USE_STMT (use); |
| |
| if (is_gimple_assign (current_use_stmt) |
| && TREE_CODE (gimple_assign_lhs (current_use_stmt)) == SSA_NAME) |
| { |
| if (single_use_stmt != NULL && single_use_stmt != current_use_stmt) |
| return false; |
| single_use_stmt = current_use_stmt; |
| } |
| } |
| |
| if (single_use_stmt == NULL) |
| return false; |
| |
| if (gimple_bb (stmt)->loop_father |
| != gimple_bb (single_use_stmt)->loop_father) |
| return false; |
| |
| return true; |
| } |
| |
| /* Rank assigned to a phi statement. If STMT is a loop-carried phi of |
| an innermost loop, and the phi has only a single use which is inside |
| the loop, then the rank is the block rank of the loop latch plus an |
| extra bias for the loop-carried dependence. This causes expressions |
| calculated into an accumulator variable to be independent for each |
| iteration of the loop. If STMT is some other phi, the rank is the |
| block rank of its containing block. */ |
| static int64_t |
| phi_rank (gimple *stmt) |
| { |
| basic_block bb = gimple_bb (stmt); |
| class loop *father = bb->loop_father; |
| tree res; |
| unsigned i; |
| use_operand_p use; |
| gimple *use_stmt; |
| |
| if (!reassoc_bias_loop_carried_phi_ranks_p) |
| return bb_rank[bb->index]; |
| |
| /* We only care about real loops (those with a latch). */ |
| if (!father->latch) |
| return bb_rank[bb->index]; |
| |
| /* Interesting phis must be in headers of innermost loops. */ |
| if (bb != father->header |
| || father->inner) |
| return bb_rank[bb->index]; |
| |
| /* Ignore virtual SSA_NAMEs. */ |
| res = gimple_phi_result (stmt); |
| if (virtual_operand_p (res)) |
| return bb_rank[bb->index]; |
| |
| /* The phi definition must have a single use, and that use must be |
| within the loop. Otherwise this isn't an accumulator pattern. */ |
| if (!single_imm_use (res, &use, &use_stmt) |
| || gimple_bb (use_stmt)->loop_father != father) |
| return bb_rank[bb->index]; |
| |
| /* Look for phi arguments from within the loop. If found, bias this phi. */ |
| for (i = 0; i < gimple_phi_num_args (stmt); i++) |
| { |
| tree arg = gimple_phi_arg_def (stmt, i); |
| if (TREE_CODE (arg) == SSA_NAME |
| && !SSA_NAME_IS_DEFAULT_DEF (arg)) |
| { |
| gimple *def_stmt = SSA_NAME_DEF_STMT (arg); |
| if (gimple_bb (def_stmt)->loop_father == father) |
| return bb_rank[father->latch->index] + PHI_LOOP_BIAS; |
| } |
| } |
| |
| /* Must be an uninteresting phi. */ |
| return bb_rank[bb->index]; |
| } |
| |
| /* Return the maximum of RANK and the rank that should be propagated |
| from expression OP. For most operands, this is just the rank of OP. |
| For SSA names whose rank was biased (loop-carried phis and values |
| derived from them), the operand is ignored unless MAYBE_BIASED_P is |
| non-NULL, in which case *MAYBE_BIASED_P is set and the biased rank |
| is propagated. */ |
| static int64_t |
| propagate_rank (int64_t rank, tree op, bool *maybe_biased_p) |
| { |
| int64_t op_rank; |
| |
| op_rank = get_rank (op); |
| |
| /* Check whether op is biased after the get_rank () call, since it might have |
| updated biased_names. */ |
| if (TREE_CODE (op) == SSA_NAME |
| && bitmap_bit_p (biased_names, SSA_NAME_VERSION (op))) |
| { |
| if (maybe_biased_p == NULL) |
| return rank; |
| *maybe_biased_p = true; |
| } |
| |
| return MAX (rank, op_rank); |
| } |
| |
| /* Look up the operand rank structure for expression E. */ |
| |
| static inline int64_t |
| find_operand_rank (tree e) |
| { |
| int64_t *slot = operand_rank->get (e); |
| return slot ? *slot : -1; |
| } |
| |
| /* Insert {E,RANK} into the operand rank hashtable. */ |
| |
| static inline void |
| insert_operand_rank (tree e, int64_t rank) |
| { |
| gcc_assert (rank > 0); |
| gcc_assert (!operand_rank->put (e, rank)); |
| } |
| |
| /* Given an expression E, return the rank of the expression. */ |
| |
| static int64_t |
| get_rank (tree e) |
| { |
| /* SSA_NAME's have the rank of the expression they are the result |
| of. |
| For globals and uninitialized values, the rank is 0. |
| For function arguments, use the pre-setup rank. |
| For PHI nodes, stores, asm statements, etc, we use the rank of |
| the BB. |
| For simple operations, the rank is one more than the maximum rank |
| of any of its operands. |
| I make no claims that this is optimal, however, it gives good |
| results. */ |
| |
| /* We make an exception to the normal ranking system to break |
| dependences of accumulator variables in loops. Suppose we |
| have a simple one-block loop containing: |
| |
| x_1 = phi(x_0, x_2) |
| b = a + x_1 |
| c = b + d |
| x_2 = c + e |
| |
| As shown, each iteration of the calculation into x is fully |
| dependent upon the iteration before it. We would prefer to |
| see this in the form: |
| |
| x_1 = phi(x_0, x_2) |
| b = a + d |
| c = b + e |
| x_2 = c + x_1 |
| |
| If the loop is unrolled, the calculations of b and c from |
| different iterations can be interleaved. |
| |
| To obtain this result during reassociation, we bias the rank |
| of the phi definition x_1 upward, when it is recognized as an |
| accumulator pattern. The artificial rank causes it to be |
| added last, providing the desired independence. */ |
| |
| if (TREE_CODE (e) == SSA_NAME) |
| { |
| ssa_op_iter iter; |
| gimple *stmt; |
| int64_t rank; |
| tree op; |
| |
| /* If we already have a rank for this expression, use that. */ |
| rank = find_operand_rank (e); |
| if (rank != -1) |
| return rank; |
| |
| stmt = SSA_NAME_DEF_STMT (e); |
| if (gimple_code (stmt) == GIMPLE_PHI) |
| { |
| rank = phi_rank (stmt); |
| if (rank != bb_rank[gimple_bb (stmt)->index]) |
| bitmap_set_bit (biased_names, SSA_NAME_VERSION (e)); |
| } |
| |
| else if (!is_gimple_assign (stmt)) |
| rank = bb_rank[gimple_bb (stmt)->index]; |
| |
| else |
| { |
| bool biased_p = false; |
| bool *maybe_biased_p = propagate_bias_p (stmt) ? &biased_p : NULL; |
| |
| /* Otherwise, find the maximum rank for the operands. As an |
| exception, remove the bias from loop-carried phis when propagating |
| the rank so that dependent operations are not also biased. */ |
| /* Simply walk over all SSA uses - this takes advantage of the |
| fact that non-SSA operands satisfy is_gimple_min_invariant and |
| thus have rank 0. */ |
| rank = 0; |
| FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE) |
| rank = propagate_rank (rank, op, maybe_biased_p); |
| |
| rank += 1; |
| if (biased_p) |
| bitmap_set_bit (biased_names, SSA_NAME_VERSION (e)); |
| } |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Rank for "); |
| print_generic_expr (dump_file, e); |
| fprintf (dump_file, " is %" PRId64 "\n", rank); |
| } |
| |
| /* Note the rank in the hashtable so we don't recompute it. */ |
| insert_operand_rank (e, rank); |
| return rank; |
| } |
| |
| /* Constants, globals, etc., are rank 0 */ |
| return 0; |
| } |
| |
| |
| /* We want integer constants to end up last no matter what, since they |
| are the ones we can do the most with. */ |
| #define INTEGER_CONST_TYPE (1 << 4) |
| #define FLOAT_ONE_CONST_TYPE (1 << 3) |
| #define FLOAT_CONST_TYPE (1 << 2) |
| #define OTHER_CONST_TYPE (1 << 1) |
| |
| /* Classify an invariant tree into integer, float, or other, so that |
| we can sort them to be near other constants of the same type. */ |
| static inline int |
| constant_type (tree t) |
| { |
| if (INTEGRAL_TYPE_P (TREE_TYPE (t))) |
| return INTEGER_CONST_TYPE; |
| else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (t))) |
| { |
| /* Sort -1.0 and 1.0 constants last: while const_binop can't optimize |
| some inexact operations, multiplication by -1.0 or 1.0 can always be |
| merged with others. */ |
| if (real_onep (t) || real_minus_onep (t)) |
| return FLOAT_ONE_CONST_TYPE; |
| return FLOAT_CONST_TYPE; |
| } |
| else |
| return OTHER_CONST_TYPE; |
| } |
| |
| /* qsort comparison function to sort operand entries PA and PB by rank |
| so that the sorted array is ordered by rank in decreasing order. */ |
| static int |
| sort_by_operand_rank (const void *pa, const void *pb) |
| { |
| const operand_entry *oea = *(const operand_entry *const *)pa; |
| const operand_entry *oeb = *(const operand_entry *const *)pb; |
| |
| if (oeb->rank != oea->rank) |
| return oeb->rank > oea->rank ? 1 : -1; |
| |
| /* It's nicer for optimize_expression if constants that are likely |
| to fold when added/multiplied/whatever are put next to each |
| other. Since all constants have rank 0, order them by type. */ |
| if (oea->rank == 0) |
| { |
| if (constant_type (oeb->op) != constant_type (oea->op)) |
| return constant_type (oea->op) - constant_type (oeb->op); |
| else |
| /* To make sorting result stable, we use unique IDs to determine |
| order. */ |
| return oeb->id > oea->id ? 1 : -1; |
| } |
| |
| if (TREE_CODE (oea->op) != SSA_NAME) |
| { |
| if (TREE_CODE (oeb->op) != SSA_NAME) |
| return oeb->id > oea->id ? 1 : -1; |
| else |
| return 1; |
| } |
| else if (TREE_CODE (oeb->op) != SSA_NAME) |
| return -1; |
| |
| /* Lastly, make sure the versions that are the same go next to each |
| other. */ |
| if (SSA_NAME_VERSION (oeb->op) != SSA_NAME_VERSION (oea->op)) |
| { |
| /* SSA_NAME_VERSIONs are assigned somewhat randomly, because we reuse |
| versions of removed SSA_NAMEs, so if possible prefer to sort based |
| on the basic block and gimple_uid of the SSA_NAME_DEF_STMT. |
| See PR60418. */ |
| gimple *stmta = SSA_NAME_DEF_STMT (oea->op); |
| gimple *stmtb = SSA_NAME_DEF_STMT (oeb->op); |
| basic_block bba = gimple_bb (stmta); |
| basic_block bbb = gimple_bb (stmtb); |
| if (bbb != bba) |
| { |
| /* One of the SSA_NAMEs can be defined in oeN->stmt_to_insert |
| but the other might not. */ |
| if (!bba) |
| return 1; |
| if (!bbb) |
| return -1; |
| /* If neither is, compare bb_rank. */ |
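| /* bb_rank values are assigned in multiples of 1 << 16 (see |
| init_reassoc), so shifting right by 16 yields small per-block |
| numbers whose difference fits in the int comparison result. */ |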
| if (bb_rank[bbb->index] != bb_rank[bba->index]) |
| return (bb_rank[bbb->index] >> 16) - (bb_rank[bba->index] >> 16); |
| } |
| |
| bool da = reassoc_stmt_dominates_stmt_p (stmta, stmtb); |
| bool db = reassoc_stmt_dominates_stmt_p (stmtb, stmta); |
| if (da != db) |
| return da ? 1 : -1; |
| |
| return SSA_NAME_VERSION (oeb->op) > SSA_NAME_VERSION (oea->op) ? 1 : -1; |
| } |
| |
| return oeb->id > oea->id ? 1 : -1; |
| } |
| |
| /* Add an operand entry to *OPS for the tree operand OP. */ |
| |
| static void |
| add_to_ops_vec (vec<operand_entry *> *ops, tree op, gimple *stmt_to_insert = NULL) |
| { |
| operand_entry *oe = operand_entry_pool.allocate (); |
| |
| oe->op = op; |
| oe->rank = get_rank (op); |
| oe->id = next_operand_entry_id++; |
| oe->count = 1; |
| oe->stmt_to_insert = stmt_to_insert; |
| ops->safe_push (oe); |
| } |
| |
| /* Add an operand entry to *OPS for the tree operand OP with repeat |
| count REPEAT. */ |
| |
| static void |
| add_repeat_to_ops_vec (vec<operand_entry *> *ops, tree op, |
| HOST_WIDE_INT repeat) |
| { |
| operand_entry *oe = operand_entry_pool.allocate (); |
| |
| oe->op = op; |
| oe->rank = get_rank (op); |
| oe->id = next_operand_entry_id++; |
| oe->count = repeat; |
| oe->stmt_to_insert = NULL; |
| ops->safe_push (oe); |
| |
| reassociate_stats.pows_encountered++; |
| } |
| |
| /* Returns true if we can associate the SSA def OP. */ |
| |
| static bool |
| can_reassociate_op_p (tree op) |
| { |
| if (TREE_CODE (op) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op)) |
| return false; |
| /* Make sure asm goto outputs do not participate in reassociation since |
| we have no way to find an insertion place after asm goto. */ |
| if (TREE_CODE (op) == SSA_NAME |
| && gimple_code (SSA_NAME_DEF_STMT (op)) == GIMPLE_ASM |
| && gimple_asm_nlabels (as_a <gasm *> (SSA_NAME_DEF_STMT (op))) != 0) |
| return false; |
| return true; |
| } |
| |
| /* Returns true if we can reassociate operations of TYPE. |
| That is for integral or non-saturating fixed-point types, and for |
| floating point type when associative-math is enabled. */ |
| |
| static bool |
| can_reassociate_type_p (tree type) |
| { |
| if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)) |
| || NON_SAT_FIXED_POINT_TYPE_P (type) |
| || (flag_associative_math && FLOAT_TYPE_P (type))) |
| return true; |
| return false; |
| } |
| |
| /* Return true if STMT is a reassociable operation containing a binary |
| operation with tree code CODE, and is inside LOOP. */ |
| |
| static bool |
| is_reassociable_op (gimple *stmt, enum tree_code code, class loop *loop) |
| { |
| basic_block bb = gimple_bb (stmt); |
| |
| if (gimple_bb (stmt) == NULL) |
| return false; |
| |
| if (!flow_bb_inside_loop_p (loop, bb)) |
| return false; |
| |
| if (is_gimple_assign (stmt) |
| && gimple_assign_rhs_code (stmt) == code |
| && has_single_use (gimple_assign_lhs (stmt))) |
| { |
| tree rhs1 = gimple_assign_rhs1 (stmt); |
| tree rhs2 = gimple_assign_rhs2 (stmt); |
| if (!can_reassociate_op_p (rhs1) |
| || (rhs2 && !can_reassociate_op_p (rhs2))) |
| return false; |
| return true; |
| } |
| |
| return false; |
| } |
| |
| |
| /* Return true if STMT is a nop-conversion. */ |
| |
| static bool |
| gimple_nop_conversion_p (gimple *stmt) |
| { |
| if (gassign *ass = dyn_cast <gassign *> (stmt)) |
| { |
| if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (ass)) |
| && tree_nop_conversion_p (TREE_TYPE (gimple_assign_lhs (ass)), |
| TREE_TYPE (gimple_assign_rhs1 (ass)))) |
| return true; |
| } |
| return false; |
| } |
| |
| /* Given NAME, if NAME is defined by a unary operation OPCODE, return the |
| single operand of that operation. Otherwise, return NULL_TREE. */ |
| |
| static tree |
| get_unary_op (tree name, enum tree_code opcode) |
| { |
| gimple *stmt = SSA_NAME_DEF_STMT (name); |
| |
| /* Look through nop conversions (sign changes). */ |
| if (gimple_nop_conversion_p (stmt) |
| && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME) |
| stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); |
| |
| if (!is_gimple_assign (stmt)) |
| return NULL_TREE; |
| |
| if (gimple_assign_rhs_code (stmt) == opcode) |
| return gimple_assign_rhs1 (stmt); |
| return NULL_TREE; |
| } |
| |
| /* Return true if OP1 and OP2 have the same value if casted to either type. */ |
| |
| static bool |
| ops_equal_values_p (tree op1, tree op2) |
| { |
| if (op1 == op2) |
| return true; |
| |
| tree orig_op1 = op1; |
| if (TREE_CODE (op1) == SSA_NAME) |
| { |
| gimple *stmt = SSA_NAME_DEF_STMT (op1); |
| if (gimple_nop_conversion_p (stmt)) |
| { |
| op1 = gimple_assign_rhs1 (stmt); |
| if (op1 == op2) |
| return true; |
| } |
| } |
| |
| if (TREE_CODE (op2) == SSA_NAME) |
| { |
| gimple *stmt = SSA_NAME_DEF_STMT (op2); |
| if (gimple_nop_conversion_p (stmt)) |
| { |
| op2 = gimple_assign_rhs1 (stmt); |
| if (op1 == op2 |
| || orig_op1 == op2) |
| return true; |
| } |
| } |
| |
| return false; |
| } |
| |
| |
| /* If CURR and LAST are a pair of ops that OPCODE allows us to |
| eliminate through equivalences, do so, remove them from OPS, and |
| return true. Otherwise, return false. */ |
| |
| static bool |
| eliminate_duplicate_pair (enum tree_code opcode, |
| vec<operand_entry *> *ops, |
| bool *all_done, |
| unsigned int i, |
| operand_entry *curr, |
| operand_entry *last) |
| { |
| |
| /* If we have two of the same op, and the opcode is &, |, min, or max, |
| we can eliminate one of them. |
| If we have two of the same op, and the opcode is ^, we can |
| eliminate both of them. */ |
| |
| if (last && last->op == curr->op) |
| { |
| switch (opcode) |
| { |
| case MAX_EXPR: |
| case MIN_EXPR: |
| case BIT_IOR_EXPR: |
| case BIT_AND_EXPR: |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Equivalence: "); |
| print_generic_expr (dump_file, curr->op); |
| fprintf (dump_file, " [&|minmax] "); |
| print_generic_expr (dump_file, last->op); |
| fprintf (dump_file, " -> "); |
| print_generic_stmt (dump_file, last->op); |
| } |
| |
| ops->ordered_remove (i); |
| reassociate_stats.ops_eliminated ++; |
| |
| return true; |
| |
| case BIT_XOR_EXPR: |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Equivalence: "); |
| print_generic_expr (dump_file, curr->op); |
| fprintf (dump_file, " ^ "); |
| print_generic_expr (dump_file, last->op); |
| fprintf (dump_file, " -> nothing\n"); |
| } |
| |
| reassociate_stats.ops_eliminated += 2; |
| |
| if (ops->length () == 2) |
| { |
| ops->truncate (0); |
| add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (last->op))); |
| *all_done = true; |
| } |
| else |
| { |
| ops->ordered_remove (i-1); |
| ops->ordered_remove (i-1); |
| } |
| |
| return true; |
| |
| default: |
| break; |
| } |
| } |
| return false; |
| } |
| |
| static vec<tree> plus_negates; |
| |
| /* If OPCODE is PLUS_EXPR and CURR->OP is a negate expression or a bitwise |
| not expression, look in OPS for a corresponding positive operation to |
| cancel it out. If we find one, remove the other from OPS, replace |
| OPS[CURRINDEX] with 0 or -1, respectively, and return true. Otherwise, |
| return false. */ |
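| |
| /* For example, in an operand list for a + b + (-a), the entries for a |
| and -a are both removed and a 0 operand is added, leaving b + 0. */ |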
| |
| static bool |
| eliminate_plus_minus_pair (enum tree_code opcode, |
| vec<operand_entry *> *ops, |
| unsigned int currindex, |
| operand_entry *curr) |
| { |
| tree negateop; |
| tree notop; |
| unsigned int i; |
| operand_entry *oe; |
| |
| if (opcode != PLUS_EXPR || TREE_CODE (curr->op) != SSA_NAME) |
| return false; |
| |
| negateop = get_unary_op (curr->op, NEGATE_EXPR); |
| notop = get_unary_op (curr->op, BIT_NOT_EXPR); |
| if (negateop == NULL_TREE && notop == NULL_TREE) |
| return false; |
| |
| /* Any non-negated version will have a rank that is one less than |
| the current rank. So once we hit those ranks, if we don't find |
| one, we can stop. */ |
| |
| for (i = currindex + 1; |
| ops->iterate (i, &oe) |
| && oe->rank >= curr->rank - 1 ; |
| i++) |
| { |
| if (negateop |
| && ops_equal_values_p (oe->op, negateop)) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Equivalence: "); |
| print_generic_expr (dump_file, negateop); |
| fprintf (dump_file, " + -"); |
| print_generic_expr (dump_file, oe->op); |
| fprintf (dump_file, " -> 0\n"); |
| } |
| |
| ops->ordered_remove (i); |
| add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (oe->op))); |
| ops->ordered_remove (currindex); |
| reassociate_stats.ops_eliminated ++; |
| |
| return true; |
| } |
| else if (notop |
| && ops_equal_values_p (oe->op, notop)) |
| { |
| tree op_type = TREE_TYPE (oe->op); |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Equivalence: "); |
| print_generic_expr (dump_file, notop); |
| fprintf (dump_file, " + ~"); |
| print_generic_expr (dump_file, oe->op); |
| fprintf (dump_file, " -> -1\n"); |
| } |
| |
| ops->ordered_remove (i); |
| add_to_ops_vec (ops, build_all_ones_cst (op_type)); |
| ops->ordered_remove (currindex); |
| reassociate_stats.ops_eliminated ++; |
| |
| return true; |
| } |
| } |
| |
| /* If CURR->OP is a negate expr without nop conversion in a plus expr: |
| save it for later inspection in repropagate_negates(). */ |
| if (negateop != NULL_TREE |
| && gimple_assign_rhs_code (SSA_NAME_DEF_STMT (curr->op)) == NEGATE_EXPR) |
| plus_negates.safe_push (curr->op); |
| |
| return false; |
| } |
| |
| /* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR, and CURR->OP is really a |
| bitwise not expression, look in OPS for a corresponding operand to |
| cancel it out. If we find one, collapse OPS to the single constant |
| -1 or 0, respectively, and return true. Otherwise, return false. */ |
| |
| static bool |
| eliminate_not_pairs (enum tree_code opcode, |
| vec<operand_entry *> *ops, |
| unsigned int currindex, |
| operand_entry *curr) |
| { |
| tree notop; |
| unsigned int i; |
| operand_entry *oe; |
| |
| if ((opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR) |
| || TREE_CODE (curr->op) != SSA_NAME) |
| return false; |
| |
| notop = get_unary_op (curr->op, BIT_NOT_EXPR); |
| if (notop == NULL_TREE) |
| return false; |
| |
| /* Any non-not version will have a rank that is one less than |
| the current rank. So once we hit those ranks, if we don't find |
| one, we can stop. */ |
| |
| for (i = currindex + 1; |
| ops->iterate (i, &oe) |
| && oe->rank >= curr->rank - 1; |
| i++) |
| { |
| if (oe->op == notop) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Equivalence: "); |
| print_generic_expr (dump_file, notop); |
| if (opcode == BIT_AND_EXPR) |
| fprintf (dump_file, " & ~"); |
| else if (opcode == BIT_IOR_EXPR) |
| fprintf (dump_file, " | ~"); |
| print_generic_expr (dump_file, oe->op); |
| if (opcode == BIT_AND_EXPR) |
| fprintf (dump_file, " -> 0\n"); |
| else if (opcode == BIT_IOR_EXPR) |
| fprintf (dump_file, " -> -1\n"); |
| } |
| |
| if (opcode == BIT_AND_EXPR) |
| oe->op = build_zero_cst (TREE_TYPE (oe->op)); |
| else if (opcode == BIT_IOR_EXPR) |
| oe->op = build_all_ones_cst (TREE_TYPE (oe->op)); |
| |
| reassociate_stats.ops_eliminated += ops->length () - 1; |
| ops->truncate (0); |
| ops->quick_push (oe); |
| return true; |
| } |
| } |
| |
| return false; |
| } |
| |
| /* Use constant value that may be present in OPS to try to eliminate |
| operands. Note that this function is only really used when we've |
| eliminated ops for other reasons, or merged constants. Across |
| single statements, fold already does all of this, plus more. There |
| is little point in duplicating logic, so I've only included the |
| identities that I could ever construct testcases to trigger. */ |
| |
| static void |
| eliminate_using_constants (enum tree_code opcode, |
| vec<operand_entry *> *ops) |
| { |
| operand_entry *oelast = ops->last (); |
| tree type = TREE_TYPE (oelast->op); |
| |
| if (oelast->rank == 0 |
| && (ANY_INTEGRAL_TYPE_P (type) || FLOAT_TYPE_P (type))) |
| { |
| switch (opcode) |
| { |
| case BIT_AND_EXPR: |
| if (integer_zerop (oelast->op)) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found & 0, removing all other ops\n"); |
| |
| reassociate_stats.ops_eliminated += ops->length () - 1; |
| |
| ops->truncate (0); |
| ops->quick_push (oelast); |
| return; |
| } |
| } |
| else if (integer_all_onesp (oelast->op)) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found & -1, removing\n"); |
| ops->pop (); |
| reassociate_stats.ops_eliminated++; |
| } |
| } |
| break; |
| case BIT_IOR_EXPR: |
| if (integer_all_onesp (oelast->op)) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found | -1, removing all other ops\n"); |
| |
| reassociate_stats.ops_eliminated += ops->length () - 1; |
| |
| ops->truncate (0); |
| ops->quick_push (oelast); |
| return; |
| } |
| } |
| else if (integer_zerop (oelast->op)) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found | 0, removing\n"); |
| ops->pop (); |
| reassociate_stats.ops_eliminated++; |
| } |
| } |
| break; |
| case MULT_EXPR: |
| if (integer_zerop (oelast->op) |
| || (FLOAT_TYPE_P (type) |
| && !HONOR_NANS (type) |
| && !HONOR_SIGNED_ZEROS (type) |
| && real_zerop (oelast->op))) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found * 0, removing all other ops\n"); |
| |
| reassociate_stats.ops_eliminated += ops->length () - 1; |
| ops->truncate (0); |
| ops->quick_push (oelast); |
| return; |
| } |
| } |
| else if (integer_onep (oelast->op) |
| || (FLOAT_TYPE_P (type) |
| && !HONOR_SNANS (type) |
| && real_onep (oelast->op))) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found * 1, removing\n"); |
| ops->pop (); |
| reassociate_stats.ops_eliminated++; |
| return; |
| } |
| } |
| break; |
| case BIT_XOR_EXPR: |
| case PLUS_EXPR: |
| case MINUS_EXPR: |
| if (integer_zerop (oelast->op) |
| || (FLOAT_TYPE_P (type) |
| && (opcode == PLUS_EXPR || opcode == MINUS_EXPR) |
| && fold_real_zero_addition_p (type, 0, oelast->op, |
| opcode == MINUS_EXPR))) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found [|^+] 0, removing\n"); |
| ops->pop (); |
| reassociate_stats.ops_eliminated++; |
| return; |
| } |
| } |
| break; |
| default: |
| break; |
| } |
| } |
| } |
| |
| |
| static void linearize_expr_tree (vec<operand_entry *> *, gimple *, |
| bool, bool); |
| |
| /* Structure for tracking and counting operands. */ |
| struct oecount { |
| unsigned int cnt; |
| unsigned int id; |
| enum tree_code oecode; |
| tree op; |
| }; |
| |
| |
| /* The heap for the oecount hashtable and the sorted list of operands. */ |
| static vec<oecount> cvec; |
| |
| |
| /* Oecount hashtable helpers. */ |
| |
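| /* Entries in the oecount hash table are indices into CVEC biased by 42, |
| so that the values 0 and 1, which int_hash reserves for empty and |
| deleted slots, never collide with a real entry. */ |
| |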
| struct oecount_hasher : int_hash <int, 0, 1> |
| { |
| static inline hashval_t hash (int); |
| static inline bool equal (int, int); |
| }; |
| |
| /* Hash function for oecount. */ |
| |
| inline hashval_t |
| oecount_hasher::hash (int p) |
| { |
| const oecount *c = &cvec[p - 42]; |
| return htab_hash_pointer (c->op) ^ (hashval_t)c->oecode; |
| } |
| |
| /* Comparison function for oecount. */ |
| |
| inline bool |
| oecount_hasher::equal (int p1, int p2) |
| { |
| const oecount *c1 = &cvec[p1 - 42]; |
| const oecount *c2 = &cvec[p2 - 42]; |
| return c1->oecode == c2->oecode && c1->op == c2->op; |
| } |
| |
| /* Comparison function for qsort sorting oecount elements by count. */ |
| |
| static int |
| oecount_cmp (const void *p1, const void *p2) |
| { |
| const oecount *c1 = (const oecount *)p1; |
| const oecount *c2 = (const oecount *)p2; |
| if (c1->cnt != c2->cnt) |
| return c1->cnt > c2->cnt ? 1 : -1; |
| else |
| /* If counts are identical, use unique IDs to stabilize qsort. */ |
| return c1->id > c2->id ? 1 : -1; |
| } |
| |
| /* Return TRUE iff STMT represents a builtin call that raises OP |
| to some exponent. */ |
| |
| static bool |
| stmt_is_power_of_op (gimple *stmt, tree op) |
| { |
| if (!is_gimple_call (stmt)) |
| return false; |
| |
| switch (gimple_call_combined_fn (stmt)) |
| { |
| CASE_CFN_POW: |
| CASE_CFN_POWI: |
| return (operand_equal_p (gimple_call_arg (stmt, 0), op, 0)); |
| |
| default: |
| return false; |
| } |
| } |
| |
| /* Given STMT which is a __builtin_pow* call, decrement its exponent |
| in place and return the result. Assumes that stmt_is_power_of_op |
| was previously called for STMT and returned TRUE. */ |
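| /* For example, __builtin_powi (x, 5) is rewritten in place to |
| __builtin_powi (x, 4) and 4 is returned. */ |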
| |
| static HOST_WIDE_INT |
| decrement_power (gimple *stmt) |
| { |
| REAL_VALUE_TYPE c, cint; |
| HOST_WIDE_INT power; |
| tree arg1; |
| |
| switch (gimple_call_combined_fn (stmt)) |
| { |
| CASE_CFN_POW: |
| arg1 = gimple_call_arg (stmt, 1); |
| c = TREE_REAL_CST (arg1); |
| power = real_to_integer (&c) - 1; |
| real_from_integer (&cint, VOIDmode, power, SIGNED); |
| gimple_call_set_arg (stmt, 1, build_real (TREE_TYPE (arg1), cint)); |
| return power; |
| |
| CASE_CFN_POWI: |
| arg1 = gimple_call_arg (stmt, 1); |
| power = TREE_INT_CST_LOW (arg1) - 1; |
| gimple_call_set_arg (stmt, 1, build_int_cst (TREE_TYPE (arg1), power)); |
| return power; |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| /* Give the LHS of STMT a new SSA name and replace all uses of the old |
| name with it (using a debug temporary for debug stmts). Also return |
| the new SSA name. */ |
| |
| static tree |
| make_new_ssa_for_def (gimple *stmt, enum tree_code opcode, tree op) |
| { |
| gimple *use_stmt; |
| use_operand_p use; |
| imm_use_iterator iter; |
| tree new_lhs, new_debug_lhs = NULL_TREE; |
| tree lhs = gimple_get_lhs (stmt); |
| |
| new_lhs = make_ssa_name (TREE_TYPE (lhs)); |
| gimple_set_lhs (stmt, new_lhs); |
| |
| /* Also need to update GIMPLE_DEBUGs. */ |
| FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs) |
| { |
| tree repl = new_lhs; |
| if (is_gimple_debug (use_stmt)) |
| { |
| if (new_debug_lhs == NULL_TREE) |
| { |
| new_debug_lhs = make_node (DEBUG_EXPR_DECL); |
| gdebug *def_temp |
| = gimple_build_debug_bind (new_debug_lhs, |
| build2 (opcode, TREE_TYPE (lhs), |
| new_lhs, op), |
| stmt); |
| DECL_ARTIFICIAL (new_debug_lhs) = 1; |
| TREE_TYPE (new_debug_lhs) = TREE_TYPE (lhs); |
| SET_DECL_MODE (new_debug_lhs, TYPE_MODE (TREE_TYPE (lhs))); |
| gimple_set_uid (def_temp, gimple_uid (stmt)); |
| gimple_stmt_iterator gsi = gsi_for_stmt (stmt); |
| gsi_insert_after (&gsi, def_temp, GSI_SAME_STMT); |
| } |
| repl = new_debug_lhs; |
| } |
| FOR_EACH_IMM_USE_ON_STMT (use, iter) |
| SET_USE (use, repl); |
| update_stmt (use_stmt); |
| } |
| return new_lhs; |
| } |
| |
| /* Replace the SSA names defined by the stmts in STMTS_TO_FIX with new |
| SSA names and update their uses. Also do this for the stmt that |
| defines *DEF if *DEF is not OP. */ |
| |
| static void |
| make_new_ssa_for_all_defs (tree *def, enum tree_code opcode, tree op, |
| vec<gimple *> &stmts_to_fix) |
| { |
| unsigned i; |
| gimple *stmt; |
| |
| if (*def != op |
| && TREE_CODE (*def) == SSA_NAME |
| && (stmt = SSA_NAME_DEF_STMT (*def)) |
| && gimple_code (stmt) != GIMPLE_NOP) |
| *def = make_new_ssa_for_def (stmt, opcode, op); |
| |
| FOR_EACH_VEC_ELT (stmts_to_fix, i, stmt) |
| make_new_ssa_for_def (stmt, opcode, op); |
| } |
| |
| /* Find the single immediate use of STMT's LHS, and replace it |
| with OP. Remove STMT. If STMT's LHS is the same as *DEF, |
| replace *DEF with OP as well. */ |
| |
| static void |
| propagate_op_to_single_use (tree op, gimple *stmt, tree *def) |
| { |
| tree lhs; |
| gimple *use_stmt; |
| use_operand_p use; |
| gimple_stmt_iterator gsi; |
| |
| if (is_gimple_call (stmt)) |
| lhs = gimple_call_lhs (stmt); |
| else |
| lhs = gimple_assign_lhs (stmt); |
| |
| gcc_assert (has_single_use (lhs)); |
| single_imm_use (lhs, &use, &use_stmt); |
| if (lhs == *def) |
| *def = op; |
| SET_USE (use, op); |
| if (TREE_CODE (op) != SSA_NAME) |
| update_stmt (use_stmt); |
| gsi = gsi_for_stmt (stmt); |
| unlink_stmt_vdef (stmt); |
| reassoc_remove_stmt (&gsi); |
| release_defs (stmt); |
| } |
| |
| /* Walk the linear chain with result *DEF, searching for an operation |
| with operand OP and code OPCODE, and remove it from the chain. *DEF |
| is updated if there is only one operand but no operation left. */ |
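| |
| /* For example, if *DEF is defined by the chain t = a * b; *DEF = t * c, |
| calling this with OPCODE MULT_EXPR and OP b leaves a chain computing |
| a * c, with *DEF renamed to a fresh SSA name (see PR72835). */ |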
| |
| static void |
| zero_one_operation (tree *def, enum tree_code opcode, tree op) |
| { |
| tree orig_def = *def; |
| gimple *stmt = SSA_NAME_DEF_STMT (*def); |
| /* PR72835 - Record the stmt chain that has to be updated such that |
| we don't use the same LHS when the values computed are different. */ |
| auto_vec<gimple *, 64> stmts_to_fix; |
| |
| do |
| { |
| tree name; |
| |
| if (opcode == MULT_EXPR) |
| { |
| if (stmt_is_power_of_op (stmt, op)) |
| { |
| if (decrement_power (stmt) == 1) |
| { |
| if (stmts_to_fix.length () > 0) |
| stmts_to_fix.pop (); |
| propagate_op_to_single_use (op, stmt, def); |
| } |
| break; |
| } |
| else if (gimple_assign_rhs_code (stmt) == NEGATE_EXPR) |
| { |
| if (gimple_assign_rhs1 (stmt) == op) |
| { |
| tree cst = build_minus_one_cst (TREE_TYPE (op)); |
| if (stmts_to_fix.length () > 0) |
| stmts_to_fix.pop (); |
| propagate_op_to_single_use (cst, stmt, def); |
| break; |
| } |
| else if (integer_minus_onep (op) |
| || real_minus_onep (op)) |
| { |
| gimple_assign_set_rhs_code |
| (stmt, TREE_CODE (gimple_assign_rhs1 (stmt))); |
| break; |
| } |
| } |
| } |
| |
| name = gimple_assign_rhs1 (stmt); |
| |
| /* If this is the operation we look for and one of the operands |
| is ours simply propagate the other operand into the stmts |
| single use. */ |
| if (gimple_assign_rhs_code (stmt) == opcode |
| && (name == op |
| || gimple_assign_rhs2 (stmt) == op)) |
| { |
| if (name == op) |
| name = gimple_assign_rhs2 (stmt); |
| if (stmts_to_fix.length () > 0) |
| stmts_to_fix.pop (); |
| propagate_op_to_single_use (name, stmt, def); |
| break; |
| } |
| |
| /* We might have a multiply of two __builtin_pow* calls, and |
| the operand might be hiding in the rightmost one. Likewise |
| this can happen for a negate. */ |
| if (opcode == MULT_EXPR |
| && gimple_assign_rhs_code (stmt) == opcode |
| && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME |
| && has_single_use (gimple_assign_rhs2 (stmt))) |
| { |
| gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt)); |
| if (stmt_is_power_of_op (stmt2, op)) |
| { |
| if (decrement_power (stmt2) == 1) |
| propagate_op_to_single_use (op, stmt2, def); |
| else |
| stmts_to_fix.safe_push (stmt2); |
| break; |
| } |
| else if (is_gimple_assign (stmt2) |
| && gimple_assign_rhs_code (stmt2) == NEGATE_EXPR) |
| { |
| if (gimple_assign_rhs1 (stmt2) == op) |
| { |
| tree cst = build_minus_one_cst (TREE_TYPE (op)); |
| propagate_op_to_single_use (cst, stmt2, def); |
| break; |
| } |
| else if (integer_minus_onep (op) |
| || real_minus_onep (op)) |
| { |
| stmts_to_fix.safe_push (stmt2); |
| gimple_assign_set_rhs_code |
| (stmt2, TREE_CODE (gimple_assign_rhs1 (stmt2))); |
| break; |
| } |
| } |
| } |
| |
| /* Continue walking the chain. */ |
| gcc_assert (name != op |
| && TREE_CODE (name) == SSA_NAME); |
| stmt = SSA_NAME_DEF_STMT (name); |
| stmts_to_fix.safe_push (stmt); |
| } |
| while (1); |
| |
| if (stmts_to_fix.length () > 0 || *def == orig_def) |
| make_new_ssa_for_all_defs (def, opcode, op, stmts_to_fix); |
| } |
| |
| /* Returns true if statement S1 dominates statement S2. Like |
| stmt_dominates_stmt_p, but uses stmt UIDs to optimize. */ |
| |
| static bool |
| reassoc_stmt_dominates_stmt_p (gimple *s1, gimple *s2) |
| { |
| basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2); |
| |
| /* If bb1 is NULL, it should be a GIMPLE_NOP def stmt of an (D) |
| SSA_NAME. Assume it lives at the beginning of function and |
| thus dominates everything. */ |
| if (!bb1 || s1 == s2) |
| return true; |
| |
| /* If bb2 is NULL, it doesn't dominate any stmt with a bb. */ |
| if (!bb2) |
| return false; |
| |
| if (bb1 == bb2) |
| { |
| /* PHIs in the same basic block are assumed to be |
| executed all in parallel, if only one stmt is a PHI, |
| it dominates the other stmt in the same basic block. */ |
| if (gimple_code (s1) == GIMPLE_PHI) |
| return true; |
| |
| if (gimple_code (s2) == GIMPLE_PHI) |
| return false; |
| |
| gcc_assert (gimple_uid (s1) && gimple_uid (s2)); |
| |
| if (gimple_uid (s1) < gimple_uid (s2)) |
| return true; |
| |
| if (gimple_uid (s1) > gimple_uid (s2)) |
| return false; |
| |
| gimple_stmt_iterator gsi = gsi_for_stmt (s1); |
| unsigned int uid = gimple_uid (s1); |
| for (gsi_next (&gsi); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple *s = gsi_stmt (gsi); |
| if (gimple_uid (s) != uid) |
| break; |
| if (s == s2) |
| return true; |
| } |
| |
| return false; |
| } |
| |
| return dominated_by_p (CDI_DOMINATORS, bb2, bb1); |
| } |
| |
| /* Insert STMT after INSERT_POINT. */ |
| |
| static void |
| insert_stmt_after (gimple *stmt, gimple *insert_point) |
| { |
| gimple_stmt_iterator gsi; |
| basic_block bb; |
| |
| if (gimple_code (insert_point) == GIMPLE_PHI) |
| bb = gimple_bb (insert_point); |
| else if (!stmt_ends_bb_p (insert_point)) |
| { |
| gsi = gsi_for_stmt (insert_point); |
| gimple_set_uid (stmt, gimple_uid (insert_point)); |
| gsi_insert_after (&gsi, stmt, GSI_NEW_STMT); |
| return; |
| } |
| else if (gimple_code (insert_point) == GIMPLE_ASM) |
| /* We have no idea where to insert - it depends on where the |
| uses will be placed. */ |
| gcc_unreachable (); |
| else |
| /* We assume INSERT_POINT is a SSA_NAME_DEF_STMT of some SSA_NAME, |
| thus if it must end a basic block, it should be a call that can |
| throw, or some assignment that can throw. If it throws, the LHS |
| of it will not be initialized though, so only valid places using |
| the SSA_NAME should be dominated by the fallthru edge. */ |
| bb = find_fallthru_edge (gimple_bb (insert_point)->succs)->dest; |
| gsi = gsi_after_labels (bb); |
| if (gsi_end_p (gsi)) |
| { |
| gimple_stmt_iterator gsi2 = gsi_last_bb (bb); |
| gimple_set_uid (stmt, |
| gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2))); |
| } |
| else |
| gimple_set_uid (stmt, gimple_uid (gsi_stmt (gsi))); |
| gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); |
| } |
| |
| /* Builds one statement performing OP1 OPCODE OP2, creating a new SSA name |
| for the result. Places the statement after the definition of either |
| OP1 or OP2. Returns the new statement. */ |
| |
| static gimple * |
| build_and_add_sum (tree type, tree op1, tree op2, enum tree_code opcode) |
| { |
| gimple *op1def = NULL, *op2def = NULL; |
| gimple_stmt_iterator gsi; |
| tree op; |
| gassign *sum; |
| |
| /* Create the addition statement. */ |
| op = make_ssa_name (type); |
| sum = gimple_build_assign (op, opcode, op1, op2); |
| |
| /* Find an insertion place and insert. */ |
| if (TREE_CODE (op1) == SSA_NAME) |
| op1def = SSA_NAME_DEF_STMT (op1); |
| if (TREE_CODE (op2) == SSA_NAME) |
| op2def = SSA_NAME_DEF_STMT (op2); |
| if ((!op1def || gimple_nop_p (op1def)) |
| && (!op2def || gimple_nop_p (op2def))) |
| { |
| gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))); |
| if (gsi_end_p (gsi)) |
| { |
| gimple_stmt_iterator gsi2 |
| = gsi_last_bb (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))); |
| gimple_set_uid (sum, |
| gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2))); |
| } |
| else |
| gimple_set_uid (sum, gimple_uid (gsi_stmt (gsi))); |
| gsi_insert_before (&gsi, sum, GSI_NEW_STMT); |
| } |
| else |
| { |
| gimple *insert_point; |
| if ((!op1def || gimple_nop_p (op1def)) |
| || (op2def && !gimple_nop_p (op2def) |
| && reassoc_stmt_dominates_stmt_p (op1def, op2def))) |
| insert_point = op2def; |
| else |
| insert_point = op1def; |
| insert_stmt_after (sum, insert_point); |
| } |
| update_stmt (sum); |
| |
| return sum; |
| } |
| |
| /* Perform un-distribution of divisions and multiplications. |
| A * X + B * X is transformed into (A + B) * X and A / X + B / X |
| to (A + B) / X for real X. |
| |
| The algorithm is organized as follows. |
| |
| - First we walk the addition chain *OPS looking for summands that |
| are defined by a multiplication or a real division. This results |
| in the candidates bitmap with relevant indices into *OPS. |
| |
| - Second we build the chains of multiplications or divisions for |
| these candidates, counting the number of occurrences of (operand, code) |
| pairs in all of the candidates chains. |
| |
| - Third we sort the (operand, code) pairs by number of occurrence and |
| process them starting with the pair with the most uses. |
| |
| * For each such pair we walk the candidates again to build a |
| second candidate bitmap noting all multiplication/division chains |
| that have at least one occurrence of (operand, code). |
| |
| * We build an alternate addition chain only covering these |
| candidates with one (operand, code) operation removed from their |
| multiplication/division chain. |
| |
| * The first candidate gets replaced by the alternate addition chain |
| multiplied/divided by the operand. |
| |
| * All candidate chains get disabled for further processing and |
| processing of (operand, code) pairs continues. |
| |
| The alternate addition chains built are re-processed by the main |
| reassociation algorithm which allows optimizing a * x * y + b * y * x |
| to (a + b ) * x * y in one invocation of the reassociation pass. */ |
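| |
| /* For instance, for the chain a*x + b*x + c the pair (x, MULT_EXPR) |
| occurs twice, so the two multiplication summands are rewritten into |
| the alternate chain (a + b), the list becomes (a + b)*x + 0 + c, and |
| the new addition a + b is reassociated again by the caller. */ |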
| |
| static bool |
| undistribute_ops_list (enum tree_code opcode, |
| vec<operand_entry *> *ops, class loop *loop) |
| { |
| unsigned int length = ops->length (); |
| operand_entry *oe1; |
| unsigned i, j; |
| unsigned nr_candidates, nr_candidates2; |
| sbitmap_iterator sbi0; |
| vec<operand_entry *> *subops; |
| bool changed = false; |
| unsigned int next_oecount_id = 0; |
| |
| if (length <= 1 |
| || opcode != PLUS_EXPR) |
| return false; |
| |
| /* Build a list of candidates to process. */ |
| auto_sbitmap candidates (length); |
| bitmap_clear (candidates); |
| nr_candidates = 0; |
| FOR_EACH_VEC_ELT (*ops, i, oe1) |
| { |
| enum tree_code dcode; |
| gimple *oe1def; |
| |
| if (TREE_CODE (oe1->op) != SSA_NAME) |
| continue; |
| oe1def = SSA_NAME_DEF_STMT (oe1->op); |
| if (!is_gimple_assign (oe1def)) |
| continue; |
| dcode = gimple_assign_rhs_code (oe1def); |
| if ((dcode != MULT_EXPR |
| && dcode != RDIV_EXPR) |
| || !is_reassociable_op (oe1def, dcode, loop)) |
| continue; |
| |
| bitmap_set_bit (candidates, i); |
| nr_candidates++; |
| } |
| |
| if (nr_candidates < 2) |
| return false; |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "searching for un-distribute opportunities "); |
| print_generic_expr (dump_file, |
| (*ops)[bitmap_first_set_bit (candidates)]->op, TDF_NONE); |
| fprintf (dump_file, " %d\n", nr_candidates); |
| } |
| |
| /* Build linearized sub-operand lists and the counting table. */ |
| cvec.create (0); |
| |
| hash_table<oecount_hasher> ctable (15); |
| |
| /* ??? Macro arguments cannot have multi-argument template types in |
| them. This typedef is needed to work around that limitation. */ |
| typedef vec<operand_entry *> vec_operand_entry_t_heap; |
| subops = XCNEWVEC (vec_operand_entry_t_heap, ops->length ()); |
| EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0) |
| { |
| gimple *oedef; |
| enum tree_code oecode; |
| unsigned j; |
| |
| oedef = SSA_NAME_DEF_STMT ((*ops)[i]->op); |
| oecode = gimple_assign_rhs_code (oedef); |
| linearize_expr_tree (&subops[i], oedef, |
| associative_tree_code (oecode), false); |
| |
| FOR_EACH_VEC_ELT (subops[i], j, oe1) |
| { |
| oecount c; |
| int *slot; |
| int idx; |
| c.oecode = oecode; |
| c.cnt = 1; |
| c.id = next_oecount_id++; |
| c.op = oe1->op; |
| cvec.safe_push (c); |
| idx = cvec.length () + 41; |
| slot = ctable.find_slot (idx, INSERT); |
| if (!*slot) |
| { |
| *slot = idx; |
| } |
| else |
| { |
| cvec.pop (); |
| cvec[*slot - 42].cnt++; |
| } |
| } |
| } |
| |
| /* Sort the counting table. */ |
| cvec.qsort (oecount_cmp); |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| oecount *c; |
| fprintf (dump_file, "Candidates:\n"); |
| FOR_EACH_VEC_ELT (cvec, j, c) |
| { |
| fprintf (dump_file, " %u %s: ", c->cnt, |
| c->oecode == MULT_EXPR |
| ? "*" : c->oecode == RDIV_EXPR ? "/" : "?"); |
| print_generic_expr (dump_file, c->op); |
| fprintf (dump_file, "\n"); |
| } |
| } |
| |
| /* Process the (operand, code) pairs in order of most occurrence. */ |
| auto_sbitmap candidates2 (length); |
| while (!cvec.is_empty ()) |
| { |
| oecount *c = &cvec.last (); |
| if (c->cnt < 2) |
| break; |
| |
| /* Now collect the operands in the outer chain that contain |
| the common operand in their inner chain. */ |
| bitmap_clear (candidates2); |
| nr_candidates2 = 0; |
| EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0) |
| { |
| gimple *oedef; |
| enum tree_code oecode; |
| unsigned j; |
| tree op = (*ops)[i]->op; |
| |
| /* If we undistributed in this chain already this may be |
| a constant. */ |
| if (TREE_CODE (op) != SSA_NAME) |
| continue; |
| |
| oedef = SSA_NAME_DEF_STMT (op); |
| oecode = gimple_assign_rhs_code (oedef); |
| if (oecode != c->oecode) |
| continue; |
| |
| FOR_EACH_VEC_ELT (subops[i], j, oe1) |
| { |
| if (oe1->op == c->op) |
| { |
| bitmap_set_bit (candidates2, i); |
| ++nr_candidates2; |
| break; |
| } |
| } |
| } |
| |
| if (nr_candidates2 >= 2) |
| { |
| operand_entry *oe1, *oe2; |
| gimple *prod; |
| int first = bitmap_first_set_bit (candidates2); |
| |
| /* Build the new addition chain. */ |
| oe1 = (*ops)[first]; |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Building ("); |
| print_generic_expr (dump_file, oe1->op); |
| } |
| zero_one_operation (&oe1->op, c->oecode, c->op); |
| EXECUTE_IF_SET_IN_BITMAP (candidates2, first+1, i, sbi0) |
| { |
| gimple *sum; |
| oe2 = (*ops)[i]; |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, " + "); |
| print_generic_expr (dump_file, oe2->op); |
| } |
| zero_one_operation (&oe2->op, c->oecode, c->op); |
| sum = build_and_add_sum (TREE_TYPE (oe1->op), |
| oe1->op, oe2->op, opcode); |
| oe2->op = build_zero_cst (TREE_TYPE (oe2->op)); |
| oe2->rank = 0; |
| oe1->op = gimple_get_lhs (sum); |
| } |
| |
| /* Apply the multiplication/division. */ |
| prod = build_and_add_sum (TREE_TYPE (oe1->op), |
| oe1->op, c->op, c->oecode); |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, ") %s ", c->oecode == MULT_EXPR ? "*" : "/"); |
| print_generic_expr (dump_file, c->op); |
| fprintf (dump_file, "\n"); |
| } |
| |
| /* Record it in the addition chain and disable further |
| undistribution with this op. */ |
| oe1->op = gimple_assign_lhs (prod); |
| oe1->rank = get_rank (oe1->op); |
| subops[first].release (); |
| |
| changed = true; |
| } |
| |
| cvec.pop (); |
| } |
| |
| for (i = 0; i < ops->length (); ++i) |
| subops[i].release (); |
| free (subops); |
| cvec.release (); |
| |
| return changed; |
| } |
| |
| /* Pair holding the information for one relevant BIT_FIELD_REF of a |
| specific VECTOR_TYPE SSA_NAME: |
| first: the element index of the BIT_FIELD_REF within the vector. |
| second: the index into the vec<operand_entry *> *OPS for that |
| BIT_FIELD_REF. */ |
| typedef std::pair<unsigned, unsigned> v_info_elem; |
| struct v_info { |
| tree vec_type; |
| auto_vec<v_info_elem, 32> vec; |
| }; |
| typedef v_info *v_info_ptr; |
| |
| /* Comparison function for qsort on VECTOR SSA_NAME trees by machine mode. */ |
| static int |
| sort_by_mach_mode (const void *p_i, const void *p_j) |
| { |
| const tree tr1 = *((const tree *) p_i); |
| const tree tr2 = *((const tree *) p_j); |
| unsigned int mode1 = TYPE_MODE (TREE_TYPE (tr1)); |
| unsigned int mode2 = TYPE_MODE (TREE_TYPE (tr2)); |
| if (mode1 > mode2) |
| return 1; |
| else if (mode1 < mode2) |
| return -1; |
| if (SSA_NAME_VERSION (tr1) < SSA_NAME_VERSION (tr2)) |
| return -1; |
| else if (SSA_NAME_VERSION (tr1) > SSA_NAME_VERSION (tr2)) |
| return 1; |
| return 0; |
| } |
| |
| /* Cleanup hash map for VECTOR information. */ |
| static void |
| cleanup_vinfo_map (hash_map<tree, v_info_ptr> &info_map) |
| { |
| for (hash_map<tree, v_info_ptr>::iterator it = info_map.begin (); |
| it != info_map.end (); ++it) |
| { |
| v_info_ptr info = (*it).second; |
| delete info; |
| (*it).second = NULL; |
| } |
| } |
| |
| /* Perform un-distribution of BIT_FIELD_REF on VECTOR_TYPE. |
| V1[0] + V1[1] + ... + V1[k] + V2[0] + V2[1] + ... + V2[k] + ... Vn[k] |
| is transformed to |
| Vs = (V1 + V2 + ... + Vn) |
| Vs[0] + Vs[1] + ... + Vs[k] |
| |
| The basic steps are listed below: |
| |
| 1) Scan the addition chain *OPS for summands that come from a |
| BIT_FIELD_REF on a VECTOR type. For each such summand, record the |
| information in v_info_map, using the VECTOR SSA_NAME as the key. |
| |
| 2) For each key (VECTOR SSA_NAME), validate that all its BIT_FIELD_REFs |
| are contiguous and cover the whole VECTOR without any holes. Collect |
| the VECTORs that are candidates to be transformed. |
| |
| 3) Sort the VECTOR list by the machine mode of the VECTOR type; for each |
| group of candidates with the same mode, build the addition statements |
| for them and generate BIT_FIELD_REFs accordingly. |
| |
| TODO: |
| The current implementation requires that each whole VECTOR be fully |
| covered. It could be extended to support partial coverage (adjacent |
| elements that do not fill the whole vector), but that would need some |
| cost model to decide where to draw the boundary. |
| */ |
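| |
| /* As an illustration only (a sketch with hypothetical variables, not |
| code produced by this pass): assuming two GCC vector-extension values |
| v and w of a 4 x float type whose elements are all summed, |
| |
| s = v[0] + v[1] + v[2] + v[3] + w[0] + w[1] + w[2] + w[3]; |
| |
| the transformation described above conceptually rewrites the chain as |
| |
| tmp = v + w; |
| s = tmp[0] + tmp[1] + tmp[2] + tmp[3]; |
| |
| so summing n fully-covered VECTORs needs only n - 1 vector additions |
| followed by one final chain of element extractions. */ |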
| static bool |
| undistribute_bitref_for_vector (enum tree_code opcode, |
| vec<operand_entry *> *ops, struct loop *loop) |
| { |
| if (ops->length () <= 1) |
| return false; |
| |
| if (opcode != PLUS_EXPR |
| && opcode != MULT_EXPR |
| && opcode != BIT_XOR_EXPR |
| && opcode != BIT_IOR_EXPR |
| && opcode != BIT_AND_EXPR) |
| return false; |
| |
| hash_map<tree, v_info_ptr> v_info_map; |
| operand_entry *oe1; |
| unsigned i; |
| |
| /* Find the summands in the addition chain that come from a VECTOR |
| BIT_FIELD_REF and record their information in the map. */ |
| FOR_EACH_VEC_ELT (*ops, i, oe1) |
| { |
| enum tree_code dcode; |
| gimple *oe1def; |
| |
| if (TREE_CODE (oe1->op) != SSA_NAME) |
| continue; |
| oe1def = SSA_NAME_DEF_STMT (oe1->op); |
| if (!is_gimple_assign (oe1def)) |
| continue; |
| dcode = gimple_assign_rhs_code (oe1def); |
| if (dcode != BIT_FIELD_REF || !is_reassociable_op (oe1def, dcode, loop)) |
| continue; |
| |
| tree rhs = gimple_assign_rhs1 (oe1def); |
| tree vec = TREE_OPERAND (rhs, 0); |
| tree vec_type = TREE_TYPE (vec); |
| |
| if (TREE_CODE (vec) != SSA_NAME || !VECTOR_TYPE_P (vec_type)) |
| continue; |
| |
| /* Ignore it if target machine can't support this VECTOR type. */ |
| if (!VECTOR_MODE_P (TYPE_MODE (vec_type))) |
| continue; |
| |
| /* Check const vector type, constrain BIT_FIELD_REF offset and size. */ |
| if (!TYPE_VECTOR_SUBPARTS (vec_type).is_constant ()) |
| continue; |
| |
| if (VECTOR_TYPE_P (TREE_TYPE (rhs)) |
| || !is_a <scalar_mode> (TYPE_MODE (TREE_TYPE (rhs)))) |
| continue; |
| |
| /* The type of BIT_FIELD_REF might not be equal to the element type of |
| the vector. We want to use a vector type with element type the |
| same as the BIT_FIELD_REF and size the same as TREE_TYPE (vec). */ |
| if (!useless_type_conversion_p (TREE_TYPE (rhs), TREE_TYPE (vec_type))) |
| { |
| machine_mode simd_mode; |
| unsigned HOST_WIDE_INT size, nunits; |
| unsigned HOST_WIDE_INT elem_size |
| = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (rhs))); |
| if (!GET_MODE_BITSIZE (TYPE_MODE (vec_type)).is_constant (&size)) |
| continue; |
| if (size <= elem_size || (size % elem_size) != 0) |
| continue; |
| nunits = size / elem_size; |
| if (!mode_for_vector (SCALAR_TYPE_MODE (TREE_TYPE (rhs)), |
| nunits).exists (&simd_mode)) |
| continue; |
| vec_type = build_vector_type_for_mode (TREE_TYPE (rhs), simd_mode); |
| |
| /* Ignore it if target machine can't support this VECTOR type. */ |
| if (!VECTOR_MODE_P (TYPE_MODE (vec_type))) |
| continue; |
| |
| /* Check const vector type, constrain BIT_FIELD_REF offset and |
| size. */ |
| if (!TYPE_VECTOR_SUBPARTS (vec_type).is_constant ()) |
| continue; |
| |
| if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vec_type)), |
| GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (vec))))) |
| continue; |
| } |
| |
| tree elem_type = TREE_TYPE (vec_type); |
| unsigned HOST_WIDE_INT elem_size = tree_to_uhwi (TYPE_SIZE (elem_type)); |
| if (maybe_ne (bit_field_size (rhs), elem_size)) |
| continue; |
| |
| unsigned idx; |
| if (!constant_multiple_p (bit_field_offset (rhs), elem_size, &idx)) |
| continue; |
| |
| /* Ignore it if target machine can't support this type of VECTOR |
| operation. */ |
| optab op_tab = optab_for_tree_code (opcode, vec_type, optab_vector); |
| if (optab_handler (op_tab, TYPE_MODE (vec_type)) == CODE_FOR_nothing) |
| continue; |
| |
| bool existed; |
| v_info_ptr &info = v_info_map.get_or_insert (vec, &existed); |
| if (!existed) |
| { |
| info = new v_info; |
| info->vec_type = vec_type; |
| } |
| else if (!types_compatible_p (vec_type, info->vec_type)) |
| continue; |
| info->vec.safe_push (std::make_pair (idx, i)); |
| } |
| |
| /* We need at least two VECTORs to combine. */ |
| if (v_info_map.elements () <= 1) |
| { |
| cleanup_vinfo_map (v_info_map); |
| return false; |
| } |
| |
| /* Verify all VECTOR candidates by checking two conditions: |
| 1) the sorted offsets are adjacent, with no holes; |
| 2) together they fill the whole VECTOR. |
| Then add the valid candidates to a vector for further handling. */ |
| auto_vec<tree> valid_vecs (v_info_map.elements ()); |
| for (hash_map<tree, v_info_ptr>::iterator it = v_info_map.begin (); |
| it != v_info_map.end (); ++it) |
| { |
| tree cand_vec = (*it).first; |
| v_info_ptr cand_info = (*it).second; |
| unsigned int num_elems |
| = TYPE_VECTOR_SUBPARTS (cand_info->vec_type).to_constant (); |
| if (cand_info->vec.length () != num_elems) |
| continue; |
| sbitmap holes = sbitmap_alloc (num_elems); |
| bitmap_ones (holes); |
| bool valid = true; |
| v_info_elem *curr; |
| FOR_EACH_VEC_ELT (cand_info->vec, i, curr) |
| { |
| if (!bitmap_bit_p (holes, curr->first)) |
| { |
| valid = false; |
| break; |
| } |
| else |
| bitmap_clear_bit (holes, curr->first); |
| } |
| if (valid && bitmap_empty_p (holes)) |
| valid_vecs.quick_push (cand_vec); |
| sbitmap_free (holes); |
| } |
| |
| /* We need at least two VECTORs to combine. */ |
| if (valid_vecs.length () <= 1) |
| { |
| cleanup_vinfo_map (v_info_map); |
| return false; |
| } |
| |
| valid_vecs.qsort (sort_by_mach_mode); |
| /* Go through all candidates in machine mode order; skip any candidate |
| that is the only one with its mode. */ |
| for (unsigned i = 0; i < valid_vecs.length () - 1; ++i) |
| { |
| tree tvec = valid_vecs[i]; |
| enum machine_mode mode = TYPE_MODE (TREE_TYPE (tvec)); |
| |
| /* Skip modes with only a single candidate. */ |
| if (TYPE_MODE (TREE_TYPE (valid_vecs[i + 1])) != mode) |
| continue; |
| |
| unsigned int idx, j; |
| gimple *sum = NULL; |
| tree sum_vec = tvec; |
| v_info_ptr info_ptr = *(v_info_map.get (tvec)); |
| v_info_elem *elem; |
| tree vec_type = info_ptr->vec_type; |
| |
| /* Build the sum for all candidates with the same mode. */ |
| do |
| { |
| sum = build_and_add_sum (vec_type, sum_vec, |
| valid_vecs[i + 1], opcode); |
| if (!useless_type_conversion_p (vec_type, |
| TREE_TYPE (valid_vecs[i + 1]))) |
| { |
| /* Update the operands only after build_and_add_sum, |
| so that we don't have to repeat the placement algorithm |
| of build_and_add_sum. */ |
| gimple_stmt_iterator gsi = gsi_for_stmt (sum); |
| tree vce = build1 (VIEW_CONVERT_EXPR, vec_type, |
| valid_vecs[i + 1]); |
| tree lhs = make_ssa_name (vec_type); |
| gimple *g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR, vce); |
| gimple_set_uid (g, gimple_uid (sum)); |
| gsi_insert_before (&gsi, g, GSI_NEW_STMT); |
| gimple_assign_set_rhs2 (sum, lhs); |
| if (sum_vec == tvec) |
| { |
| vce = build1 (VIEW_CONVERT_EXPR, vec_type, sum_vec); |
| lhs = make_ssa_name (vec_type); |
| g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR, vce); |
| gimple_set_uid (g, gimple_uid (sum)); |
| gsi_insert_before (&gsi, g, GSI_NEW_STMT); |
| gimple_assign_set_rhs1 (sum, lhs); |
| } |
| update_stmt (sum); |
| } |
| sum_vec = gimple_get_lhs (sum); |
| info_ptr = *(v_info_map.get (valid_vecs[i + 1])); |
| gcc_assert (types_compatible_p (vec_type, info_ptr->vec_type)); |
| /* Update the related ops of the current candidate VECTOR. */ |
| FOR_EACH_VEC_ELT (info_ptr->vec, j, elem) |
| { |
| idx = elem->second; |
| gimple *def = SSA_NAME_DEF_STMT ((*ops)[idx]->op); |
| /* Mark the def as visited so that the op definition will get DCEd |
| later. */ |
| gimple_set_visited (def, true); |
| if (opcode == PLUS_EXPR |
| || opcode == BIT_XOR_EXPR |
| || opcode == BIT_IOR_EXPR) |
| (*ops)[idx]->op = build_zero_cst (TREE_TYPE ((*ops)[idx]->op)); |
| else if (opcode == MULT_EXPR) |
| (*ops)[idx]->op = build_one_cst (TREE_TYPE ((*ops)[idx]->op)); |
| else |
| { |
| gcc_assert (opcode == BIT_AND_EXPR); |
| (*ops)[idx]->op |
| = build_all_ones_cst (TREE_TYPE ((*ops)[idx]->op)); |
| } |
| (*ops)[idx]->rank = 0; |
| } |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Generating addition -> "); |
| print_gimple_stmt (dump_file, sum, 0); |
| } |
| i++; |
| } |
| while ((i < valid_vecs.length () - 1) |
| && TYPE_MODE (TREE_TYPE (valid_vecs[i + 1])) == mode); |
| |
| /* Referring to the first valid VECTOR with this mode, generate the |
| BIT_FIELD_REF statements accordingly. */ |
| info_ptr = *(v_info_map.get (tvec)); |
| gcc_assert (sum); |
| tree elem_type = TREE_TYPE (vec_type); |
| FOR_EACH_VEC_ELT (info_ptr->vec, j, elem) |
| { |
| idx = elem->second; |
| tree dst = make_ssa_name (elem_type); |
| tree pos = bitsize_int (elem->first |
| * tree_to_uhwi (TYPE_SIZE (elem_type))); |
| tree bfr = build3 (BIT_FIELD_REF, elem_type, sum_vec, |
| TYPE_SIZE (elem_type), pos); |
| gimple *gs = gimple_build_assign (dst, BIT_FIELD_REF, bfr); |
| insert_stmt_after (gs, sum); |
| gimple *def = SSA_NAME_DEF_STMT ((*ops)[idx]->op); |
| /* Mark the def as visited so that the op definition will get DCEd |
| later. */ |
| gimple_set_visited (def, true); |
| (*ops)[idx]->op = gimple_assign_lhs (gs); |
| (*ops)[idx]->rank = get_rank ((*ops)[idx]->op); |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Generating bit_field_ref -> "); |
| print_gimple_stmt (dump_file, gs, 0); |
| } |
| } |
| } |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "undistributiong bit_field_ref for vector done.\n"); |
| |
| cleanup_vinfo_map (v_info_map); |
| |
| return true; |
| } |
| |
| /* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR and CURR is a comparison |
| expression, examine the other OPS to see if any of them are comparisons |
| of the same values, which we may be able to combine or eliminate. |
| For example, we can rewrite (a < b) | (a == b) as (a <= b). */ |
| |
| static bool |
| eliminate_redundant_comparison (enum tree_code opcode, |
| vec<operand_entry *> *ops, |
| unsigned int currindex, |
| operand_entry *curr) |
| { |
| tree op1, op2; |
| enum tree_code lcode, rcode; |
| gimple *def1, *def2; |
| int i; |
| operand_entry *oe; |
| |
| if (opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR) |
| return false; |
| |
| /* Check that CURR is a comparison. */ |
| if (TREE_CODE (curr->op) != SSA_NAME) |
| return false; |
| def1 = SSA_NAME_DEF_STMT (curr->op); |
| if (!is_gimple_assign (def1)) |
| return false; |
| lcode = gimple_assign_rhs_code (def1); |
| if (TREE_CODE_CLASS (lcode) != tcc_comparison) |
| return false; |
| op1 = gimple_assign_rhs1 (def1); |
| op2 = gimple_assign_rhs2 (def1); |
| |
| /* Now look for a similar comparison in the remaining OPS. */ |
| for (i = currindex + 1; ops->iterate (i, &oe); i++) |
| { |
| tree t; |
| |
| if (TREE_CODE (oe->op) != SSA_NAME) |
| continue; |
| def2 = SSA_NAME_DEF_STMT (oe->op); |
| if (!is_gimple_assign (def2)) |
| continue; |
| rcode = gimple_assign_rhs_code (def2); |
| if (TREE_CODE_CLASS (rcode) != tcc_comparison) |
| continue; |
| |
| /* If we got here, we have a match. See if we can combine the |
| two comparisons. */ |
| tree type = TREE_TYPE (gimple_assign_lhs (def1)); |
| if (opcode == BIT_IOR_EXPR) |
| t = maybe_fold_or_comparisons (type, |
| lcode, op1, op2, |
| rcode, gimple_assign_rhs1 (def2), |
| gimple_assign_rhs2 (def2)); |
| else |
| t = maybe_fold_and_comparisons (type, |
| lcode, op1, op2, |
| rcode, gimple_assign_rhs1 (def2), |
| gimple_assign_rhs2 (def2)); |
| if (!t) |
| continue; |
| |
| /* maybe_fold_and_comparisons and maybe_fold_or_comparisons |
| always give us a boolean_type_node value back. If the original |
| BIT_AND_EXPR or BIT_IOR_EXPR was of a wider integer type, |
| we need to convert. */ |
| if (!useless_type_conversion_p (TREE_TYPE (curr->op), TREE_TYPE (t))) |
| t = fold_convert (TREE_TYPE (curr->op), t); |
| |
| if (TREE_CODE (t) != INTEGER_CST |
| && !operand_equal_p (t, curr->op, 0)) |
| { |
| enum tree_code subcode; |
| tree newop1, newop2; |
| if (!COMPARISON_CLASS_P (t)) |
| continue; |
| extract_ops_from_tree (t, &subcode, &newop1, &newop2); |
| STRIP_USELESS_TYPE_CONVERSION (newop1); |
| STRIP_USELESS_TYPE_CONVERSION (newop2); |
| if (!is_gimple_val (newop1) || !is_gimple_val (newop2)) |
| continue; |
| } |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Equivalence: "); |
| print_generic_expr (dump_file, curr->op); |
| fprintf (dump_file, " %s ", op_symbol_code (opcode)); |
| print_generic_expr (dump_file, oe->op); |
| fprintf (dump_file, " -> "); |
| print_generic_expr (dump_file, t); |
| fprintf (dump_file, "\n"); |
| } |
| |
| /* Now we can delete oe, as it has been subsumed by the new combined |
| expression t. */ |
| ops->ordered_remove (i); |
| reassociate_stats.ops_eliminated++; |
| |
| /* If t is the same as curr->op, we're done. Otherwise we must |
| replace curr->op with t. Special case is if we got a constant |
| back, in which case we add it to the end instead of in place of |
| the current entry. */ |
| if (TREE_CODE (t) == INTEGER_CST) |
| { |
| ops->ordered_remove (currindex); |
| add_to_ops_vec (ops, t); |
| } |
| else if (!operand_equal_p (t, curr->op, 0)) |
| { |
| gimple *sum; |
| enum tree_code subcode; |
| tree newop1; |
| tree newop2; |
| gcc_assert (COMPARISON_CLASS_P (t)); |
| extract_ops_from_tree (t, &subcode, &newop1, &newop2); |
| STRIP_USELESS_TYPE_CONVERSION (newop1); |
| STRIP_USELESS_TYPE_CONVERSION (newop2); |
| gcc_checking_assert (is_gimple_val (newop1) |
| && is_gimple_val (newop2)); |
| sum = build_and_add_sum (TREE_TYPE (t), newop1, newop2, subcode); |
| curr->op = gimple_get_lhs (sum); |
| } |
| return true; |
| } |
| |
| return false; |
| } |
| |
| |
| /* Transform repeated addition of the same value into a multiplication |
| by a constant. */ |
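| |
| /* For illustration only (hypothetical variables; this assumes the |
| operand list has already been canonicalized so that equal operands |
| sit next to each other): a chain such as |
| |
| d = a + a + a + a + b; |
| |
| contains a run of four equal operands A, so the run is replaced by one |
| new operand and the chain conceptually becomes |
| |
| t = a * 4; |
| d = t + b; */ |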
| static bool |
| transform_add_to_multiply (vec<operand_entry *> *ops) |
| { |
| operand_entry *oe; |
| tree op = NULL_TREE; |
| int j; |
| int i, start = -1, end = 0, count = 0; |
| auto_vec<std::pair <int, int> > indxs; |
| bool changed = false; |
| |
| if (!INTEGRAL_TYPE_P (TREE_TYPE ((*ops)[0]->op)) |
| && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE ((*ops)[0]->op)) |
| || !flag_unsafe_math_optimizations)) |
| return false; |
| |
| /* Look for repeated operands. */ |
| FOR_EACH_VEC_ELT (*ops, i, oe) |
| { |
| if (start == -1) |
| { |
| count = 1; |
| op = oe->op; |
| start = i; |
| } |
| else if (operand_equal_p (oe->op, op, 0)) |
| { |
| count++; |
| end = i; |
| } |
| else |
| { |
| if (count > 1) |
| indxs.safe_push (std::make_pair (start, end)); |
| count = 1; |
| op = oe->op; |
| start = i; |
| } |
| } |
| |
| if (count > 1) |
| indxs.safe_push (std::make_pair (start, end)); |
| |
| for (j = indxs.length () - 1; j >= 0; --j) |
| { |
| /* Convert repeated operand addition to multiplication. */ |
| start = indxs[j].first; |
| end = indxs[j].second; |
| op = (*ops)[start]->op; |
| count = end - start + 1; |
| for (i = end; i >= start; --i) |
| ops->unordered_remove (i); |
| tree tmp = make_ssa_name (TREE_TYPE (op)); |
| tree cst = build_int_cst (integer_type_node, count); |
| gassign *mul_stmt |
| = gimple_build_assign (tmp, MULT_EXPR, |
| op, fold_convert (TREE_TYPE (op), cst)); |
| gimple_set_visited (mul_stmt, true); |
| add_to_ops_vec (ops, tmp, mul_stmt); |
| changed = true; |
| } |
| |
| return changed; |
| } |
| |
| |
| /* Perform various identities and other optimizations on the list of |
| operand entries, stored in OPS. The tree code for the binary |
| operation between all the operands is OPCODE. */ |
| |
| static void |
| optimize_ops_list (enum tree_code opcode, |
| vec<operand_entry *> *ops) |
| { |
| unsigned int length = ops->length (); |
| unsigned int i; |
| operand_entry *oe; |
| operand_entry *oelast = NULL; |
| bool iterate = false; |
| |
| if (length == 1) |
| return; |
| |
| oelast = ops->last (); |
| |
| /* If the last two are constants, pop the constants off, merge them |
| and try the next two. */ |
| if (oelast->rank == 0 && is_gimple_min_invariant (oelast->op)) |
| { |
| operand_entry *oelm1 = (*ops)[length - 2]; |
| |
| if (oelm1->rank == 0 |
| && is_gimple_min_invariant (oelm1->op) |
| && useless_type_conversion_p (TREE_TYPE (oelm1->op), |
| TREE_TYPE (oelast->op))) |
| { |
| tree folded = fold_binary (opcode, TREE_TYPE (oelm1->op), |
| oelm1->op, oelast->op); |
| |
| if (folded && is_gimple_min_invariant (folded)) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Merging constants\n"); |
| |
| ops->pop (); |
| ops->pop (); |
| |
| add_to_ops_vec (ops, folded); |
| reassociate_stats.constants_eliminated++; |
| |
| optimize_ops_list (opcode, ops); |
| return; |
| } |
| } |
| } |
| |
| eliminate_using_constants (opcode, ops); |
| oelast = NULL; |
| |
| for (i = 0; ops->iterate (i, &oe);) |
| { |
| bool done = false; |
| |
| if (eliminate_not_pairs (opcode, ops, i, oe)) |
| return; |
| if (eliminate_duplicate_pair (opcode, ops, &done, i, oe, oelast) |
| || (!done && eliminate_plus_minus_pair (opcode, ops, i, oe)) |
| || (!done && eliminate_redundant_comparison (opcode, ops, i, oe))) |
| { |
| if (done) |
| return; |
| iterate = true; |
| oelast = NULL; |
| continue; |
| } |
| oelast = oe; |
| i++; |
| } |
| |
| if (iterate) |
| optimize_ops_list (opcode, ops); |
| } |
| |
| /* The following functions are subroutines to optimize_range_tests and allow |
| it to try to change a logical combination of comparisons into a range |
| test. |
| |
| For example, both |
| X == 2 || X == 5 || X == 3 || X == 4 |
| and |
| X >= 2 && X <= 5 |
| are converted to |
| (unsigned) (X - 2) <= 3 |
| |
| For more information see the comments above fold_range_test in |
| fold-const.c; this implementation is for GIMPLE. */ |
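| |
| /* A short worked note on the range-check trick above (illustration |
| only): for 2 <= X && X <= 5, subtracting the low bound gives |
| 0 <= X - 2 && X - 2 <= 3. Doing the subtraction and comparison in an |
| unsigned type makes any X < 2 wrap around to a huge value, so the |
| single test (unsigned) (X - 2) <= 3 accepts exactly X in {2, 3, 4, 5}. */ |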
| |
| |
| |
| /* Dump the range entry R to FILE, skipping its expression if SKIP_EXP. */ |
| |
| void |
| dump_range_entry (FILE *file, struct range_entry *r, bool skip_exp) |
| { |
| if (!skip_exp) |
| print_generic_expr (file, r->exp); |
| fprintf (file, " %c[", r->in_p ? '+' : '-'); |
| print_generic_expr (file, r->low); |
| fputs (", ", file); |
| print_generic_expr (file, r->high); |
| fputc (']', file); |
| } |
| |
| /* Dump the range entry R to STDERR. */ |
| |
| DEBUG_FUNCTION void |
| debug_range_entry (struct range_entry *r) |
| { |
| dump_range_entry (stderr, r, false); |
| fputc ('\n', stderr); |
| } |
| |
| /* This is similar to make_range in fold-const.c, but on top of |
| GIMPLE instead of trees. If EXP is non-NULL, it should be |
| an SSA_NAME and STMT argument is ignored, otherwise STMT |
| argument should be a GIMPLE_COND. */ |
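| /* For example (illustrative only): if EXP is an SSA_NAME _1 defined by |
| _1 = x_2 >= 5; with x_2 of type int, the recorded range is roughly |
| exp = x_2, in_p = true, low = 5, high = NULL (i.e. +[5, +INF]), |
| matching what make_range would compute for the tree x_2 >= 5. */ |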
| |
| void |
| init_range_entry (struct range_entry *r, tree exp, gimple *stmt) |
| { |
| int in_p; |
| tree low, high; |
| bool is_bool, strict_overflow_p; |
| |
| r->exp = NULL_TREE; |
| r->in_p = false; |
| r->strict_overflow_p = false; |
| r->low = NULL_TREE; |
| r->high = NULL_TREE; |
| if (exp != NULL_TREE |
| && (TREE_CODE (exp) != SSA_NAME || !INTEGRAL_TYPE_P (TREE_TYPE (exp)))) |
| return; |
| |
| /* Start with simply saying "EXP != 0" and then look at the code of EXP |
| and see if we can refine the range. Some of the cases below may not |
| happen, but it doesn't seem worth worrying about this. We "continue" |
| the outer loop when we've changed something; otherwise we "break" |
| the switch, which will "break" the while. */ |
| low = exp ? build_int_cst (TREE_TYPE (exp), 0) : boolean_false_node; |
| high = low; |
| in_p = 0; |
| strict_overflow_p = false; |
| is_bool = false; |
| if (exp == NULL_TREE) |
| is_bool = true; |
| else if (TYPE_PRECISION (TREE_TYPE (exp)) == 1) |
| { |
| if (TYPE_UNSIGNED (TREE_TYPE (exp))) |
| is_bool = true; |
| else |
| return; |
| } |
| else if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE) |
| is_bool = true; |
| |
| while (1) |
| { |
| enum tree_code code; |
| tree arg0, arg1, exp_type; |
| tree nexp; |
| location_t loc; |
| |
| if (exp != NULL_TREE) |
| { |
| if (TREE_CODE (exp) != SSA_NAME |
| || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp)) |
| break; |
| |
| stmt = SSA_NAME_DEF_STMT (exp); |
| if (!is_gimple_assign (stmt)) |
| break; |
| |
| code = gimple_assign_rhs_code (stmt); |
| arg0 = gimple_assign_rhs1 (stmt); |
| arg1 = gimple_assign_rhs2 (stmt); |
| exp_type = TREE_TYPE (exp); |
| } |
| else |
| { |
| code = gimple_cond_code (stmt); |
| arg0 = gimple_cond_lhs (stmt); |
| arg1 = gimple_cond_rhs (stmt); |
| exp_type = boolean_type_node; |
| } |
| |
| if (TREE_CODE (arg0) != SSA_NAME |
| || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (arg0)) |
| break; |
| loc = gimple_location (stmt); |
| switch (code) |
| { |
| case BIT_NOT_EXPR: |
| if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE |
| /* Ensure the range is either +[-,0], +[0,0], |
| -[-,0], -[0,0] or +[1,-], +[1,1], -[1,-] or |
| -[1,1]. If it is e.g. +[-,-] or -[-,-] |
| or similar expression of unconditional true or |
| false, it should not be negated. */ |
| && ((high && integer_zerop (high)) |
| || (low && integer_onep (low)))) |
| { |
| in_p = !in_p; |
| exp = arg0; |
| continue; |
| } |
| break; |
| case SSA_NAME: |
| exp = arg0; |
| continue; |
| CASE_CONVERT: |
| if (is_bool) |
| { |
| if ((TYPE_PRECISION (exp_type) == 1 |
| || TREE_CODE (exp_type) == BOOLEAN_TYPE) |
| && TYPE_PRECISION (TREE_TYPE (arg0)) > 1) |
| return; |
| } |
| else if (TYPE_PRECISION (TREE_TYPE (arg0)) == 1) |
| { |
| if (TYPE_UNSIGNED (TREE_TYPE (arg0))) |
| is_bool = true; |
| else |
| return; |
| } |
| else if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE) |
| is_bool = true; |
| goto do_default; |
| case EQ_EXPR: |
| case NE_EXPR: |
| case LT_EXPR: |
| case LE_EXPR: |
| case GE_EXPR: |
| case GT_EXPR: |
| is_bool = true; |
| /* FALLTHRU */ |
| default: |
| if (!is_bool) |
| return; |
| do_default: |
| nexp = make_range_step (loc, code, arg0, arg1, exp_type, |
| &low, &high, &in_p, |
| &strict_overflow_p); |
| if (nexp != NULL_TREE) |
| { |
| exp = nexp; |
| gcc_assert (TREE_CODE (exp) == SSA_NAME); |
| continue; |
| } |
| break; |
| } |
| break; |
| } |
| if (is_bool) |
| { |
| r->exp = exp; |
| r->in_p = in_p; |
| r->low = low; |
| r->high = high; |
| r->strict_overflow_p = strict_overflow_p; |
| } |
| } |
| |
| /* Comparison function for qsort. Sort entries |
| without SSA_NAME exp first, then with SSA_NAMEs sorted |
| by increasing SSA_NAME_VERSION, and for the same SSA_NAMEs |
| by increasing ->low and if ->low is the same, by increasing |
| ->high. ->low == NULL_TREE means minimum, ->high == NULL_TREE |
| maximum. */ |
| |
| static int |
| range_entry_cmp (const void *a, const void *b) |
| { |
| const struct range_entry *p = (const struct range_entry *) a; |
| const struct range_entry *q = (const struct range_entry *) b; |
| |
| if (p->exp != NULL_TREE && TREE_CODE (p->exp) == SSA_NAME) |
| { |
| if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME) |
| { |
| /* Group range_entries for the same SSA_NAME together. */ |
| if (SSA_NAME_VERSION (p->exp) < SSA_NAME_VERSION (q->exp)) |
| return -1; |
| else if (SSA_NAME_VERSION (p->exp) > SSA_NAME_VERSION (q->exp)) |
| return 1; |
| /* If ->low is different, NULL low goes first, then by |
| ascending low. */ |
| if (p->low != NULL_TREE) |
| { |
| if (q->low != NULL_TREE) |
| { |
| tree tem = fold_binary (LT_EXPR, boolean_type_node, |
| p->low, q->low); |
| if (tem && integer_onep (tem)) |
| return -1; |
| tem = fold_binary (GT_EXPR, boolean_type_node, |
| p->low, q->low); |
| if (tem && integer_onep (tem)) |
| return 1; |
| } |
| else |
| return 1; |
| } |
| else if (q->low != NULL_TREE) |
| return -1; |
| /* If ->high is different, NULL high goes last, before that by |
| ascending high. */ |
| if (p->high != NULL_TREE) |
| { |
| if (q->high != NULL_TREE) |
| { |
| tree tem = fold_binary (LT_EXPR, boolean_type_node, |
| p->high, q->high); |
| if (tem && integer_onep (tem)) |
| return -1; |
| tem = fold_binary (GT_EXPR, boolean_type_node, |
| p->high, q->high); |
| if (tem && integer_onep (tem)) |
| return 1; |
| } |
| else |
| return -1; |
| } |
| else if (q->high != NULL_TREE) |
| return 1; |
| /* If both ranges are the same, sort below by ascending idx. */ |
| } |
| else |
| return 1; |
| } |
| else if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME) |
| return -1; |
| |
| if (p->idx < q->idx) |
| return -1; |
| else |
| { |
| gcc_checking_assert (p->idx > q->idx); |
| return 1; |
| } |
| } |
| |
| /* Helper function for update_range_test. Force EXPR into an SSA_NAME, |
| inserting any needed statements before or after GSI as indicated by |
| BEFORE. */ |
| |
| static tree |
| force_into_ssa_name (gimple_stmt_iterator *gsi, tree expr, bool before) |
| { |
| enum gsi_iterator_update m = before ? GSI_SAME_STMT : GSI_CONTINUE_LINKING; |
| tree ret = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE, before, m); |
| if (TREE_CODE (ret) != SSA_NAME) |
| { |
| gimple *g = gimple_build_assign (make_ssa_name (TREE_TYPE (ret)), ret); |
| if (before) |
| gsi_insert_before (gsi, g, GSI_SAME_STMT); |
| else |
| gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING); |
| ret = gimple_assign_lhs (g); |
| } |
| return ret; |
| } |
| |
| /* Helper routine of optimize_range_tests. |
| [EXP, IN_P, LOW, HIGH, STRICT_OVERFLOW_P] is a merged range for |
| RANGE and the OTHERRANGE through OTHERRANGE + COUNT - 1 ranges; |
| OPCODE and OPS are arguments of optimize_range_tests. If OTHERRANGE |
| is NULL, OTHERRANGEP must be non-NULL and points to an array of |
| COUNT pointers to other ranges. Return true if the range merge |
| was successful. |
| If OPCODE is ERROR_MARK, this is called from within |
| maybe_optimize_range_tests and is performing inter-bb range optimization. |
| In that case, whether an op is BIT_AND_EXPR or BIT_IOR_EXPR is found in |
| oe->rank. */ |
| |
| static bool |
| update_range_test (struct range_entry *range, struct range_entry *otherrange, |
| struct range_entry **otherrangep, |
| unsigned int count, enum tree_code opcode, |
| vec<operand_entry *> *ops, tree exp, gimple_seq seq, |
| bool in_p, tree low, tree high, bool strict_overflow_p) |
| { |
| operand_entry *oe = (*ops)[range->idx]; |
| tree op = oe->op; |
| gimple *stmt = op ? SSA_NAME_DEF_STMT (op) |
| : last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id)); |
| location_t loc = gimple_location (stmt); |
| tree optype = op ? TREE_TYPE (op) : boolean_type_node; |
| tree tem = build_range_check (loc, optype, unshare_expr (exp), |
| in_p, low, high); |
| enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON; |
| gimple_stmt_iterator gsi; |
| unsigned int i, uid; |
| |
| if (tem == NULL_TREE) |
| return false; |
| |
| /* If op is default def SSA_NAME, there is no place to insert the |
| new comparison. Give up, unless we can use OP itself as the |
| range test. */ |
| if (op && SSA_NAME_IS_DEFAULT_DEF (op)) |
| { |
| if (op == range->exp |
| && ((TYPE_PRECISION (optype) == 1 && TYPE_UNSIGNED (optype)) |
| || TREE_CODE (optype) == BOOLEAN_TYPE) |
| && (op == tem |
| || (TREE_CODE (tem) == EQ_EXPR |
| && TREE_OPERAND (tem, 0) == op |
| && integer_onep (TREE_OPERAND (tem, 1)))) |
| && opcode != BIT_IOR_EXPR |
| && (opcode != ERROR_MARK || oe->rank != BIT_IOR_EXPR)) |
| { |
| stmt = NULL; |
| tem = op; |
| } |
| else |
| return false; |
| } |
| |
| if (strict_overflow_p && issue_strict_overflow_warning (wc)) |
| warning_at (loc, OPT_Wstrict_overflow, |
| "assuming signed overflow does not occur " |
| "when simplifying range test"); |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| struct range_entry *r; |
| fprintf (dump_file, "Optimizing range tests "); |
| dump_range_entry (dump_file, range, false); |
| for (i = 0; i < count; i++) |
| { |
| if (otherrange) |
| r = otherrange + i; |
| else |
| r = otherrangep[i]; |
| if (r->exp |
| && r->exp != range->exp |
| && TREE_CODE (r->exp) == SSA_NAME) |
| { |
| fprintf (dump_file, " and "); |
| dump_range_entry (dump_file, r, false); |
| } |
| else |
| { |
| fprintf (dump_file, " and"); |
| dump_range_entry (dump_file, r, true); |
| } |
| } |
| fprintf (dump_file, "\n into "); |
| print_generic_expr (dump_file, tem); |
| fprintf (dump_file, "\n"); |
| } |
| |
| if (opcode == BIT_IOR_EXPR |
| || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR)) |
| tem = invert_truthvalue_loc (loc, tem); |
| |
| tem = fold_convert_loc (loc, optype, tem); |
| if (stmt) |
| { |
| gsi = gsi_for_stmt (stmt); |
| uid = gimple_uid (stmt); |
| } |
| else |
| { |
| gsi = gsi_none (); |
| uid = 0; |
| } |
| if (stmt == NULL) |
| gcc_checking_assert (tem == op); |
| /* In rare cases range->exp can be equal to the lhs of stmt. |
| In that case we have to insert after the stmt rather than before |
| it. If stmt is a PHI, insert it at the start of the basic block. */ |
| else if (op != range->exp) |
| { |
| gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT); |
| tem = force_into_ssa_name (&gsi, tem, true); |
| gsi_prev (&gsi); |
| } |
| else if (gimple_code (stmt) != GIMPLE_PHI) |
| { |
| gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING); |
| tem = force_into_ssa_name (&gsi, tem, false); |
| } |
| else |
| { |
| gsi = gsi_after_labels (gimple_bb (stmt)); |
| if (!gsi_end_p (gsi)) |
| uid = gimple_uid (gsi_stmt (gsi)); |
| else |
| { |
| gsi = gsi_start_bb (gimple_bb (stmt)); |
| uid = 1; |
| while (!gsi_end_p (gsi)) |
| { |
| uid = gimple_uid (gsi_stmt (gsi)); |
| gsi_next (&gsi); |
| } |
| } |
| gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT); |
| tem = force_into_ssa_name (&gsi, tem, true); |
| if (gsi_end_p (gsi)) |
| gsi = gsi_last_bb (gimple_bb (stmt)); |
| else |
| gsi_prev (&gsi); |
| } |
| for (; !gsi_end_p (gsi); gsi_prev (&gsi)) |
| if (gimple_uid (gsi_stmt (gsi))) |
| break; |
| else |
| gimple_set_uid (gsi_stmt (gsi), uid); |
| |
| oe->op = tem; |
| range->exp = exp; |
| range->low = low; |
| range->high = high; |
| range->in_p = in_p; |
| range->strict_overflow_p = false; |
| |
| for (i = 0; i < count; i++) |
| { |
| if (otherrange) |
| range = otherrange + i; |
| else |
| range = otherrangep[i]; |
| oe = (*ops)[range->idx]; |
| /* Now change all the other range test immediate uses, so that |
| those tests will be optimized away. */ |
| if (opcode == ERROR_MARK) |
| { |
| if (oe->op) |
| oe->op = build_int_cst (TREE_TYPE (oe->op), |
| oe->rank == BIT_IOR_EXPR ? 0 : 1); |
| else |
| oe->op = (oe->rank == BIT_IOR_EXPR |
| ? boolean_false_node : boolean_true_node); |
| } |
| else |
| oe->op = error_mark_node; |
| range->exp = NULL_TREE; |
| range->low = NULL_TREE; |
| range->high = NULL_TREE; |
| } |
| return true; |
| } |
| |
| /* Optimize X == CST1 || X == CST2 |
| if popcount (CST1 ^ CST2) == 1 into |
| (X & ~(CST1 ^ CST2)) == (CST1 & ~(CST1 ^ CST2)). |
| Similarly for ranges. E.g. |
| X != 2 && X != 3 && X != 10 && X != 11 |
| will be transformed by the previous optimization into |
| !((X - 2U) <= 1U || (X - 10U) <= 1U) |
| and this loop can transform that into |
| !(((X & ~8) - 2U) <= 1U). */ |
| |
| static bool |
| optimize_range_tests_xor (enum tree_code opcode, tree type, |
| tree lowi, tree lowj, tree highi, tree highj, |
| vec<operand_entry *> *ops, |
| struct range_entry *rangei, |
| struct range_entry *rangej) |
| { |
| tree lowxor, highxor, tem, exp; |
| /* Check lowi ^ lowj == highi ^ highj and |
| popcount (lowi ^ lowj) == 1. */ |
| lowxor = fold_binary (BIT_XOR_EXPR, type, lowi, lowj); |
| if (lowxor == NULL_TREE || TREE_CODE (lowxor) != INTEGER_CST) |
| return false; |
| if (!integer_pow2p (lowxor)) |
| return false; |
| highxor = fold_binary (BIT_XOR_EXPR, type, highi, highj); |
| if (!tree_int_cst_equal (lowxor, highxor)) |
| return false; |
| |
| exp = rangei->exp; |
| scalar_int_mode mode = as_a <scalar_int_mode> (TYPE_MODE (type)); |
| int prec = GET_MODE_PRECISION (mode); |
| if (TYPE_PRECISION (type) < prec |
| || (wi::to_wide (TYPE_MIN_VALUE (type)) |
| != wi::min_value (prec, TYPE_SIGN (type))) |
| || (wi::to_wide (TYPE_MAX_VALUE (type)) |
| != wi::max_value (prec, TYPE_SIGN (type)))) |
| { |
| type = build_nonstandard_integer_type (prec, TYPE_UNSIGNED (type)); |
| exp = fold_convert (type, exp); |
| lowxor = fold_convert (type, lowxor); |
| lowi = fold_convert (type, lowi); |
| highi = fold_convert (type, highi); |
| } |
| tem = fold_build1 (BIT_NOT_EXPR, type, lowxor); |
| exp = fold_build2 (BIT_AND_EXPR, type, exp, tem); |
| lowj = fold_build2 (BIT_AND_EXPR, type, lowi, tem); |
| highj = fold_build2 (BIT_AND_EXPR, type, highi, tem); |
| if (update_range_test (rangei, rangej, NULL, 1, opcode, ops, exp, |
| NULL, rangei->in_p, lowj, highj, |
| rangei->strict_overflow_p |
| || rangej->strict_overflow_p)) |
| return true; |
| return false; |
| } |
| |
| /* Optimize X == CST1 || X == CST2 |
| if popcount (CST2 - CST1) == 1 into |
| ((X - CST1) & ~(CST2 - CST1)) == 0. |
| Similarly for ranges. E.g. |
| X == 43 || X == 76 || X == 44 || X == 78 || X == 77 || X == 46 |
| || X == 75 || X == 45 |
| will be transformed by the previous optimization into |
| (X - 43U) <= 3U || (X - 75U) <= 3U |
| and this loop can transform that into |
| ((X - 43U) & ~(75U - 43U)) <= 3U. */ |
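| |
| /* Working through the example above (illustration only): the two ranges |
| are X - 43U <= 3U, i.e. X in [43, 46], and X - 75U <= 3U, i.e. X in |
| [75, 78]. Both ranges have the same width and 75 - 43 = 32 is a power |
| of two, so masking that bit out of X - 43U folds the two tests into the |
| single test ((X - 43U) & ~32U) <= 3U: for X in [75, 78] the value |
| X - 43U is exactly 32 more than for the corresponding X in [43, 46]. */ |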
| static bool |
| optimize_range_tests_diff (enum tree_code opcode, tree type, |
| tree lowi, tree lowj, tree highi, tree highj, |
| vec<operand_entry *> *ops, |
| struct range_entry *rangei, |
| struct range_entry *rangej) |
| { |
| tree tem1, tem2, mask; |
| /* Check highi - lowi == highj - lowj. */ |
| tem1 = fold_binary (MINUS_EXPR, type, highi, lowi); |
| if (tem1 == NULL_TREE || TREE_CODE (tem1) != INTEGER_CST) |
| return false; |
| tem2 = fold_binary (MINUS_EXPR, type, highj, lowj); |
| if (!tree_int_cst_equal (tem1, tem2)) |
| return false; |
| /* Check popcount (lowj - lowi) == 1. */ |
| tem1 = fold_binary (MINUS_EXPR, type, lowj, lowi); |
| if (tem1 == NULL_TREE || TREE_CODE (tem1) != INTEGER_CST) |
| return false; |
| if (!integer_pow2p (tem1)) |
| return false; |
| |
| scalar_int_mode mode = as_a <scalar_int_mode> (TYPE_MODE (type)); |
| int prec = GET_MODE_PRECISION (mode); |
| if (TYPE_PRECISION (type) < prec |
| || (wi::to_wide (TYPE_MIN_VALUE (type)) |
| != wi::min_value (prec, TYPE_SIGN (type))) |
| || (wi::to_wide (TYPE_MAX_VALUE (type)) |
| != wi::max_value (prec, TYPE_SIGN (type)))) |
| type = build_nonstandard_integer_type (prec, 1); |
| else |
| type = unsigned_type_for (type); |
| tem1 = fold_convert (type, tem1); |
| tem2 = fold_convert (type, tem2); |
| lowi = fold_convert (type, lowi); |
| mask = fold_build1 (BIT_NOT_EXPR, type, tem1); |
| tem1 = fold_build2 (MINUS_EXPR, type, |
| fold_convert (type, rangei->exp), lowi); |
| tem1 = fold_build2 (BIT_AND_EXPR, type, tem1, mask); |
| lowj = build_int_cst (type, 0); |
| if (update_range_test (rangei, rangej, NULL, 1, opcode, ops, tem1, |
| NULL, rangei->in_p, lowj, tem2, |
| rangei->strict_overflow_p |
| || rangej->strict_overflow_p)) |
| return true; |
| return false; |
| } |
| |
| /* Perform the checks common to optimize_range_tests_xor and |
| optimize_range_tests_diff. |
| If OPTIMIZE_XOR is TRUE, call optimize_range_tests_xor, |
| otherwise call optimize_range_tests_diff. */ |
| |
| static bool |
| optimize_range_tests_1 (enum tree_code opcode, int first, int length, |
| bool optimize_xor, vec<operand_entry *> *ops, |
| struct range_entry *ranges) |
| { |
| int i, j; |
| bool any_changes = false; |
| for (i = first; i < length; i++) |
| { |
| tree lowi, highi, lowj, highj, type, tem; |
| |
| if (ranges[i].exp == NULL_TREE || ranges[i].in_p) |
| continue; |
| type = TREE_TYPE (ranges[i].exp); |
| if (!INTEGRAL_TYPE_P (type)) |
| continue; |
| lowi = ranges[i].low; |
| if (lowi == NULL_TREE) |
| lowi = TYPE_MIN_VALUE (type); |
| highi = ranges[i].high; |
| if (highi == NULL_TREE) |
| continue; |
| for (j = i + 1; j < length && j < i + 64; j++) |
| { |
| bool changes; |
| if (ranges[i].exp != ranges[j].exp || ranges[j].in_p) |
| continue; |
| lowj = ranges[j].low; |
| if (lowj == NULL_TREE) |
| continue; |
| highj = ranges[j].high; |
| if (highj == NULL_TREE) |
| highj = TYPE_MAX_VALUE (type); |
| /* Check lowj > highi. */ |
| tem = fold_binary (GT_EXPR, boolean_type_node, |
| lowj, highi); |
| if (tem == NULL_TREE || !integer_onep (tem)) |
| continue; |
| if (optimize_xor) |
| changes = optimize_range_tests_xor (opcode, type, lowi, lowj, |
| highi, highj, ops, |
| ranges + i, ranges + j); |
| else |
| changes = optimize_range_tests_diff (opcode, type, lowi, lowj, |
| highi, highj, ops, |
| ranges + i, ranges + j); |
| if (changes) |
| { |
| any_changes = true; |
| break; |
| } |
| } |
| } |
| return any_changes; |
| } |
| |
| /* Helper function of optimize_range_tests_to_bit_test. Handle a single |
| range given by EXP, LOW and HIGH: compute the bit mask of bits to test |
| and return EXP on success, NULL otherwise. */ |
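| |
| /* As background (illustration only): the masks computed here feed the |
| bit-test transformation, which turns a test such as |
| |
| X == 3 || X == 7 || X == 9 |
| |
| into, roughly, a single subtraction plus a test of one bit in a |
| constant mask, e.g. ((0x51 >> (X - 3)) & 1) != 0 with |
| 0x51 == (1 << 0) | (1 << 4) | (1 << 6), provided X - 3 stays within |
| the precision being tested. */ |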
| |
| static tree |
| extract_bit_test_mask (tree exp, int prec, tree totallow, tree low, tree high, |
| wide_int *mask, tree *totallowp) |
| { |
| tree tem = int_const_binop (MINUS_EXPR, high, low); |
| if (tem == NULL_TREE |
| || TREE_CODE (tem) != INTEGER_CST |
| || TREE_OVERFLOW (tem) |
| || tree_int_cst_sgn (tem) == -1 |
| || compare_tree_int (tem, prec) != -1) |
|