| /* Reassociation for trees. |
| Copyright (C) 2005-2015 Free Software Foundation, Inc. |
| Contributed by Daniel Berlin <dan@dberlin.org> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "hash-table.h" |
| #include "tm.h" |
| #include "rtl.h" |
| #include "tm_p.h" |
| #include "hash-set.h" |
| #include "machmode.h" |
| #include "vec.h" |
| #include "double-int.h" |
| #include "input.h" |
| #include "alias.h" |
| #include "symtab.h" |
| #include "wide-int.h" |
| #include "inchash.h" |
| #include "tree.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "predict.h" |
| #include "hard-reg-set.h" |
| #include "function.h" |
| #include "dominance.h" |
| #include "cfg.h" |
| #include "cfganal.h" |
| #include "basic-block.h" |
| #include "gimple-pretty-print.h" |
| #include "tree-inline.h" |
| #include "hash-map.h" |
| #include "tree-ssa-alias.h" |
| #include "internal-fn.h" |
| #include "gimple-fold.h" |
| #include "tree-eh.h" |
| #include "gimple-expr.h" |
| #include "is-a.h" |
| #include "gimple.h" |
| #include "gimple-iterator.h" |
| #include "gimplify-me.h" |
| #include "gimple-ssa.h" |
| #include "tree-cfg.h" |
| #include "tree-phinodes.h" |
| #include "ssa-iterators.h" |
| #include "stringpool.h" |
| #include "tree-ssanames.h" |
| #include "tree-ssa-loop-niter.h" |
| #include "tree-ssa-loop.h" |
| #include "hashtab.h" |
| #include "flags.h" |
| #include "statistics.h" |
| #include "real.h" |
| #include "fixed-value.h" |
| #include "insn-config.h" |
| #include "expmed.h" |
| #include "dojump.h" |
| #include "explow.h" |
| #include "calls.h" |
| #include "emit-rtl.h" |
| #include "varasm.h" |
| #include "stmt.h" |
| #include "expr.h" |
| #include "tree-dfa.h" |
| #include "tree-ssa.h" |
| #include "tree-iterator.h" |
| #include "tree-pass.h" |
| #include "alloc-pool.h" |
| #include "langhooks.h" |
| #include "cfgloop.h" |
| #include "target.h" |
| #include "params.h" |
| #include "diagnostic-core.h" |
| #include "builtins.h" |
| #include "gimplify.h" |
| #include "insn-codes.h" |
| #include "optabs.h" |
| |
| /* This is a simple global reassociation pass. It is, in part, based |
| on the LLVM pass of the same name (they do some things more/less |
| than we do, in different orders, etc.). |
| |
| It consists of five steps: |
| |
| 1. Breaking up subtract operations into addition + negate, where |
| it would promote the reassociation of adds. |
| |
| 2. Left linearization of the expression trees, so that (A+B)+(C+D) |
| becomes (((A+B)+C)+D), which is easier for us to rewrite later. |
| During linearization, we place the operands of the binary |
| expressions into a vector of operand_entry_t. |
| |
| 3. Optimization of the operand lists, eliminating things like a + |
| -a, a & a, etc. |
| |
| 3a. Combine repeated factors with the same occurrence counts |
| into a __builtin_powi call that will later be optimized into |
| an optimal number of multiplies. |
| |
| 4. Rewrite the expression trees we linearized and optimized so |
| they are in proper rank order. |
| |
| 5. Repropagate negates, as nothing else will clean it up ATM. |
| |
| A bit of theory on #4, since nobody seems to write anything down |
| about why it makes sense to do it the way they do it: |
| |
| We could do this much nicer theoretically, but don't (for reasons |
| explained after how to do it theoretically nice :P). |
| |
| In order to promote the most redundancy elimination, you want |
| binary expressions whose operands are the same rank (or |
| preferably, the same value) exposed to the redundancy eliminator, |
| for possible elimination. |
| |
| So the way to do this if we really cared, is to build the new op |
| tree from the leaves to the roots, merging as you go, and putting the |
| new op on the end of the worklist, until you are left with one |
| thing on the worklist. |
| |
| IE if you have to rewrite the following set of operands (listed with |
| rank in parentheses), with opcode PLUS_EXPR: |
| |
| a (1), b (1), c (1), d (2), e (2) |
| |
| |
| We start with our merge worklist empty, and the ops list with all of |
| those on it. |
| |
| You want to first merge all leaves of the same rank, as much as |
| possible. |
| |
| So first build a binary op of |
| |
| mergetmp = a + b, and put "mergetmp" on the merge worklist. |
| |
| Because there is no three operand form of PLUS_EXPR, c is not going to |
| be exposed to redundancy elimination as a rank 1 operand. |
| |
| So you might as well throw it on the merge worklist (you could also |
| consider it to now be a rank two operand, and merge it with d and e, |
| but in this case, you then have evicted e from a binary op. So at |
| least in this situation, you can't win.) |
| |
| Then build a binary op of d + e |
| mergetmp2 = d + e |
| |
| and put mergetmp2 on the merge worklist. |
| |
| so merge worklist = {mergetmp, c, mergetmp2} |
| |
| Continue building binary ops of these operations until you have only |
| one operation left on the worklist. |
| |
| So we have |
| |
| build binary op |
| mergetmp3 = mergetmp + c |
| |
| worklist = {mergetmp2, mergetmp3} |
| |
| mergetmp4 = mergetmp2 + mergetmp3 |
| |
| worklist = {mergetmp4} |
| |
| because we have one operation left, we can now just set the original |
| statement equal to the result of that operation. |
| |
| This will at least expose a + b and d + e to redundancy elimination |
| as binary operations. |
| |
| For extra points, you can reuse the old statements to build the |
| mergetmps, since you shouldn't run out. |
| |
| So why don't we do this? |
| |
| Because it's expensive, and rarely will help. Most trees we are |
| reassociating have 3 or fewer ops. If they have 2 ops, they already |
| will be written into a nice single binary op. If you have 3 ops, a |
| single simple check suffices to tell you whether the first two are of the |
| same rank. If so, you know to order it |
| |
| mergetmp = op1 + op2 |
| newstmt = mergetmp + op3 |
| |
| instead of |
| mergetmp = op2 + op3 |
| newstmt = mergetmp + op1 |
| |
| If all three are of the same rank, you can't expose them all in a |
| single binary operator anyway, so the above is *still* the best you |
| can do. |
| |
| Thus, this is what we do. When we have three ops left, we check to see |
| what order to put them in, and call it a day. As a nod to vector sum |
| reduction, we check if any of the ops are really a phi node that is a |
| destructive update for the associating op, and keep the destructive |
| update together for vector sum reduction recognition. */ |
| |
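| /* As a small illustration of steps 1-3 (not an exhaustive trace): for |
| w = a - b + b, step 1 breaks the subtract up into a + (-b) + b, step 2 |
| linearizes that into the operand list {a, -b, b}, and step 3 cancels |
| the -b/b pair, so that only a (plus a zero constant that is removed in |
| turn) remains. */ |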
| |
| /* Statistics */ |
| static struct |
| { |
| int linearized; |
| int constants_eliminated; |
| int ops_eliminated; |
| int rewritten; |
| int pows_encountered; |
| int pows_created; |
| } reassociate_stats; |
| |
| /* Operator, rank pair. */ |
| typedef struct operand_entry |
| { |
| unsigned int rank; |
| int id; |
| tree op; |
| unsigned int count; |
| } *operand_entry_t; |
| |
| static alloc_pool operand_entry_pool; |
| |
| /* This is used to assign a unique ID to each struct operand_entry |
| so that qsort results are identical on different hosts. */ |
| static int next_operand_entry_id; |
| |
| /* Starting rank number for a given basic block, so that we can rank |
| operations using unmovable instructions in that BB based on the bb |
| depth. */ |
| static long *bb_rank; |
| |
| /* Operand->rank hashtable. */ |
| static hash_map<tree, long> *operand_rank; |
| |
| /* Vector of SSA_NAMEs for which, after reassociate_bb is done with |
| all basic blocks, the CFG should be adjusted - the basic block |
| split right after that SSA_NAME's definition statement and before |
| its only use, which must be a bit ior. */ |
| static vec<tree> reassoc_branch_fixups; |
| |
| /* Forward decls. */ |
| static long get_rank (tree); |
| static bool reassoc_stmt_dominates_stmt_p (gimple, gimple); |
| |
| /* Wrapper around gsi_remove, which adjusts gimple_uid of debug stmts |
| possibly added by gsi_remove. */ |
| |
| bool |
| reassoc_remove_stmt (gimple_stmt_iterator *gsi) |
| { |
| gimple stmt = gsi_stmt (*gsi); |
| |
| if (!MAY_HAVE_DEBUG_STMTS || gimple_code (stmt) == GIMPLE_PHI) |
| return gsi_remove (gsi, true); |
| |
| gimple_stmt_iterator prev = *gsi; |
| gsi_prev (&prev); |
| unsigned uid = gimple_uid (stmt); |
| basic_block bb = gimple_bb (stmt); |
| bool ret = gsi_remove (gsi, true); |
| if (!gsi_end_p (prev)) |
| gsi_next (&prev); |
| else |
| prev = gsi_start_bb (bb); |
| gimple end_stmt = gsi_stmt (*gsi); |
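| /* Debug stmts possibly emitted by gsi_remove in place of the removed |
| stmt start out with uid 0; give them the removed stmt's uid so that |
| stmt uids in this block stay consistent for the uid-based ordering |
| used elsewhere in this pass. */ |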
| while ((stmt = gsi_stmt (prev)) != end_stmt) |
| { |
| gcc_assert (stmt && is_gimple_debug (stmt) && gimple_uid (stmt) == 0); |
| gimple_set_uid (stmt, uid); |
| gsi_next (&prev); |
| } |
| return ret; |
| } |
| |
| /* Bias amount for loop-carried phis. We want this to be larger than |
| the depth of any reassociation tree we can see, but not larger than |
| the rank difference between two blocks. */ |
| #define PHI_LOOP_BIAS (1 << 15) |
| |
| /* Rank assigned to a phi statement. If STMT is a loop-carried phi of |
| an innermost loop, and the phi has only a single use which is inside |
| the loop, then the rank is the block rank of the loop latch plus an |
| extra bias for the loop-carried dependence. This causes expressions |
| calculated into an accumulator variable to be independent for each |
| iteration of the loop. If STMT is some other phi, the rank is the |
| block rank of its containing block. */ |
| static long |
| phi_rank (gimple stmt) |
| { |
| basic_block bb = gimple_bb (stmt); |
| struct loop *father = bb->loop_father; |
| tree res; |
| unsigned i; |
| use_operand_p use; |
| gimple use_stmt; |
| |
| /* We only care about real loops (those with a latch). */ |
| if (!father->latch) |
| return bb_rank[bb->index]; |
| |
| /* Interesting phis must be in headers of innermost loops. */ |
| if (bb != father->header |
| || father->inner) |
| return bb_rank[bb->index]; |
| |
| /* Ignore virtual SSA_NAMEs. */ |
| res = gimple_phi_result (stmt); |
| if (virtual_operand_p (res)) |
| return bb_rank[bb->index]; |
| |
| /* The phi definition must have a single use, and that use must be |
| within the loop. Otherwise this isn't an accumulator pattern. */ |
| if (!single_imm_use (res, &use, &use_stmt) |
| || gimple_bb (use_stmt)->loop_father != father) |
| return bb_rank[bb->index]; |
| |
| /* Look for phi arguments from within the loop. If found, bias this phi. */ |
| for (i = 0; i < gimple_phi_num_args (stmt); i++) |
| { |
| tree arg = gimple_phi_arg_def (stmt, i); |
| if (TREE_CODE (arg) == SSA_NAME |
| && !SSA_NAME_IS_DEFAULT_DEF (arg)) |
| { |
| gimple def_stmt = SSA_NAME_DEF_STMT (arg); |
| if (gimple_bb (def_stmt)->loop_father == father) |
| return bb_rank[father->latch->index] + PHI_LOOP_BIAS; |
| } |
| } |
| |
| /* Must be an uninteresting phi. */ |
| return bb_rank[bb->index]; |
| } |
| |
| /* If EXP is an SSA_NAME defined by a PHI statement that represents a |
| loop-carried dependence of an innermost loop, return TRUE; else |
| return FALSE. */ |
| static bool |
| loop_carried_phi (tree exp) |
| { |
| gimple phi_stmt; |
| long block_rank; |
| |
| if (TREE_CODE (exp) != SSA_NAME |
| || SSA_NAME_IS_DEFAULT_DEF (exp)) |
| return false; |
| |
| phi_stmt = SSA_NAME_DEF_STMT (exp); |
| |
| if (gimple_code (phi_stmt) != GIMPLE_PHI) |
| return false; |
| |
| /* Non-loop-carried phis have block rank. Loop-carried phis have |
| an additional bias added in. If this phi doesn't have block rank, |
| it's biased and should not be propagated. */ |
| block_rank = bb_rank[gimple_bb (phi_stmt)->index]; |
| |
| if (phi_rank (phi_stmt) != block_rank) |
| return true; |
| |
| return false; |
| } |
| |
| /* Return the maximum of RANK and the rank that should be propagated |
| from expression OP. For most operands, this is just the rank of OP. |
| For loop-carried phis, the value is zero to avoid undoing the bias |
| in favor of the phi. */ |
| static long |
| propagate_rank (long rank, tree op) |
| { |
| long op_rank; |
| |
| if (loop_carried_phi (op)) |
| return rank; |
| |
| op_rank = get_rank (op); |
| |
| return MAX (rank, op_rank); |
| } |
| |
| /* Look up the operand rank structure for expression E. */ |
| |
| static inline long |
| find_operand_rank (tree e) |
| { |
| long *slot = operand_rank->get (e); |
| return slot ? *slot : -1; |
| } |
| |
| /* Insert {E,RANK} into the operand rank hashtable. */ |
| |
| static inline void |
| insert_operand_rank (tree e, long rank) |
| { |
| gcc_assert (rank > 0); |
| gcc_assert (!operand_rank->put (e, rank)); |
| } |
| |
| /* Given an expression E, return the rank of the expression. */ |
| |
| static long |
| get_rank (tree e) |
| { |
| /* Constants have rank 0. */ |
| if (is_gimple_min_invariant (e)) |
| return 0; |
| |
| /* SSA_NAME's have the rank of the expression they are the result |
| of. |
| For globals and uninitialized values, the rank is 0. |
| For function arguments, use the pre-setup rank. |
| For PHI nodes, stores, asm statements, etc, we use the rank of |
| the BB. |
| For simple operations, the rank is the maximum rank of any of |
| its operands, or the bb_rank, whichever is less. |
| I make no claims that this is optimal; however, it gives good |
| results. */ |
| |
| /* We make an exception to the normal ranking system to break |
| dependences of accumulator variables in loops. Suppose we |
| have a simple one-block loop containing: |
| |
| x_1 = phi(x_0, x_2) |
| b = a + x_1 |
| c = b + d |
| x_2 = c + e |
| |
| As shown, each iteration of the calculation into x is fully |
| dependent upon the iteration before it. We would prefer to |
| see this in the form: |
| |
| x_1 = phi(x_0, x_2) |
| b = a + d |
| c = b + e |
| x_2 = c + x_1 |
| |
| If the loop is unrolled, the calculations of b and c from |
| different iterations can be interleaved. |
| |
| To obtain this result during reassociation, we bias the rank |
| of the phi definition x_1 upward, when it is recognized as an |
| accumulator pattern. The artificial rank causes it to be |
| added last, providing the desired independence. */ |
| |
| if (TREE_CODE (e) == SSA_NAME) |
| { |
| gimple stmt; |
| long rank; |
| int i, n; |
| tree op; |
| |
| if (SSA_NAME_IS_DEFAULT_DEF (e)) |
| return find_operand_rank (e); |
| |
| stmt = SSA_NAME_DEF_STMT (e); |
| if (gimple_code (stmt) == GIMPLE_PHI) |
| return phi_rank (stmt); |
| |
| if (!is_gimple_assign (stmt) |
| || gimple_vdef (stmt)) |
| return bb_rank[gimple_bb (stmt)->index]; |
| |
| /* If we already have a rank for this expression, use that. */ |
| rank = find_operand_rank (e); |
| if (rank != -1) |
| return rank; |
| |
| /* Otherwise, find the maximum rank for the operands. As an |
| exception, remove the bias from loop-carried phis when propagating |
| the rank so that dependent operations are not also biased. */ |
| rank = 0; |
| if (gimple_assign_single_p (stmt)) |
| { |
| tree rhs = gimple_assign_rhs1 (stmt); |
| n = TREE_OPERAND_LENGTH (rhs); |
| if (n == 0) |
| rank = propagate_rank (rank, rhs); |
| else |
| { |
| for (i = 0; i < n; i++) |
| { |
| op = TREE_OPERAND (rhs, i); |
| |
| if (op != NULL_TREE) |
| rank = propagate_rank (rank, op); |
| } |
| } |
| } |
| else |
| { |
| n = gimple_num_ops (stmt); |
| for (i = 1; i < n; i++) |
| { |
| op = gimple_op (stmt, i); |
| gcc_assert (op); |
| rank = propagate_rank (rank, op); |
| } |
| } |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Rank for "); |
| print_generic_expr (dump_file, e, 0); |
| fprintf (dump_file, " is %ld\n", (rank + 1)); |
| } |
| |
| /* Note the rank in the hashtable so we don't recompute it. */ |
| insert_operand_rank (e, (rank + 1)); |
| return (rank + 1); |
| } |
| |
| /* Globals, etc, are rank 0 */ |
| return 0; |
| } |
| |
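| /* For instance (illustrative only), in a single-block function whose |
| parameters a, b and c were given their ranks up front, for |
| t1 = a + b; |
| t2 = t1 + c; |
| we get rank (t1) = MAX (rank (a), rank (b)) + 1 and |
| rank (t2) = MAX (rank (t1), rank (c)) + 1. */ |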
| |
| /* We want integer ones to end up last no matter what, since they are |
| the ones we can do the most with. */ |
| #define INTEGER_CONST_TYPE (1 << 3) |
| #define FLOAT_CONST_TYPE (1 << 2) |
| #define OTHER_CONST_TYPE (1 << 1) |
| |
| /* Classify an invariant tree into integer, float, or other, so that |
| we can sort them to be near other constants of the same type. */ |
| static inline int |
| constant_type (tree t) |
| { |
| if (INTEGRAL_TYPE_P (TREE_TYPE (t))) |
| return INTEGER_CONST_TYPE; |
| else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (t))) |
| return FLOAT_CONST_TYPE; |
| else |
| return OTHER_CONST_TYPE; |
| } |
| |
| /* qsort comparison function to sort operand entries PA and PB by rank |
| so that the sorted array is ordered by rank in decreasing order. */ |
| static int |
| sort_by_operand_rank (const void *pa, const void *pb) |
| { |
| const operand_entry_t oea = *(const operand_entry_t *)pa; |
| const operand_entry_t oeb = *(const operand_entry_t *)pb; |
| |
| /* It's nicer for optimize_expression if constants that are likely |
| to fold when added/multiplied/whatever are put next to each |
| other. Since all constants have rank 0, order them by type. */ |
| if (oeb->rank == 0 && oea->rank == 0) |
| { |
| if (constant_type (oeb->op) != constant_type (oea->op)) |
| return constant_type (oeb->op) - constant_type (oea->op); |
| else |
| /* To make sorting result stable, we use unique IDs to determine |
| order. */ |
| return oeb->id - oea->id; |
| } |
| |
| /* Lastly, make sure the versions that are the same go next to each |
| other. */ |
| if ((oeb->rank - oea->rank == 0) |
| && TREE_CODE (oea->op) == SSA_NAME |
| && TREE_CODE (oeb->op) == SSA_NAME) |
| { |
| /* SSA_NAME_VERSION is assigned pretty randomly, because we reuse |
| versions of removed SSA_NAMEs, so if possible, prefer to sort |
| based on basic block and gimple_uid of the SSA_NAME_DEF_STMT. |
| See PR60418. */ |
| if (!SSA_NAME_IS_DEFAULT_DEF (oea->op) |
| && !SSA_NAME_IS_DEFAULT_DEF (oeb->op) |
| && SSA_NAME_VERSION (oeb->op) != SSA_NAME_VERSION (oea->op)) |
| { |
| gimple stmta = SSA_NAME_DEF_STMT (oea->op); |
| gimple stmtb = SSA_NAME_DEF_STMT (oeb->op); |
| basic_block bba = gimple_bb (stmta); |
| basic_block bbb = gimple_bb (stmtb); |
| if (bbb != bba) |
| { |
| if (bb_rank[bbb->index] != bb_rank[bba->index]) |
| return bb_rank[bbb->index] - bb_rank[bba->index]; |
| } |
| else |
| { |
| bool da = reassoc_stmt_dominates_stmt_p (stmta, stmtb); |
| bool db = reassoc_stmt_dominates_stmt_p (stmtb, stmta); |
| if (da != db) |
| return da ? 1 : -1; |
| } |
| } |
| |
| if (SSA_NAME_VERSION (oeb->op) != SSA_NAME_VERSION (oea->op)) |
| return SSA_NAME_VERSION (oeb->op) - SSA_NAME_VERSION (oea->op); |
| else |
| return oeb->id - oea->id; |
| } |
| |
| if (oeb->rank != oea->rank) |
| return oeb->rank - oea->rank; |
| else |
| return oeb->id - oea->id; |
| } |
| |
| /* Add an operand entry to *OPS for the tree operand OP. */ |
| |
| static void |
| add_to_ops_vec (vec<operand_entry_t> *ops, tree op) |
| { |
| operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool); |
| |
| oe->op = op; |
| oe->rank = get_rank (op); |
| oe->id = next_operand_entry_id++; |
| oe->count = 1; |
| ops->safe_push (oe); |
| } |
| |
| /* Add an operand entry to *OPS for the tree operand OP with repeat |
| count REPEAT. */ |
| |
| static void |
| add_repeat_to_ops_vec (vec<operand_entry_t> *ops, tree op, |
| HOST_WIDE_INT repeat) |
| { |
| operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool); |
| |
| oe->op = op; |
| oe->rank = get_rank (op); |
| oe->id = next_operand_entry_id++; |
| oe->count = repeat; |
| ops->safe_push (oe); |
| |
| reassociate_stats.pows_encountered++; |
| } |
| |
| /* Return true if STMT is a reassociable operation containing a binary |
| operation with tree code CODE, and is inside LOOP. */ |
| |
| static bool |
| is_reassociable_op (gimple stmt, enum tree_code code, struct loop *loop) |
| { |
| basic_block bb = gimple_bb (stmt); |
| |
| if (gimple_bb (stmt) == NULL) |
| return false; |
| |
| if (!flow_bb_inside_loop_p (loop, bb)) |
| return false; |
| |
| if (is_gimple_assign (stmt) |
| && gimple_assign_rhs_code (stmt) == code |
| && has_single_use (gimple_assign_lhs (stmt))) |
| { |
| tree rhs1 = gimple_assign_rhs1 (stmt); |
| tree rhs2 = gimple_assign_rhs2 (stmt); |
| if (TREE_CODE (rhs1) == SSA_NAME |
| && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1)) |
| return false; |
| if (rhs2 |
| && TREE_CODE (rhs2) == SSA_NAME |
| && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs2)) |
| return false; |
| return true; |
| } |
| |
| return false; |
| } |
| |
| |
| /* Given NAME, if NAME is defined by a unary operation OPCODE, return the |
| operand of that operation. Otherwise, return NULL_TREE. */ |
| |
| static tree |
| get_unary_op (tree name, enum tree_code opcode) |
| { |
| gimple stmt = SSA_NAME_DEF_STMT (name); |
| |
| if (!is_gimple_assign (stmt)) |
| return NULL_TREE; |
| |
| if (gimple_assign_rhs_code (stmt) == opcode) |
| return gimple_assign_rhs1 (stmt); |
| return NULL_TREE; |
| } |
| |
| /* If CURR and LAST are a pair of ops that OPCODE allows us to |
| eliminate through equivalences, do so, remove them from OPS, and |
| return true. Otherwise, return false. */ |
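| /* For example, with OPCODE BIT_AND_EXPR an operand list {x, x, y} |
| becomes {x, y} (x & x == x), while with BIT_XOR_EXPR both copies of x |
| are dropped entirely (x ^ x == 0). */ |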
| |
| static bool |
| eliminate_duplicate_pair (enum tree_code opcode, |
| vec<operand_entry_t> *ops, |
| bool *all_done, |
| unsigned int i, |
| operand_entry_t curr, |
| operand_entry_t last) |
| { |
| |
| /* If we have two of the same op, and the opcode is & |, min, or max, |
| we can eliminate one of them. |
| If we have two of the same op, and the opcode is ^, we can |
| eliminate both of them. */ |
| |
| if (last && last->op == curr->op) |
| { |
| switch (opcode) |
| { |
| case MAX_EXPR: |
| case MIN_EXPR: |
| case BIT_IOR_EXPR: |
| case BIT_AND_EXPR: |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Equivalence: "); |
| print_generic_expr (dump_file, curr->op, 0); |
| fprintf (dump_file, " [&|minmax] "); |
| print_generic_expr (dump_file, last->op, 0); |
| fprintf (dump_file, " -> "); |
| print_generic_stmt (dump_file, last->op, 0); |
| } |
| |
| ops->ordered_remove (i); |
| reassociate_stats.ops_eliminated ++; |
| |
| return true; |
| |
| case BIT_XOR_EXPR: |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Equivalence: "); |
| print_generic_expr (dump_file, curr->op, 0); |
| fprintf (dump_file, " ^ "); |
| print_generic_expr (dump_file, last->op, 0); |
| fprintf (dump_file, " -> nothing\n"); |
| } |
| |
| reassociate_stats.ops_eliminated += 2; |
| |
| if (ops->length () == 2) |
| { |
| ops->create (0); |
| add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (last->op))); |
| *all_done = true; |
| } |
| else |
| { |
| ops->ordered_remove (i-1); |
| ops->ordered_remove (i-1); |
| } |
| |
| return true; |
| |
| default: |
| break; |
| } |
| } |
| return false; |
| } |
| |
| static vec<tree> plus_negates; |
| |
| /* If OPCODE is PLUS_EXPR and CURR->OP is a negate expression or a bitwise |
| not expression, look in OPS for a corresponding positive operand to |
| cancel it out. If we find one, remove both entries from OPS, add a 0 |
| or -1 constant, respectively, and return true. Otherwise, return |
| false. */ |
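| /* E.g. for t = -b used in a + t + b, the t and b entries are replaced |
| by a 0 constant, and for t = ~b used in a + t + b they are replaced by |
| -1, since b + ~b == -1. */ |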
| |
| static bool |
| eliminate_plus_minus_pair (enum tree_code opcode, |
| vec<operand_entry_t> *ops, |
| unsigned int currindex, |
| operand_entry_t curr) |
| { |
| tree negateop; |
| tree notop; |
| unsigned int i; |
| operand_entry_t oe; |
| |
| if (opcode != PLUS_EXPR || TREE_CODE (curr->op) != SSA_NAME) |
| return false; |
| |
| negateop = get_unary_op (curr->op, NEGATE_EXPR); |
| notop = get_unary_op (curr->op, BIT_NOT_EXPR); |
| if (negateop == NULL_TREE && notop == NULL_TREE) |
| return false; |
| |
| /* Any non-negated version will have a rank that is one less than |
| the current rank. So once we hit those ranks, if we don't find |
| one, we can stop. */ |
| |
| for (i = currindex + 1; |
| ops->iterate (i, &oe) |
| && oe->rank >= curr->rank - 1 ; |
| i++) |
| { |
| if (oe->op == negateop) |
| { |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Equivalence: "); |
| print_generic_expr (dump_file, negateop, 0); |
| fprintf (dump_file, " + -"); |
| print_generic_expr (dump_file, oe->op, 0); |
| fprintf (dump_file, " -> 0\n"); |
| } |
| |
| ops->ordered_remove (i); |
| add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (oe->op))); |
| ops->ordered_remove (currindex); |
| reassociate_stats.ops_eliminated ++; |
| |
| return true; |
| } |
| else if (oe->op == notop) |
| { |
| tree op_type = TREE_TYPE (oe->op); |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Equivalence: "); |
| print_generic_expr (dump_file, notop, 0); |
| fprintf (dump_file, " + ~"); |
| print_generic_expr (dump_file, oe->op, 0); |
| fprintf (dump_file, " -> -1\n"); |
| } |
| |
| ops->ordered_remove (i); |
| add_to_ops_vec (ops, build_int_cst_type (op_type, -1)); |
| ops->ordered_remove (currindex); |
| reassociate_stats.ops_eliminated ++; |
| |
| return true; |
| } |
| } |
| |
| /* CURR->OP is a negate expr in a plus expr: save it for later |
| inspection in repropagate_negates(). */ |
| if (negateop != NULL_TREE) |
| plus_negates.safe_push (curr->op); |
| |
| return false; |
| } |
| |
| /* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR, and CURR->OP is really a |
| bitwise not expression, look in OPS for a corresponding operand to |
| cancel it out. If we find one, the whole expression is known to be |
| -1 or 0, respectively, so collapse OPS to that single constant and |
| return true. Otherwise, return false. */ |
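| /* E.g. with t = ~x, the operand list {x, t, y} collapses to {0} under |
| BIT_AND_EXPR and to {-1} under BIT_IOR_EXPR, making the remaining |
| operands irrelevant. */ |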
| |
| static bool |
| eliminate_not_pairs (enum tree_code opcode, |
| vec<operand_entry_t> *ops, |
| unsigned int currindex, |
| operand_entry_t curr) |
| { |
| tree notop; |
| unsigned int i; |
| operand_entry_t oe; |
| |
| if ((opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR) |
| || TREE_CODE (curr->op) != SSA_NAME) |
| return false; |
| |
| notop = get_unary_op (curr->op, BIT_NOT_EXPR); |
| if (notop == NULL_TREE) |
| return false; |
| |
| /* Any non-not version will have a rank that is one less than |
| the current rank. So once we hit those ranks, if we don't find |
| one, we can stop. */ |
| |
| for (i = currindex + 1; |
| ops->iterate (i, &oe) |
| && oe->rank >= curr->rank - 1; |
| i++) |
| { |
| if (oe->op == notop) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Equivalence: "); |
| print_generic_expr (dump_file, notop, 0); |
| if (opcode == BIT_AND_EXPR) |
| fprintf (dump_file, " & ~"); |
| else if (opcode == BIT_IOR_EXPR) |
| fprintf (dump_file, " | ~"); |
| print_generic_expr (dump_file, oe->op, 0); |
| if (opcode == BIT_AND_EXPR) |
| fprintf (dump_file, " -> 0\n"); |
| else if (opcode == BIT_IOR_EXPR) |
| fprintf (dump_file, " -> -1\n"); |
| } |
| |
| if (opcode == BIT_AND_EXPR) |
| oe->op = build_zero_cst (TREE_TYPE (oe->op)); |
| else if (opcode == BIT_IOR_EXPR) |
| oe->op = build_all_ones_cst (TREE_TYPE (oe->op)); |
| |
| reassociate_stats.ops_eliminated += ops->length () - 1; |
| ops->truncate (0); |
| ops->quick_push (oe); |
| return true; |
| } |
| } |
| |
| return false; |
| } |
| |
| /* Use constant value that may be present in OPS to try to eliminate |
| operands. Note that this function is only really used when we've |
| eliminated ops for other reasons, or merged constants. Across |
| single statements, fold already does all of this, plus more. There |
| is little point in duplicating logic, so I've only included the |
| identities that I could ever construct testcases to trigger. */ |
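| /* E.g. x & 0 and x | -1 make all other operands redundant, while |
| x & -1, x | 0, x * 1 and x + 0 simply drop the constant (for floats |
| only when the usual HONOR_* checks permit it). */ |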
| |
| static void |
| eliminate_using_constants (enum tree_code opcode, |
| vec<operand_entry_t> *ops) |
| { |
| operand_entry_t oelast = ops->last (); |
| tree type = TREE_TYPE (oelast->op); |
| |
| if (oelast->rank == 0 |
| && (INTEGRAL_TYPE_P (type) || FLOAT_TYPE_P (type))) |
| { |
| switch (opcode) |
| { |
| case BIT_AND_EXPR: |
| if (integer_zerop (oelast->op)) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found & 0, removing all other ops\n"); |
| |
| reassociate_stats.ops_eliminated += ops->length () - 1; |
| |
| ops->truncate (0); |
| ops->quick_push (oelast); |
| return; |
| } |
| } |
| else if (integer_all_onesp (oelast->op)) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found & -1, removing\n"); |
| ops->pop (); |
| reassociate_stats.ops_eliminated++; |
| } |
| } |
| break; |
| case BIT_IOR_EXPR: |
| if (integer_all_onesp (oelast->op)) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found | -1, removing all other ops\n"); |
| |
| reassociate_stats.ops_eliminated += ops->length () - 1; |
| |
| ops->truncate (0); |
| ops->quick_push (oelast); |
| return; |
| } |
| } |
| else if (integer_zerop (oelast->op)) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found | 0, removing\n"); |
| ops->pop (); |
| reassociate_stats.ops_eliminated++; |
| } |
| } |
| break; |
| case MULT_EXPR: |
| if (integer_zerop (oelast->op) |
| || (FLOAT_TYPE_P (type) |
| && !HONOR_NANS (type) |
| && !HONOR_SIGNED_ZEROS (type) |
| && real_zerop (oelast->op))) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found * 0, removing all other ops\n"); |
| |
| reassociate_stats.ops_eliminated += ops->length () - 1; |
| ops->truncate (1); |
| ops->quick_push (oelast); |
| return; |
| } |
| } |
| else if (integer_onep (oelast->op) |
| || (FLOAT_TYPE_P (type) |
| && !HONOR_SNANS (type) |
| && real_onep (oelast->op))) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found * 1, removing\n"); |
| ops->pop (); |
| reassociate_stats.ops_eliminated++; |
| return; |
| } |
| } |
| break; |
| case BIT_XOR_EXPR: |
| case PLUS_EXPR: |
| case MINUS_EXPR: |
| if (integer_zerop (oelast->op) |
| || (FLOAT_TYPE_P (type) |
| && (opcode == PLUS_EXPR || opcode == MINUS_EXPR) |
| && fold_real_zero_addition_p (type, oelast->op, |
| opcode == MINUS_EXPR))) |
| { |
| if (ops->length () != 1) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Found [|^+] 0, removing\n"); |
| ops->pop (); |
| reassociate_stats.ops_eliminated++; |
| return; |
| } |
| } |
| break; |
| default: |
| break; |
| } |
| } |
| } |
| |
| |
| static void linearize_expr_tree (vec<operand_entry_t> *, gimple, |
| bool, bool); |
| |
| /* Structure for tracking and counting operands. */ |
| typedef struct oecount_s { |
| int cnt; |
| int id; |
| enum tree_code oecode; |
| tree op; |
| } oecount; |
| |
| |
| /* The heap for the oecount hashtable and the sorted list of operands. */ |
| static vec<oecount> cvec; |
| |
| |
| /* Oecount hashtable helpers. */ |
| |
| struct oecount_hasher |
| { |
| typedef int value_type; |
| typedef int compare_type; |
| typedef int store_values_directly; |
| static inline hashval_t hash (const value_type &); |
| static inline bool equal (const value_type &, const compare_type &); |
| static bool is_deleted (int &v) { return v == 1; } |
| static void mark_deleted (int &e) { e = 1; } |
| static bool is_empty (int &v) { return v == 0; } |
| static void mark_empty (int &e) { e = 0; } |
| static void remove (int &) {} |
| }; |
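| |
| /* Values stored in the hash table are indices into cvec biased by 42, |
| so that the values 0 and 1 stay free for the empty and deleted |
| markers above. */ |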
| |
| /* Hash function for oecount. */ |
| |
| inline hashval_t |
| oecount_hasher::hash (const value_type &p) |
| { |
| const oecount *c = &cvec[p - 42]; |
| return htab_hash_pointer (c->op) ^ (hashval_t)c->oecode; |
| } |
| |
| /* Comparison function for oecount. */ |
| |
| inline bool |
| oecount_hasher::equal (const value_type &p1, const compare_type &p2) |
| { |
| const oecount *c1 = &cvec[p1 - 42]; |
| const oecount *c2 = &cvec[p2 - 42]; |
| return (c1->oecode == c2->oecode |
| && c1->op == c2->op); |
| } |
| |
| /* Comparison function for qsort sorting oecount elements by count. */ |
| |
| static int |
| oecount_cmp (const void *p1, const void *p2) |
| { |
| const oecount *c1 = (const oecount *)p1; |
| const oecount *c2 = (const oecount *)p2; |
| if (c1->cnt != c2->cnt) |
| return c1->cnt - c2->cnt; |
| else |
| /* If counts are identical, use unique IDs to stabilize qsort. */ |
| return c1->id - c2->id; |
| } |
| |
| /* Return TRUE iff STMT represents a builtin call that raises OP |
| to some exponent. */ |
| |
| static bool |
| stmt_is_power_of_op (gimple stmt, tree op) |
| { |
| tree fndecl; |
| |
| if (!is_gimple_call (stmt)) |
| return false; |
| |
| fndecl = gimple_call_fndecl (stmt); |
| |
| if (!fndecl |
| || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL) |
| return false; |
| |
| switch (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))) |
| { |
| CASE_FLT_FN (BUILT_IN_POW): |
| CASE_FLT_FN (BUILT_IN_POWI): |
| return (operand_equal_p (gimple_call_arg (stmt, 0), op, 0)); |
| |
| default: |
| return false; |
| } |
| } |
| |
| /* Given STMT which is a __builtin_pow* call, decrement its exponent |
| in place and return the result. Assumes that stmt_is_power_of_op |
| was previously called for STMT and returned TRUE. */ |
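| /* E.g. a call to __builtin_pow (x, 3.0) is rewritten in place to |
| __builtin_pow (x, 2.0) and 2 is returned. */ |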
| |
| static HOST_WIDE_INT |
| decrement_power (gimple stmt) |
| { |
| REAL_VALUE_TYPE c, cint; |
| HOST_WIDE_INT power; |
| tree arg1; |
| |
| switch (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))) |
| { |
| CASE_FLT_FN (BUILT_IN_POW): |
| arg1 = gimple_call_arg (stmt, 1); |
| c = TREE_REAL_CST (arg1); |
| power = real_to_integer (&c) - 1; |
| real_from_integer (&cint, VOIDmode, power, SIGNED); |
| gimple_call_set_arg (stmt, 1, build_real (TREE_TYPE (arg1), cint)); |
| return power; |
| |
| CASE_FLT_FN (BUILT_IN_POWI): |
| arg1 = gimple_call_arg (stmt, 1); |
| power = TREE_INT_CST_LOW (arg1) - 1; |
| gimple_call_set_arg (stmt, 1, build_int_cst (TREE_TYPE (arg1), power)); |
| return power; |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| /* Find the single immediate use of STMT's LHS, and replace it |
| with OP. Remove STMT. If STMT's LHS is the same as *DEF, |
| replace *DEF with OP as well. */ |
| |
| static void |
| propagate_op_to_single_use (tree op, gimple stmt, tree *def) |
| { |
| tree lhs; |
| gimple use_stmt; |
| use_operand_p use; |
| gimple_stmt_iterator gsi; |
| |
| if (is_gimple_call (stmt)) |
| lhs = gimple_call_lhs (stmt); |
| else |
| lhs = gimple_assign_lhs (stmt); |
| |
| gcc_assert (has_single_use (lhs)); |
| single_imm_use (lhs, &use, &use_stmt); |
| if (lhs == *def) |
| *def = op; |
| SET_USE (use, op); |
| if (TREE_CODE (op) != SSA_NAME) |
| update_stmt (use_stmt); |
| gsi = gsi_for_stmt (stmt); |
| unlink_stmt_vdef (stmt); |
| reassoc_remove_stmt (&gsi); |
| release_defs (stmt); |
| } |
| |
| /* Walks the linear chain with result *DEF searching for an operation |
| with operand OP and code OPCODE removing that from the chain. *DEF |
| is updated if there is only one operand but no operation left. */ |
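| /* E.g. for the chain t1 = x * y; t2 = t1 * z with *DEF == t2, removing |
| (MULT_EXPR, y) deletes t1's definition and rewrites its single use so |
| that t2 = x * z. */ |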
| |
| static void |
| zero_one_operation (tree *def, enum tree_code opcode, tree op) |
| { |
| gimple stmt = SSA_NAME_DEF_STMT (*def); |
| |
| do |
| { |
| tree name; |
| |
| if (opcode == MULT_EXPR |
| && stmt_is_power_of_op (stmt, op)) |
| { |
| if (decrement_power (stmt) == 1) |
| propagate_op_to_single_use (op, stmt, def); |
| return; |
| } |
| |
| name = gimple_assign_rhs1 (stmt); |
| |
| /* If this is the operation we look for and one of the operands |
| is ours, simply propagate the other operand into the stmt's |
| single use. */ |
| if (gimple_assign_rhs_code (stmt) == opcode |
| && (name == op |
| || gimple_assign_rhs2 (stmt) == op)) |
| { |
| if (name == op) |
| name = gimple_assign_rhs2 (stmt); |
| propagate_op_to_single_use (name, stmt, def); |
| return; |
| } |
| |
| /* We might have a multiply of two __builtin_pow* calls, and |
| the operand might be hiding in the rightmost one. */ |
| if (opcode == MULT_EXPR |
| && gimple_assign_rhs_code (stmt) == opcode |
| && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME |
| && has_single_use (gimple_assign_rhs2 (stmt))) |
| { |
| gimple stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt)); |
| if (stmt_is_power_of_op (stmt2, op)) |
| { |
| if (decrement_power (stmt2) == 1) |
| propagate_op_to_single_use (op, stmt2, def); |
| return; |
| } |
| } |
| |
| /* Continue walking the chain. */ |
| gcc_assert (name != op |
| && TREE_CODE (name) == SSA_NAME); |
| stmt = SSA_NAME_DEF_STMT (name); |
| } |
| while (1); |
| } |
| |
| /* Returns true if statement S1 dominates statement S2. Like |
| stmt_dominates_stmt_p, but uses stmt UIDs to optimize. */ |
| |
| static bool |
| reassoc_stmt_dominates_stmt_p (gimple s1, gimple s2) |
| { |
| basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2); |
| |
| /* If bb1 is NULL, it should be a GIMPLE_NOP def stmt of an (D) |
| SSA_NAME. Assume it lives at the beginning of function and |
| thus dominates everything. */ |
| if (!bb1 || s1 == s2) |
| return true; |
| |
| /* If bb2 is NULL, it doesn't dominate any stmt with a bb. */ |
| if (!bb2) |
| return false; |
| |
| if (bb1 == bb2) |
| { |
| /* PHIs in the same basic block are assumed to be |
| executed all in parallel; if only one stmt is a PHI, |
| it dominates the other stmt in the same basic block. */ |
| if (gimple_code (s1) == GIMPLE_PHI) |
| return true; |
| |
| if (gimple_code (s2) == GIMPLE_PHI) |
| return false; |
| |
| gcc_assert (gimple_uid (s1) && gimple_uid (s2)); |
| |
| if (gimple_uid (s1) < gimple_uid (s2)) |
| return true; |
| |
| if (gimple_uid (s1) > gimple_uid (s2)) |
| return false; |
| |
| gimple_stmt_iterator gsi = gsi_for_stmt (s1); |
| unsigned int uid = gimple_uid (s1); |
| for (gsi_next (&gsi); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple s = gsi_stmt (gsi); |
| if (gimple_uid (s) != uid) |
| break; |
| if (s == s2) |
| return true; |
| } |
| |
| return false; |
| } |
| |
| return dominated_by_p (CDI_DOMINATORS, bb2, bb1); |
| } |
| |
| /* Insert STMT after INSERT_POINT. */ |
| |
| static void |
| insert_stmt_after (gimple stmt, gimple insert_point) |
| { |
| gimple_stmt_iterator gsi; |
| basic_block bb; |
| |
| if (gimple_code (insert_point) == GIMPLE_PHI) |
| bb = gimple_bb (insert_point); |
| else if (!stmt_ends_bb_p (insert_point)) |
| { |
| gsi = gsi_for_stmt (insert_point); |
| gimple_set_uid (stmt, gimple_uid (insert_point)); |
| gsi_insert_after (&gsi, stmt, GSI_NEW_STMT); |
| return; |
| } |
| else |
| /* We assume INSERT_POINT is a SSA_NAME_DEF_STMT of some SSA_NAME, |
| thus if it must end a basic block, it should be a call that can |
| throw, or some assignment that can throw. If it throws, the LHS |
| of it will not be initialized though, so only valid places using |
| the SSA_NAME should be dominated by the fallthru edge. */ |
| bb = find_fallthru_edge (gimple_bb (insert_point)->succs)->dest; |
| gsi = gsi_after_labels (bb); |
| if (gsi_end_p (gsi)) |
| { |
| gimple_stmt_iterator gsi2 = gsi_last_bb (bb); |
| gimple_set_uid (stmt, |
| gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2))); |
| } |
| else |
| gimple_set_uid (stmt, gimple_uid (gsi_stmt (gsi))); |
| gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); |
| } |
| |
| /* Builds one statement performing OP1 OPCODE OP2 using a new SSA name |
| of type TYPE for the result. Places the statement after the |
| definition of either OP1 or OP2. Returns the new statement. */ |
| |
| static gimple |
| build_and_add_sum (tree type, tree op1, tree op2, enum tree_code opcode) |
| { |
| gimple op1def = NULL, op2def = NULL; |
| gimple_stmt_iterator gsi; |
| tree op; |
| gassign *sum; |
| |
| /* Create the addition statement. */ |
| op = make_ssa_name (type); |
| sum = gimple_build_assign (op, opcode, op1, op2); |
| |
| /* Find an insertion place and insert. */ |
| if (TREE_CODE (op1) == SSA_NAME) |
| op1def = SSA_NAME_DEF_STMT (op1); |
| if (TREE_CODE (op2) == SSA_NAME) |
| op2def = SSA_NAME_DEF_STMT (op2); |
| if ((!op1def || gimple_nop_p (op1def)) |
| && (!op2def || gimple_nop_p (op2def))) |
| { |
| gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))); |
| if (gsi_end_p (gsi)) |
| { |
| gimple_stmt_iterator gsi2 |
| = gsi_last_bb (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))); |
| gimple_set_uid (sum, |
| gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2))); |
| } |
| else |
| gimple_set_uid (sum, gimple_uid (gsi_stmt (gsi))); |
| gsi_insert_before (&gsi, sum, GSI_NEW_STMT); |
| } |
| else |
| { |
| gimple insert_point; |
| if ((!op1def || gimple_nop_p (op1def)) |
| || (op2def && !gimple_nop_p (op2def) |
| && reassoc_stmt_dominates_stmt_p (op1def, op2def))) |
| insert_point = op2def; |
| else |
| insert_point = op1def; |
| insert_stmt_after (sum, insert_point); |
| } |
| update_stmt (sum); |
| |
| return sum; |
| } |
| |
| /* Perform un-distribution of divisions and multiplications. |
| A * X + B * X is transformed into (A + B) * X and A / X + B / X |
| to (A + B) / X for real X. |
| |
| The algorithm is organized as follows. |
| |
| - First we walk the addition chain *OPS looking for summands that |
| are defined by a multiplication or a real division. This results |
| in the candidates bitmap with relevant indices into *OPS. |
| |
| - Second we build the chains of multiplications or divisions for |
| these candidates, counting the number of occurrences of (operand, code) |
| pairs in all of the candidates chains. |
| |
| - Third we sort the (operand, code) pairs by number of occurrence and |
| process them starting with the pair with the most uses. |
| |
| * For each such pair we walk the candidates again to build a |
| second candidate bitmap noting all multiplication/division chains |
| that have at least one occurrence of (operand, code). |
| |
| * We build an alternate addition chain only covering these |
| candidates with one (operand, code) operation removed from their |
| multiplication/division chain. |
| |
| * The first candidate gets replaced by the alternate addition chain |
| multiplied/divided by the operand. |
| |
| * All candidate chains get disabled for further processing and |
| processing of (operand, code) pairs continues. |
| |
| The alternate addition chains built are re-processed by the main |
| reassociation algorithm which allows optimizing a * x * y + b * y * x |
| to (a + b) * x * y in one invocation of the reassociation pass. */ |
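| |
| /* For instance, for the addition chain a * x + b * x + c the first two |
| summands are candidates, the pair (x, MULT_EXPR) occurs twice, and the |
| chain is rewritten to (a + b) * x + c; the second candidate's slot |
| becomes a zero constant that later folds away. */ |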
| |
| static bool |
| undistribute_ops_list (enum tree_code opcode, |
| vec<operand_entry_t> *ops, struct loop *loop) |
| { |
| unsigned int length = ops->length (); |
| operand_entry_t oe1; |
| unsigned i, j; |
| sbitmap candidates, candidates2; |
| unsigned nr_candidates, nr_candidates2; |
| sbitmap_iterator sbi0; |
| vec<operand_entry_t> *subops; |
| bool changed = false; |
| int next_oecount_id = 0; |
| |
| if (length <= 1 |
| || opcode != PLUS_EXPR) |
| return false; |
| |
| /* Build a list of candidates to process. */ |
| candidates = sbitmap_alloc (length); |
| bitmap_clear (candidates); |
| nr_candidates = 0; |
| FOR_EACH_VEC_ELT (*ops, i, oe1) |
| { |
| enum tree_code dcode; |
| gimple oe1def; |
| |
| if (TREE_CODE (oe1->op) != SSA_NAME) |
| continue; |
| oe1def = SSA_NAME_DEF_STMT (oe1->op); |
| if (!is_gimple_assign (oe1def)) |
| continue; |
| dcode = gimple_assign_rhs_code (oe1def); |
| if ((dcode != MULT_EXPR |
| && dcode != RDIV_EXPR) |
| || !is_reassociable_op (oe1def, dcode, loop)) |
| continue; |
| |
| bitmap_set_bit (candidates, i); |
| nr_candidates++; |
| } |
| |
| if (nr_candidates < 2) |
| { |
| sbitmap_free (candidates); |
| return false; |
| } |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "searching for un-distribute opportunities "); |
| print_generic_expr (dump_file, |
| (*ops)[bitmap_first_set_bit (candidates)]->op, 0); |
| fprintf (dump_file, " %d\n", nr_candidates); |
| } |
| |
| /* Build linearized sub-operand lists and the counting table. */ |
| cvec.create (0); |
| |
| hash_table<oecount_hasher> ctable (15); |
| |
| /* ??? Macro arguments cannot have multi-argument template types in |
| them. This typedef is needed to workaround that limitation. */ |
| typedef vec<operand_entry_t> vec_operand_entry_t_heap; |
| subops = XCNEWVEC (vec_operand_entry_t_heap, ops->length ()); |
| EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0) |
| { |
| gimple oedef; |
| enum tree_code oecode; |
| unsigned j; |
| |
| oedef = SSA_NAME_DEF_STMT ((*ops)[i]->op); |
| oecode = gimple_assign_rhs_code (oedef); |
| linearize_expr_tree (&subops[i], oedef, |
| associative_tree_code (oecode), false); |
| |
| FOR_EACH_VEC_ELT (subops[i], j, oe1) |
| { |
| oecount c; |
| int *slot; |
| int idx; |
| c.oecode = oecode; |
| c.cnt = 1; |
| c.id = next_oecount_id++; |
| c.op = oe1->op; |
| cvec.safe_push (c); |
| idx = cvec.length () + 41; |
| slot = ctable.find_slot (idx, INSERT); |
| if (!*slot) |
| { |
| *slot = idx; |
| } |
| else |
| { |
| cvec.pop (); |
| cvec[*slot - 42].cnt++; |
| } |
| } |
| } |
| |
| /* Sort the counting table. */ |
| cvec.qsort (oecount_cmp); |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| oecount *c; |
| fprintf (dump_file, "Candidates:\n"); |
| FOR_EACH_VEC_ELT (cvec, j, c) |
| { |
| fprintf (dump_file, " %u %s: ", c->cnt, |
| c->oecode == MULT_EXPR |
| ? "*" : c->oecode == RDIV_EXPR ? "/" : "?"); |
| print_generic_expr (dump_file, c->op, 0); |
| fprintf (dump_file, "\n"); |
| } |
| } |
| |
| /* Process the (operand, code) pairs in order of most occurrence. */ |
| candidates2 = sbitmap_alloc (length); |
| while (!cvec.is_empty ()) |
| { |
| oecount *c = &cvec.last (); |
| if (c->cnt < 2) |
| break; |
| |
| /* Now collect the operands in the outer chain that contain |
| the common operand in their inner chain. */ |
| bitmap_clear (candidates2); |
| nr_candidates2 = 0; |
| EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0) |
| { |
| gimple oedef; |
| enum tree_code oecode; |
| unsigned j; |
| tree op = (*ops)[i]->op; |
| |
| /* If we undistributed in this chain already, this may be |
| a constant. */ |
| if (TREE_CODE (op) != SSA_NAME) |
| continue; |
| |
| oedef = SSA_NAME_DEF_STMT (op); |
| oecode = gimple_assign_rhs_code (oedef); |
| if (oecode != c->oecode) |
| continue; |
| |
| FOR_EACH_VEC_ELT (subops[i], j, oe1) |
| { |
| if (oe1->op == c->op) |
| { |
| bitmap_set_bit (candidates2, i); |
| ++nr_candidates2; |
| break; |
| } |
| } |
| } |
| |
| if (nr_candidates2 >= 2) |
| { |
| operand_entry_t oe1, oe2; |
| gimple prod; |
| int first = bitmap_first_set_bit (candidates2); |
| |
| /* Build the new addition chain. */ |
| oe1 = (*ops)[first]; |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Building ("); |
| print_generic_expr (dump_file, oe1->op, 0); |
| } |
| zero_one_operation (&oe1->op, c->oecode, c->op); |
| EXECUTE_IF_SET_IN_BITMAP (candidates2, first+1, i, sbi0) |
| { |
| gimple sum; |
| oe2 = (*ops)[i]; |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, " + "); |
| print_generic_expr (dump_file, oe2->op, 0); |
| } |
| zero_one_operation (&oe2->op, c->oecode, c->op); |
| sum = build_and_add_sum (TREE_TYPE (oe1->op), |
| oe1->op, oe2->op, opcode); |
| oe2->op = build_zero_cst (TREE_TYPE (oe2->op)); |
| oe2->rank = 0; |
| oe1->op = gimple_get_lhs (sum); |
| } |
| |
| /* Apply the multiplication/division. */ |
| prod = build_and_add_sum (TREE_TYPE (oe1->op), |
| oe1->op, c->op, c->oecode); |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, ") %s ", c->oecode == MULT_EXPR ? "*" : "/"); |
| print_generic_expr (dump_file, c->op, 0); |
| fprintf (dump_file, "\n"); |
| } |
| |
| /* Record it in the addition chain and disable further |
| undistribution with this op. */ |
| oe1->op = gimple_assign_lhs (prod); |
| oe1->rank = get_rank (oe1->op); |
| subops[first].release (); |
| |
| changed = true; |
| } |
| |
| cvec.pop (); |
| } |
| |
| for (i = 0; i < ops->length (); ++i) |
| subops[i].release (); |
| free (subops); |
| cvec.release (); |
| sbitmap_free (candidates); |
| sbitmap_free (candidates2); |
| |
| return changed; |
| } |
| |
| /* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR and CURR is a comparison |
| expression, examine the other OPS to see if any of them are comparisons |
| of the same values, which we may be able to combine or eliminate. |
| For example, we can rewrite (a < b) | (a == b) as (a <= b). */ |
| |
| static bool |
| eliminate_redundant_comparison (enum tree_code opcode, |
| vec<operand_entry_t> *ops, |
| unsigned int currindex, |
| operand_entry_t curr) |
| { |
| tree op1, op2; |
| enum tree_code lcode, rcode; |
| gimple def1, def2; |
| int i; |
| operand_entry_t oe; |
| |
| if (opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR) |
| return false; |
| |
| /* Check that CURR is a comparison. */ |
| if (TREE_CODE (curr->op) != SSA_NAME) |
| return false; |
| def1 = SSA_NAME_DEF_STMT (curr->op); |
| if (!is_gimple_assign (def1)) |
| return false; |
| lcode = gimple_assign_rhs_code (def1); |
| if (TREE_CODE_CLASS (lcode) != tcc_comparison) |
| return false; |
| op1 = gimple_assign_rhs1 (def1); |
| op2 = gimple_assign_rhs2 (def1); |
| |
| /* Now look for a similar comparison in the remaining OPS. */ |
| for (i = currindex + 1; ops->iterate (i, &oe); i++) |
| { |
| tree t; |
| |
| if (TREE_CODE (oe->op) != SSA_NAME) |
| continue; |
| def2 = SSA_NAME_DEF_STMT (oe->op); |
| if (!is_gimple_assign (def2)) |
| continue; |
| rcode = gimple_assign_rhs_code (def2); |
| if (TREE_CODE_CLASS (rcode) != tcc_comparison) |
| continue; |
| |
| /* If we got here, we have a match. See if we can combine the |
| two comparisons. */ |
| if (opcode == BIT_IOR_EXPR) |
| t = maybe_fold_or_comparisons (lcode, op1, op2, |
| rcode, gimple_assign_rhs1 (def2), |
| gimple_assign_rhs2 (def2)); |
| else |
| t = maybe_fold_and_comparisons (lcode, op1, op2, |
| rcode, gimple_assign_rhs1 (def2), |
| gimple_assign_rhs2 (def2)); |
| if (!t) |
| continue; |
| |
| /* maybe_fold_and_comparisons and maybe_fold_or_comparisons |
| always give us a boolean_type_node value back. If the original |
| BIT_AND_EXPR or BIT_IOR_EXPR was of a wider integer type, |
| we need to convert. */ |
| if (!useless_type_conversion_p (TREE_TYPE (curr->op), TREE_TYPE (t))) |
| t = fold_convert (TREE_TYPE (curr->op), t); |
| |
| if (TREE_CODE (t) != INTEGER_CST |
| && !operand_equal_p (t, curr->op, 0)) |
| { |
| enum tree_code subcode; |
| tree newop1, newop2; |
| if (!COMPARISON_CLASS_P (t)) |
| continue; |
| extract_ops_from_tree (t, &subcode, &newop1, &newop2); |
| STRIP_USELESS_TYPE_CONVERSION (newop1); |
| STRIP_USELESS_TYPE_CONVERSION (newop2); |
| if (!is_gimple_val (newop1) || !is_gimple_val (newop2)) |
| continue; |
| } |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "Equivalence: "); |
| print_generic_expr (dump_file, curr->op, 0); |
| fprintf (dump_file, " %s ", op_symbol_code (opcode)); |
| print_generic_expr (dump_file, oe->op, 0); |
| fprintf (dump_file, " -> "); |
| print_generic_expr (dump_file, t, 0); |
| fprintf (dump_file, "\n"); |
| } |
| |
| /* Now we can delete oe, as it has been subsumed by the new combined |
| expression t. */ |
| ops->ordered_remove (i); |
| reassociate_stats.ops_eliminated ++; |
| |
| /* If t is the same as curr->op, we're done. Otherwise we must |
| replace curr->op with t. Special case is if we got a constant |
| back, in which case we add it to the end instead of in place of |
| the current entry. */ |
| if (TREE_CODE (t) == INTEGER_CST) |
| { |
| ops->ordered_remove (currindex); |
| add_to_ops_vec (ops, t); |
| } |
| else if (!operand_equal_p (t, curr->op, 0)) |
| { |
| gimple sum; |
| enum tree_code subcode; |
| tree newop1; |
| tree newop2; |
| gcc_assert (COMPARISON_CLASS_P (t)); |
| extract_ops_from_tree (t, &subcode, &newop1, &newop2); |
| STRIP_USELESS_TYPE_CONVERSION (newop1); |
| STRIP_USELESS_TYPE_CONVERSION (newop2); |
| gcc_checking_assert (is_gimple_val (newop1) |
| && is_gimple_val (newop2)); |
| sum = build_and_add_sum (TREE_TYPE (t), newop1, newop2, subcode); |
| curr->op = gimple_get_lhs (sum); |
| } |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Perform various identities and other optimizations on the list of |
| operand entries, stored in OPS. The tree code for the binary |
| operation between all the operands is OPCODE. */ |
| |
| static void |
| optimize_ops_list (enum tree_code opcode, |
| vec<operand_entry_t> *ops) |
| { |
| unsigned int length = ops->length (); |
| unsigned int i; |
| operand_entry_t oe; |
| operand_entry_t oelast = NULL; |
| bool iterate = false; |
| |
| if (length == 1) |
| return; |
| |
| oelast = ops->last (); |
| |
| /* If the last two are constants, pop the constants off, merge them |
| and try the next two. */ |
| if (oelast->rank == 0 && is_gimple_min_invariant (oelast->op)) |
| { |
| operand_entry_t oelm1 = (*ops)[length - 2]; |
| |
| if (oelm1->rank == 0 |
| && is_gimple_min_invariant (oelm1->op) |
| && useless_type_conversion_p (TREE_TYPE (oelm1->op), |
| TREE_TYPE (oelast->op))) |
| { |
| tree folded = fold_binary (opcode, TREE_TYPE (oelm1->op), |
| oelm1->op, oelast->op); |
| |
| if (folded && is_gimple_min_invariant (folded)) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| fprintf (dump_file, "Merging constants\n"); |
| |
| ops->pop (); |
| ops->pop (); |
| |
| add_to_ops_vec (ops, folded); |
| reassociate_stats.constants_eliminated++; |
| |
| optimize_ops_list (opcode, ops); |
| return; |
| } |
| } |
| } |
| |
| eliminate_using_constants (opcode, ops); |
| oelast = NULL; |
| |
| for (i = 0; ops->iterate (i, &oe);) |
| { |
| bool done = false; |
| |
| if (eliminate_not_pairs (opcode, ops, i, oe)) |
| return; |
| if (eliminate_duplicate_pair (opcode, ops, &done, i, oe, oelast) |
| || (!done && eliminate_plus_minus_pair (opcode, ops, i, oe)) |
| || (!done && eliminate_redundant_comparison (opcode, ops, i, oe))) |
| { |
| if (done) |
| return; |
| iterate = true; |
| oelast = NULL; |
| continue; |
| } |
| oelast = oe; |
| i++; |
| } |
| |
| length = ops->length (); |
| oelast = ops->last (); |
| |
| if (iterate) |
| optimize_ops_list (opcode, ops); |
| } |
| |
| /* The following functions are subroutines to optimize_range_tests and allow |
| it to try to change a logical combination of comparisons into a range |
| test. |
| |
| For example, both |
| X == 2 || X == 5 || X == 3 || X == 4 |
| and |
| X >= 2 && X <= 5 |
| are converted to |
| (unsigned) (X - 2) <= 3 |
| |
| For more information see the comments above fold_range_test in |
| fold-const.c; this implementation is for GIMPLE. */ |
| |
| struct range_entry |
| { |
| tree exp; |
| tree low; |
| tree high; |
| bool in_p; |
| bool strict_overflow_p; |
| unsigned int idx, next; |
| }; |
| |
| /* This is similar to make_range in fold-const.c, but on top of |
| GIMPLE instead of trees. If EXP is non-NULL, it should be |
| an SSA_NAME and STMT argument is ignored, otherwise STMT |
| argument should be a GIMPLE_COND. */ |
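| /* E.g. for EXP defined as x >= 2 this fills in R roughly as |
| { exp = x, low = 2, high = NULL (maximum), in_p = true }, and for |
| x != 3 as { exp = x, low = 3, high = 3, in_p = false }, assuming x |
| itself cannot be refined any further. */ |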
| |
| static void |
| init_range_entry (struct range_entry *r, tree exp, gimple stmt) |
| { |
| int in_p; |
| tree low, high; |
| bool is_bool, strict_overflow_p; |
| |
| r->exp = NULL_TREE; |
| r->in_p = false; |
| r->strict_overflow_p = false; |
| r->low = NULL_TREE; |
| r->high = NULL_TREE; |
| if (exp != NULL_TREE |
| && (TREE_CODE (exp) != SSA_NAME || !INTEGRAL_TYPE_P (TREE_TYPE (exp)))) |
| return; |
| |
| /* Start with simply saying "EXP != 0" and then look at the code of EXP |
| and see if we can refine the range. Some of the cases below may not |
| happen, but it doesn't seem worth worrying about this. We "continue" |
| the outer loop when we've changed something; otherwise we "break" |
| the switch, which will "break" the while. */ |
| low = exp ? build_int_cst (TREE_TYPE (exp), 0) : boolean_false_node; |
| high = low; |
| in_p = 0; |
| strict_overflow_p = false; |
| is_bool = false; |
| if (exp == NULL_TREE) |
| is_bool = true; |
| else if (TYPE_PRECISION (TREE_TYPE (exp)) == 1) |
| { |
| if (TYPE_UNSIGNED (TREE_TYPE (exp))) |
| is_bool = true; |
| else |
| return; |
| } |
| else if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE) |
| is_bool = true; |
| |
| while (1) |
| { |
| enum tree_code code; |
| tree arg0, arg1, exp_type; |
| tree nexp; |
| location_t loc; |
| |
| if (exp != NULL_TREE) |
| { |
| if (TREE_CODE (exp) != SSA_NAME |
| || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp)) |
| break; |
| |
| stmt = SSA_NAME_DEF_STMT (exp); |
| if (!is_gimple_assign (stmt)) |
| break; |
| |
| code = gimple_assign_rhs_code (stmt); |
| arg0 = gimple_assign_rhs1 (stmt); |
| arg1 = gimple_assign_rhs2 (stmt); |
| exp_type = TREE_TYPE (exp); |
| } |
| else |
| { |
| code = gimple_cond_code (stmt); |
| arg0 = gimple_cond_lhs (stmt); |
| arg1 = gimple_cond_rhs (stmt); |
| exp_type = boolean_type_node; |
| } |
| |
| if (TREE_CODE (arg0) != SSA_NAME) |
| break; |
| loc = gimple_location (stmt); |
| switch (code) |
| { |
| case BIT_NOT_EXPR: |
| if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE |
| /* Ensure the range is either +[-,0], +[0,0], |
| -[-,0], -[0,0] or +[1,-], +[1,1], -[1,-] or |
| -[1,1]. If it is e.g. +[-,-] or -[-,-] |
| or similar expression of unconditional true or |
| false, it should not be negated. */ |
| && ((high && integer_zerop (high)) |
| || (low && integer_onep (low)))) |
| { |
| in_p = !in_p; |
| exp = arg0; |
| continue; |
| } |
| break; |
| case SSA_NAME: |
| exp = arg0; |
| continue; |
| CASE_CONVERT: |
| if (is_bool) |
| goto do_default; |
| if (TYPE_PRECISION (TREE_TYPE (arg0)) == 1) |
| { |
| if (TYPE_UNSIGNED (TREE_TYPE (arg0))) |
| is_bool = true; |
| else |
| return; |
| } |
| else if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE) |
| is_bool = true; |
| goto do_default; |
| case EQ_EXPR: |
| case NE_EXPR: |
| case LT_EXPR: |
| case LE_EXPR: |
| case GE_EXPR: |
| case GT_EXPR: |
| is_bool = true; |
| /* FALLTHRU */ |
| default: |
| if (!is_bool) |
| return; |
| do_default: |
| nexp = make_range_step (loc, code, arg0, arg1, exp_type, |
| &low, &high, &in_p, |
| &strict_overflow_p); |
| if (nexp != NULL_TREE) |
| { |
| exp = nexp; |
| gcc_assert (TREE_CODE (exp) == SSA_NAME); |
| continue; |
| } |
| break; |
| } |
| break; |
| } |
| if (is_bool) |
| { |
| r->exp = exp; |
| r->in_p = in_p; |
| r->low = low; |
| r->high = high; |
| r->strict_overflow_p = strict_overflow_p; |
| } |
| } |
| |
| /* Comparison function for qsort. Sort entries |
| without SSA_NAME exp first, then with SSA_NAMEs sorted |
| by increasing SSA_NAME_VERSION, and for the same SSA_NAMEs |
| by increasing ->low and if ->low is the same, by increasing |
| ->high. ->low == NULL_TREE means minimum, ->high == NULL_TREE |
| maximum. */ |
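
/* E.g. for three entries on the same SSA_NAME, [NULL_TREE, 7] sorts before
   [2, 7], which in turn sorts before [2, NULL_TREE], while entries whose
   exp is not an SSA_NAME sort before all of them.  */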
| |
| static int |
| range_entry_cmp (const void *a, const void *b) |
| { |
| const struct range_entry *p = (const struct range_entry *) a; |
| const struct range_entry *q = (const struct range_entry *) b; |
| |
| if (p->exp != NULL_TREE && TREE_CODE (p->exp) == SSA_NAME) |
| { |
| if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME) |
| { |
| /* Group range_entries for the same SSA_NAME together. */ |
| if (SSA_NAME_VERSION (p->exp) < SSA_NAME_VERSION (q->exp)) |
| return -1; |
| else if (SSA_NAME_VERSION (p->exp) > SSA_NAME_VERSION (q->exp)) |
| return 1; |
| /* If ->low is different, NULL low goes first, then by |
| ascending low. */ |
| if (p->low != NULL_TREE) |
| { |
| if (q->low != NULL_TREE) |
| { |
| tree tem = fold_binary (LT_EXPR, boolean_type_node, |
| p->low, q->low); |
| if (tem && integer_onep (tem)) |
| return -1; |
| tem = fold_binary (GT_EXPR, boolean_type_node, |
| p->low, q->low); |
| if (tem && integer_onep (tem)) |
| return 1; |
| } |
| else |
| return 1; |
| } |
| else if (q->low != NULL_TREE) |
| return -1; |
| /* If ->high is different, NULL high goes last, before that by |
| ascending high. */ |
| if (p->high != NULL_TREE) |
| { |
| if (q->high != NULL_TREE) |
| { |
| tree tem = fold_binary (LT_EXPR, boolean_type_node, |
| p->high, q->high); |
| if (tem && integer_onep (tem)) |
| return -1; |
| tem = fold_binary (GT_EXPR, boolean_type_node, |
| p->high, q->high); |
| if (tem && integer_onep (tem)) |
| return 1; |
| } |
| else |
| return -1; |
| } |
| else if (q->high != NULL_TREE) |
| return 1; |
| /* If both ranges are the same, sort below by ascending idx. */ |
| } |
| else |
| return 1; |
| } |
| else if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME) |
| return -1; |
| |
| if (p->idx < q->idx) |
| return -1; |
| else |
| { |
| gcc_checking_assert (p->idx > q->idx); |
| return 1; |
| } |
| } |
| |
/* Helper routine of optimize_range_tests.
   [EXP, IN_P, LOW, HIGH, STRICT_OVERFLOW_P] is a merged range for
   RANGE and OTHERRANGE through OTHERRANGE + COUNT - 1 ranges,
   OPCODE and OPS are arguments of optimize_range_tests.  If OTHERRANGE
   is NULL, OTHERRANGEP must not be NULL; it then points to
   an array of COUNT pointers to other ranges.  Return
   true if the range merge has been successful.
| If OPCODE is ERROR_MARK, this is called from within |
| maybe_optimize_range_tests and is performing inter-bb range optimization. |
| In that case, whether an op is BIT_AND_EXPR or BIT_IOR_EXPR is found in |
| oe->rank. */ |
| |
| static bool |
| update_range_test (struct range_entry *range, struct range_entry *otherrange, |
| struct range_entry **otherrangep, |
| unsigned int count, enum tree_code opcode, |
| vec<operand_entry_t> *ops, tree exp, gimple_seq seq, |
| bool in_p, tree low, tree high, bool strict_overflow_p) |
| { |
| operand_entry_t oe = (*ops)[range->idx]; |
| tree op = oe->op; |
| gimple stmt = op ? SSA_NAME_DEF_STMT (op) : |
| last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id)); |
| location_t loc = gimple_location (stmt); |
| tree optype = op ? TREE_TYPE (op) : boolean_type_node; |
| tree tem = build_range_check (loc, optype, unshare_expr (exp), |
| in_p, low, high); |
| enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON; |
| gimple_stmt_iterator gsi; |
| unsigned int i, uid; |
| |
| if (tem == NULL_TREE) |
| return false; |
| |
| /* If op is default def SSA_NAME, there is no place to insert the |
| new comparison. Give up, unless we can use OP itself as the |
| range test. */ |
| if (op && SSA_NAME_IS_DEFAULT_DEF (op)) |
| { |
| if (op == range->exp |
| && ((TYPE_PRECISION (optype) == 1 && TYPE_UNSIGNED (optype)) |
| || TREE_CODE (optype) == BOOLEAN_TYPE) |
| && (op == tem |
| || (TREE_CODE (tem) == EQ_EXPR |
| && TREE_OPERAND (tem, 0) == op |
| && integer_onep (TREE_OPERAND (tem, 1)))) |
| && opcode != BIT_IOR_EXPR |
| && (opcode != ERROR_MARK || oe->rank != BIT_IOR_EXPR)) |
| { |
| stmt = NULL; |
| tem = op; |
| } |
| else |
| return false; |
| } |
| |
| if (strict_overflow_p && issue_strict_overflow_warning (wc)) |
| warning_at (loc, OPT_Wstrict_overflow, |
| "assuming signed overflow does not occur " |
| "when simplifying range test"); |
| |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| struct range_entry *r; |
| fprintf (dump_file, "Optimizing range tests "); |
| print_generic_expr (dump_file, range->exp, 0); |
| fprintf (dump_file, " %c[", range->in_p ? '+' : '-'); |
| print_generic_expr (dump_file, range->low, 0); |
| fprintf (dump_file, ", "); |
| print_generic_expr (dump_file, range->high, 0); |
| fprintf (dump_file, "]"); |
| for (i = 0; i < count; i++) |
| { |
| if (otherrange) |
| r = otherrange + i; |
| else |
| r = otherrangep[i]; |
| fprintf (dump_file, " and %c[", r->in_p ? '+' : '-'); |
| print_generic_expr (dump_file, r->low, 0); |
| fprintf (dump_file, ", "); |
| print_generic_expr (dump_file, r->high, 0); |
| fprintf (dump_file, "]"); |
| } |
| fprintf (dump_file, "\n into "); |
| print_generic_expr (dump_file, tem, 0); |
| fprintf (dump_file, "\n"); |
| } |
| |
| if (opcode == BIT_IOR_EXPR |
| || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR)) |
| tem = invert_truthvalue_loc (loc, tem); |
| |
| tem = fold_convert_loc (loc, optype, tem); |
| if (stmt) |
| { |
| gsi = gsi_for_stmt (stmt); |
| uid = gimple_uid (stmt); |
| } |
| else |
| { |
| gsi = gsi_none (); |
| uid = 0; |
| } |
| if (stmt == NULL) |
| gcc_checking_assert (tem == op); |
/* In rare cases range->exp can be equal to the lhs of stmt.
   In that case we have to insert after the stmt rather than before
   it.  If stmt is a PHI, insert it at the start of the basic block.  */
| else if (op != range->exp) |
| { |
| gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT); |
| tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, true, |
| GSI_SAME_STMT); |
| gsi_prev (&gsi); |
| } |
| else if (gimple_code (stmt) != GIMPLE_PHI) |
| { |
| gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING); |
| tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, false, |
| GSI_CONTINUE_LINKING); |
| } |
| else |
| { |
| gsi = gsi_after_labels (gimple_bb (stmt)); |
| if (!gsi_end_p (gsi)) |
| uid = gimple_uid (gsi_stmt (gsi)); |
| else |
| { |
| gsi = gsi_start_bb (gimple_bb (stmt)); |
| uid = 1; |
| while (!gsi_end_p (gsi)) |
| { |
| uid = gimple_uid (gsi_stmt (gsi)); |
| gsi_next (&gsi); |
| } |
| } |
| gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT); |
| tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, true, |
| GSI_SAME_STMT); |
| if (gsi_end_p (gsi)) |
| gsi = gsi_last_bb (gimple_bb (stmt)); |
| else |
| gsi_prev (&gsi); |
| } |
| for (; !gsi_end_p (gsi); gsi_prev (&gsi)) |
| if (gimple_uid (gsi_stmt (gsi))) |
| break; |
| else |
| gimple_set_uid (gsi_stmt (gsi), uid); |
| |
| oe->op = tem; |
| range->exp = exp; |
| range->low = low; |
| range->high = high; |
| range->in_p = in_p; |
| range->strict_overflow_p = false; |
| |
| for (i = 0; i < count; i++) |
| { |
| if (otherrange) |
| range = otherrange + i; |
| else |
| range = otherrangep[i]; |
| oe = (*ops)[range->idx]; |
| /* Now change all the other range test immediate uses, so that |
| those tests will be optimized away. */ |
| if (opcode == ERROR_MARK) |
| { |
| if (oe->op) |
| oe->op = build_int_cst (TREE_TYPE (oe->op), |
| oe->rank == BIT_IOR_EXPR ? 0 : 1); |
| else |
| oe->op = (oe->rank == BIT_IOR_EXPR |
| ? boolean_false_node : boolean_true_node); |
| } |
| else |
| oe->op = error_mark_node; |
| range->exp = NULL_TREE; |
| } |
| return true; |
| } |
| |
| /* Optimize X == CST1 || X == CST2 |
| if popcount (CST1 ^ CST2) == 1 into |
| (X & ~(CST1 ^ CST2)) == (CST1 & ~(CST1 ^ CST2)). |
| Similarly for ranges. E.g. |
| X != 2 && X != 3 && X != 10 && X != 11 |
| will be transformed by the previous optimization into |
| !((X - 2U) <= 1U || (X - 10U) <= 1U) |
| and this loop can transform that into |
| !(((X & ~8) - 2U) <= 1U). */ |
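
/* E.g. for the simplest form above, X == 4 || X == 6 has
   CST1 ^ CST2 == 2 with a single bit set, so the pair can be rewritten
   as (X & ~2) == 4, which holds exactly for X == 4 and X == 6.  */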
| |
| static bool |
| optimize_range_tests_xor (enum tree_code opcode, tree type, |
| tree lowi, tree lowj, tree highi, tree highj, |
| vec<operand_entry_t> *ops, |
| struct range_entry *rangei, |
| struct range_entry *rangej) |
| { |
| tree lowxor, highxor, tem, exp; |
| /* Check lowi ^ lowj == highi ^ highj and |
| popcount (lowi ^ lowj) == 1. */ |
| lowxor = fold_binary (BIT_XOR_EXPR, type, lowi, lowj); |
| if (lowxor == NULL_TREE || TREE_CODE (lowxor) != INTEGER_CST) |
| return false; |
| if (!integer_pow2p (lowxor)) |
| return false; |
| highxor = fold_binary (BIT_XOR_EXPR, type, highi, highj); |
| if (!tree_int_cst_equal (lowxor, highxor)) |
| return false; |
| |
| tem = fold_build1 (BIT_NOT_EXPR, type, lowxor); |
| exp = fold_build2 (BIT_AND_EXPR, type, rangei->exp, tem); |
| lowj = fold_build2 (BIT_AND_EXPR, type, lowi, tem); |
| highj = fold_build2 (BIT_AND_EXPR, type, highi, tem); |
| if (update_range_test (rangei, rangej, NULL, 1, opcode, ops, exp, |
| NULL, rangei->in_p, lowj, highj, |
| rangei->strict_overflow_p |
| || rangej->strict_overflow_p)) |
| return true; |
| return false; |
| } |
| |
| /* Optimize X == CST1 || X == CST2 |
| if popcount (CST2 - CST1) == 1 into |
| ((X - CST1) & ~(CST2 - CST1)) == 0. |
| Similarly for ranges. E.g. |
| X == 43 || X == 76 || X == 44 || X == 78 || X == 77 || X == 46 |
| || X == 75 || X == 45 |
| will be transformed by the previous optimization into |
| (X - 43U) <= 3U || (X - 75U) <= 3U |
| and this loop can transform that into |
| ((X - 43U) & ~(75U - 43U)) <= 3U. */ |
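
/* E.g. X == 5 || X == 13 has CST2 - CST1 == 8, a power of two, so the
   pair can be rewritten as ((X - 5U) & ~8U) == 0, which holds exactly
   for X == 5 and X == 13.  */
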
| static bool |
| optimize_range_tests_diff (enum tree_code opcode, tree type, |
| tree lowi, tree lowj, tree highi, tree highj, |
| vec<operand_entry_t> *ops, |
| struct range_entry *rangei, |
| struct range_entry *rangej) |
| { |
| tree tem1, tem2, mask; |
| /* Check highi - lowi == highj - lowj. */ |
| tem1 = fold_binary (MINUS_EXPR, type, highi, lowi); |
| if (tem1 == NULL_TREE || TREE_CODE (tem1) != INTEGER_CST) |
| return false; |
| tem2 = fold_binary (MINUS_EXPR, type, highj, lowj); |
| if (!tree_int_cst_equal (tem1, tem2)) |
| return false; |
| /* Check popcount (lowj - lowi) == 1. */ |
| tem1 = fold_binary (MINUS_EXPR, type, lowj, lowi); |
| if (tem1 == NULL_TREE || TREE_CODE (tem1) != INTEGER_CST) |
| return false; |
| if (!integer_pow2p (tem1)) |
| return false; |
| |
| type = unsigned_type_for (type); |
| tem1 = fold_convert (type, tem1); |
| tem2 = fold_convert (type, tem2); |
| lowi = fold_convert (type, lowi); |
| mask = fold_build1 (BIT_NOT_EXPR, type, tem1); |
| tem1 = fold_binary (MINUS_EXPR, type, |
| fold_convert (type, rangei->exp), lowi); |
| tem1 = fold_build2 (BIT_AND_EXPR, type, tem1, mask); |
| lowj = build_int_cst (type, 0); |
| if (update_range_test (rangei, rangej, NULL, 1, opcode, ops, tem1, |
| NULL, rangei->in_p, lowj, tem2, |
| rangei->strict_overflow_p |
| || rangej->strict_overflow_p)) |
| return true; |
| return false; |
| } |
| |
/* Perform the checks common to optimize_range_tests_xor and
   optimize_range_tests_diff.
   If OPTIMIZE_XOR is true, call optimize_range_tests_xor,
   otherwise call optimize_range_tests_diff.  */
| |
| static bool |
| optimize_range_tests_1 (enum tree_code opcode, int first, int length, |
| bool optimize_xor, vec<operand_entry_t> *ops, |
| struct range_entry *ranges) |
| { |
| int i, j; |
| bool any_changes = false; |
| for (i = first; i < length; i++) |
| { |
| tree lowi, highi, lowj, highj, type, tem; |
| |
| if (ranges[i].exp == NULL_TREE || ranges[i].in_p) |
| continue; |
| type = TREE_TYPE (ranges[i].exp); |
| if (!INTEGRAL_TYPE_P (type)) |
| continue; |
| lowi = ranges[i].low; |
| if (lowi == NULL_TREE) |
| lowi = TYPE_MIN_VALUE (type); |
| highi = ranges[i].high; |
| if (highi == NULL_TREE) |
| continue; |
| for (j = i + 1; j < length && j < i + 64; j++) |
| { |
| bool changes; |
| if (ranges[i].exp != ranges[j].exp || ranges[j].in_p) |
| continue; |
| lowj = ranges[j].low; |
| if (lowj == NULL_TREE) |
| continue; |
| highj = ranges[j].high; |
| if (highj == NULL_TREE) |
| highj = TYPE_MAX_VALUE (type); |
| /* Check lowj > highi. */ |
| tem = fold_binary (GT_EXPR, boolean_type_node, |
| lowj, highi); |
| if (tem == NULL_TREE || !integer_onep (tem)) |
| continue; |
| if (optimize_xor) |
| changes = optimize_range_tests_xor (opcode, type, lowi, lowj, |
| highi, highj, ops, |
| ranges + i, ranges + j); |
| else |
| changes = optimize_range_tests_diff (opcode, type, lowi, lowj, |
| highi, highj, ops, |
| ranges + i, ranges + j); |
| if (changes) |
| { |
| any_changes = true; |
| break; |
| } |
| } |
| } |
| return any_changes; |
| } |
| |
| /* Helper function of optimize_range_tests_to_bit_test. Handle a single |
| range, EXP, LOW, HIGH, compute bit mask of bits to test and return |
| EXP on success, NULL otherwise. */ |
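
/* E.g. with PREC == 64, TOTALLOW == 43, LOW == 75 and HIGH == 78 (and
   TOTALLOWP == NULL, as for a secondary range), the range covers 4 values,
   so the initial mask is 0xf; because LOW - TOTALLOW == 32, the returned
   mask is 0xf00000000, i.e. bits 32 to 35 of the bit test word.  This is
   just an illustrative walk-through of the arithmetic above, assuming EXP
   needs no BIT_AND_EXPR/PLUS_EXPR adjustment.  */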
| |
| static tree |
| extract_bit_test_mask (tree exp, int prec, tree totallow, tree low, tree high, |
| wide_int *mask, tree *totallowp) |
| { |
| tree tem = int_const_binop (MINUS_EXPR, high, low); |
| if (tem == NULL_TREE |
| || TREE_CODE (tem) != INTEGER_CST |
| || TREE_OVERFLOW (tem) |
| || tree_int_cst_sgn (tem) == -1 |
| || compare_tree_int (tem, prec) != -1) |
| return NULL_TREE; |
| |
| unsigned HOST_WIDE_INT max = tree_to_uhwi (tem) + 1; |
| *mask = wi::shifted_mask (0, max, false, prec); |
| if (TREE_CODE (exp) == BIT_AND_EXPR |
| && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST) |
| { |
| widest_int msk = wi::to_widest (TREE_OPERAND (exp, 1)); |
| msk = wi::zext (~msk, TYPE_PRECISION (TREE_TYPE (exp))); |
| if (wi::popcount (msk) == 1 |
| && wi::ltu_p (msk, prec - max)) |
| { |
| *mask |= wi::shifted_mask (msk.to_uhwi (), max, false, prec); |
| max += msk.to_uhwi (); |
| exp = TREE_OPERAND (exp, 0); |
| if (integer_zerop (low) |
| && TREE_CODE (exp) == PLUS_EXPR |
| && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST) |
| { |
| tree ret = TREE_OPERAND (exp, 0); |
| STRIP_NOPS (ret); |
| widest_int bias |
| = wi::neg (wi::sext (wi::to_widest (TREE_OPERAND (exp, 1)), |
| TYPE_PRECISION (TREE_TYPE (low)))); |
| tree tbias = wide_int_to_tree (TREE_TYPE (ret), bias); |
| if (totallowp) |
| { |
| *totallowp = tbias; |
| return ret; |
| } |
| else if (!tree_int_cst_lt (totallow, tbias)) |
| return NULL_TREE; |
| bias = wi::to_widest (tbias); |
| bias -= wi::to_widest (totallow); |
| if (wi::ges_p (bias, 0) && wi::lts_p (bias, prec - max)) |
| { |
| *mask = wi::lshift (*mask, bias); |
| return ret; |
| } |
| } |
| } |
| } |
| if (totallowp) |
| return exp; |
| if (!tree_int_cst_lt (totallow, low)) |
| return exp; |
| tem = int_const_binop (MINUS_EXPR, low, totallow); |
| if (tem == NULL_TREE |
| || TREE_CODE (tem) != INTEGER_CST |
| || TREE_OVERFLOW (tem) |
| || compare_tree_int (tem, prec - max) == 1) |
| return NULL_TREE; |
| |
| *mask = wi::lshift (*mask, wi::to_widest (tem)); |
| return exp; |
| } |
| |
/* Attempt to optimize small range tests using bit test.
   E.g.
   X != 43 && X != 76 && X != 44 && X != 78 && X != 49
   && X != 77 && X != 46 && X != 75 && X != 45 && X != 82
   has been optimized by the earlier range test optimizations into:
   ((X - 43U) & ~32U) > 3U && X != 49 && X != 82
   As the whole range 43 through 82 spans fewer than 64 values,
   for 64-bit word targets optimize that into:
   (X - 43U) > 40U || ((1 << (X - 43U)) & 0x8F0000004FULL) == 0 */
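
/* The mask above encodes the offsets of the tested values from 43:
   43 through 46 give bits 0-3 (0xf), 49 gives bit 6 (0x40), 75 through 78
   give bits 32-35 and 82 gives bit 39, which together form
   0x8F0000004FULL.  */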
| |
| static bool |
| optimize_range_tests_to_bit_test (enum tree_code opcode, int first, int length, |
| vec<operand_entry_t> *ops, |
| struct range_entry *ranges) |
| { |
| int i, j; |
| bool any_changes = false; |
| int prec = GET_MODE_BITSIZE (word_mode); |
| auto_vec<struct range_entry *, 64> candidates; |
| |
| for (i = first; i < length - 2; i++) |
| { |
| tree lowi, highi, lowj, highj, type; |
| |
| if (ranges[i].exp == NULL_TREE || ranges[i].in_p) |
| continue; |
| type = TREE_TYPE (ranges[i].exp); |
| if (!INTEGRAL_TYPE_P (type)) |
| continue; |
| lowi = ranges[i].low; |
| if (lowi == NULL_TREE) |
| lowi = TYPE_MIN_VALUE (type); |
| highi = ranges[i].high; |
| if (highi == NULL_TREE) |
| continue; |
| wide_int mask; |
| tree exp = extract_bit_test_mask (ranges[i].exp, prec, lowi, lowi, |
| highi, &mask, &lowi); |
| if (exp == NULL_TREE) |
| continue; |
| bool strict_overflow_p = ranges[i].strict_overflow_p; |
| candidates.truncate (0); |
| int end = MIN (i + 64, length); |
| for (j = i + 1; j < end; j++) |
| { |
| tree exp2; |
| if (ranges[j].exp == NULL_TREE || ranges[j].in_p) |
| continue; |
| if (ranges[j].exp == exp) |
| ; |
| else if (TREE_CODE (ranges[j].exp) == BIT_AND_EXPR) |
| { |
| exp2 = TREE_OPERAND (ranges[j].exp, 0); |
| if (exp2 == exp) |
| ; |
| else if (TREE_CODE (exp2) == PLUS_EXPR) |
| { |
| exp2 = TREE_OPERAND (exp2, 0); |
| STRIP_NOPS (exp2); |
| if (exp2 != exp) |
| continue; |
| } |
| else |
| continue; |
| } |
| else |
| continue; |
| lowj = ranges[j].low; |
| if (lowj == NULL_TREE) |
| continue; |
| highj = ranges[j].high; |
| if (highj == NULL_TREE) |
| highj = TYPE_MAX_VALUE (type); |
| wide_int mask2; |
| exp2 = extract_bit_test_mask (ranges[j].exp, prec, lowi, lowj, |
| highj, &mask2, NULL); |
| if (exp2 != exp) |
| continue; |
| mask |= mask2; |
| strict_overflow_p |= ranges[j].strict_overflow_p; |
| candidates.safe_push (&ranges[j]); |
| } |
| |
/* If we would otherwise need 3 or more comparisons, use a bit test instead.  */
| if (candidates.length () >= 2) |
| { |
| tree high = wide_int_to_tree (TREE_TYPE (lowi), |
| wi::to_widest (lowi) |
| + prec - 1 - wi::clz (mask)); |
| operand_entry_t oe = (*ops)[ranges[i].idx]; |
| tree op = oe->op; |
| gimple stmt = op ? SSA_NAME_DEF_STMT (op) |
| : last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id)); |
| location_t loc = gimple_location (stmt); |
| tree optype = op ? TREE_TYPE (op) : boolean_type_node; |
| |
/* See if it is cheaper to pretend the minimum value of the
   range is 0, if the maximum value is small enough.
   That avoids the subtraction of the minimum value, but the
   mask constant might be more expensive.  */
| if (compare_tree_int (lowi, 0) > 0 |
| && compare_tree_int (high, prec) < 0) |
| { |
| int cost_diff; |
| HOST_WIDE_INT m = tree_to_uhwi (lowi); |
| rtx reg = gen_raw_REG (word_mode, 10000); |
| bool speed_p = optimize_bb_for_speed_p (gimple_bb (stmt)); |
| cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg, |
| GEN_INT (-m)), speed_p); |
| rtx r = immed_wide_int_const (mask, word_mode); |
| cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r), |
| speed_p); |
| r = immed_wide_int_const (wi::lshift (mask, m), word_mode); |
| cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r), |
| speed_p); |
| if (cost_diff > 0) |
| { |
| mask = wi::lshift (mask, m); |
| lowi = build_zero_cst (TREE_TYPE (lowi)); |
| } |
| } |
| |
| tree tem = build_range_check (loc, optype, unshare_expr (exp), |
| false, lowi, high); |
| if (tem == NULL_TREE || is_gimple_val (tem)) |
| continue; |
| tree etype = unsigned_type_for (TREE_TYPE (exp)); |
| exp = fold_build2_loc (loc, MINUS_EXPR, etype, |
| fold_convert_loc (loc, etype, exp), |
| fold_convert_loc (loc, etype, lowi)); |
| exp = fold_convert_loc (loc, integer_type_node, exp); |
| tree word_type = lang_hooks.types.type_for_mode (word_mode, 1); |
| exp = fold_build2_loc (loc, LSHIFT_EXPR, word_type, |
| build_int_cst (word_type, 1), exp); |
| exp = fold_build2_loc (loc, BIT_AND_EXPR, word_type, exp, |
| wide_int_to_tree (word_type, mask)); |
| exp = fold_build2_loc (loc, EQ_EXPR, optype, exp, |
| build_zero_cst (word_type)); |
| if (is_gimple_val (exp)) |
| continue; |
| |
| /* The shift might have undefined behavior if TEM is true, |
| but reassociate_bb isn't prepared to have basic blocks |
| split when it is running. So, temporarily emit a code |
| with BIT_IOR_EXPR instead of &&, and fix it up in |
| branch_fixup. */ |
| gimple_seq seq; |
| tem = force_gimple_operand (tem, &seq, true, NULL_TREE); |
| gcc_assert (TREE_CODE (tem) == SSA_NAME); |
| gimple_set_visited (SSA_NAME_DEF_STMT (tem), true); |
| gimple_seq seq2; |
| exp = force_gimple_operand (exp, &seq2, true, NULL_TREE); |
| gimple_seq_add_seq_without_update (&seq, seq2); |
| gcc_assert (TREE_CODE (exp) == SSA_NAME); |
| gimple_set_visited (SSA_NAME_DEF_STMT (exp), true); |
| gimple g = gimple_build_assign (make_ssa_name (optype), |
| BIT_IOR_EXPR, tem, exp); |
| gimple_set_location (g, loc); |
| gimple_seq_add_stmt_without_update (&seq, g); |
| exp = gimple_assign_lhs (g); |
| tree val = build_zero_cst (optype); |
| if (update_range_test (&ranges[i], NULL, candidates.address (), |
| candidates.length (), opcode, ops, exp, |
| seq, false, val, val, strict_overflow_p)) |
| { |
| any_changes = true; |
| reassoc_branch_fixups.safe_push (tem); |
| } |
| else |
| gimple_seq_discard (seq); |
| } |
| } |
| return any_changes; |
| } |
| |
/* Optimize range tests, similarly to how fold_range_test optimizes
   them on trees.  The tree code for the binary
   operation between all the operands is OPCODE.
   If OPCODE is ERROR_MARK, optimize_range_tests is called from within
   maybe_optimize_range_tests for inter-bb range optimization.
   In that case, if oe->op is NULL, oe->id is the index of the bb whose
   GIMPLE_COND is &&ed or ||ed into the test, and oe->rank says
   the actual opcode.  */
| |
| static bool |
| optimize_range_tests (enum tree_code opcode, |
| vec<operand_entry_t> *ops) |
| { |
| unsigned int length = ops->length (), i, j, first; |
| operand_entry_t oe; |
| struct range_entry *ranges; |
| bool any_changes = false; |
| |
| if (length == 1) |
| return false; |
| |
| ranges = XNEWVEC (struct range_entry, length); |
| for (i = 0; i < length; i++) |
| { |
| oe = (*ops)[i]; |
| ranges[i].idx = i; |
| init_range_entry (ranges + i, oe->op, |
| oe->op ? NULL : |
| last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id))); |
/* For |, invert it now; we will invert it again before emitting
   the optimized expression.  */
| if (opcode == BIT_IOR_EXPR |
| || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR)) |
| ranges[i].in_p = !ranges[i].in_p; |
| } |
| |
| qsort (ranges, length, sizeof (*ranges), range_entry_cmp); |
| for (i = 0; i < length; i++) |
| if (ranges[i].exp != NULL_TREE && TREE_CODE (ranges[i].exp) == SSA_NAME) |
| break; |
| |
| /* Try to merge ranges. */ |
| for (first = i; i < length; i++) |
| { |
| tree low = ranges[i].low; |
| tree high = ranges[i].high; |
| int in_p = ranges[i].in_p; |
| bool strict_overflow_p = ranges[i].strict_overflow_p; |
| int update_fail_count = 0; |
| |
| for (j = i + 1; j < length; j++) |
| { |
| if (ranges[i].exp != ranges[j].exp) |
| break; |
| if (!merge_ranges (&in_p, &low, &high, in_p, low, high, |
| ranges[j].in_p, ranges[j].low, ranges[j].high)) |
| break; |
| strict_overflow_p |= ranges[j].strict_overflow_p; |
| } |
| |
| if (j == i + 1) |
| continue; |
| |
| if (update_range_test (ranges + i, ranges + i + 1, NULL, j - i - 1, |
| opcode, ops, ranges[i].exp, NULL, in_p, |
| low, high, strict_overflow_p)) |
| { |
| i = j - 1; |
| any_changes = true; |
| } |
| /* Avoid quadratic complexity if all merge_ranges calls would succeed, |
| while update_range_test would fail. */ |
| else if (update_fail_count == 64) |
| i = j - 1; |
| else |
| ++update_fail_count; |
| } |
| |
| any_changes |= optimize_range_tests_1 (opcode, first, length, true, |
| ops, ranges); |
| |
| if (BRANCH_COST (optimize_function_for_speed_p (cfun), false) >= 2) |
| any_changes |= optimize_range_tests_1 (opcode, first, length, false, |
| ops, ranges); |
| if (lshift_cheap_p (optimize_function_for_speed_p (cfun))) |
| any_changes |= optimize_range_tests_to_bit_test (opcode, first, length, |
| ops, ranges); |
| |
| if (any_changes && opcode != ERROR_MARK) |
| { |
| j = 0; |
| FOR_EACH_VEC_ELT (*ops, i, oe) |
| { |
| if (oe->op == error_mark_node) |
| continue; |
| else if (i != j) |
| (*ops)[j] = oe; |
| j++; |
| } |
| ops->truncate (j); |
| } |
| |
| XDELETEVEC (ranges); |
| return any_changes; |
| } |
| |
| /* Return true if STMT is a cast like: |
| <bb N>: |
| ... |
| _123 = (int) _234; |
| |
| <bb M>: |
| # _345 = PHI <_123(N), 1(...), 1(...)> |
   where _234 has boolean type, _123 has a single use and
   bb N has a single successor M.  This is commonly used in
   the last block of a range test.  */
| |
| static bool |
| final_range_test_p (gimple stmt) |
| { |
| basic_block bb, rhs_bb; |
| edge e; |
| tree lhs, rhs; |
| use_operand_p use_p; |
| gimple use_stmt; |
| |
| if (!gimple_assign_cast_p (stmt)) |
| return false; |
| bb = gimple_bb (stmt); |
| if (!single_succ_p (bb)) |
| return false; |
| e = single_succ_edge (bb); |
| if (e->flags & EDGE_COMPLEX) |
| return false; |
| |
| lhs = gimple_assign_lhs (stmt); |
| rhs = gimple_assign_rhs1 (stmt); |
| if (!INTEGRAL_TYPE_P (TREE_TYPE (lhs)) |
| || TREE_CODE (rhs) != SSA_NAME |
| || TREE_CODE (TREE_TYPE (rhs)) != BOOLEAN_TYPE) |
| return false; |
| |
| /* Test whether lhs is consumed only by a PHI in the only successor bb. */ |
| if (!single_imm_use (lhs, &use_p, &use_stmt)) |
| return false; |
| |
| if (gimple_code (use_stmt) != GIMPLE_PHI |
| || gimple_bb (use_stmt) != e->dest) |
| return false; |
| |
| /* And that the rhs is defined in the same loop. */ |
| rhs_bb = gimple_bb (SSA_NAME_DEF_STMT (rhs)); |
| if (rhs_bb == NULL |
| || !flow_bb_inside_loop_p (loop_containing_stmt (stmt), rhs_bb)) |
| return false; |
| |
| return true; |
| } |
| |
/* Return true if BB is a suitable basic block for inter-bb range test
   optimization.  If BACKWARD is true, BB should be the only predecessor
   of TEST_BB, and *OTHER_BB is either NULL and filled in by this routine,
   or is compared against to find the common basic block to which all the
   conditions branch when true resp. false.  If BACKWARD is false, TEST_BB
   should be the only predecessor of BB.  */
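
/* For example (an illustrative CFG sketch), given
     <bb 3>: if (a_1 > 5) goto <bb 4>; else goto <bb 6>;
     <bb 4>: if (b_2 != 0) goto <bb 5>; else goto <bb 6>;
   where bb 3 is the only predecessor of bb 4, calling
   suitable_cond_bb (bb 3, bb 4, &other_bb, true) with other_bb initially
   NULL fills in *OTHER_BB = bb 6, the block both conditions branch to
   when false, provided the PHI checks in bb 6 also pass.  */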
| |
| static bool |
| suitable_cond_bb (basic_block bb, basic_block test_bb, basic_block *other_bb, |
| bool backward) |
| { |
| edge_iterator ei, ei2; |
| edge e, e2; |
| gimple stmt; |
| gphi_iterator gsi; |
| bool other_edge_seen = false; |
| bool is_cond; |
| |
| if (test_bb == bb) |
| return false; |
| /* Check last stmt first. */ |
| stmt = last_stmt (bb); |
| if (stmt == NULL |
| || (gimple_code (stmt) != GIMPLE_COND |
| && (backward || !final_range_test_p (stmt))) |
| || gimple_visited_p (stmt) |
| || stmt_could_throw_p (stmt) |
| || *other_bb == bb) |
| return false; |
| is_cond = gimple_code (stmt) == GIMPLE_COND; |
| if (is_cond) |
| { |
| /* If last stmt is GIMPLE_COND, verify that one of the succ edges |
| goes to the next bb (if BACKWARD, it is TEST_BB), and the other |
| to *OTHER_BB (if not set yet, try to find it out). */ |
| if (EDGE_COUNT (bb->succs) != 2) |
| return false; |
| FOR_EACH_EDGE (e, ei, bb->succs) |
| { |
| if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))) |
| return false; |
| if (e->dest == test_bb) |
| { |
| if (backward) |
| continue; |
| else |
| return false; |
| } |
| if (e->dest == bb) |
| return false; |
| if (*other_bb == NULL) |
| { |
| FOR_EACH_EDGE (e2, ei2, test_bb->succs) |
| if (!(e2->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))) |
| return false; |
| else if (e->dest == e2->dest) |
| *other_bb = e->dest; |
| if (*other_bb == NULL) |
| return false; |
| } |
| if (e->dest == *other_bb) |
| other_edge_seen = true; |
| else if (backward) |
| return false; |
| } |
| if (*other_bb == NULL || !other_edge_seen) |
| return false; |
| } |
| else if (single_succ (bb) != *other_bb) |
| return false; |
| |
| /* Now check all PHIs of *OTHER_BB. */ |
| e = find_edge (bb, *other_bb); |
| e2 = find_edge (test_bb, *other_bb); |
| for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gphi *phi = gsi.phi (); |
| /* If both BB and TEST_BB end with GIMPLE_COND, all PHI arguments |
| corresponding to BB and TEST_BB predecessor must be the same. */ |
| if (!operand_equal_p (gimple_phi_arg_def (phi, e->dest_idx), |
| gimple_phi_arg_def (phi, e2->dest_idx), 0)) |
| { |
| /* Otherwise, if one of the blocks doesn't end with GIMPLE_COND, |
| one of the PHIs should have the lhs of the last stmt in |
| that block as PHI arg and that PHI should have 0 or 1 |
| corresponding to it in all other range test basic blocks |
| considered. */ |
| if (!is_cond) |
| { |
| if (gimple_phi_arg_def (phi, e->dest_idx) |
| == gimple_assign_lhs (stmt) |
| && (integer_zerop (gimple_phi_arg_def (phi, e2->dest_idx)) |
| || integer_onep (gimple_phi_arg_def (phi, |
| e2->dest_idx)))) |
| continue; |
| } |
| else |
| { |
| gimple test_last = last_stmt (test_bb); |
| if (gimple_code (test_last) != GIMPLE_COND |
| && gimple_phi_arg_def (phi, e2->dest_idx) |
| == gimple_assign_lhs (test_last) |
| && (integer_zerop (gimple_phi_arg_def (phi, e->dest_idx)) |
| || integer_onep (gimple_phi_arg_def (phi, e->dest_idx)))) |
| continue; |
| } |
| |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| /* Return true if BB doesn't have side-effects that would disallow |
| range test optimization, all SSA_NAMEs set in the bb are consumed |
| in the bb and there are no PHIs. */ |
| |
| static bool |
| no_side_effect_bb (basic_block bb) |
| { |
| gimple_stmt_iterator gsi; |
| gimple last; |
| |
| if (!gimple_seq_empty_p (phi_nodes (bb))) |
| return false; |
| last = last_stmt (bb); |
| for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple stmt = gsi_stmt (gsi); |
| tree lhs; |
| imm_use_iterator imm_iter; |
| use_operand_p use_p; |
| |
| if (is_gimple_debug (stmt)) |
| continue; |
| if (gimple_has_side_effects (stmt)) |
| return false; |
| if (stmt == last) |
| return true; |
| if (!is_gimple_assign (stmt)) |
| return false; |
| lhs = gimple_assign_lhs (stmt); |
| if (TREE_CODE (lhs) != SSA_NAME) |
| return false; |
| if (gimple_assign_rhs_could_trap_p (stmt)) |
| return false; |
| FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs) |
| { |
| gimple use_stmt = USE_STMT (use_p); |
| if (is_gimple_debug (use_stmt)) |
| continue; |
| if (gimple_bb (use_stmt) != bb) |
| return false; |
| } |
| } |
| return false; |
| } |
| |
| /* If VAR is set by CODE (BIT_{AND,IOR}_EXPR) which is reassociable, |
| return true and fill in *OPS recursively. */ |
| |
| static bool |
| get_ops (tree var, enum tree_code code, vec<operand_entry_t> *ops, |
| struct loop *loop) |
| { |
| gimple stmt = SSA_NAME_DEF_STMT (var); |
| tree rhs[2]; |
| int i; |
| |
| if (!is_reassociable_op (stmt, code, loop)) |
| return false; |
| |
| rhs[0] = gimple_assign_rhs1 (stmt); |
| rhs[1] = gimple_assign_rhs2 (stmt); |
| gimple_set_visited (stmt, true); |
| for (i = 0; i < 2; i++) |
| if (TREE_CODE (rhs[i]) == SSA_NAME |
| && !get_ops (rhs[i], code, ops, loop) |
| && has_single_use (rhs[i])) |
| { |
| operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool); |
| |
| oe->op = rhs[i]; |
| oe->rank = code; |
| oe->id = 0; |
| oe->count = 1; |
| ops->safe_push (oe); |
| } |
| return true; |
| } |
| |
/* Find the ops that were added by get_ops starting from VAR, see if
   they were changed during update_range_test and, if so, create new
   stmts.  */
| |
| static tree |
| update_ops (tree var, enum tree_code code, vec<operand_entry_t> ops, |
| unsigned int *pidx, struct loop *loop) |
| { |
| gimple stmt = SSA_NAME_DEF_STMT (var); |
| tree rhs[4]; |
| int i; |
| |
| if (!is_reassociable_op (stmt, code, loop)) |
| return NULL; |
| |
| rhs[0] = gimple_assign_rhs1 (stmt); |
| rhs[1] = gimple_assign_rhs2 (stmt); |
| rhs[2] = rhs[0]; |
| rhs[3] = rhs[1]; |
| for (i = 0; i < 2; i++) |
| if (TREE_CODE (rhs[i]) == SSA_NAME) |
| { |
| rhs[2 + i] = update_ops (rhs[i], code, ops, pidx, loop); |
| if (rhs[2 + i] == NULL_TREE) |
| { |
| if (has_single_use (rhs[i])) |
| rhs[2 + i] = ops[(*pidx)++]->op; |
| else |
| rhs[2 + i] = rhs[i]; |
| } |
| } |
| if ((rhs[2] != rhs[0] || rhs[3] != rhs[1]) |
| && (rhs[2] != rhs[1] || rhs[3] != rhs[0])) |
| { |
| gimple_stmt_iterator gsi = gsi_for_stmt (stmt); |
| var = make_ssa_name (TREE_TYPE (var)); |
| gassign *g = gimple_build_assign (var, gimple_assign_rhs_code (stmt), |
| rhs[2], rhs[3]); |
| gimple_set_uid (g, gimple_uid (stmt)); |
| gimple_set_visited (g, true); |
| gsi_insert_before (&gsi, g, GSI_SAME_STMT); |
| } |
| return var; |
| } |
| |
| /* Structure to track the initial value passed to get_ops and |
| the range in the ops vector for each basic block. */ |
| |
| struct inter_bb_range_test_entry |
| { |
| tree op; |
| unsigned int first_idx, last_idx; |
| }; |
| |
| /* Inter-bb range test optimization. */ |
| |