| /* Global, SSA-based optimizations using mathematical identities. |
| Copyright (C) 2005-2017 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it |
| under the terms of the GNU General Public License as published by the |
| Free Software Foundation; either version 3, or (at your option) any |
| later version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT |
| ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| /* This file contains several mini-passes. The first one tries to CSE |
| reciprocal operations. These are common in sequences such as this one: |
| |
| modulus = sqrt(x*x + y*y + z*z); |
| x = x / modulus; |
| y = y / modulus; |
| z = z / modulus; |
| |
| that can be optimized to |
| |
| modulus = sqrt(x*x + y*y + z*z); |
| rmodulus = 1.0 / modulus; |
| x = x * rmodulus; |
| y = y * rmodulus; |
| z = z * rmodulus; |
| |
| We do this for loop invariant divisors, and with this pass whenever |
| we notice that a division has the same divisor multiple times. |
| |
| Of course, like in PRE, we don't insert a division if a dominator |
| already has one. However, this cannot be done as an extension of |
| PRE for several reasons. |
| |
| First of all, experiments showed that the transformation is not |
| always useful if there are only two divisions by the same divisor. |
| This is probably because modern processors can pipeline the |
| divisions; on older, in-order processors it should still be |
| effective to optimize two divisions by the same number. We make |
| this a param, and it shall be called N in the remainder of this |
| comment. |
| |
| Second, if trapping math is active, we have less freedom on where |
| to insert divisions: we can only do so in basic blocks that already |
| contain one. (If divisions don't trap, we can instead insert |
| divisions elsewhere, namely in blocks that are common dominators |
| of those that have the division.) |
| |
| We really don't want to compute the reciprocal unless a division will |
| be found. To do this, we won't insert the division in a basic block |
| that has fewer than N divisions *post-dominating* it. |
| |
| The algorithm constructs a subset of the dominator tree, holding the |
| blocks containing the divisions and their common dominators, and |
| walks it twice. The first walk is in post-order, and it annotates |
| each block with the number of divisions that post-dominate it: this |
| gives information on where divisions can be inserted profitably. |
| The second walk is in pre-order, and it inserts divisions as explained |
| above, and replaces divisions by multiplications. |
| |
| In the best case, the cost of the pass is O(n_statements). In the |
| worst-case, the cost is due to creating the dominator tree subset, |
| with a cost of O(n_basic_blocks ^ 2); however this can only happen |
| for n_statements / n_basic_blocks statements. So, the amortized cost |
| of creating the dominator tree subset is O(n_basic_blocks) and the |
| worst-case cost of the pass is O(n_statements * n_basic_blocks). |
| |
| More practically, the cost will be small because there are few |
| divisions, and they tend to be in the same basic block, so insert_bb |
| is called very few times. |
| |
| If we did this using domwalk.c, an efficient implementation would have |
| to work on all the variables in a single pass, because we could not |
| work on just a subset of the dominator tree, as we do now, and the |
| cost would also be something like O(n_statements * n_basic_blocks). |
| The data structures would be more complex in order to work on all the |
| variables in a single pass. */ |
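| |
| /* To make the parameters above concrete: assuming the target's |
| min_divisions_for_recip_mul hook returns 3 (so N == 3) and the three |
| divisions of the example above all appear in the same basic block, the |
| pass emits rmodulus = 1.0 / modulus immediately before the first of |
| them and rewrites all three divisions as multiplications; with only |
| two such divisions, nothing is changed. */ |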
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "predict.h" |
| #include "alloc-pool.h" |
| #include "tree-pass.h" |
| #include "ssa.h" |
| #include "optabs-tree.h" |
| #include "gimple-pretty-print.h" |
| #include "alias.h" |
| #include "fold-const.h" |
| #include "gimple-fold.h" |
| #include "gimple-iterator.h" |
| #include "gimplify.h" |
| #include "gimplify-me.h" |
| #include "stor-layout.h" |
| #include "tree-cfg.h" |
| #include "tree-dfa.h" |
| #include "tree-ssa.h" |
| #include "builtins.h" |
| #include "params.h" |
| #include "internal-fn.h" |
| #include "case-cfn-macros.h" |
| #include "optabs-libfuncs.h" |
| #include "tree-eh.h" |
| #include "targhooks.h" |
| |
| /* This structure represents one basic block that either computes a |
| division, or is a common dominator for basic blocks that compute a |
| division. */ |
| struct occurrence { |
| /* The basic block represented by this structure. */ |
| basic_block bb; |
| |
| /* If non-NULL, the SSA_NAME holding the definition for a reciprocal |
| inserted in BB. */ |
| tree recip_def; |
| |
| /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that |
| was inserted in BB. */ |
| gimple *recip_def_stmt; |
| |
| /* Pointer to a list of "struct occurrence"s for blocks dominated |
| by BB. */ |
| struct occurrence *children; |
| |
| /* Pointer to the next "struct occurrence" in the list of blocks |
| sharing a common dominator. */ |
| struct occurrence *next; |
| |
| /* The number of divisions that are in BB before compute_merit. The |
| number of divisions that are in BB or post-dominate it after |
| compute_merit. */ |
| int num_divisions; |
| |
| /* True if the basic block has a division, false if it is a common |
| dominator for basic blocks that do. If it is false and trapping |
| math is active, BB is not a candidate for inserting a reciprocal. */ |
| bool bb_has_division; |
| }; |
| |
| static struct |
| { |
| /* Number of 1.0/X ops inserted. */ |
| int rdivs_inserted; |
| |
| /* Number of 1.0/FUNC ops inserted. */ |
| int rfuncs_inserted; |
| } reciprocal_stats; |
| |
| static struct |
| { |
| /* Number of cexpi calls inserted. */ |
| int inserted; |
| } sincos_stats; |
| |
| static struct |
| { |
| /* Number of hand-written 16-bit nop / bswaps found. */ |
| int found_16bit; |
| |
| /* Number of hand-written 32-bit nop / bswaps found. */ |
| int found_32bit; |
| |
| /* Number of hand-written 64-bit nop / bswaps found. */ |
| int found_64bit; |
| } nop_stats, bswap_stats; |
| |
| static struct |
| { |
| /* Number of widening multiplication ops inserted. */ |
| int widen_mults_inserted; |
| |
| /* Number of integer multiply-and-accumulate ops inserted. */ |
| int maccs_inserted; |
| |
| /* Number of fp fused multiply-add ops inserted. */ |
| int fmas_inserted; |
| |
| /* Number of divmod calls inserted. */ |
| int divmod_calls_inserted; |
| } widen_mul_stats; |
| |
| /* The instance of "struct occurrence" representing the highest |
| interesting block in the dominator tree. */ |
| static struct occurrence *occ_head; |
| |
| /* Allocation pool for getting instances of "struct occurrence". */ |
| static object_allocator<occurrence> *occ_pool; |
| |
| |
| |
| /* Allocate and return a new struct occurrence for basic block BB, |
| whose children list is headed by CHILDREN. */ |
| static struct occurrence * |
| occ_new (basic_block bb, struct occurrence *children) |
| { |
| struct occurrence *occ; |
| |
| bb->aux = occ = occ_pool->allocate (); |
| memset (occ, 0, sizeof (struct occurrence)); |
| |
| occ->bb = bb; |
| occ->children = children; |
| return occ; |
| } |
| |
| |
| /* Insert NEW_OCC into our subset of the dominator tree. P_HEAD points to a |
| list of "struct occurrence"s, one per basic block, having IDOM as |
| their common dominator. |
| |
| We try to insert NEW_OCC as deep as possible in the tree, and we also |
| insert any other block that is a common dominator for BB and one |
| block already in the tree. */ |
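| |
| /* For example (block numbers are hypothetical): if a division was |
| already registered in bb4 and another one is now registered in bb6, |
| and their nearest common dominator bb2 is not in the tree yet, an |
| occurrence for bb2 is created on the fly; it ends up on the list with |
| the occurrences for bb6 and bb4 as its children. */ |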
| |
| static void |
| insert_bb (struct occurrence *new_occ, basic_block idom, |
| struct occurrence **p_head) |
| { |
| struct occurrence *occ, **p_occ; |
| |
| for (p_occ = p_head; (occ = *p_occ) != NULL; ) |
| { |
| basic_block bb = new_occ->bb, occ_bb = occ->bb; |
| basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb); |
| if (dom == bb) |
| { |
| /* BB dominates OCC_BB. OCC becomes NEW_OCC's child: remove OCC |
| from its list. */ |
| *p_occ = occ->next; |
| occ->next = new_occ->children; |
| new_occ->children = occ; |
| |
| /* Try the next block (it may as well be dominated by BB). */ |
| } |
| |
| else if (dom == occ_bb) |
| { |
| /* OCC_BB dominates BB. Tail recurse to look deeper. */ |
| insert_bb (new_occ, dom, &occ->children); |
| return; |
| } |
| |
| else if (dom != idom) |
| { |
| gcc_assert (!dom->aux); |
| |
| /* There is a dominator between IDOM and BB, add it and make |
| two children out of NEW_OCC and OCC. First, remove OCC from |
| its list. */ |
| *p_occ = occ->next; |
| new_occ->next = occ; |
| occ->next = NULL; |
| |
| /* None of the previous blocks has DOM as a dominator: if we tail |
| recursed, we would reexamine them uselessly. Just switch BB with |
| DOM, and go on looking for blocks dominated by DOM. */ |
| new_occ = occ_new (dom, new_occ); |
| } |
| |
| else |
| { |
| /* Nothing special, go on with the next element. */ |
| p_occ = &occ->next; |
| } |
| } |
| |
| /* No place was found deeper in the tree. Add BB to the list of |
| blocks having IDOM as their common dominator. */ |
| new_occ->next = *p_head; |
| *p_head = new_occ; |
| } |
| |
| /* Register that we found a division in BB. */ |
| |
| static inline void |
| register_division_in (basic_block bb) |
| { |
| struct occurrence *occ; |
| |
| occ = (struct occurrence *) bb->aux; |
| if (!occ) |
| { |
| occ = occ_new (bb, NULL); |
| insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head); |
| } |
| |
| occ->bb_has_division = true; |
| occ->num_divisions++; |
| } |
| |
| |
| /* Compute the number of divisions that postdominate each block in OCC and |
| its children. */ |
| |
| static void |
| compute_merit (struct occurrence *occ) |
| { |
| struct occurrence *occ_child; |
| basic_block dom = occ->bb; |
| |
| for (occ_child = occ->children; occ_child; occ_child = occ_child->next) |
| { |
| basic_block bb; |
| if (occ_child->children) |
| compute_merit (occ_child); |
| |
| if (flag_exceptions) |
| bb = single_noncomplex_succ (dom); |
| else |
| bb = dom; |
| |
| if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb)) |
| occ->num_divisions += occ_child->num_divisions; |
| } |
| } |
| |
| |
| /* Return whether USE_STMT is a floating-point division by DEF. */ |
| static inline bool |
| is_division_by (gimple *use_stmt, tree def) |
| { |
| return is_gimple_assign (use_stmt) |
| && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR |
| && gimple_assign_rhs2 (use_stmt) == def |
| /* Do not recognize x / x as a valid division, as we would get |
| confused later by replacing all immediate uses of x in such |
| a stmt. */ |
| && gimple_assign_rhs1 (use_stmt) != def |
| && !stmt_can_throw_internal (use_stmt); |
| } |
| |
| /* Walk the subset of the dominator tree rooted at OCC, setting the |
| RECIP_DEF field to a definition of 1.0 / DEF that can be used in |
| the given basic block. The field may be left NULL, of course, |
| if it is not possible or profitable to do the optimization. |
| |
| DEF_GSI is an iterator pointing at the statement defining DEF. |
| If RECIP_DEF is set, a dominator already has a computation that can |
| be used. */ |
| |
| static void |
| insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ, |
| tree def, tree recip_def, int threshold) |
| { |
| tree type; |
| gassign *new_stmt; |
| gimple_stmt_iterator gsi; |
| struct occurrence *occ_child; |
| |
| if (!recip_def |
| && (occ->bb_has_division || !flag_trapping_math) |
| && occ->num_divisions >= threshold) |
| { |
| /* Make a variable with the replacement and substitute it. */ |
| type = TREE_TYPE (def); |
| recip_def = create_tmp_reg (type, "reciptmp"); |
| new_stmt = gimple_build_assign (recip_def, RDIV_EXPR, |
| build_one_cst (type), def); |
| |
| if (occ->bb_has_division) |
| { |
| /* Case 1: insert before an existing division. */ |
| gsi = gsi_after_labels (occ->bb); |
| while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def)) |
| gsi_next (&gsi); |
| |
| gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT); |
| } |
| else if (def_gsi && occ->bb == def_gsi->bb) |
| { |
| /* Case 2: insert right after the definition. Note that this will |
| never happen if the definition statement can throw, because in |
| that case the sole successor of the statement's basic block will |
| dominate all the uses as well. */ |
| gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT); |
| } |
| else |
| { |
| /* Case 3: insert in a basic block not containing defs/uses. */ |
| gsi = gsi_after_labels (occ->bb); |
| gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT); |
| } |
| |
| reciprocal_stats.rdivs_inserted++; |
| |
| occ->recip_def_stmt = new_stmt; |
| } |
| |
| occ->recip_def = recip_def; |
| for (occ_child = occ->children; occ_child; occ_child = occ_child->next) |
| insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold); |
| } |
| |
| |
| /* Replace the division at USE_P with a multiplication by the reciprocal, if |
| possible. */ |
| |
| static inline void |
| replace_reciprocal (use_operand_p use_p) |
| { |
| gimple *use_stmt = USE_STMT (use_p); |
| basic_block bb = gimple_bb (use_stmt); |
| struct occurrence *occ = (struct occurrence *) bb->aux; |
| |
| if (optimize_bb_for_speed_p (bb) |
| && occ->recip_def && use_stmt != occ->recip_def_stmt) |
| { |
| gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt); |
| gimple_assign_set_rhs_code (use_stmt, MULT_EXPR); |
| SET_USE (use_p, occ->recip_def); |
| fold_stmt_inplace (&gsi); |
| update_stmt (use_stmt); |
| } |
| } |
| |
| |
| /* Free OCC and return one more "struct occurrence" to be freed. */ |
| |
| static struct occurrence * |
| free_bb (struct occurrence *occ) |
| { |
| struct occurrence *child, *next; |
| |
| /* First get the two pointers hanging off OCC. */ |
| next = occ->next; |
| child = occ->children; |
| occ->bb->aux = NULL; |
| occ_pool->remove (occ); |
| |
| /* Now ensure that we don't recurse unless it is necessary. */ |
| if (!child) |
| return next; |
| else |
| { |
| while (next) |
| next = free_bb (next); |
| |
| return child; |
| } |
| } |
| |
| |
| /* Look for floating-point divisions among DEF's uses, and try to |
| replace them by multiplications with the reciprocal. Add |
| as many statements computing the reciprocal as needed. |
| |
| DEF must be a GIMPLE register of a floating-point type. */ |
| |
| static void |
| execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def) |
| { |
| use_operand_p use_p; |
| imm_use_iterator use_iter; |
| struct occurrence *occ; |
| int count = 0, threshold; |
| |
| gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def)); |
| |
| FOR_EACH_IMM_USE_FAST (use_p, use_iter, def) |
| { |
| gimple *use_stmt = USE_STMT (use_p); |
| if (is_division_by (use_stmt, def)) |
| { |
| register_division_in (gimple_bb (use_stmt)); |
| count++; |
| } |
| } |
| |
| /* Do the expensive part only if we can hope to optimize something. */ |
| threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def))); |
| if (count >= threshold) |
| { |
| gimple *use_stmt; |
| for (occ = occ_head; occ; occ = occ->next) |
| { |
| compute_merit (occ); |
| insert_reciprocals (def_gsi, occ, def, NULL, threshold); |
| } |
| |
| FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def) |
| { |
| if (is_division_by (use_stmt, def)) |
| { |
| FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter) |
| replace_reciprocal (use_p); |
| } |
| } |
| } |
| |
| for (occ = occ_head; occ; ) |
| occ = free_bb (occ); |
| |
| occ_head = NULL; |
| } |
| |
| /* Return an internal function that implements the reciprocal of CALL, |
| or IFN_LAST if there is no such function that the target supports. */ |
| |
| internal_fn |
| internal_fn_reciprocal (gcall *call) |
| { |
| internal_fn ifn; |
| |
| switch (gimple_call_combined_fn (call)) |
| { |
| CASE_CFN_SQRT: |
| ifn = IFN_RSQRT; |
| break; |
| |
| default: |
| return IFN_LAST; |
| } |
| |
| tree_pair types = direct_internal_fn_types (ifn, call); |
| if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED)) |
| return IFN_LAST; |
| |
| return ifn; |
| } |
| |
| /* Go through all the floating-point SSA_NAMEs, and call |
| execute_cse_reciprocals_1 on each of them. */ |
| namespace { |
| |
| const pass_data pass_data_cse_reciprocals = |
| { |
| GIMPLE_PASS, /* type */ |
| "recip", /* name */ |
| OPTGROUP_NONE, /* optinfo_flags */ |
| TV_NONE, /* tv_id */ |
| PROP_ssa, /* properties_required */ |
| 0, /* properties_provided */ |
| 0, /* properties_destroyed */ |
| 0, /* todo_flags_start */ |
| TODO_update_ssa, /* todo_flags_finish */ |
| }; |
| |
| class pass_cse_reciprocals : public gimple_opt_pass |
| { |
| public: |
| pass_cse_reciprocals (gcc::context *ctxt) |
| : gimple_opt_pass (pass_data_cse_reciprocals, ctxt) |
| {} |
| |
| /* opt_pass methods: */ |
| virtual bool gate (function *) { return optimize && flag_reciprocal_math; } |
| virtual unsigned int execute (function *); |
| |
| }; // class pass_cse_reciprocals |
| |
| unsigned int |
| pass_cse_reciprocals::execute (function *fun) |
| { |
| basic_block bb; |
| tree arg; |
| |
| occ_pool = new object_allocator<occurrence> ("dominators for recip"); |
| |
| memset (&reciprocal_stats, 0, sizeof (reciprocal_stats)); |
| calculate_dominance_info (CDI_DOMINATORS); |
| calculate_dominance_info (CDI_POST_DOMINATORS); |
| |
| if (flag_checking) |
| FOR_EACH_BB_FN (bb, fun) |
| gcc_assert (!bb->aux); |
| |
| for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg)) |
| if (FLOAT_TYPE_P (TREE_TYPE (arg)) |
| && is_gimple_reg (arg)) |
| { |
| tree name = ssa_default_def (fun, arg); |
| if (name) |
| execute_cse_reciprocals_1 (NULL, name); |
| } |
| |
| FOR_EACH_BB_FN (bb, fun) |
| { |
| tree def; |
| |
| for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi); |
| gsi_next (&gsi)) |
| { |
| gphi *phi = gsi.phi (); |
| def = PHI_RESULT (phi); |
| if (! virtual_operand_p (def) |
| && FLOAT_TYPE_P (TREE_TYPE (def))) |
| execute_cse_reciprocals_1 (NULL, def); |
| } |
| |
| for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi); |
| gsi_next (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| |
| if (gimple_has_lhs (stmt) |
| && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL |
| && FLOAT_TYPE_P (TREE_TYPE (def)) |
| && TREE_CODE (def) == SSA_NAME) |
| execute_cse_reciprocals_1 (&gsi, def); |
| } |
| |
| if (optimize_bb_for_size_p (bb)) |
| continue; |
| |
| /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */ |
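| /* For example (hypothetical SSA names), on a target that supports |
| IFN_RSQRT or provides a builtin_reciprocal hook for sqrt, this |
| roughly turns |
| |
| t_1 = sqrt (x_2); t_1 = .RSQRT (x_2); |
| y_3 = a_4 / t_1; into y_3 = a_4 * t_1; |
| |
| provided every non-debug use of t_1 is a division by t_1. */ |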
| for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi); |
| gsi_next (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| |
| if (is_gimple_assign (stmt) |
| && gimple_assign_rhs_code (stmt) == RDIV_EXPR) |
| { |
| tree arg1 = gimple_assign_rhs2 (stmt); |
| gimple *stmt1; |
| |
| if (TREE_CODE (arg1) != SSA_NAME) |
| continue; |
| |
| stmt1 = SSA_NAME_DEF_STMT (arg1); |
| |
| if (is_gimple_call (stmt1) |
| && gimple_call_lhs (stmt1)) |
| { |
| bool fail; |
| imm_use_iterator ui; |
| use_operand_p use_p; |
| tree fndecl = NULL_TREE; |
| |
| gcall *call = as_a <gcall *> (stmt1); |
| internal_fn ifn = internal_fn_reciprocal (call); |
| if (ifn == IFN_LAST) |
| { |
| fndecl = gimple_call_fndecl (call); |
| if (!fndecl |
| || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD) |
| continue; |
| fndecl = targetm.builtin_reciprocal (fndecl); |
| if (!fndecl) |
| continue; |
| } |
| |
| /* Check that all uses of the SSA name are divisions, |
| otherwise replacing the defining statement will do |
| the wrong thing. */ |
| fail = false; |
| FOR_EACH_IMM_USE_FAST (use_p, ui, arg1) |
| { |
| gimple *stmt2 = USE_STMT (use_p); |
| if (is_gimple_debug (stmt2)) |
| continue; |
| if (!is_gimple_assign (stmt2) |
| || gimple_assign_rhs_code (stmt2) != RDIV_EXPR |
| || gimple_assign_rhs1 (stmt2) == arg1 |
| || gimple_assign_rhs2 (stmt2) != arg1) |
| { |
| fail = true; |
| break; |
| } |
| } |
| if (fail) |
| continue; |
| |
| gimple_replace_ssa_lhs (call, arg1); |
| if (gimple_call_internal_p (call) != (ifn != IFN_LAST)) |
| { |
| auto_vec<tree, 4> args; |
| for (unsigned int i = 0; |
| i < gimple_call_num_args (call); i++) |
| args.safe_push (gimple_call_arg (call, i)); |
| gcall *stmt2; |
| if (ifn == IFN_LAST) |
| stmt2 = gimple_build_call_vec (fndecl, args); |
| else |
| stmt2 = gimple_build_call_internal_vec (ifn, args); |
| gimple_call_set_lhs (stmt2, arg1); |
| if (gimple_vdef (call)) |
| { |
| gimple_set_vdef (stmt2, gimple_vdef (call)); |
| SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2; |
| } |
| gimple_set_vuse (stmt2, gimple_vuse (call)); |
| gimple_stmt_iterator gsi2 = gsi_for_stmt (call); |
| gsi_replace (&gsi2, stmt2, true); |
| } |
| else |
| { |
| if (ifn == IFN_LAST) |
| gimple_call_set_fndecl (call, fndecl); |
| else |
| gimple_call_set_internal_fn (call, ifn); |
| update_stmt (call); |
| } |
| reciprocal_stats.rfuncs_inserted++; |
| |
| FOR_EACH_IMM_USE_STMT (stmt, ui, arg1) |
| { |
| gimple_stmt_iterator gsi = gsi_for_stmt (stmt); |
| gimple_assign_set_rhs_code (stmt, MULT_EXPR); |
| fold_stmt_inplace (&gsi); |
| update_stmt (stmt); |
| } |
| } |
| } |
| } |
| } |
| |
| statistics_counter_event (fun, "reciprocal divs inserted", |
| reciprocal_stats.rdivs_inserted); |
| statistics_counter_event (fun, "reciprocal functions inserted", |
| reciprocal_stats.rfuncs_inserted); |
| |
| free_dominance_info (CDI_DOMINATORS); |
| free_dominance_info (CDI_POST_DOMINATORS); |
| delete occ_pool; |
| return 0; |
| } |
| |
| } // anon namespace |
| |
| gimple_opt_pass * |
| make_pass_cse_reciprocals (gcc::context *ctxt) |
| { |
| return new pass_cse_reciprocals (ctxt); |
| } |
| |
| /* Records an occurrence at statement USE_STMT in the vector of |
| statements STMTS if its basic block is dominated by *TOP_BB, dominates |
| it, or if *TOP_BB is not yet initialized. Returns true if the |
| occurrence was pushed onto the vector. Adjusts *TOP_BB to be the basic |
| block dominating all statements in the vector. */ |
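| |
| /* For example, if *TOP_BB is currently bb3 and USE_STMT is in bb5, |
| which bb3 dominates, the statement is simply recorded; if instead |
| USE_STMT is in a block that dominates bb3, it is recorded and *TOP_BB |
| becomes that block; a use in a block with no dominance relation to bb3 |
| is rejected. (Block numbers are hypothetical.) */ |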
| |
| static bool |
| maybe_record_sincos (vec<gimple *> *stmts, |
| basic_block *top_bb, gimple *use_stmt) |
| { |
| basic_block use_bb = gimple_bb (use_stmt); |
| if (*top_bb |
| && (*top_bb == use_bb |
| || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb))) |
| stmts->safe_push (use_stmt); |
| else if (!*top_bb |
| || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb)) |
| { |
| stmts->safe_push (use_stmt); |
| *top_bb = use_bb; |
| } |
| else |
| return false; |
| |
| return true; |
| } |
| |
| /* Look for sin, cos and cexpi calls with the same argument NAME and, |
| if several are found, CSE them into a single cexpi call. We first |
| walk over all immediate uses of the argument, collecting the |
| statements that we can CSE in a vector; in a second pass we replace |
| each statement's rhs with a REALPART or IMAGPART expression on the |
| result of the cexpi call, which is inserted before the use statement |
| that dominates all other candidates. */ |
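| |
| /* For example (hypothetical SSA names): |
| |
| s_1 = sin (x_2); t_5 = cexpi (x_2); |
| c_3 = cos (x_2); becomes s_1 = IMAGPART_EXPR <t_5>; |
| c_3 = REALPART_EXPR <t_5>; |
| |
| so that a single cexpi call feeds both results. */ |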
| |
| static bool |
| execute_cse_sincos_1 (tree name) |
| { |
| gimple_stmt_iterator gsi; |
| imm_use_iterator use_iter; |
| tree fndecl, res, type; |
| gimple *def_stmt, *use_stmt, *stmt; |
| int seen_cos = 0, seen_sin = 0, seen_cexpi = 0; |
| auto_vec<gimple *> stmts; |
| basic_block top_bb = NULL; |
| int i; |
| bool cfg_changed = false; |
| |
| type = TREE_TYPE (name); |
| FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name) |
| { |
| if (gimple_code (use_stmt) != GIMPLE_CALL |
| || !gimple_call_lhs (use_stmt)) |
| continue; |
| |
| switch (gimple_call_combined_fn (use_stmt)) |
| { |
| CASE_CFN_COS: |
| seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0; |
| break; |
| |
| CASE_CFN_SIN: |
| seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0; |
| break; |
| |
| CASE_CFN_CEXPI: |
| seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0; |
| break; |
| |
| default:; |
| } |
| } |
| |
| if (seen_cos + seen_sin + seen_cexpi <= 1) |
| return false; |
| |
| /* Simply insert cexpi at the beginning of top_bb but not earlier than |
| the name def statement. */ |
| fndecl = mathfn_built_in (type, BUILT_IN_CEXPI); |
| if (!fndecl) |
| return false; |
| stmt = gimple_build_call (fndecl, 1, name); |
| res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp"); |
| gimple_call_set_lhs (stmt, res); |
| |
| def_stmt = SSA_NAME_DEF_STMT (name); |
| if (!SSA_NAME_IS_DEFAULT_DEF (name) |
| && gimple_code (def_stmt) != GIMPLE_PHI |
| && gimple_bb (def_stmt) == top_bb) |
| { |
| gsi = gsi_for_stmt (def_stmt); |
| gsi_insert_after (&gsi, stmt, GSI_SAME_STMT); |
| } |
| else |
| { |
| gsi = gsi_after_labels (top_bb); |
| gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); |
| } |
| sincos_stats.inserted++; |
| |
| /* And adjust the recorded old call sites. */ |
| for (i = 0; stmts.iterate (i, &use_stmt); ++i) |
| { |
| tree rhs = NULL; |
| |
| switch (gimple_call_combined_fn (use_stmt)) |
| { |
| CASE_CFN_COS: |
| rhs = fold_build1 (REALPART_EXPR, type, res); |
| break; |
| |
| CASE_CFN_SIN: |
| rhs = fold_build1 (IMAGPART_EXPR, type, res); |
| break; |
| |
| CASE_CFN_CEXPI: |
| rhs = res; |
| break; |
| |
| default:; |
| gcc_unreachable (); |
| } |
| |
| /* Replace call with a copy. */ |
| stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs); |
| |
| gsi = gsi_for_stmt (use_stmt); |
| gsi_replace (&gsi, stmt, true); |
| if (gimple_purge_dead_eh_edges (gimple_bb (stmt))) |
| cfg_changed = true; |
| } |
| |
| return cfg_changed; |
| } |
| |
| /* To evaluate powi(x,n), the floating point value x raised to the |
| constant integer exponent n, we use a hybrid algorithm that |
| combines the "window method" with look-up tables. For an |
| introduction to exponentiation algorithms and "addition chains", |
| see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth, |
| "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming", |
| 3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation |
| Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998. */ |
| |
| /* Provide a default value for POWI_MAX_MULTS, the maximum number of |
| multiplications to inline before calling the system library's pow |
| function. powi(x,n) requires at worst 2*bits(n)-2 multiplications, |
| so this default never requires calling pow, powf or powl. */ |
| |
| #ifndef POWI_MAX_MULTS |
| #define POWI_MAX_MULTS (2*HOST_BITS_PER_WIDE_INT-2) |
| #endif |
| |
| /* The size of the "optimal power tree" lookup table. All |
| exponents less than this value are simply looked up in the |
| powi_table below. This threshold is also used to size the |
| cache of pseudo registers that hold intermediate results. */ |
| #define POWI_TABLE_SIZE 256 |
| |
| /* The size, in bits, of the window used in the "window method" |
| exponentiation algorithm. This is equivalent to a radix of |
| (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method". */ |
| #define POWI_WINDOW_SIZE 3 |
| |
| /* The following table is an efficient representation of an |
| "optimal power tree". For each value, i, the corresponding |
| value, j, in the table states that an optimal evaluation |
| sequence for calculating pow(x,i) can be found by evaluating |
| pow(x,j)*pow(x,i-j). An optimal power tree for the first |
| 100 integers is given in Knuth's "Seminumerical Algorithms". |
| A worked example follows the table. */ |
| |
| static const unsigned char powi_table[POWI_TABLE_SIZE] = |
| { |
| 0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */ |
| 4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */ |
| 8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */ |
| 12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */ |
| 16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */ |
| 20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */ |
| 24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */ |
| 28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */ |
| 32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */ |
| 36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */ |
| 40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */ |
| 44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */ |
| 48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */ |
| 52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */ |
| 56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */ |
| 60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */ |
| 64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */ |
| 68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */ |
| 72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */ |
| 76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */ |
| 80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */ |
| 84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */ |
| 88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */ |
| 92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */ |
| 96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */ |
| 100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */ |
| 104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */ |
| 108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */ |
| 112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */ |
| 116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */ |
| 120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */ |
| 124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */ |
| }; |
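| |
| /* Worked example (values taken from the table above): for powi (x, 13), |
| powi_table[13] == 10, so x**13 = x**3 * x**10; powi_table[10] == 5 |
| gives x**10 = x**5 * x**5; powi_table[5] == 3 gives x**5 = x**2 * x**3; |
| and x**3 and x**2 follow directly from x. The whole expansion |
| therefore needs only five multiplications. */ |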
| |
| |
| /* Return the number of multiplications required to calculate |
| powi(x,n) where n is less than POWI_TABLE_SIZE. This is a |
| subroutine of powi_cost. CACHE is an array indicating |
| which exponents have already been calculated. */ |
| |
| static int |
| powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache) |
| { |
| /* If we've already calculated this exponent, then this evaluation |
| doesn't require any additional multiplications. */ |
| if (cache[n]) |
| return 0; |
| |
| cache[n] = true; |
| return powi_lookup_cost (n - powi_table[n], cache) |
| + powi_lookup_cost (powi_table[n], cache) + 1; |
| } |
| |
| /* Return the number of multiplications required to calculate |
| powi(x,n) for an arbitrary x, given the exponent N. This |
| function needs to be kept in sync with powi_as_mults below. */ |
| |
| static int |
| powi_cost (HOST_WIDE_INT n) |
| { |
| bool cache[POWI_TABLE_SIZE]; |
| unsigned HOST_WIDE_INT digit; |
| unsigned HOST_WIDE_INT val; |
| int result; |
| |
| if (n == 0) |
| return 0; |
| |
| /* Ignore the reciprocal when calculating the cost. */ |
| val = (n < 0) ? -n : n; |
| |
| /* Initialize the exponent cache. */ |
| memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool)); |
| cache[1] = true; |
| |
| result = 0; |
| |
| while (val >= POWI_TABLE_SIZE) |
| { |
| if (val & 1) |
| { |
| digit = val & ((1 << POWI_WINDOW_SIZE) - 1); |
| result += powi_lookup_cost (digit, cache) |
| + POWI_WINDOW_SIZE + 1; |
| val >>= POWI_WINDOW_SIZE; |
| } |
| else |
| { |
| val >>= 1; |
| result++; |
| } |
| } |
| |
| return result + powi_lookup_cost (val, cache); |
| } |
| |
| /* Recursive subroutine of powi_as_mults. This function takes the |
| array, CACHE, of already calculated exponents and an exponent N and |
| returns a tree that corresponds to CACHE[1]**N, with type TYPE. */ |
| |
| static tree |
| powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type, |
| HOST_WIDE_INT n, tree *cache) |
| { |
| tree op0, op1, ssa_target; |
| unsigned HOST_WIDE_INT digit; |
| gassign *mult_stmt; |
| |
| if (n < POWI_TABLE_SIZE && cache[n]) |
| return cache[n]; |
| |
| ssa_target = make_temp_ssa_name (type, NULL, "powmult"); |
| |
| if (n < POWI_TABLE_SIZE) |
| { |
| cache[n] = ssa_target; |
| op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache); |
| op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache); |
| } |
| else if (n & 1) |
| { |
| digit = n & ((1 << POWI_WINDOW_SIZE) - 1); |
| op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache); |
| op1 = powi_as_mults_1 (gsi, loc, type, digit, cache); |
| } |
| else |
| { |
| op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache); |
| op1 = op0; |
| } |
| |
| mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1); |
| gimple_set_location (mult_stmt, loc); |
| gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT); |
| |
| return ssa_target; |
| } |
| |
| /* Convert ARG0**N to a tree of multiplications of ARG0 with itself. |
| This function needs to be kept in sync with powi_cost above. */ |
| |
| static tree |
| powi_as_mults (gimple_stmt_iterator *gsi, location_t loc, |
| tree arg0, HOST_WIDE_INT n) |
| { |
| tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0); |
| gassign *div_stmt; |
| tree target; |
| |
| if (n == 0) |
| return build_real (type, dconst1); |
| |
| memset (cache, 0, sizeof (cache)); |
| cache[1] = arg0; |
| |
| result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache); |
| if (n >= 0) |
| return result; |
| |
| /* If the original exponent was negative, reciprocate the result. */ |
| target = make_temp_ssa_name (type, NULL, "powmult"); |
| div_stmt = gimple_build_assign (target, RDIV_EXPR, |
| build_real (type, dconst1), result); |
| gimple_set_location (div_stmt, loc); |
| gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT); |
| |
| return target; |
| } |
| |
| /* ARG0 and N are the two arguments to a powi builtin in GSI with |
| location info LOC. If the arguments are appropriate, create an |
| equivalent sequence of statements prior to GSI using an optimal |
| number of multiplications, and return an expression holding the |
| result. */ |
| |
| static tree |
| gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc, |
| tree arg0, HOST_WIDE_INT n) |
| { |
| /* Avoid largest negative number. */ |
| if (n != -n |
| && ((n >= -1 && n <= 2) |
| || (optimize_function_for_speed_p (cfun) |
| && powi_cost (n) <= POWI_MAX_MULTS))) |
| return powi_as_mults (gsi, loc, arg0, n); |
| |
| return NULL_TREE; |
| } |
| |
| /* Build a gimple call statement that calls FN with argument ARG. |
| Set the lhs of the call statement to a fresh SSA name. Insert the |
| statement prior to GSI's current position, and return the fresh |
| SSA name. */ |
| |
| static tree |
| build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc, |
| tree fn, tree arg) |
| { |
| gcall *call_stmt; |
| tree ssa_target; |
| |
| call_stmt = gimple_build_call (fn, 1, arg); |
| ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot"); |
| gimple_set_lhs (call_stmt, ssa_target); |
| gimple_set_location (call_stmt, loc); |
| gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT); |
| |
| return ssa_target; |
| } |
| |
| /* Build a gimple binary operation with the given CODE and arguments |
| ARG0, ARG1, assigning the result to a new SSA name for variable |
| TARGET. Insert the statement prior to GSI's current position, and |
| return the fresh SSA name. */ |
| |
| static tree |
| build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc, |
| const char *name, enum tree_code code, |
| tree arg0, tree arg1) |
| { |
| tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name); |
| gassign *stmt = gimple_build_assign (result, code, arg0, arg1); |
| gimple_set_location (stmt, loc); |
| gsi_insert_before (gsi, stmt, GSI_SAME_STMT); |
| return result; |
| } |
| |
| /* Build a gimple reference operation with the given CODE and argument |
| ARG0, assigning the result to a new SSA name of TYPE with NAME. |
| Insert the statement prior to GSI's current position, and return |
| the fresh SSA name. */ |
| |
| static inline tree |
| build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type, |
| const char *name, enum tree_code code, tree arg0) |
| { |
| tree result = make_temp_ssa_name (type, NULL, name); |
| gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0)); |
| gimple_set_location (stmt, loc); |
| gsi_insert_before (gsi, stmt, GSI_SAME_STMT); |
| return result; |
| } |
| |
| /* Build a gimple assignment to cast VAL to TYPE. Insert the statement |
| prior to GSI's current position, and return the fresh SSA name. */ |
| |
| static tree |
| build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc, |
| tree type, tree val) |
| { |
| tree result = make_ssa_name (type); |
| gassign *stmt = gimple_build_assign (result, NOP_EXPR, val); |
| gimple_set_location (stmt, loc); |
| gsi_insert_before (gsi, stmt, GSI_SAME_STMT); |
| return result; |
| } |
| |
| struct pow_synth_sqrt_info |
| { |
| bool *factors; |
| unsigned int deepest; |
| unsigned int num_mults; |
| }; |
| |
| /* Return true iff the real value C can be represented as a |
| sum of powers of 0.5 up to N. That is: |
| C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1. |
| Record in INFO the various parameters of the synthesis algorithm such |
| as the factors a[i], the maximum 0.5 power and the number of |
| multiplications that will be required. */ |
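| |
| /* For example, with C == 0.625 and N >= 3: 0.625 == 0.5 + 0.125 |
| == 0.5**1 + 0.5**3, so INFO->factors becomes { 1, 0, 1 }, |
| INFO->deepest becomes 3 and INFO->num_mults becomes 1 (one multiply |
| to combine the two square-root chains). */ |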
| |
| bool |
| representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n, |
| struct pow_synth_sqrt_info *info) |
| { |
| REAL_VALUE_TYPE factor = dconsthalf; |
| REAL_VALUE_TYPE remainder = c; |
| |
| info->deepest = 0; |
| info->num_mults = 0; |
| memset (info->factors, 0, n * sizeof (bool)); |
| |
| for (unsigned i = 0; i < n; i++) |
| { |
| REAL_VALUE_TYPE res; |
| |
| /* If something inexact happened bail out now. */ |
| if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor)) |
| return false; |
| |
| /* We have hit zero. The number is representable as a sum |
| of powers of 0.5. */ |
| if (real_equal (&res, &dconst0)) |
| { |
| info->factors[i] = true; |
| info->deepest = i + 1; |
| return true; |
| } |
| else if (!REAL_VALUE_NEGATIVE (res)) |
| { |
| remainder = res; |
| info->factors[i] = true; |
| info->num_mults++; |
| } |
| else |
| info->factors[i] = false; |
| |
| real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf); |
| } |
| return false; |
| } |
| |
| /* Return the tree corresponding to FN being applied |
| to ARG N times at GSI and LOC. |
| Look up previous results from CACHE if need be. |
| cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times. */ |
| |
| static tree |
| get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi, |
| tree fn, location_t loc, tree *cache) |
| { |
| tree res = cache[n]; |
| if (!res) |
| { |
| tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache); |
| res = build_and_insert_call (gsi, loc, fn, prev); |
| cache[n] = res; |
| } |
| |
| return res; |
| } |
| |
| /* Print to STREAM the repeated application of function FNAME to ARG |
| N times. So, for FNAME = "foo", ARG = "x", N = 2 it would print: |
| "foo (foo (x))". */ |
| |
| static void |
| print_nested_fn (FILE* stream, const char *fname, const char* arg, |
| unsigned int n) |
| { |
| if (n == 0) |
| fprintf (stream, "%s", arg); |
| else |
| { |
| fprintf (stream, "%s (", fname); |
| print_nested_fn (stream, fname, arg, n - 1); |
| fprintf (stream, ")"); |
| } |
| } |
| |
| /* Print to STREAM the fractional sequence of sqrt chains |
| applied to ARG, described by INFO. Used for the dump file. */ |
| |
| static void |
| dump_fractional_sqrt_sequence (FILE *stream, const char *arg, |
| struct pow_synth_sqrt_info *info) |
| { |
| for (unsigned int i = 0; i < info->deepest; i++) |
| { |
| bool is_set = info->factors[i]; |
| if (is_set) |
| { |
| print_nested_fn (stream, "sqrt", arg, i + 1); |
| if (i != info->deepest - 1) |
| fprintf (stream, " * "); |
| } |
| } |
| } |
| |
| /* Print to STREAM a representation of raising ARG to an integer |
| power N. Used for the dump file. */ |
| |
| static void |
| dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n) |
| { |
| if (n > 1) |
| fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n); |
| else if (n == 1) |
| fprintf (stream, "%s", arg); |
| } |
| |
| /* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of |
| square roots. Place at GSI and LOC. Limit the maximum depth |
| of the sqrt chains to MAX_DEPTH. Return the tree holding the |
| result of the expanded sequence or NULL_TREE if the expansion failed. |
| |
| This routine assumes that ARG1 is a real number with a fractional part |
| (the integer exponent case will have been handled earlier in |
| gimple_expand_builtin_pow). |
| |
| For ARG1 > 0.0: |
| * For ARG1 composed of a whole part WHOLE_PART and a fractional part |
| FRAC_PART i.e. WHOLE_PART == floor (ARG1) and |
| FRAC_PART == ARG1 - WHOLE_PART: |
| Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where |
| POW (ARG0, FRAC_PART) is expanded as a product of square root chains |
| if it can be expressed as such, that is if FRAC_PART satisfies: |
| FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i)) |
| where integer a[i] is either 0 or 1. |
| |
| Example: |
| POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625) |
| --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x))) |
| |
| For ARG1 < 0.0 there are two approaches: |
| * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1) |
| is calculated as above. |
| |
| Example: |
| POW (x, -5.625) == 1.0 / POW (x, 5.625) |
| --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x)))) |
| |
| * (B) : WHOLE_PART := - ceil (abs (ARG1)) |
| FRAC_PART := ARG1 - WHOLE_PART |
| and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART). |
| Example: |
| POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6) |
| --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6)) |
| |
| For ARG1 < 0.0 we choose between (A) and (B) depending on |
| how many multiplications we'd have to do. |
| So, for the example in (B): POW (x, -5.875), if we were to |
| follow algorithm (A) we would produce: |
| 1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X))) |
| which contains more multiplications than approach (B). |
| |
| Hopefully, this approach will eliminate potentially expensive POW library |
| calls when unsafe floating point math is enabled and allow the compiler to |
| further optimise the multiplies, square roots and divides produced by this |
| function. */ |
| |
| static tree |
| expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc, |
| tree arg0, tree arg1, HOST_WIDE_INT max_depth) |
| { |
| tree type = TREE_TYPE (arg0); |
| machine_mode mode = TYPE_MODE (type); |
| tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT); |
| bool one_over = true; |
| |
| if (!sqrtfn) |
| return NULL_TREE; |
| |
| if (TREE_CODE (arg1) != REAL_CST) |
| return NULL_TREE; |
| |
| REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1); |
| |
| gcc_assert (max_depth > 0); |
| tree *cache = XALLOCAVEC (tree, max_depth + 1); |
| |
| struct pow_synth_sqrt_info synth_info; |
| synth_info.factors = XALLOCAVEC (bool, max_depth + 1); |
| synth_info.deepest = 0; |
| synth_info.num_mults = 0; |
| |
| bool neg_exp = REAL_VALUE_NEGATIVE (exp_init); |
| REAL_VALUE_TYPE exp = real_value_abs (&exp_init); |
| |
| /* The whole and fractional parts of exp. */ |
| REAL_VALUE_TYPE whole_part; |
| REAL_VALUE_TYPE frac_part; |
| |
| real_floor (&whole_part, mode, &exp); |
| real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part); |
| |
| |
| REAL_VALUE_TYPE ceil_whole = dconst0; |
| REAL_VALUE_TYPE ceil_fract = dconst0; |
| |
| if (neg_exp) |
| { |
| real_ceil (&ceil_whole, mode, &exp); |
| real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp); |
| } |
| |
| if (!representable_as_half_series_p (frac_part, max_depth, &synth_info)) |
| return NULL_TREE; |
| |
| /* Check whether it's more profitable to not use 1.0 / ... */ |
| if (neg_exp) |
| { |
| struct pow_synth_sqrt_info alt_synth_info; |
| alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1); |
| alt_synth_info.deepest = 0; |
| alt_synth_info.num_mults = 0; |
| |
| if (representable_as_half_series_p (ceil_fract, max_depth, |
| &alt_synth_info) |
| && alt_synth_info.deepest <= synth_info.deepest |
| && alt_synth_info.num_mults < synth_info.num_mults) |
| { |
| whole_part = ceil_whole; |
| frac_part = ceil_fract; |
| synth_info.deepest = alt_synth_info.deepest; |
| synth_info.num_mults = alt_synth_info.num_mults; |
| memcpy (synth_info.factors, alt_synth_info.factors, |
| (max_depth + 1) * sizeof (bool)); |
| one_over = false; |
| } |
| } |
| |
| HOST_WIDE_INT n = real_to_integer (&whole_part); |
| REAL_VALUE_TYPE cint; |
| real_from_integer (&cint, VOIDmode, n, SIGNED); |
| |
| if (!real_identical (&whole_part, &cint)) |
| return NULL_TREE; |
| |
| if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS) |
| return NULL_TREE; |
| |
| memset (cache, 0, (max_depth + 1) * sizeof (tree)); |
| |
| tree integer_res = n == 0 ? build_real (type, dconst1) : arg0; |
| |
| /* Calculate the integer part of the exponent. */ |
| if (n > 1) |
| { |
| integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n); |
| if (!integer_res) |
| return NULL_TREE; |
| } |
| |
| if (dump_file) |
| { |
| char string[64]; |
| |
| real_to_decimal (string, &exp_init, sizeof (string), 0, 1); |
| fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string); |
| |
| if (neg_exp) |
| { |
| if (one_over) |
| { |
| fprintf (dump_file, "1.0 / ("); |
| dump_integer_part (dump_file, "x", n); |
| if (n > 0) |
| fprintf (dump_file, " * "); |
| dump_fractional_sqrt_sequence (dump_file, "x", &synth_info); |
| fprintf (dump_file, ")"); |
| } |
| else |
| { |
| dump_fractional_sqrt_sequence (dump_file, "x", &synth_info); |
| fprintf (dump_file, " / ("); |
| dump_integer_part (dump_file, "x", n); |
| fprintf (dump_file, ")"); |
| } |
| } |
| else |
| { |
| dump_fractional_sqrt_sequence (dump_file, "x", &synth_info); |
| if (n > 0) |
| fprintf (dump_file, " * "); |
| dump_integer_part (dump_file, "x", n); |
| } |
| |
| fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest); |
| } |
| |
| |
| tree fract_res = NULL_TREE; |
| cache[0] = arg0; |
| |
| /* Calculate the fractional part of the exponent. */ |
| for (unsigned i = 0; i < synth_info.deepest; i++) |
| { |
| if (synth_info.factors[i]) |
| { |
| tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache); |
| |
| if (!fract_res) |
| fract_res = sqrt_chain; |
| |
| else |
| fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR, |
| fract_res, sqrt_chain); |
| } |
| } |
| |
| tree res = NULL_TREE; |
| |
| if (neg_exp) |
| { |
| if (one_over) |
| { |
| if (n > 0) |
| res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR, |
| fract_res, integer_res); |
| else |
| res = fract_res; |
| |
| res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR, |
| build_real (type, dconst1), res); |
| } |
| else |
| { |
| res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR, |
| fract_res, integer_res); |
| } |
| } |
| else |
| res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR, |
| fract_res, integer_res); |
| return res; |
| } |
| |
| /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI |
| with location info LOC. If possible, create an equivalent and |
| less expensive sequence of statements prior to GSI, and return an |
| expression holding the result. */ |
| |
| static tree |
| gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc, |
| tree arg0, tree arg1) |
| { |
| REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6; |
| REAL_VALUE_TYPE c2, dconst3; |
| HOST_WIDE_INT n; |
| tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x; |
| machine_mode mode; |
| bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi)); |
| bool hw_sqrt_exists, c_is_int, c2_is_int; |
| |
| dconst1_4 = dconst1; |
| SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2); |
| |
| /* If the exponent isn't a constant, there's nothing of interest |
| to be done. */ |
| if (TREE_CODE (arg1) != REAL_CST) |
| return NULL_TREE; |
| |
| /* Don't perform the operation if flag_signaling_nans is on |
| and the operand is a signaling NaN. */ |
| if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1))) |
| && ((TREE_CODE (arg0) == REAL_CST |
| && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0))) |
| || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1)))) |
| return NULL_TREE; |
| |
| /* If the exponent is equivalent to an integer, expand to an optimal |
| multiplication sequence when profitable. */ |
| c = TREE_REAL_CST (arg1); |
| n = real_to_integer (&c); |
| real_from_integer (&cint, VOIDmode, n, SIGNED); |
| c_is_int = real_identical (&c, &cint); |
| |
| if (c_is_int |
| && ((n >= -1 && n <= 2) |
| || (flag_unsafe_math_optimizations |
| && speed_p |
| && powi_cost (n) <= POWI_MAX_MULTS))) |
| return gimple_expand_builtin_powi (gsi, loc, arg0, n); |
| |
| /* Attempt various optimizations using sqrt and cbrt. */ |
| type = TREE_TYPE (arg0); |
| mode = TYPE_MODE (type); |
| sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT); |
| |
| /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe |
| unless signed zeros must be maintained. pow(-0,0.5) = +0, while |
| sqrt(-0) = -0. */ |
| if (sqrtfn |
| && real_equal (&c, &dconsthalf) |
| && !HONOR_SIGNED_ZEROS (mode)) |
| return build_and_insert_call (gsi, loc, sqrtfn, arg0); |
| |
| hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing; |
| |
| /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math |
| optimizations since 1./3. is not exactly representable. If x |
| is negative and finite, the correct value of pow(x,1./3.) is |
| a NaN with the "invalid" exception raised, because the value |
| of 1./3. actually has an even denominator. The correct value |
| of cbrt(x) is a negative real value. */ |
| cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT); |
| dconst1_3 = real_value_truncate (mode, dconst_third ()); |
| |
| if (flag_unsafe_math_optimizations |
| && cbrtfn |
| && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0)) |
| && real_equal (&c, &dconst1_3)) |
| return build_and_insert_call (gsi, loc, cbrtfn, arg0); |
| |
| /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization |
| if we don't have a hardware sqrt insn. */ |
| dconst1_6 = dconst1_3; |
| SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1); |
| |
| if (flag_unsafe_math_optimizations |
| && sqrtfn |
| && cbrtfn |
| && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0)) |
| && speed_p |
| && hw_sqrt_exists |
| && real_equal (&c, &dconst1_6)) |
| { |
| /* sqrt(x) */ |
| sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0); |
| |
| /* cbrt(sqrt(x)) */ |
| return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0); |
| } |
| |
| |
| /* Attempt to expand the POW as a product of square root chains. |
| Expand the 0.25 case even when optimising for size. */ |
| if (flag_unsafe_math_optimizations |
| && sqrtfn |
| && hw_sqrt_exists |
| && (speed_p || real_equal (&c, &dconst1_4)) |
| && !HONOR_SIGNED_ZEROS (mode)) |
| { |
| unsigned int max_depth = speed_p |
| ? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH) |
| : 2; |
| |
| tree expand_with_sqrts |
| = expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth); |
| |
| if (expand_with_sqrts) |
| return expand_with_sqrts; |
| } |
| |
| real_arithmetic (&c2, MULT_EXPR, &c, &dconst2); |
| n = real_to_integer (&c2); |
| real_from_integer (&cint, VOIDmode, n, SIGNED); |
| c2_is_int = real_identical (&c2, &cint); |
| |
| /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into |
| |
| powi(x, n/3) * powi(cbrt(x), n%3), n > 0; |
| 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0. |
| |
| Do not calculate the first factor when n/3 = 0. As cbrt(x) is |
| different from pow(x, 1./3.) due to rounding and behavior with |
| negative x, we need to constrain this transformation to unsafe |
| math and positive x or finite math. */ |
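| /* For example, assuming -funsafe-math-optimizations and an available |
| cbrt: for pow (x, 5./3.) we get n == 5, so the result is built as |
| powi (x, 1) * (cbrt (x) * cbrt (x)), i.e. x * cbrt(x)**2. */ |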
| real_from_integer (&dconst3, VOIDmode, 3, SIGNED); |
| real_arithmetic (&c2, MULT_EXPR, &c, &dconst3); |
| real_round (&c2, mode, &c2); |
| n = real_to_integer (&c2); |
| real_from_integer (&cint, VOIDmode, n, SIGNED); |
| real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3); |
| real_convert (&c2, mode, &c2); |
| |
| if (flag_unsafe_math_optimizations |
| && cbrtfn |
| && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0)) |
| && real_identical (&c2, &c) |
| && !c2_is_int |
| && optimize_function_for_speed_p (cfun) |
| && powi_cost (n / 3) <= POWI_MAX_MULTS) |
| { |
| tree powi_x_ndiv3 = NULL_TREE; |
| |
| /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not |
| possible or profitable, give up. Skip the degenerate case when |
| abs(n) < 3, where the result is always 1. */ |
| if (absu_hwi (n) >= 3) |
| { |
| powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0, |
| abs_hwi (n / 3)); |
| if (!powi_x_ndiv3) |
| return NULL_TREE; |
| } |
| |
| /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi |
| as that creates an unnecessary variable. Instead, just produce |
| either cbrt(x) or cbrt(x) * cbrt(x). */ |
| cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0); |
| |
| if (absu_hwi (n) % 3 == 1) |
| powi_cbrt_x = cbrt_x; |
| else |
| powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR, |
| cbrt_x, cbrt_x); |
| |
| /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */ |
| if (absu_hwi (n) < 3) |
| result = powi_cbrt_x; |
| else |
| result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR, |
| powi_x_ndiv3, powi_cbrt_x); |
| |
| /* If n is negative, reciprocate the result. */ |
| if (n < 0) |
| result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR, |
| build_real (type, dconst1), result); |
| |
| return result; |
| } |
| |
| /* No optimizations succeeded. */ |
| return NULL_TREE; |
| } |
| |
| /* ARG is the argument to a cabs builtin call in GSI with location info |
| LOC. Create a sequence of statements prior to GSI that calculates |
| sqrt(R*R + I*I), where R and I are the real and imaginary components |
| of ARG, respectively. Return an expression holding the result. */ |
| |
| static tree |
| gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg) |
| { |
| tree real_part, imag_part, addend1, addend2, sum, result; |
| tree type = TREE_TYPE (TREE_TYPE (arg)); |
| tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT); |
| machine_mode mode = TYPE_MODE (type); |
| |
| if (!flag_unsafe_math_optimizations |
| || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi))) |
| || !sqrtfn |
| || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing) |
| return NULL_TREE; |
| |
| real_part = build_and_insert_ref (gsi, loc, type, "cabs", |
| REALPART_EXPR, arg); |
| addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR, |
| real_part, real_part); |
| imag_part = build_and_insert_ref (gsi, loc, type, "cabs", |
| IMAGPART_EXPR, arg); |
| addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR, |
| imag_part, imag_part); |
| sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2); |
| result = build_and_insert_call (gsi, loc, sqrtfn, sum); |
| |
| return result; |
| } |
| |
| /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1 |
| on the SSA_NAME argument of each of them. Also expand powi(x,n) into |
| an optimal number of multiplies, when n is a constant. */ |
| |
| namespace { |
| |
| const pass_data pass_data_cse_sincos = |
| { |
| GIMPLE_PASS, /* type */ |
| "sincos", /* name */ |
| OPTGROUP_NONE, /* optinfo_flags */ |
| TV_NONE, /* tv_id */ |
| PROP_ssa, /* properties_required */ |
| PROP_gimple_opt_math, /* properties_provided */ |
| 0, /* properties_destroyed */ |
| 0, /* todo_flags_start */ |
| TODO_update_ssa, /* todo_flags_finish */ |
| }; |
| |
| class pass_cse_sincos : public gimple_opt_pass |
| { |
| public: |
| pass_cse_sincos (gcc::context *ctxt) |
| : gimple_opt_pass (pass_data_cse_sincos, ctxt) |
| {} |
| |
| /* opt_pass methods: */ |
| virtual bool gate (function *) |
| { |
| /* We no longer require either sincos or cexp, since powi expansion |
| piggybacks on this pass. */ |
| return optimize; |
| } |
| |
| virtual unsigned int execute (function *); |
| |
| }; // class pass_cse_sincos |
| |
| unsigned int |
| pass_cse_sincos::execute (function *fun) |
| { |
| basic_block bb; |
| bool cfg_changed = false; |
| |
| calculate_dominance_info (CDI_DOMINATORS); |
| memset (&sincos_stats, 0, sizeof (sincos_stats)); |
| |
| FOR_EACH_BB_FN (bb, fun) |
| { |
| gimple_stmt_iterator gsi; |
| bool cleanup_eh = false; |
| |
| for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| |
| /* Only the last stmt in a bb could throw, no need to call |
| gimple_purge_dead_eh_edges if we change something in the middle |
| of a basic block. */ |
| cleanup_eh = false; |
| |
| if (is_gimple_call (stmt) |
| && gimple_call_lhs (stmt)) |
| { |
| tree arg, arg0, arg1, result; |
| HOST_WIDE_INT n; |
| location_t loc; |
| |
| switch (gimple_call_combined_fn (stmt)) |
| { |
| CASE_CFN_COS: |
| CASE_CFN_SIN: |
| CASE_CFN_CEXPI: |
| /* Make sure we have either sincos or cexp. */ |
| if (!targetm.libc_has_function (function_c99_math_complex) |
| && !targetm.libc_has_function (function_sincos)) |
| break; |
| |
| arg = gimple_call_arg (stmt, 0); |
| if (TREE_CODE (arg) == SSA_NAME) |
| cfg_changed |= execute_cse_sincos_1 (arg); |
| break; |
| |
| CASE_CFN_POW: |
| arg0 = gimple_call_arg (stmt, 0); |
| arg1 = gimple_call_arg (stmt, 1); |
| |
| loc = gimple_location (stmt); |
| result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1); |
| |
| if (result) |
| { |
| tree lhs = gimple_get_lhs (stmt); |
| gassign *new_stmt = gimple_build_assign (lhs, result); |
| gimple_set_location (new_stmt, loc); |
| unlink_stmt_vdef (stmt); |
| gsi_replace (&gsi, new_stmt, true); |
| cleanup_eh = true; |
| if (gimple_vdef (stmt)) |
| release_ssa_name (gimple_vdef (stmt)); |
| } |
| break; |
| |
| CASE_CFN_POWI: |
| arg0 = gimple_call_arg (stmt, 0); |
| arg1 = gimple_call_arg (stmt, 1); |
| loc = gimple_location (stmt); |
| |
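| /* powi (-1, n) is 1.0 when N is even and -1.0 when N is odd, so it |
| can be expanded without a call as (n & 1) ? -1.0 : 1.0. */ |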
| if (real_minus_onep (arg0)) |
| { |
| tree t0, t1, cond, one, minus_one; |
| gassign *stmt; |
| |
| t0 = TREE_TYPE (arg0); |
| t1 = TREE_TYPE (arg1); |
| one = build_real (t0, dconst1); |
| minus_one = build_real (t0, dconstm1); |
| |
| cond = make_temp_ssa_name (t1, NULL, "powi_cond"); |
| stmt = gimple_build_assign (cond, BIT_AND_EXPR, |
| arg1, build_int_cst (t1, 1)); |
| gimple_set_location (stmt, loc); |
| gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); |
| |
| result = make_temp_ssa_name (t0, NULL, "powi"); |
| stmt = gimple_build_assign (result, COND_EXPR, cond, |
| minus_one, one); |
| gimple_set_location (stmt, loc); |
| gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); |
| } |
| else |
| { |
| if (!tree_fits_shwi_p (arg1)) |
| break; |
| |
| n = tree_to_shwi (arg1); |
| result = gimple_expand_builtin_powi (&gsi, loc, arg0, n); |
| } |
| |
| if (result) |
| { |
| tree lhs = gimple_get_lhs (stmt); |
| gassign *new_stmt = gimple_build_assign (lhs, result); |
| gimple_set_location (new_stmt, loc); |
| unlink_stmt_vdef (stmt); |
| gsi_replace (&gsi, new_stmt, true); |
| cleanup_eh = true; |
| if (gimple_vdef (stmt)) |
| release_ssa_name (gimple_vdef (stmt)); |
| } |
| break; |
| |
| CASE_CFN_CABS: |
| arg0 = gimple_call_arg (stmt, 0); |
| loc = gimple_location (stmt); |
| result = gimple_expand_builtin_cabs (&gsi, loc, arg0); |
| |
| if (result) |
| { |
| tree lhs = gimple_get_lhs (stmt); |
| gassign *new_stmt = gimple_build_assign (lhs, result); |
| gimple_set_location (new_stmt, loc); |
| unlink_stmt_vdef (stmt); |
| gsi_replace (&gsi, new_stmt, true); |
| cleanup_eh = true; |
| if (gimple_vdef (stmt)) |
| release_ssa_name (gimple_vdef (stmt)); |
| } |
| break; |
| |
| default:; |
| } |
| } |
| } |
| if (cleanup_eh) |
| cfg_changed |= gimple_purge_dead_eh_edges (bb); |
| } |
| |
| statistics_counter_event (fun, "sincos statements inserted", |
| sincos_stats.inserted); |
| |
| return cfg_changed ? TODO_cleanup_cfg : 0; |
| } |
| |
| } // anon namespace |
| |
| gimple_opt_pass * |
| make_pass_cse_sincos (gcc::context *ctxt) |
| { |
| return new pass_cse_sincos (ctxt); |
| } |
| |
| /* A symbolic number structure is used to detect byte permutation and selection |
| patterns of a source. To achieve that, its field N contains an artificial |
| number consisting of BITS_PER_MARKER sized markers tracking where each |
| byte comes from in the source: |
| |
| 0 - target byte has the value 0 |
| FF - target byte has an unknown value (e.g. due to sign extension) |
| 1..size - marker value is the byte index in the source plus one (1 for lsb). |
| |
| To detect permutations on memory sources (arrays and structures), a symbolic |
| number also has associated with it: |
| - a base address BASE_ADDR and an OFFSET giving the address of the source; |
| - a range which gives the difference between the highest and lowest accessed |
| memory location to make such a symbolic number; |
| - the address SRC of the source element of lowest address as a convenience |
| to easily get BASE_ADDR + offset + lowest bytepos. |
| |
| Note 1: the range is different from size as size reflects the size of the |
| type of the current expression. For instance, for an array char a[], |
| (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while |
| (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this |
| time a range of 1. |
| |
| Note 2: for non-memory sources, range holds the same value as size. |
| |
| Note 3: SRC points to the SSA_NAME in case of non-memory source. */ |
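|  |
| /* A worked example (an illustration, not part of the original comment): on a |
| little-endian target, assembling a 32-bit value from an unsigned char |
| array b as |
|  |
| (uint32_t) b[0] | ((uint32_t) b[1] << 8) |
| | ((uint32_t) b[2] << 16) | ((uint32_t) b[3] << 24) |
|  |
| ends up with N == 0x04030201, i.e. every target byte carries the marker of |
| the source byte it came from; this matches CMPNOP below and denotes a load |
| in native endianness. Assembling the bytes in the opposite order instead |
| yields N == 0x01020304, matching CMPXCHG (a byte swap). */ |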
| |
| struct symbolic_number { |
| uint64_t n; |
| tree type; |
| tree base_addr; |
| tree offset; |
| HOST_WIDE_INT bytepos; |
| tree src; |
| tree alias_set; |
| tree vuse; |
| unsigned HOST_WIDE_INT range; |
| }; |
| |
| #define BITS_PER_MARKER 8 |
| #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1) |
| #define MARKER_BYTE_UNKNOWN MARKER_MASK |
| #define HEAD_MARKER(n, size) \ |
| ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER))) |
| |
| /* The number which the find_bswap_or_nop_1 result should match in |
| order to have a nop. The number is masked according to the size of |
| the symbolic number before using it. */ |
| #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \ |
| (uint64_t)0x08070605 << 32 | 0x04030201) |
| |
| /* The number which the find_bswap_or_nop_1 result should match in |
| order to have a byte swap. The number is masked according to the |
| size of the symbolic number before using it. */ |
| #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \ |
| (uint64_t)0x01020304 << 32 | 0x05060708) |
| |
| /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic |
| number N. Return false if the requested operation is not permitted |
| on a symbolic number. */ |
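|  |
| /* For example (illustrative only): with BITS_PER_MARKER == 8 and a 4-byte |
| symbolic number of 0x04030201, an RSHIFT_EXPR by 8 bits drops one marker |
| and yields 0x00040302 for an unsigned type; for a signed type whose head |
| marker is set, the vacated byte is filled with MARKER_BYTE_UNKNOWN |
| instead, giving 0xff040302. */ |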
| |
| static inline bool |
| do_shift_rotate (enum tree_code code, |
| struct symbolic_number *n, |
| int count) |
| { |
| int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT; |
| unsigned head_marker; |
| |
| if (count % BITS_PER_UNIT != 0) |
| return false; |
| count = (count / BITS_PER_UNIT) * BITS_PER_MARKER; |
| |
| /* Zero out the extra bits of N in order to avoid them being shifted |
| into the significant bits. */ |
| if (size < 64 / BITS_PER_MARKER) |
| n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1; |
| |
| switch (code) |
| { |
| case LSHIFT_EXPR: |
| n->n <<= count; |
| break; |
| case RSHIFT_EXPR: |
| head_marker = HEAD_MARKER (n->n, size); |
| n->n >>= count; |
| /* Arithmetic shift of signed type: result is dependent on the value. */ |
| if (!TYPE_UNSIGNED (n->type) && head_marker) |
| for (i = 0; i < count / BITS_PER_MARKER; i++) |
| n->n |= (uint64_t) MARKER_BYTE_UNKNOWN |
| << ((size - 1 - i) * BITS_PER_MARKER); |
| break; |
| case LROTATE_EXPR: |
| n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count)); |
| break; |
| case RROTATE_EXPR: |
| n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count)); |
| break; |
| default: |
| return false; |
| } |
| /* Zero unused bits for size. */ |
| if (size < 64 / BITS_PER_MARKER) |
| n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1; |
| return true; |
| } |
| |
| /* Perform sanity checking for the symbolic number N and the gimple |
| statement STMT. */ |
| |
| static inline bool |
| verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt) |
| { |
| tree lhs_type; |
| |
| lhs_type = gimple_expr_type (stmt); |
| |
| if (TREE_CODE (lhs_type) != INTEGER_TYPE) |
| return false; |
| |
| if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type)) |
| return false; |
| |
| return true; |
| } |
| |
| /* Initialize the symbolic number N for the bswap pass from the base element |
| SRC manipulated by the bitwise OR expression. */ |
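|  |
| /* For instance (illustrative only), a 32-bit SRC gets N == 0x04030201 and |
| RANGE == 4, i.e. "each byte comes from itself" as the starting point. */ |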
| |
| static bool |
| init_symbolic_number (struct symbolic_number *n, tree src) |
| { |
| int size; |
| |
| if (! INTEGRAL_TYPE_P (TREE_TYPE (src))) |
| return false; |
| |
| n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE; |
| n->src = src; |
| |
| /* Set up the symbolic number N by setting each byte to a value between 1 and |
| the byte size of SRC. The highest order byte is set to that byte size and |
| the lowest order byte to 1. */ |
| n->type = TREE_TYPE (src); |
| size = TYPE_PRECISION (n->type); |
| if (size % BITS_PER_UNIT != 0) |
| return false; |
| size /= BITS_PER_UNIT; |
| if (size > 64 / BITS_PER_MARKER) |
| return false; |
| n->range = size; |
| n->n = CMPNOP; |
| |
| if (size < 64 / BITS_PER_MARKER) |
| n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1; |
| |
| return true; |
| } |
| |
| /* Check whether STMT might be a byte swap or a nop from a memory source REF |
| and return the answer. If so, the base of the memory area accessed and the |
| offset of the access from that base are recorded in N. */ |
| |
| bool |
| find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n) |
| { |
| /* Leaf node is an array or component ref. Memorize its base and |
| offset from base to compare to other such leaf nodes. */ |
| HOST_WIDE_INT bitsize, bitpos; |
| machine_mode mode; |
| int unsignedp, reversep, volatilep; |
| tree offset, base_addr; |
| |
| /* Not prepared to handle PDP endian. */ |
| if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN) |
| return false; |
| |
| if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt)) |
| return false; |
| |
| base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode, |
| &unsignedp, &reversep, &volatilep); |
| |
| if (TREE_CODE (base_addr) == MEM_REF) |
| { |
| offset_int bit_offset = 0; |
| tree off = TREE_OPERAND (base_addr, 1); |
| |
| if (!integer_zerop (off)) |
| { |
| offset_int boff, coff = mem_ref_offset (base_addr); |
| boff = coff << LOG2_BITS_PER_UNIT; |
| bit_offset += boff; |
| } |
| |
| base_addr = TREE_OPERAND (base_addr, 0); |
| |
| /* Avoid returning a negative bitpos as this may wreak havoc later. */ |
| if (wi::neg_p (bit_offset)) |
| { |
| offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false); |
| offset_int tem = bit_offset.and_not (mask); |
| /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf. |
| Subtract it from BIT_OFFSET and add it (scaled) to OFFSET. */ |
| bit_offset -= tem; |
| tem >>= LOG2_BITS_PER_UNIT; |
| if (offset) |
| offset = size_binop (PLUS_EXPR, offset, |
| wide_int_to_tree (sizetype, tem)); |
| else |
| offset = wide_int_to_tree (sizetype, tem); |
| } |
| |
| bitpos += bit_offset.to_shwi (); |
| } |
| |
| if (bitpos % BITS_PER_UNIT) |
| return false; |
| if (bitsize % BITS_PER_UNIT) |
| return false; |
| if (reversep) |
| return false; |
| |
| if (!init_symbolic_number (n, ref)) |
| return false; |
| n->base_addr = base_addr; |
| n->offset = offset; |
| n->bytepos = bitpos / BITS_PER_UNIT; |
| n->alias_set = reference_alias_ptr_type (ref); |
| n->vuse = gimple_vuse (stmt); |
| return true; |
| } |
| |
| /* Compute the symbolic number N representing the result of a bitwise OR on |
| two symbolic numbers N1 and N2 whose source statements are respectively |
| SOURCE_STMT1 and SOURCE_STMT2. */ |
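|  |
| /* A small illustration (not from the original comment): for the same |
| non-memory source, merging N1 == 0x00000201 (low two bytes) with |
| N2 == 0x04030000 (high two bytes) succeeds and gives N == 0x04030201, |
| whereas two numbers claiming different markers for the same target byte |
| conflict and make the merge fail. */ |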
| |
| static gimple * |
| perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1, |
| gimple *source_stmt2, struct symbolic_number *n2, |
| struct symbolic_number *n) |
| { |
| int i, size; |
| uint64_t mask; |
| gimple *source_stmt; |
| struct symbolic_number *n_start; |
| |
| tree rhs1 = gimple_assign_rhs1 (source_stmt1); |
| if (TREE_CODE (rhs1) == BIT_FIELD_REF |
| && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME) |
| rhs1 = TREE_OPERAND (rhs1, 0); |
| tree rhs2 = gimple_assign_rhs1 (source_stmt2); |
| if (TREE_CODE (rhs2) == BIT_FIELD_REF |
| && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME) |
| rhs2 = TREE_OPERAND (rhs2, 0); |
| |
| /* Sources are different, cancel bswap if they are not memory locations with |
| the same base (array, structure, ...). */ |
| if (rhs1 != rhs2) |
| { |
| uint64_t inc; |
| HOST_WIDE_INT start_sub, end_sub, end1, end2, end; |
| struct symbolic_number *toinc_n_ptr, *n_end; |
| basic_block bb1, bb2; |
| |
| if (!n1->base_addr || !n2->base_addr |
| || !operand_equal_p (n1->base_addr, n2->base_addr, 0)) |
| return NULL; |
| |
| if (!n1->offset != !n2->offset |
| || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0))) |
| return NULL; |
| |
| if (n1->bytepos < n2->bytepos) |
| { |
| n_start = n1; |
| start_sub = n2->bytepos - n1->bytepos; |
| } |
| else |
| { |
| n_start = n2; |
| start_sub = n1->bytepos - n2->bytepos; |
| } |
| |
| bb1 = gimple_bb (source_stmt1); |
| bb2 = gimple_bb (source_stmt2); |
| if (dominated_by_p (CDI_DOMINATORS, bb1, bb2)) |
| source_stmt = source_stmt1; |
| else |
| source_stmt = source_stmt2; |
| |
| /* Find the highest address at which a load is performed and |
| compute related info. */ |
| end1 = n1->bytepos + (n1->range - 1); |
| end2 = n2->bytepos + (n2->range - 1); |
| if (end1 < end2) |
| { |
| end = end2; |
| end_sub = end2 - end1; |
| } |
| else |
| { |
| end = end1; |
| end_sub = end1 - end2; |
| } |
| n_end = (end2 > end1) ? n2 : n1; |
| |
| /* Find symbolic number whose lsb is the most significant. */ |
| if (BYTES_BIG_ENDIAN) |
| toinc_n_ptr = (n_end == n1) ? n2 : n1; |
| else |
| toinc_n_ptr = (n_start == n1) ? n2 : n1; |
| |
| n->range = end - n_start->bytepos + 1; |
| |
| /* Check that the range of memory covered can be represented by |
| a symbolic number. */ |
| if (n->range > 64 / BITS_PER_MARKER) |
| return NULL; |
| |
| /* Reinterpret the byte marks in the symbolic number holding the value of |
| higher weight, according to the target endianness. */ |
| inc = BYTES_BIG_ENDIAN ? end_sub : start_sub; |
| size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT; |
| for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER) |
| { |
| unsigned marker |
| = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK; |
| if (marker && marker != MARKER_BYTE_UNKNOWN) |
| toinc_n_ptr->n += inc; |
| } |
| } |
| else |
| { |
| n->range = n1->range; |
| n_start = n1; |
| source_stmt = source_stmt1; |
| } |
| |
| if (!n1->alias_set |
| || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set)) |
| n->alias_set = n1->alias_set; |
| else |
| n->alias_set = ptr_type_node; |
| n->vuse = n_start->vuse; |
| n->base_addr = n_start->base_addr; |
| n->offset = n_start->offset; |
| n->src = n_start->src; |
| n->bytepos = n_start->bytepos; |
| n->type = n_start->type; |
| size = TYPE_PRECISION (n->type) / BITS_PER_UNIT; |
| |
| for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER) |
| { |
| uint64_t masked1, masked2; |
| |
| masked1 = n1->n & mask; |
| masked2 = n2->n & mask; |
| if (masked1 && masked2 && masked1 != masked2) |
| return NULL; |
| } |
| n->n = n1->n | n2->n; |
| |
| return source_stmt; |
| } |
| |
| /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform |
| the operation given by the rhs of STMT on the result. If the operation |
| succeeds, the function returns a gimple stmt whose rhs's first tree is the |
| expression of the source operand, and NULL otherwise. */ |
| |
| static gimple * |
| find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit) |
| { |
| enum tree_code code; |
| tree rhs1, rhs2 = NULL; |
| gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1; |
| enum gimple_rhs_class rhs_class; |
| |
| if (!limit || !is_gimple_assign (stmt)) |
| return NULL; |
| |
| rhs1 = gimple_assign_rhs1 (stmt); |
| |
| if (find_bswap_or_nop_load (stmt, rhs1, n)) |
| return stmt; |
| |
| /* Handle BIT_FIELD_REF. */ |
| if (TREE_CODE (rhs1) == BIT_FIELD_REF |
| && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME) |
| { |
| unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1)); |
| unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2)); |
| if (bitpos % BITS_PER_UNIT == 0 |
| && bitsize % BITS_PER_UNIT == 0 |
| && init_symbolic_number (n, TREE_OPERAND (rhs1, 0))) |
| { |
| /* Handle big-endian bit numbering in BIT_FIELD_REF. */ |
| if (BYTES_BIG_ENDIAN) |
| bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize; |
| |
| /* Shift. */ |
| if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos)) |
| return NULL; |
| |
| /* Mask. */ |
| uint64_t mask = 0; |
| uint64_t tmp = (1 << BITS_PER_UNIT) - 1; |
| for (unsigned i = 0; i < bitsize / BITS_PER_UNIT; |
| i++, tmp <<= BITS_PER_UNIT) |
| mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER); |
| n->n &= mask; |
| |
| /* Convert. */ |
| n->type = TREE_TYPE (rhs1); |
| if (!n->base_addr) |
| n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT; |
| |
| return verify_symbolic_number_p (n, stmt) ? stmt : NULL; |
| } |
| |
| return NULL; |
| } |
| |
| if (TREE_CODE (rhs1) != SSA_NAME) |
| return NULL; |
| |
| code = gimple_assign_rhs_code (stmt); |
| rhs_class = gimple_assign_rhs_class (stmt); |
| rhs1_stmt = SSA_NAME_DEF_STMT (rhs1); |
| |
| if (rhs_class == GIMPLE_BINARY_RHS) |
| rhs2 = gimple_assign_rhs2 (stmt); |
| |
| /* Handle unary rhs and binary rhs with integer constants as second |
| operand. */ |
| |
| if (rhs_class == GIMPLE_UNARY_RHS |
| || (rhs_class == GIMPLE_BINARY_RHS |
| && TREE_CODE (rhs2) == INTEGER_CST)) |
| { |
| if (code != BIT_AND_EXPR |
| && code != LSHIFT_EXPR |
| && code != RSHIFT_EXPR |
| && code != LROTATE_EXPR |
| && code != RROTATE_EXPR |
| && !CONVERT_EXPR_CODE_P (code)) |
| return NULL; |
| |
| source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1); |
| |
| /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and |
| we have to initialize the symbolic number. */ |
| if (!source_stmt1) |
| { |
| if (gimple_assign_load_p (stmt) |
| || !init_symbolic_number (n, rhs1)) |
| return NULL; |
| source_stmt1 = stmt; |
| } |
| |
| switch (code) |
| { |
| case BIT_AND_EXPR: |
| { |
| int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT; |
| uint64_t val = int_cst_value (rhs2), mask = 0; |
| uint64_t tmp = (1 << BITS_PER_UNIT) - 1; |
| |
| /* Only constants masking full bytes are allowed. */ |
| for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT) |
| if ((val & tmp) != 0 && (val & tmp) != tmp) |
| return NULL; |
| else if (val & tmp) |
| mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER); |
| |
| n->n &= mask; |
| } |
| break; |
| case LSHIFT_EXPR: |
| case RSHIFT_EXPR: |
| case LROTATE_EXPR: |
| case RROTATE_EXPR: |
| if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2))) |
| return NULL; |
| break; |
| CASE_CONVERT: |
| { |
| int i, type_size, old_type_size; |
| tree type; |
| |
| type = gimple_expr_type (stmt); |
| type_size = TYPE_PRECISION (type); |
| if (type_size % BITS_PER_UNIT != 0) |
| return NULL; |
| type_size /= BITS_PER_UNIT; |
| if (type_size > 64 / BITS_PER_MARKER) |
| return NULL; |
| |
| /* Sign extension: result is dependent on the value. */ |
| old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT; |
| if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size |
| && HEAD_MARKER (n->n, old_type_size)) |
| for (i = 0; i < type_size - old_type_size; i++) |
| n->n |= (uint64_t) MARKER_BYTE_UNKNOWN |
| << ((type_size - 1 - i) * BITS_PER_MARKER); |
| |
| if (type_size < 64 / BITS_PER_MARKER) |
| { |
| /* If STMT casts to a smaller type, mask out the bits not |
| belonging to the target type. */ |
| n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1; |
| } |
| n->type = type; |
| if (!n->base_addr) |
| n->range = type_size; |
| } |
| break; |
| default: |
| return NULL; |
| } |
| return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL; |
| } |
| |
| /* Handle binary rhs. */ |
| |
| if (rhs_class == GIMPLE_BINARY_RHS) |
| { |
| struct symbolic_number n1, n2; |
| gimple *source_stmt, *source_stmt2; |
| |
| if (code != BIT_IOR_EXPR) |
| return NULL; |
| |
| if (TREE_CODE (rhs2) != SSA_NAME) |
| return NULL; |
| |
| rhs2_stmt = SSA_NAME_DEF_STMT (rhs2); |
| |
| switch (code) |
| { |
| case BIT_IOR_EXPR: |
| source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1); |
| |
| if (!source_stmt1) |
| return NULL; |
| |
| source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1); |
| |
| if (!source_stmt2) |
| return NULL; |
| |
| if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type)) |
| return NULL; |
| |
| if (!n1.vuse != !n2.vuse |
| || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0))) |
| return NULL; |
| |
| source_stmt |
| = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n); |
| |
| if (!source_stmt) |
| return NULL; |
| |
| if (!verify_symbolic_number_p (n, stmt)) |
| return NULL; |
| |
| break; |
| default: |
| return NULL; |
| } |
| return source_stmt; |
| } |
| return NULL; |
| } |
| |
| /* Check if STMT completes a bswap implementation or a read in a given |
| endianness consisting of ORs, SHIFTs and ANDs, and set *BSWAP |
| accordingly. It also sets N to represent the kind of operations |
| performed: size of the resulting expression and whether it works on |
| a memory source, and if so alias-set and vuse. Finally, the |
| function returns a stmt whose rhs's first tree is the source |
| expression. */ |
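|  |
| /* For example (a sketch, not from the original comment), for a 32-bit |
| unsigned x the classic manual swap |
|  |
| (x >> 24) | ((x >> 8) & 0x0000ff00) |
| | ((x << 8) & 0x00ff0000) | (x << 24) |
|  |
| ends up with a symbolic number of 0x01020304, which matches CMPXCHG, so |
| *BSWAP is set to true; an expression that merely reassembles x unchanged |
| would instead match CMPNOP and set *BSWAP to false. */ |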
| |
| static gimple * |
| find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap) |
| { |
| unsigned rsize; |
| uint64_t tmpn, mask; |
| /* The number which the find_bswap_or_nop_1 result should match in order |
| to have a full byte swap. The number is shifted to the right |
| according to the size of the symbolic number before using it. */ |
| uint64_t cmpxchg = CMPXCHG; |
| uint64_t cmpnop = CMPNOP; |
| |
| gimple *ins_stmt; |
| int limit; |
| |
| /* The last parameter determines the search depth limit. It usually |
| correlates directly to the number n of bytes to be touched. We |
| increase that number by log2(n) + 1 here in order to also |
| cover signed -> unsigned conversions of the src operand as can be seen |
| in libgcc, and for initial shift/and operation of the src operand. */ |
| limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt))); |
| limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit); |
| ins_stmt = find_bswap_or_nop_1 (stmt, n, limit); |
| |
| if (!ins_stmt) |
| return NULL; |
| |
| /* Find real size of result (highest non-zero byte). */ |
| if (n->base_addr) |
| for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++); |
| else |
| rsize = n->range; |
| |
| /* Zero out the bits corresponding to untouched bytes in original gimple |
| expression. */ |
| if (n->range < (int) sizeof (int64_t)) |
| { |
| mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1; |
| cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER; |
| cmpnop &= mask; |
| } |
| |
| /* Zero out the bits corresponding to unused bytes in the result of the |
| gimple expression. */ |
| if (rsize < n->range) |
| { |
| if (BYTES_BIG_ENDIAN) |
| { |
| mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1; |
| cmpxchg &= mask; |
| cmpnop >>= (n->range - rsize) * BITS_PER_MARKER; |
| } |
| else |
| { |
| mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1; |
| cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER; |
| cmpnop &= mask; |
| } |
| n->range = rsize; |
| } |
| |
| /* A complete byte swap should make the symbolic number start with |
| the largest digit in the highest order byte. An unchanged symbolic |
| number indicates a read with the same endianness as the target. */ |
| if (n->n == cmpnop) |
| *bswap = false; |
| else if (n->n == cmpxchg) |
| *bswap = true; |
| else |
| return NULL; |
| |
| /* Useless bit manipulation performed by code. */ |
| if (!n->base_addr && n->n == cmpnop) |
| return NULL; |
| |
| n->range *= BITS_PER_UNIT; |
| return ins_stmt; |
| } |
| |
| namespace { |
| |
| const pass_data pass_data_optimize_bswap = |
| { |
| GIMPLE_PASS, /* type */ |
| "bswap", /* name */ |
| OPTGROUP_NONE, /* optinfo_flags */ |
| TV_NONE, /* tv_id */ |
| PROP_ssa, /* properties_required */ |
| 0, /* properties_provided */ |
| 0, /* properties_destroyed */ |
| 0, /* todo_flags_start */ |
| 0, /* todo_flags_finish */ |
| }; |
| |
| class pass_optimize_bswap : public gimple_opt_pass |
| { |
| public: |
| pass_optimize_bswap (gcc::context *ctxt) |
| : gimple_opt_pass (pass_data_optimize_bswap, ctxt) |
| {} |
| |
| /* opt_pass methods: */ |
| virtual bool gate (function *) |
| { |
| return flag_expensive_optimizations && optimize; |
| } |
| |
| virtual unsigned int execute (function *); |
| |
| }; // class pass_optimize_bswap |
| |
| /* Perform the bswap optimization: replace the expression computed in the rhs |
| of CUR_STMT by an equivalent bswap, load or load + bswap expression. |
| Which of these alternatives replaces the rhs is given by N->base_addr (non |
| null if a load is needed) and BSWAP. The type, VUSE and alias set of the |
| load to perform are also given in N while the builtin bswap to invoke is |
| given by FNDECL. Finally, if a load is involved, INS_STMT refers to one of |
| the load statements used to construct the rhs in CUR_STMT and N->range gives |
| the size of the rhs expression for maintaining some statistics. |
|  |
| Note that if the replacement involves a load, CUR_STMT is moved just after |
| INS_STMT to do the load with the same VUSE which can lead to CUR_STMT |
| changing basic block. */ |
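|  |
| /* A sketch of the intended effect (hypothetical GIMPLE, little-endian |
| target; the temporary names are made up): a rhs reassembling a |
| big-endian 32-bit value from four byte loads becomes roughly |
|  |
| load_dst_1 = MEM[(uint32_t *) &b]; |
| x_2 = __builtin_bswap32 (load_dst_1); |
|  |
| while the same value read in native byte order is rewritten as a plain |
| 32-bit load. */ |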
| |
| static bool |
| bswap_replace (gimple *cur_stmt, gimple *ins_stmt, tree fndecl, |
| tree bswap_type, tree load_type, struct symbolic_number *n, |
| bool bswap) |
| { |
| gimple_stmt_iterator gsi; |
| tree src, tmp, tgt; |
| gimple *bswap_stmt; |
| |
| gsi = gsi_for_stmt (cur_stmt); |
| src = n->src; |
| tgt = gimple_assign_lhs (cur_stmt); |
| |
| /* Need to load the value from memory first. */ |
| if (n->base_addr) |
| { |
| gimple_stmt_iterator gsi_ins = gsi_for_stmt (ins_stmt); |
| tree addr_expr, addr_tmp, val_expr, val_tmp; |
| tree load_offset_ptr, aligned_load_type; |
| gimple *addr_stmt, *load_stmt; |
| unsigned align; |
| HOST_WIDE_INT load_offset = 0; |
| basic_block ins_bb, cur_bb; |
| |
| ins_bb = gimple_bb (ins_stmt); |
| cur_bb = gimple_bb (cur_stmt); |
| if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb)) |
| return false; |
| |
| align = get_object_alignment (src); |
| |
| /* Move cur_stmt just before one of the loads of the original |
| expression to ensure it has the same VUSE. See PR61517 for what |
| could go wrong. */ |
| if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt)) |
| reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt)); |
| gsi_move_before (&gsi, &gsi_ins); |
| gsi = gsi_for_stmt (cur_stmt); |
| |
| /* Compute address to load from and cast according to the size |
| of the load. */ |
| addr_expr = build_fold_addr_expr (unshare_expr (src)); |
| if (is_gimple_mem_ref_addr (addr_expr)) |
| addr_tmp = addr_expr; |
| else |
| { |
| addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL, |
| "load_src"); |
| addr_stmt = gimple_build_assign (addr_tmp, addr_expr); |
| gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT); |
| } |
| |
| /* Perform the load. */ |
| aligned_load_type = load_type; |
| if (align < TYPE_ALIGN (load_type)) |
| aligned_load_type = build_aligned_type (load_type, align); |
| load_offset_ptr = build_int_cst (n->alias_set, load_offset); |
| val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp, |
| load_offset_ptr); |
| |
| if (!bswap) |
| { |
| if (n->range == 16) |
| nop_stats.found_16bit++; |
| else if (n->range == 32) |
| nop_stats.found_32bit++; |
| else |
| { |
| gcc_assert (n->range == 64); |
| nop_stats.found_64bit++; |
| } |
| |
| /* Convert the result of load if necessary. */ |
| if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type)) |
| { |
| val_tmp = make_temp_ssa_name (aligned_load_type, NULL, |
| "load_dst"); |
| load_stmt = gimple_build_assign (val_tmp, val_expr); |
| gimple_set_vuse (load_stmt, n->vuse); |
| gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT); |
| gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp); |
| } |
| else |
| { |
| gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr); |
| gimple_set_vuse (cur_stmt, n->vuse); |
| } |
| update_stmt (cur_stmt); |
| |
| if (dump_file) |
| { |
| fprintf (dump_file, |
| "%d bit load in target endianness found at: ", |
| (int) n->range); |
| print_gimple_stmt (dump_file, cur_stmt, 0, 0); |
| } |
| return true; |
| } |
| else |
| { |
| val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst"); |
| load_stmt = gimple_build_assign (val_tmp, val_expr); |
| gimple_set_vuse (load_stmt, n->vuse); |
| gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT); |
| } |
| src = val_tmp; |
| } |
| else if (TREE_CODE (src) == BIT_FIELD_REF) |
| src = TREE_OPERAND (src, 0); |
| |
| if (n->range == 16) |
| bswap_stats.found_16bit++; |
| else if (n->range == 32) |
| bswap_stats.found_32bit++; |
| else |
| { |
| gcc_assert (n->range == 64); |
| bswap_stats.found_64bit++; |
| } |
| |
| tmp = src; |
| |
| /* Convert the src expression if necessary. */ |
| if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type)) |
| { |
| gimple *convert_stmt; |
| |
| tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc"); |
| convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src); |
| gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT); |
| } |
| |
| /* Canonical form for 16 bit bswap is a rotate expression. Only 16 bit values |
| are handled this way, since rotating a 2N bit value by N bits is generally |
| not equivalent to a bswap. Consider for instance 0x01020304 r>> 16 which |
| gives 0x03040102 while a bswap for that value is 0x04030201. */ |
| if (bswap && n->range == 16) |
| { |
| tree count = build_int_cst (NULL, BITS_PER_UNIT); |
| src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count); |
| bswap_stmt = gimple_build_assign (NULL, src); |
| } |
| else |
| bswap_stmt = gimple_build_call (fndecl, 1, tmp); |
| |
| tmp = tgt; |
| |
| /* Convert the result if necessary. */ |
| if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type)) |
| { |
| gimple *convert_stmt; |
| |
| tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst"); |
| convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp); |
| gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT); |
| } |
| |
| gimple_set_lhs (bswap_stmt, tmp); |
| |
| if (dump_file) |
| { |
| fprintf (dump_file, "%d bit bswap implementation found at: ", |
| (int) n->range); |
| print_gimple_stmt (dump_file, cur_stmt, 0, 0); |
| } |
| |
| gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT); |
| gsi_remove (&gsi, true); |
| return true; |
| } |
| |
| /* Find manual byte swap implementations as well as loads in a given |
| endianness. Byte swaps are turned into a bswap builtin invocation |
| while endian loads are converted to a bswap builtin invocation or a |
| simple load according to the target endianness. */ |
| |
| unsigned int |
| pass_optimize_bswap::execute (function *fun) |
| { |
| basic_block bb; |
| bool bswap32_p, bswap64_p; |
| bool changed = false; |
| tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE; |
| |
| if (BITS_PER_UNIT != 8) |
| return 0; |
| |
| bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32) |
| && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing); |
| bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64) |
| && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing |
| || (bswap32_p && word_mode == SImode))); |
| |
| /* Determine the argument type of the builtins. The code later on |
| assumes that the return and argument type are the same. */ |
| if (bswap32_p) |
| { |
| tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32); |
| bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl))); |
| } |
| |
| if (bswap64_p) |
| { |
| tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64); |
| bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl))); |
| } |
| |
| memset (&nop_stats, 0, sizeof (nop_stats)); |
| memset (&bswap_stats, 0, sizeof (bswap_stats)); |
| calculate_dominance_info (CDI_DOMINATORS); |
| |
| FOR_EACH_BB_FN (bb, fun) |
| { |
| gimple_stmt_iterator gsi; |
| |
| /* We do a reverse scan for bswap patterns to make sure we get the |
| widest match. As bswap pattern matching doesn't handle previously |
| inserted smaller bswap replacements as sub-patterns, the wider |
| variant wouldn't be detected. */ |
| for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);) |
| { |
| gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi); |
| tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type; |
| enum tree_code code; |
| struct symbolic_number n; |
| bool bswap; |
| |
| /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt |
| might be moved to a different basic block by bswap_replace and gsi |
| must not point to it in that case. Doing the gsi_prev here makes |
| sure that gsi points to the statement previous to cur_stmt while |
| still ensuring that all statements in this basic block are |
| considered. */ |
| gsi_prev (&gsi); |
| |
| if (!is_gimple_assign (cur_stmt)) |
| continue; |
| |
| code = gimple_assign_rhs_code (cur_stmt); |
| switch (code) |
| { |
| case LROTATE_EXPR: |
| case RROTATE_EXPR: |
| if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt)) |
| || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt)) |
| % BITS_PER_UNIT) |
| continue; |
| /* Fall through. */ |
| case BIT_IOR_EXPR: |
| break; |
| default: |
| continue; |
| } |
| |
| ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap); |
| |
| if (!ins_stmt) |
| continue; |
| |
| switch (n.range) |
| { |
| case 16: |
| /* Already in canonical form, nothing to do. */ |
| if (code == LROTATE_EXPR || code == RROTATE_EXPR) |
| continue; |
| load_type = bswap_type = uint16_type_node; |
| break; |
| case 32: |
| load_type = uint32_type_node; |
| if (bswap32_p) |
| { |
| fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32); |
| bswap_type = bswap32_type; |
| } |
| break; |
| case 64: |
| load_type = uint64_type_node; |
| if (bswap64_p) |
| { |
| fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64); |
| bswap_type = bswap64_type; |
| } |
| break; |
| default: |
| continue; |
| } |
| |
| if (bswap && !fndecl && n.range != 16) |
| continue; |
| |
| if (bswap_replace (cur_stmt, ins_stmt, fndecl, bswap_type, load_type, |
| &n, bswap)) |
| changed = true; |
| } |
| } |
| |
| statistics_counter_event (fun, "16-bit nop implementations found", |
| nop_stats.found_16bit); |
| statistics_counter_event (fun, "32-bit nop implementations found", |
| nop_stats.found_32bit); |
| statistics_counter_event (fun, "64-bit nop implementations found", |
| nop_stats.found_64bit); |
| statistics_counter_event (fun, "16-bit bswap implementations found", |
| bswap_stats.found_16bit); |
| statistics_counter_event (fun, "32-bit bswap implementations found", |
| bswap_stats.found_32bit); |
| statistics_counter_event (fun, "64-bit bswap implementations found", |
| bswap_stats.found_64bit); |
| |
| return (changed ? TODO_update_ssa : 0); |
| } |
| |
| } // anon namespace |
| |
| gimple_opt_pass * |
| make_pass_optimize_bswap (gcc::context *ctxt) |
| { |
| return new pass_optimize_bswap (ctxt); |
| } |
| |
| /* Return true if STMT is a type conversion operation that can be stripped |
| when used in a widening multiply operation, assuming the multiplication |
| has result type RESULT_TYPE. */ |
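|  |
| /* For example (illustrative only): if the result type is long long and |
| STMT converts an int operand to long long, the conversion can be |
| stripped because the widening multiply itself recreates the extension; |
| an intermediate (int) conversion of an unsigned short operand can also |
| be stripped, since zero extension commutes with further widening. */ |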
| static bool |
| widening_mult_conversion_strippable_p (tree result_type, gimple *stmt) |
| { |
| enum tree_code rhs_code = gimple_assign_rhs_code (stmt); |
| |
| if (TREE_CODE (result_type) == INTEGER_TYPE) |
| { |
| tree op_type; |
| tree inner_op_type; |
| |
| if (!CONVERT_EXPR_CODE_P (rhs_code)) |
| return false; |
| |
| op_type = TREE_TYPE (gimple_assign_lhs (stmt)); |
| |
| /* If the type of OP has the same precision as the result, then |
| we can strip this conversion. The multiply operation will be |
| selected to create the correct extension as a by-product. */ |
| if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type)) |
| return true; |
| |
| /* We can also strip a conversion if it preserves the signed-ness of |
| the operation and doesn't narrow the range. */ |
| inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); |
| |
| /* If the inner-most type is unsigned, then we can strip any |
| intermediate widening operation. If it's signed, then the |
| intermediate widening operation must also be signed. */ |
| if ((TYPE_UNSIGNED (inner_op_type) |
| || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type)) |
| && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type)) |
| return true; |
| |
| return false; |
| } |
| |
| return rhs_code == FIXED_CONVERT_EXPR; |
| } |
| |
| /* Return true if RHS is a suitable operand for a widening multiplication, |
| assuming a target type of TYPE. |
| There are two cases: |
| |
| - RHS makes some value at least twice as wide. Store that value |
| in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT. |
| |
| - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so, |
| but leave *TYPE_OUT untouched. */ |
| |
| static bool |
| is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out, |
| tree *new_rhs_out) |
| { |
| gimple *stmt; |
| tree type1, rhs1; |
| |
| if (TREE_CODE (rhs) == SSA_NAME) |
| { |
| stmt = SSA_NAME_DEF_STMT (rhs); |
| if (is_gimple_assign (stmt)) |
| { |
| if (! widening_mult_conversion_strippable_p (type, stmt)) |
| rhs1 = rhs; |
| else |
| { |
| rhs1 = gimple_assign_rhs1 (stmt); |
| |
| if (TREE_CODE (rhs1) == INTEGER_CST) |
| { |
| *new_rhs_out = rhs1; |
| *type_out = NULL; |
| return true; |
| } |
| } |
| } |
| else |
| rhs1 = rhs; |
| |
| type1 = TREE_TYPE (rhs1); |
| |
| if (TREE_CODE (type1) != TREE_CODE (type) |
| || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type)) |
| return false; |
| |
| *new_rhs_out = rhs1; |
| *type_out = type1; |
| return true; |
| } |
| |
| if (TREE_CODE (rhs) == INTEGER_CST) |
| { |
| *new_rhs_out = rhs; |
| *type_out = NULL; |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Return true if STMT performs a widening multiplication, assuming the |
| output type is TYPE. If so, store the unwidened types of the operands |
| in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and |
| *RHS2_OUT such that converting those operands to types *TYPE1_OUT |
| and *TYPE2_OUT would give the operands of the multiplication. */ |
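|  |
| /* For instance (illustrative only), for int a, b feeding |
| long long c = (long long) a * (long long) b; |
| this returns true with *TYPE1_OUT == *TYPE2_OUT == int and *RHS1_OUT, |
| *RHS2_OUT naming a and b, so the multiply can become a WIDEN_MULT_EXPR. */ |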
| |
| static bool |
| is_widening_mult_p (gimple *stmt, |
| tree *type1_out, tree *rhs1_out, |
| tree *type2_out, tree *rhs2_out) |
| { |
| tree type = TREE_TYPE (gimple_assign_lhs (stmt)); |
| |
| if (TREE_CODE (type) == INTEGER_TYPE) |
| { |
| if (TYPE_OVERFLOW_TRAPS (type)) |
| return false; |
| } |
| else if (TREE_CODE (type) != FIXED_POINT_TYPE) |
| return false; |
| |
| if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out, |
| rhs1_out)) |
| return false; |
| |
| if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out, |
| rhs2_out)) |
| return false; |
| |
| if (*type1_out == NULL) |
| { |
| if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out)) |
| return false; |
| *type1_out = *type2_out; |
| } |
| |
| if (*type2_out == NULL) |
| { |
| if (!int_fits_type_p (*rhs2_out, *type1_out)) |
| return false; |
| *type2_out = *type1_out; |
| } |
| |
| /* Ensure that the larger of the two operands comes first. */ |
| if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out)) |
| { |
| std::swap (*type1_out, *type2_out); |
| std::swap (*rhs1_out, *rhs2_out); |
| } |
| |
| return true; |
| } |
| |
| /* Process a single gimple statement STMT, which has a MULT_EXPR as |
| its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return |
| value is true iff we converted the statement. */ |
| |
| static bool |
| convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi) |
| { |
| tree lhs, rhs1, rhs2, type, type1, type2; |
| enum insn_code handler; |
| machine_mode to_mode, from_mode, actual_mode; |
| optab op; |
| int actual_precision; |
| location_t loc = gimple_location (stmt); |
| bool from_unsigned1, from_unsigned2; |
| |
| lhs = gimple_assign_lhs (stmt); |
| type = TREE_TYPE (lhs); |
| if (TREE_CODE (type) != INTEGER_TYPE) |
| return false; |
| |
| if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2)) |
| return false; |
| |
| to_mode = TYPE_MODE (type); |
| from_mode = |