| /* Loop Vectorization |
| Copyright (C) 2003-2015 Free Software Foundation, Inc. |
| Contributed by Dorit Naishlos <dorit@il.ibm.com> and |
| Ira Rosen <irar@il.ibm.com> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "dumpfile.h" |
| #include "tm.h" |
| #include "hash-set.h" |
| #include "machmode.h" |
| #include "vec.h" |
| #include "double-int.h" |
| #include "input.h" |
| #include "alias.h" |
| #include "symtab.h" |
| #include "wide-int.h" |
| #include "inchash.h" |
| #include "tree.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "predict.h" |
| #include "hard-reg-set.h" |
| #include "function.h" |
| #include "dominance.h" |
| #include "cfg.h" |
| #include "cfganal.h" |
| #include "basic-block.h" |
| #include "gimple-pretty-print.h" |
| #include "tree-ssa-alias.h" |
| #include "internal-fn.h" |
| #include "gimple-expr.h" |
| #include "is-a.h" |
| #include "gimple.h" |
| #include "gimplify.h" |
| #include "gimple-iterator.h" |
| #include "gimplify-me.h" |
| #include "gimple-ssa.h" |
| #include "tree-phinodes.h" |
| #include "ssa-iterators.h" |
| #include "stringpool.h" |
| #include "tree-ssanames.h" |
| #include "tree-ssa-loop-ivopts.h" |
| #include "tree-ssa-loop-manip.h" |
| #include "tree-ssa-loop-niter.h" |
| #include "tree-pass.h" |
| #include "cfgloop.h" |
| #include "hashtab.h" |
| #include "rtl.h" |
| #include "flags.h" |
| #include "statistics.h" |
| #include "real.h" |
| #include "fixed-value.h" |
| #include "insn-config.h" |
| #include "expmed.h" |
| #include "dojump.h" |
| #include "explow.h" |
| #include "calls.h" |
| #include "emit-rtl.h" |
| #include "varasm.h" |
| #include "stmt.h" |
| #include "expr.h" |
| #include "recog.h" |
| #include "insn-codes.h" |
| #include "optabs.h" |
| #include "params.h" |
| #include "diagnostic-core.h" |
| #include "tree-chrec.h" |
| #include "tree-scalar-evolution.h" |
| #include "tree-vectorizer.h" |
| #include "target.h" |
| |
| /* Loop Vectorization Pass. |
| |
| This pass tries to vectorize loops. |
| |
| For example, the vectorizer transforms the following simple loop: |
| |
| short a[N]; short b[N]; short c[N]; int i; |
| |
| for (i=0; i<N; i++){ |
| a[i] = b[i] + c[i]; |
| } |
| |
| as if it were manually vectorized by rewriting the source code into: |
| |
| typedef int __attribute__((mode(V8HI))) v8hi; |
| short a[N]; short b[N]; short c[N]; int i; |
| v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c; |
| v8hi va, vb, vc; |
| |
| for (i=0; i<N/8; i++){ |
| vb = pb[i]; |
| vc = pc[i]; |
| va = vb + vc; |
| pa[i] = va; |
| } |
| |
| The main entry to this pass is vectorize_loops(), in which |
| the vectorizer applies a set of analyses to a given set of loops, |
| followed by the actual vectorization transformation for the loops that |
| have successfully passed the analysis phase. |
| Throughout this pass we make a distinction between two types of |
| data: scalars (which are represented by SSA_NAMEs), and memory references |
| ("data-refs"). These two types of data require different handling both |
| during analysis and transformation. The types of data-refs that the |
| vectorizer currently supports are ARRAY_REFs whose base is an array DECL |
| (not a pointer), and INDIRECT_REFs through pointers; both array and pointer |
| accesses are required to have a simple (consecutive) access pattern. |
| |
| Analysis phase: |
| =============== |
| The driver for the analysis phase is vect_analyze_loop(). |
| It applies a set of analyses, some of which rely on the scalar evolution |
| analyzer (scev) developed by Sebastian Pop. |
| |
| During the analysis phase the vectorizer records some information |
| per stmt in a "stmt_vec_info" struct which is attached to each stmt in the |
| loop, as well as general information about the loop as a whole, which is |
| recorded in a "loop_vec_info" struct attached to each loop. |
| |
| Transformation phase: |
| ===================== |
| The loop transformation phase scans all the stmts in the loop, and |
| creates a vector stmt (or a sequence of stmts) for each scalar stmt S in |
| the loop that needs to be vectorized. It inserts the vector code sequence |
| just before the scalar stmt S, and records a pointer to the vector code |
| in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct |
| attached to S). This pointer will be used for the vectorization of following |
| stmts which use the def of stmt S. Stmt S is removed if it writes to memory; |
| otherwise, we rely on dead code elimination for removing it. |
| |
| For example, say stmt S1 was vectorized into stmt VS1: |
| |
| VS1: vb = px[i]; |
| S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1 |
| S2: a = b; |
| |
| To vectorize stmt S2, the vectorizer first finds the stmt that defines |
| the operand 'b' (S1), and gets the relevant vector def 'vb' from the |
| vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The |
| resulting sequence would be: |
| |
| VS1: vb = px[i]; |
| S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1 |
| VS2: va = vb; |
| S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2 |
| |
| Operands that are not SSA_NAMEs are data-refs that appear in |
| load/store operations (like 'x[i]' in S1), and are handled differently. |
| |
| Target modeling: |
| ================= |
| Currently the only target-specific information that is used is the |
| size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". |
| Targets that support multiple vector sizes will, for now, need to |
| specify a single value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More |
| flexibility will be added in the future. |
| |
| Since we only vectorize operations whose vector form can be |
| expressed using existing tree codes, to verify that an operation is |
| supported, the vectorizer checks the relevant optab at the relevant |
| machine_mode (e.g., optab_handler (add_optab, V8HImode)). If |
| the value found is CODE_FOR_nothing, then there's no target support, and |
| we can't vectorize the stmt. |
| |
| For additional information on this project see: |
| http://gcc.gnu.org/projects/tree-ssa/vectorization.html |
| */ |
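| |
| /* As an illustration of the optab check described above, here is a |
| minimal sketch only; the real checks live in the various vectorizable_* |
| routines. "vectype" stands for some vector type such as the V8HI type |
| above, and optab_for_tree_code / unknown_optab are used here purely for |
| illustration: |
| |
| optab op = optab_for_tree_code (PLUS_EXPR, vectype, optab_default); |
| if (op == unknown_optab |
| || optab_handler (op, TYPE_MODE (vectype)) == CODE_FOR_nothing) |
| return false; - no target support, the stmt cannot be vectorized |
| */ |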
| |
| static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *); |
| |
| /* Function vect_determine_vectorization_factor |
| |
| Determine the vectorization factor (VF). VF is the number of data elements |
| that are operated upon in parallel in a single iteration of the vectorized |
| loop. For example, when vectorizing a loop that operates on 4-byte elements, |
| on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4 |
| elements can fit in a single vector register. |
| |
| We currently support vectorization of loops in which all types operated upon |
| are of the same size. Therefore this function currently sets VF according to |
| the size of the types operated upon, and fails if there are multiple sizes |
| in the loop. |
| |
| VF is also the factor by which the loop iterations are strip-mined, e.g.: |
| original loop: |
| for (i=0; i<N; i++){ |
| a[i] = b[i] + c[i]; |
| } |
| |
| vectorized loop: |
| for (i=0; i<N; i+=VF){ |
| a[i:VF] = b[i:VF] + c[i:VF]; |
| } |
| */ |
| |
| static bool |
| vect_determine_vectorization_factor (loop_vec_info loop_vinfo) |
| { |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); |
| int nbbs = loop->num_nodes; |
| unsigned int vectorization_factor = 0; |
| tree scalar_type; |
| gphi *phi; |
| tree vectype; |
| unsigned int nunits; |
| stmt_vec_info stmt_info; |
| int i; |
| HOST_WIDE_INT dummy; |
| gimple stmt, pattern_stmt = NULL; |
| gimple_seq pattern_def_seq = NULL; |
| gimple_stmt_iterator pattern_def_si = gsi_none (); |
| bool analyze_pattern_stmt = false; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_determine_vectorization_factor ===\n"); |
| |
| for (i = 0; i < nbbs; i++) |
| { |
| basic_block bb = bbs[i]; |
| |
| for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); |
| gsi_next (&si)) |
| { |
| phi = si.phi (); |
| stmt_info = vinfo_for_stmt (phi); |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: "); |
| dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| gcc_assert (stmt_info); |
| |
| if (STMT_VINFO_RELEVANT_P (stmt_info)) |
| { |
| gcc_assert (!STMT_VINFO_VECTYPE (stmt_info)); |
| scalar_type = TREE_TYPE (PHI_RESULT (phi)); |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "get vectype for scalar type: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| vectype = get_vectype_for_scalar_type (scalar_type); |
| if (!vectype) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: unsupported " |
| "data-type "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| scalar_type); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| return false; |
| } |
| STMT_VINFO_VECTYPE (stmt_info) = vectype; |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| nunits = TYPE_VECTOR_SUBPARTS (vectype); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", |
| nunits); |
| |
| if (!vectorization_factor |
| || (nunits > vectorization_factor)) |
| vectorization_factor = nunits; |
| } |
| } |
| |
| for (gimple_stmt_iterator si = gsi_start_bb (bb); |
| !gsi_end_p (si) || analyze_pattern_stmt;) |
| { |
| tree vf_vectype; |
| |
| if (analyze_pattern_stmt) |
| stmt = pattern_stmt; |
| else |
| stmt = gsi_stmt (si); |
| |
| stmt_info = vinfo_for_stmt (stmt); |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "==> examining statement: "); |
| dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| gcc_assert (stmt_info); |
| |
| /* Skip stmts which do not need to be vectorized. */ |
| if ((!STMT_VINFO_RELEVANT_P (stmt_info) |
| && !STMT_VINFO_LIVE_P (stmt_info)) |
| || gimple_clobber_p (stmt)) |
| { |
| if (STMT_VINFO_IN_PATTERN_P (stmt_info) |
| && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) |
| && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) |
| || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) |
| { |
| stmt = pattern_stmt; |
| stmt_info = vinfo_for_stmt (pattern_stmt); |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "==> examining pattern statement: "); |
| dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| } |
| else |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "skip.\n"); |
| gsi_next (&si); |
| continue; |
| } |
| } |
| else if (STMT_VINFO_IN_PATTERN_P (stmt_info) |
| && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) |
| && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) |
| || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) |
| analyze_pattern_stmt = true; |
| |
| /* If a pattern statement has def stmts, analyze them too. */ |
| if (is_pattern_stmt_p (stmt_info)) |
| { |
| if (pattern_def_seq == NULL) |
| { |
| pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info); |
| pattern_def_si = gsi_start (pattern_def_seq); |
| } |
| else if (!gsi_end_p (pattern_def_si)) |
| gsi_next (&pattern_def_si); |
| if (pattern_def_seq != NULL) |
| { |
| gimple pattern_def_stmt = NULL; |
| stmt_vec_info pattern_def_stmt_info = NULL; |
| |
| while (!gsi_end_p (pattern_def_si)) |
| { |
| pattern_def_stmt = gsi_stmt (pattern_def_si); |
| pattern_def_stmt_info |
| = vinfo_for_stmt (pattern_def_stmt); |
| if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info) |
| || STMT_VINFO_LIVE_P (pattern_def_stmt_info)) |
| break; |
| gsi_next (&pattern_def_si); |
| } |
| |
| if (!gsi_end_p (pattern_def_si)) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "==> examining pattern def stmt: "); |
| dump_gimple_stmt (MSG_NOTE, TDF_SLIM, |
| pattern_def_stmt, 0); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| stmt = pattern_def_stmt; |
| stmt_info = pattern_def_stmt_info; |
| } |
| else |
| { |
| pattern_def_si = gsi_none (); |
| analyze_pattern_stmt = false; |
| } |
| } |
| else |
| analyze_pattern_stmt = false; |
| } |
| |
| if (gimple_get_lhs (stmt) == NULL_TREE |
| /* MASK_STORE has no lhs, but is ok. */ |
| && (!is_gimple_call (stmt) |
| || !gimple_call_internal_p (stmt) |
| || gimple_call_internal_fn (stmt) != IFN_MASK_STORE)) |
| { |
| if (is_gimple_call (stmt)) |
| { |
| /* Ignore calls with no lhs. These must be calls to |
| #pragma omp simd functions, and the vectorization factor |
| they really need can't be determined until |
| vectorizable_simd_clone_call. */ |
| if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si)) |
| { |
| pattern_def_seq = NULL; |
| gsi_next (&si); |
| } |
| continue; |
| } |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: irregular stmt."); |
| dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, |
| 0); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| return false; |
| } |
| |
| if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt)))) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: vector stmt in loop:"); |
| dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| return false; |
| } |
| |
| if (STMT_VINFO_VECTYPE (stmt_info)) |
| { |
| /* The only case in which a vectype has already been set is for stmts |
| that contain a dataref, or for "pattern-stmts" (stmts |
| generated by the vectorizer to represent/replace a certain |
| idiom). */ |
| gcc_assert (STMT_VINFO_DATA_REF (stmt_info) |
| || is_pattern_stmt_p (stmt_info) |
| || !gsi_end_p (pattern_def_si)); |
| vectype = STMT_VINFO_VECTYPE (stmt_info); |
| } |
| else |
| { |
| gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)); |
| if (is_gimple_call (stmt) |
| && gimple_call_internal_p (stmt) |
| && gimple_call_internal_fn (stmt) == IFN_MASK_STORE) |
| scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3)); |
| else |
| scalar_type = TREE_TYPE (gimple_get_lhs (stmt)); |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "get vectype for scalar type: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| vectype = get_vectype_for_scalar_type (scalar_type); |
| if (!vectype) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: unsupported " |
| "data-type "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| scalar_type); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| return false; |
| } |
| |
| STMT_VINFO_VECTYPE (stmt_info) = vectype; |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| } |
| |
| /* The vectorization factor is determined by the smallest |
| scalar type (or the largest vector size, but we only |
| support one vector size per loop). */ |
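| /* For instance (illustrative only): for a statement such as |
| short_b = (short) int_c (hypothetical variables), the smallest scalar |
| type is "short", so with 16-byte vectors the vf_vectype computed below |
| is V8HI and nunits is 8, even though the statement also involves a |
| 4-byte int. */ |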
| scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, |
| &dummy); |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "get vectype for scalar type: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| vf_vectype = get_vectype_for_scalar_type (scalar_type); |
| if (!vf_vectype) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: unsupported data-type "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| scalar_type); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| return false; |
| } |
| |
| if ((GET_MODE_SIZE (TYPE_MODE (vectype)) |
| != GET_MODE_SIZE (TYPE_MODE (vf_vectype)))) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: different sized vector " |
| "types in statement, "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| vectype); |
| dump_printf (MSG_MISSED_OPTIMIZATION, " and "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| vf_vectype); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| return false; |
| } |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| nunits = TYPE_VECTOR_SUBPARTS (vf_vectype); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits); |
| if (!vectorization_factor |
| || (nunits > vectorization_factor)) |
| vectorization_factor = nunits; |
| |
| if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si)) |
| { |
| pattern_def_seq = NULL; |
| gsi_next (&si); |
| } |
| } |
| } |
| |
| /* TODO: Analyze cost. Decide if it is worthwhile to vectorize. */ |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n", |
| vectorization_factor); |
| if (vectorization_factor <= 1) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: unsupported data-type\n"); |
| return false; |
| } |
| LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor; |
| |
| return true; |
| } |
| |
| |
| /* Function vect_is_simple_iv_evolution. |
| |
| FORNOW: A simple evolution of an induction variable in the loop is |
| considered a polynomial evolution of degree 1, i.e., one with a |
| loop-invariant step. */ |
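| |
| /* For illustration only, in the usual scalar-evolution (chrec) notation |
| rather than code from this file: in for (i = 0; i < n; i++) the access |
| function of the induction variable i in loop 1 is {0, +, 1}_1; its |
| initial condition is 0 and its evolution part (the step) is 1, so it is |
| "simple". A variable that accumulates i itself evolves as |
| {0, +, {0, +, 1}_1}_1; its evolution part is again a chrec (degree 2), |
| so it is rejected below. */ |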
| |
| static bool |
| vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init, |
| tree * step) |
| { |
| tree init_expr; |
| tree step_expr; |
| tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb); |
| basic_block bb; |
| |
| /* When there is no evolution in this loop, the evolution function |
| is not "simple". */ |
| if (evolution_part == NULL_TREE) |
| return false; |
| |
| /* When the evolution is a polynomial of degree >= 2 |
| the evolution function is not "simple". */ |
| if (tree_is_chrec (evolution_part)) |
| return false; |
| |
| step_expr = evolution_part; |
| init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb)); |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "step: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr); |
| dump_printf (MSG_NOTE, ", init: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| *init = init_expr; |
| *step = step_expr; |
| |
| if (TREE_CODE (step_expr) != INTEGER_CST |
| && (TREE_CODE (step_expr) != SSA_NAME |
| || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr))) |
| && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb)) |
| || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr)) |
| && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)) |
| || !flag_associative_math))) |
| && (TREE_CODE (step_expr) != REAL_CST |
| || !flag_associative_math)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "step unknown.\n"); |
| return false; |
| } |
| |
| return true; |
| } |
| |
| /* Function vect_analyze_scalar_cycles_1. |
| |
| Examine the cross-iteration def-use cycles of scalar variables |
| in LOOP. LOOP_VINFO represents the loop that is now being |
| considered for vectorization (it can be LOOP, or an outer-loop |
| enclosing LOOP). */ |
| |
| static void |
| vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop) |
| { |
| basic_block bb = loop->header; |
| tree init, step; |
| auto_vec<gimple, 64> worklist; |
| gphi_iterator gsi; |
| bool double_reduc; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_analyze_scalar_cycles ===\n"); |
| |
| /* First - identify all inductions. Reduction detection assumes that all the |
| inductions have been identified; therefore, this order must not be |
| changed. */ |
| for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gphi *phi = gsi.phi (); |
| tree access_fn = NULL; |
| tree def = PHI_RESULT (phi); |
| stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi); |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: "); |
| dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| /* Skip virtual PHIs. The data dependences that are associated with |
| virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */ |
| if (virtual_operand_p (def)) |
| continue; |
| |
| STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type; |
| |
| /* Analyze the evolution function. */ |
| access_fn = analyze_scalar_evolution (loop, def); |
| if (access_fn) |
| { |
| STRIP_NOPS (access_fn); |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Access function of PHI: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) |
| = evolution_part_in_loop_num (access_fn, loop->num); |
| } |
| |
| if (!access_fn |
| || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step) |
| || (LOOP_VINFO_LOOP (loop_vinfo) != loop |
| && TREE_CODE (step) != INTEGER_CST)) |
| { |
| worklist.safe_push (phi); |
| continue; |
| } |
| |
| gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n"); |
| STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def; |
| } |
| |
| |
| /* Second - identify all reductions and nested cycles. */ |
| while (worklist.length () > 0) |
| { |
| gimple phi = worklist.pop (); |
| tree def = PHI_RESULT (phi); |
| stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi); |
| gimple reduc_stmt; |
| bool nested_cycle; |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: "); |
| dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| gcc_assert (!virtual_operand_p (def) |
| && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type); |
| |
| nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo)); |
| reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle, |
| &double_reduc); |
| if (reduc_stmt) |
| { |
| if (double_reduc) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Detected double reduction.\n"); |
| |
| STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def; |
| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) = |
| vect_double_reduction_def; |
| } |
| else |
| { |
| if (nested_cycle) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Detected vectorizable nested cycle.\n"); |
| |
| STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle; |
| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) = |
| vect_nested_cycle; |
| } |
| else |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Detected reduction.\n"); |
| |
| STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def; |
| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) = |
| vect_reduction_def; |
| /* Store the reduction cycles for possible vectorization in |
| loop-aware SLP. */ |
| LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt); |
| } |
| } |
| } |
| else |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Unknown def-use cycle pattern.\n"); |
| } |
| } |
| |
| |
| /* Function vect_analyze_scalar_cycles. |
| |
| Examine the cross-iteration def-use cycles of scalar variables by |
| analyzing the loop-header PHIs of scalar variables. Classify each |
| cycle as one of the following: invariant, induction, reduction, unknown. |
| We do this for the loop represented by LOOP_VINFO, and also for its |
| inner loop, if one exists. |
| Examples for scalar cycles: |
| |
| Example1: reduction: |
| |
| loop1: |
| for (i=0; i<N; i++) |
| sum += a[i]; |
| |
| Example2: induction: |
| |
| loop2: |
| for (i=0; i<N; i++) |
| a[i] = i; */ |
| |
| static void |
| vect_analyze_scalar_cycles (loop_vec_info loop_vinfo) |
| { |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| vect_analyze_scalar_cycles_1 (loop_vinfo, loop); |
| |
| /* When vectorizing an outer loop, the inner loop is executed sequentially. |
| Reductions in such an inner loop therefore have different properties from |
| the reductions in the nest that gets vectorized: |
| 1. When vectorized, they are executed in the same order as in the original |
| scalar loop, so we can't change the order of computation when |
| vectorizing them. |
| 2. FIXME: Inner-loop reductions can be used in the inner loop, so the |
| current checks are too strict. */ |
| |
| if (loop->inner) |
| vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner); |
| } |
| |
| |
| /* Function vect_get_loop_niters. |
| |
| Determine how many times the loop is executed and place that count |
| in NUMBER_OF_ITERATIONS. Place the number of latch iterations |
| in NUMBER_OF_ITERATIONSM1. |
| |
| Return the loop exit condition. */ |
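| |
| /* For example (illustrative only): for a loop such as |
| for (i = 0; i < N; i++) with N > 0, the latch executes N-1 times, |
| so NUMBER_OF_ITERATIONSM1 is N-1 and NUMBER_OF_ITERATIONS, the number |
| of header executions, is N. */ |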
| |
| |
| static gcond * |
| vect_get_loop_niters (struct loop *loop, tree *number_of_iterations, |
| tree *number_of_iterationsm1) |
| { |
| tree niters; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== get_loop_niters ===\n"); |
| |
| niters = number_of_latch_executions (loop); |
| *number_of_iterationsm1 = niters; |
| |
| /* We want the number of loop header executions, which is the number |
| of latch executions plus one. |
| ??? For UINT_MAX latch executions this number overflows to zero |
| for loops like do { n++; } while (n != 0); */ |
| if (niters && !chrec_contains_undetermined (niters)) |
| niters = fold_build2 (PLUS_EXPR, TREE_TYPE (niters), unshare_expr (niters), |
| build_int_cst (TREE_TYPE (niters), 1)); |
| *number_of_iterations = niters; |
| |
| return get_loop_exit_condition (loop); |
| } |
| |
| |
| /* Function bb_in_loop_p |
| |
| Used as predicate for dfs order traversal of the loop bbs. */ |
| |
| static bool |
| bb_in_loop_p (const_basic_block bb, const void *data) |
| { |
| const struct loop *const loop = (const struct loop *)data; |
| if (flow_bb_inside_loop_p (loop, bb)) |
| return true; |
| return false; |
| } |
| |
| |
| /* Function new_loop_vec_info. |
| |
| Create and initialize a new loop_vec_info struct for LOOP, as well as |
| stmt_vec_info structs for all the stmts in LOOP. */ |
| |
| static loop_vec_info |
| new_loop_vec_info (struct loop *loop) |
| { |
| loop_vec_info res; |
| basic_block *bbs; |
| gimple_stmt_iterator si; |
| unsigned int i, nbbs; |
| |
| res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info)); |
| LOOP_VINFO_LOOP (res) = loop; |
| |
| bbs = get_loop_body (loop); |
| |
| /* Create/Update stmt_info for all stmts in the loop. */ |
| for (i = 0; i < loop->num_nodes; i++) |
| { |
| basic_block bb = bbs[i]; |
| |
| /* BBs in a nested inner-loop will already have been processed (because |
| we will have called vect_analyze_loop_form for any nested inner-loop). |
| Therefore, for stmts in an inner-loop we just want to update the |
| STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new |
| loop_info of the outer-loop we are currently considering for |
| vectorization (instead of the loop_info of the inner-loop). |
| For stmts in other BBs we need to create a stmt_info from scratch. */ |
| if (bb->loop_father != loop) |
| { |
| /* Inner-loop bb. */ |
| gcc_assert (loop->inner && bb->loop_father == loop->inner); |
| for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) |
| { |
| gimple phi = gsi_stmt (si); |
| stmt_vec_info stmt_info = vinfo_for_stmt (phi); |
| loop_vec_info inner_loop_vinfo = |
| STMT_VINFO_LOOP_VINFO (stmt_info); |
| gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo)); |
| STMT_VINFO_LOOP_VINFO (stmt_info) = res; |
| } |
| for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) |
| { |
| gimple stmt = gsi_stmt (si); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| loop_vec_info inner_loop_vinfo = |
| STMT_VINFO_LOOP_VINFO (stmt_info); |
| gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo)); |
| STMT_VINFO_LOOP_VINFO (stmt_info) = res; |
| } |
| } |
| else |
| { |
| /* bb in current nest. */ |
| for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) |
| { |
| gimple phi = gsi_stmt (si); |
| gimple_set_uid (phi, 0); |
| set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL)); |
| } |
| |
| for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) |
| { |
| gimple stmt = gsi_stmt (si); |
| gimple_set_uid (stmt, 0); |
| set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL)); |
| } |
| } |
| } |
| |
| /* CHECKME: We want to visit all BBs before their successors (except for |
| latch blocks, for which this assertion wouldn't hold). In the simple |
| case of the loop forms we allow, a dfs order of the BBs would be the same |
| as a reversed postorder traversal, so we are safe. */ |
| |
| free (bbs); |
| bbs = XCNEWVEC (basic_block, loop->num_nodes); |
| nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p, |
| bbs, loop->num_nodes, loop); |
| gcc_assert (nbbs == loop->num_nodes); |
| |
| LOOP_VINFO_BBS (res) = bbs; |
| LOOP_VINFO_NITERSM1 (res) = NULL; |
| LOOP_VINFO_NITERS (res) = NULL; |
| LOOP_VINFO_NITERS_UNCHANGED (res) = NULL; |
| LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0; |
| LOOP_VINFO_COST_MODEL_THRESHOLD (res) = 0; |
| LOOP_VINFO_VECTORIZABLE_P (res) = 0; |
| LOOP_VINFO_PEELING_FOR_ALIGNMENT (res) = 0; |
| LOOP_VINFO_VECT_FACTOR (res) = 0; |
| LOOP_VINFO_LOOP_NEST (res).create (3); |
| LOOP_VINFO_DATAREFS (res).create (10); |
| LOOP_VINFO_DDRS (res).create (10 * 10); |
| LOOP_VINFO_UNALIGNED_DR (res) = NULL; |
| LOOP_VINFO_MAY_MISALIGN_STMTS (res).create ( |
| PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS)); |
| LOOP_VINFO_MAY_ALIAS_DDRS (res).create ( |
| PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS)); |
| LOOP_VINFO_GROUPED_STORES (res).create (10); |
| LOOP_VINFO_REDUCTIONS (res).create (10); |
| LOOP_VINFO_REDUCTION_CHAINS (res).create (10); |
| LOOP_VINFO_SLP_INSTANCES (res).create (10); |
| LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1; |
| LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop); |
| LOOP_VINFO_PEELING_FOR_GAPS (res) = false; |
| LOOP_VINFO_PEELING_FOR_NITER (res) = false; |
| LOOP_VINFO_OPERANDS_SWAPPED (res) = false; |
| |
| return res; |
| } |
| |
| |
| /* Function destroy_loop_vec_info. |
| |
| Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the |
| stmts in the loop. */ |
| |
| void |
| destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts) |
| { |
| struct loop *loop; |
| basic_block *bbs; |
| int nbbs; |
| gimple_stmt_iterator si; |
| int j; |
| vec<slp_instance> slp_instances; |
| slp_instance instance; |
| bool swapped; |
| |
| if (!loop_vinfo) |
| return; |
| |
| loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| bbs = LOOP_VINFO_BBS (loop_vinfo); |
| nbbs = clean_stmts ? loop->num_nodes : 0; |
| swapped = LOOP_VINFO_OPERANDS_SWAPPED (loop_vinfo); |
| |
| for (j = 0; j < nbbs; j++) |
| { |
| basic_block bb = bbs[j]; |
| for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) |
| free_stmt_vec_info (gsi_stmt (si)); |
| |
| for (si = gsi_start_bb (bb); !gsi_end_p (si); ) |
| { |
| gimple stmt = gsi_stmt (si); |
| |
| /* We may have broken canonical form by moving a constant |
| into RHS1 of a commutative op. Fix such occurrences. */ |
| if (swapped && is_gimple_assign (stmt)) |
| { |
| enum tree_code code = gimple_assign_rhs_code (stmt); |
| |
| if ((code == PLUS_EXPR |
| || code == POINTER_PLUS_EXPR |
| || code == MULT_EXPR) |
| && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt))) |
| swap_ssa_operands (stmt, |
| gimple_assign_rhs1_ptr (stmt), |
| gimple_assign_rhs2_ptr (stmt)); |
| } |
| |
| /* Free stmt_vec_info. */ |
| free_stmt_vec_info (stmt); |
| gsi_next (&si); |
| } |
| } |
| |
| free (LOOP_VINFO_BBS (loop_vinfo)); |
| vect_destroy_datarefs (loop_vinfo, NULL); |
| free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo)); |
| LOOP_VINFO_LOOP_NEST (loop_vinfo).release (); |
| LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release (); |
| LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release (); |
| slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo); |
| FOR_EACH_VEC_ELT (slp_instances, j, instance) |
| vect_free_slp_instance (instance); |
| |
| LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release (); |
| LOOP_VINFO_GROUPED_STORES (loop_vinfo).release (); |
| LOOP_VINFO_REDUCTIONS (loop_vinfo).release (); |
| LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release (); |
| |
| delete LOOP_VINFO_PEELING_HTAB (loop_vinfo); |
| LOOP_VINFO_PEELING_HTAB (loop_vinfo) = NULL; |
| |
| destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)); |
| |
| free (loop_vinfo); |
| loop->aux = NULL; |
| } |
| |
| |
| /* Function vect_analyze_loop_1. |
| |
| Apply a set of analyses on LOOP, and create a loop_vec_info struct |
| for it. The different analyses will record information in the |
| loop_vec_info struct. This is a subset of the analyses applied in |
| vect_analyze_loop, to be applied on an inner-loop nested in the loop |
| that is now considered for (outer-loop) vectorization. */ |
| |
| static loop_vec_info |
| vect_analyze_loop_1 (struct loop *loop) |
| { |
| loop_vec_info loop_vinfo; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "===== analyze_loop_nest_1 =====\n"); |
| |
| /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.). */ |
| |
| loop_vinfo = vect_analyze_loop_form (loop); |
| if (!loop_vinfo) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad inner-loop form.\n"); |
| return NULL; |
| } |
| |
| return loop_vinfo; |
| } |
| |
| |
| /* Function vect_analyze_loop_form. |
| |
| Verify that certain CFG restrictions hold, including: |
| - the loop has a pre-header |
| - the loop has a single entry and exit |
| - the loop exit condition is simple enough, and the number of iterations |
| can be analyzed (a countable loop). */ |
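| |
| /* For example (illustrative only): a counted loop such as |
| for (i = 0; i < n; i++) a[i] = b[i]; |
| normally satisfies these restrictions, whereas a loop whose iteration |
| count cannot be determined, such as while (*p) p++; , is rejected |
| below with "number of iterations cannot be computed". */ |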
| |
| loop_vec_info |
| vect_analyze_loop_form (struct loop *loop) |
| { |
| loop_vec_info loop_vinfo; |
| gcond *loop_cond; |
| tree number_of_iterations = NULL, number_of_iterationsm1 = NULL; |
| loop_vec_info inner_loop_vinfo = NULL; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_analyze_loop_form ===\n"); |
| |
| /* Different restrictions apply when we are considering an inner-most loop, |
| vs. an outer (nested) loop. |
| (FORNOW. May want to relax some of these restrictions in the future). */ |
| |
| if (!loop->inner) |
| { |
| /* Inner-most loop. We currently require that the number of BBs is |
| exactly 2 (the header and latch). Vectorizable inner-most loops |
| look like this: |
| |
| (pre-header) |
| | |
| header <--------+ |
| | | | |
| | +--> latch --+ |
| | |
| (exit-bb) */ |
| |
| if (loop->num_nodes != 2) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: control flow in loop.\n"); |
| return NULL; |
| } |
| |
| if (empty_block_p (loop->header)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: empty loop.\n"); |
| return NULL; |
| } |
| } |
| else |
| { |
| struct loop *innerloop = loop->inner; |
| edge entryedge; |
| |
| /* Nested loop. We currently require that the loop is doubly-nested, |
| contains a single inner loop, and the number of BBs is exactly 5. |
| Vectorizable outer-loops look like this: |
| |
| (pre-header) |
| | |
| header <---+ |
| | | |
| inner-loop | |
| | | |
| tail ------+ |
| | |
| (exit-bb) |
| |
| The inner-loop has the properties expected of inner-most loops |
| as described above. */ |
| |
| if ((loop->inner)->inner || (loop->inner)->next) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: multiple nested loops.\n"); |
| return NULL; |
| } |
| |
| /* Analyze the inner-loop. */ |
| inner_loop_vinfo = vect_analyze_loop_1 (loop->inner); |
| if (!inner_loop_vinfo) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: Bad inner loop.\n"); |
| return NULL; |
| } |
| |
| if (!expr_invariant_in_loop_p (loop, |
| LOOP_VINFO_NITERS (inner_loop_vinfo))) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: inner-loop count not" |
| " invariant.\n"); |
| destroy_loop_vec_info (inner_loop_vinfo, true); |
| return NULL; |
| } |
| |
| if (loop->num_nodes != 5) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: control flow in loop.\n"); |
| destroy_loop_vec_info (inner_loop_vinfo, true); |
| return NULL; |
| } |
| |
| gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2); |
| entryedge = EDGE_PRED (innerloop->header, 0); |
| if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch) |
| entryedge = EDGE_PRED (innerloop->header, 1); |
| |
| if (entryedge->src != loop->header |
| || !single_exit (innerloop) |
| || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: unsupported outerloop form.\n"); |
| destroy_loop_vec_info (inner_loop_vinfo, true); |
| return NULL; |
| } |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Considering outer-loop vectorization.\n"); |
| } |
| |
| if (!single_exit (loop) |
| || EDGE_COUNT (loop->header->preds) != 2) |
| { |
| if (dump_enabled_p ()) |
| { |
| if (!single_exit (loop)) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: multiple exits.\n"); |
| else if (EDGE_COUNT (loop->header->preds) != 2) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: too many incoming edges.\n"); |
| } |
| if (inner_loop_vinfo) |
| destroy_loop_vec_info (inner_loop_vinfo, true); |
| return NULL; |
| } |
| |
| /* We assume that the loop exit condition is at the end of the loop, i.e., |
| that the loop is represented as a do-while (with a proper if-guard |
| before the loop if needed), where the loop header contains all the |
| executable statements, and the latch is empty. */ |
| if (!empty_block_p (loop->latch) |
| || !gimple_seq_empty_p (phi_nodes (loop->latch))) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: latch block not empty.\n"); |
| if (inner_loop_vinfo) |
| destroy_loop_vec_info (inner_loop_vinfo, true); |
| return NULL; |
| } |
| |
| /* Make sure there exists a single-predecessor exit bb: */ |
| if (!single_pred_p (single_exit (loop)->dest)) |
| { |
| edge e = single_exit (loop); |
| if (!(e->flags & EDGE_ABNORMAL)) |
| { |
| split_loop_exit_edge (e); |
| if (dump_enabled_p ()) |
| dump_printf (MSG_NOTE, "split exit edge.\n"); |
| } |
| else |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: abnormal loop exit edge.\n"); |
| if (inner_loop_vinfo) |
| destroy_loop_vec_info (inner_loop_vinfo, true); |
| return NULL; |
| } |
| } |
| |
| loop_cond = vect_get_loop_niters (loop, &number_of_iterations, |
| &number_of_iterationsm1); |
| if (!loop_cond) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: complicated exit condition.\n"); |
| if (inner_loop_vinfo) |
| destroy_loop_vec_info (inner_loop_vinfo, true); |
| return NULL; |
| } |
| |
| if (!number_of_iterations |
| || chrec_contains_undetermined (number_of_iterations)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: number of iterations cannot be " |
| "computed.\n"); |
| if (inner_loop_vinfo) |
| destroy_loop_vec_info (inner_loop_vinfo, true); |
| return NULL; |
| } |
| |
| if (integer_zerop (number_of_iterations)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: number of iterations = 0.\n"); |
| if (inner_loop_vinfo) |
| destroy_loop_vec_info (inner_loop_vinfo, true); |
| return NULL; |
| } |
| |
| loop_vinfo = new_loop_vec_info (loop); |
| LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1; |
| LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations; |
| LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations; |
| |
| if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Symbolic number of iterations is "); |
| dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| } |
| |
| STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type; |
| |
| /* CHECKME: May want to keep it around in the future. */ |
| if (inner_loop_vinfo) |
| destroy_loop_vec_info (inner_loop_vinfo, false); |
| |
| gcc_assert (!loop->aux); |
| loop->aux = loop_vinfo; |
| return loop_vinfo; |
| } |
| |
| |
| /* Function vect_analyze_loop_operations. |
| |
| Scan the loop stmts and make sure they are all vectorizable. */ |
| |
| static bool |
| vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp) |
| { |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); |
| int nbbs = loop->num_nodes; |
| unsigned int vectorization_factor = 0; |
| int i; |
| stmt_vec_info stmt_info; |
| bool need_to_vectorize = false; |
| int min_profitable_iters; |
| int min_scalar_loop_bound; |
| unsigned int th; |
| bool only_slp_in_loop = true, ok; |
| HOST_WIDE_INT max_niter; |
| HOST_WIDE_INT estimated_niter; |
| int min_profitable_estimate; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_analyze_loop_operations ===\n"); |
| |
| gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo)); |
| vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| if (slp) |
| { |
| /* If all the stmts in the loop can be SLPed, we perform only SLP, and the |
| vectorization factor of the loop is the unrolling factor required by |
| the SLP instances. If that unrolling factor is 1, we say that we |
| perform pure SLP on the loop - cross-iteration parallelism is not |
| exploited. */ |
| for (i = 0; i < nbbs; i++) |
| { |
| basic_block bb = bbs[i]; |
| for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); |
| gsi_next (&si)) |
| { |
| gimple stmt = gsi_stmt (si); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| gcc_assert (stmt_info); |
| if ((STMT_VINFO_RELEVANT_P (stmt_info) |
| || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info))) |
| && !PURE_SLP_STMT (stmt_info)) |
| /* STMT needs both SLP and loop-based vectorization. */ |
| only_slp_in_loop = false; |
| } |
| } |
| |
| if (only_slp_in_loop) |
| vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo); |
| else |
| vectorization_factor = least_common_multiple (vectorization_factor, |
| LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo)); |
| |
| LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor; |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Updating vectorization factor to %d\n", |
| vectorization_factor); |
| } |
| |
| for (i = 0; i < nbbs; i++) |
| { |
| basic_block bb = bbs[i]; |
| |
| for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); |
| gsi_next (&si)) |
| { |
| gphi *phi = si.phi (); |
| ok = true; |
| |
| stmt_info = vinfo_for_stmt (phi); |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "examining phi: "); |
| dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| /* Inner-loop loop-closed exit phi in outer-loop vectorization |
| (i.e., a phi in the tail of the outer-loop). */ |
| if (! is_loop_header_bb_p (bb)) |
| { |
| /* FORNOW: we currently don't support the case that these phis |
| are not used in the outer loop (unless it is a double reduction, |
| i.e., this phi is vect_reduction_def), because this case |
| requires us to actually do something here. */ |
| if ((!STMT_VINFO_RELEVANT_P (stmt_info) |
| || STMT_VINFO_LIVE_P (stmt_info)) |
| && STMT_VINFO_DEF_TYPE (stmt_info) |
| != vect_double_reduction_def) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Unsupported loop-closed phi in " |
| "outer-loop.\n"); |
| return false; |
| } |
| |
| /* If PHI is used in the outer loop, we check that its operand |
| is defined in the inner loop. */ |
| if (STMT_VINFO_RELEVANT_P (stmt_info)) |
| { |
| tree phi_op; |
| gimple op_def_stmt; |
| |
| if (gimple_phi_num_args (phi) != 1) |
| return false; |
| |
| phi_op = PHI_ARG_DEF (phi, 0); |
| if (TREE_CODE (phi_op) != SSA_NAME) |
| return false; |
| |
| op_def_stmt = SSA_NAME_DEF_STMT (phi_op); |
| if (gimple_nop_p (op_def_stmt) |
| || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt)) |
| || !vinfo_for_stmt (op_def_stmt)) |
| return false; |
| |
| if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt)) |
| != vect_used_in_outer |
| && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt)) |
| != vect_used_in_outer_by_reduction) |
| return false; |
| } |
| |
| continue; |
| } |
| |
| gcc_assert (stmt_info); |
| |
| if (STMT_VINFO_LIVE_P (stmt_info)) |
| { |
| /* FORNOW: not yet supported. */ |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: value used after loop.\n"); |
| return false; |
| } |
| |
| if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope |
| && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def) |
| { |
| /* A scalar-dependence cycle that we don't support. */ |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: scalar dependence cycle.\n"); |
| return false; |
| } |
| |
| if (STMT_VINFO_RELEVANT_P (stmt_info)) |
| { |
| need_to_vectorize = true; |
| if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def) |
| ok = vectorizable_induction (phi, NULL, NULL); |
| } |
| |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: relevant phi not " |
| "supported: "); |
| dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| return false; |
| } |
| } |
| |
| for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); |
| gsi_next (&si)) |
| { |
| gimple stmt = gsi_stmt (si); |
| if (!gimple_clobber_p (stmt) |
| && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL)) |
| return false; |
| } |
| } /* bbs */ |
| |
| /* All operations in the loop are either irrelevant (they deal with loop |
| control, or are dead), or are only used outside the loop and can be moved |
| out of the loop (e.g. invariants, inductions). The loop can then be |
| optimized away by scalar optimizations. We're better off not |
| touching this loop. */ |
| if (!need_to_vectorize) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "All the computation can be taken out of the loop.\n"); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: redundant loop. no profit to " |
| "vectorize.\n"); |
| return false; |
| } |
| |
| if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "vectorization_factor = %d, niters = " |
| HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor, |
| LOOP_VINFO_INT_NITERS (loop_vinfo)); |
| |
| if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
| && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor)) |
| || ((max_niter = max_stmt_executions_int (loop)) != -1 |
| && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: iteration count too small.\n"); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: iteration count smaller than " |
| "vectorization factor.\n"); |
| return false; |
| } |
| |
| /* Analyze cost. Decide if it is worthwhile to vectorize. */ |
| |
| /* Once VF is set, SLP costs should be updated since the number of created |
| vector stmts depends on VF. */ |
| vect_update_slp_costs_according_to_vf (loop_vinfo); |
| |
| vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters, |
| &min_profitable_estimate); |
| LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters; |
| |
| if (min_profitable_iters < 0) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: vectorization not profitable.\n"); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: vector version will never be " |
| "profitable.\n"); |
| return false; |
| } |
| |
| min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND) |
| * vectorization_factor) - 1); |
| |
| |
| /* Use the cost model only if it is more conservative than the |
| user-specified threshold. */ |
| |
| th = (unsigned) min_scalar_loop_bound; |
| if (min_profitable_iters |
| && (!min_scalar_loop_bound |
| || min_profitable_iters > min_scalar_loop_bound)) |
| th = (unsigned) min_profitable_iters; |
| |
| LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th; |
| |
| if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
| && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: vectorization not profitable.\n"); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "not vectorized: iteration count smaller than user " |
| "specified loop bound parameter or minimum profitable " |
| "iterations (whichever is more conservative).\n"); |
| return false; |
| } |
| |
| if ((estimated_niter = estimated_stmt_executions_int (loop)) != -1 |
| && ((unsigned HOST_WIDE_INT) estimated_niter |
| <= MAX (th, (unsigned)min_profitable_estimate))) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: estimated iteration count too " |
| "small.\n"); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "not vectorized: estimated iteration count smaller " |
| "than specified loop bound parameter or minimum " |
| "profitable iterations (whichever is more " |
| "conservative).\n"); |
| return false; |
| } |
| |
| return true; |
| } |
| |
| |
| /* Function vect_analyze_loop_2. |
| |
| Apply a set of analyses on LOOP, and create a loop_vec_info struct |
| for it. The different analyses will record information in the |
| loop_vec_info struct. */ |
| static bool |
| vect_analyze_loop_2 (loop_vec_info loop_vinfo) |
| { |
| bool ok, slp = false; |
| int max_vf = MAX_VECTORIZATION_FACTOR; |
| int min_vf = 2; |
| unsigned int th; |
| unsigned int n_stmts = 0; |
| |
| /* Find all data references in the loop (which correspond to vdefs/vuses) |
| and analyze their evolution in the loop. Also adjust the minimal |
| vectorization factor according to the loads and stores. |
| |
| FORNOW: Handle only simple array references whose |
| alignment can be forced, and aligned pointer references. */ |
| |
| ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf, &n_stmts); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad data references.\n"); |
| return false; |
| } |
| |
| /* Classify all cross-iteration scalar data-flow cycles. |
| Cross-iteration cycles caused by virtual phis are analyzed separately. */ |
| |
| vect_analyze_scalar_cycles (loop_vinfo); |
| |
| vect_pattern_recog (loop_vinfo, NULL); |
| |
| /* Analyze the access patterns of the data-refs in the loop (consecutive, |
| complex, etc.). FORNOW: Only handle consecutive access pattern. */ |
| |
| ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad data access.\n"); |
| return false; |
| } |
| |
| /* Data-flow analysis to detect stmts that do not need to be vectorized. */ |
| |
| ok = vect_mark_stmts_to_be_vectorized (loop_vinfo); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "unexpected pattern.\n"); |
| return false; |
| } |
| |
| /* Analyze data dependences between the data-refs in the loop |
| and adjust the maximum vectorization factor according to |
| the dependences. |
| FORNOW: fail at the first data dependence that we encounter. */ |
| |
| ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf); |
| if (!ok |
| || max_vf < min_vf) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad data dependence.\n"); |
| return false; |
| } |
| |
| ok = vect_determine_vectorization_factor (loop_vinfo); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "can't determine vectorization factor.\n"); |
| return false; |
| } |
| if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad data dependence.\n"); |
| return false; |
| } |
| |
| /* Analyze the alignment of the data-refs in the loop. |
| Fail if a data reference is found that cannot be vectorized. */ |
| |
| ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad data alignment.\n"); |
| return false; |
| } |
| |
| /* Prune the list of ddrs to be tested at run-time by versioning for alias. |
| It is important to call pruning after vect_analyze_data_ref_accesses, |
| since we use grouping information gathered by interleaving analysis. */ |
| ok = vect_prune_runtime_alias_test_list (loop_vinfo); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "number of versioning for alias " |
| "run-time tests exceeds %d " |
| "(--param vect-max-version-for-alias-checks)\n", |
| PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS)); |
| return false; |
| } |
| |
| /* This pass will decide on using loop versioning and/or loop peeling in |
| order to enhance the alignment of data references in the loop. */ |
| |
| ok = vect_enhance_data_refs_alignment (loop_vinfo); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad data alignment.\n"); |
| return false; |
| } |
| |
| /* Check the SLP opportunities in the loop, analyze and build SLP trees. */ |
| ok = vect_analyze_slp (loop_vinfo, NULL, n_stmts); |
| if (ok) |
| { |
| /* Decide which possible SLP instances to SLP. */ |
| slp = vect_make_slp_decision (loop_vinfo); |
| |
| /* Find stmts that need to be both vectorized and SLPed. */ |
| vect_detect_hybrid_slp (loop_vinfo); |
| } |
| else |
| return false; |
| |
| /* Scan all the operations in the loop and make sure they are |
| vectorizable. */ |
| |
| ok = vect_analyze_loop_operations (loop_vinfo, slp); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad operation or unsupported loop bound.\n"); |
| return false; |
| } |
| |
| /* Decide whether we need to create an epilogue loop to handle |
| remaining scalar iterations. */ |
| th = ((LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) + 1) |
| / LOOP_VINFO_VECT_FACTOR (loop_vinfo)) |
| * LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
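| |
| /* For illustration: with a cost model threshold of 10 and a |
| vectorization factor of 4 the expression above gives |
| th = ((10 + 1) / 4) * 4 = 8 (integer division), i.e. the value |
| threshold + 1 rounded down to a multiple of the vectorization |
| factor. */ |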
| |
| if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
| && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0) |
| { |
| if (ctz_hwi (LOOP_VINFO_INT_NITERS (loop_vinfo) |
| - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)) |
| < exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))) |
| LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true; |
| } |
| else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) |
| || (tree_ctz (LOOP_VINFO_NITERS (loop_vinfo)) |
| < (unsigned)exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)) |
| /* In case of versioning, check if the maximum number of |
| iterations is greater than th. If they are identical, |
| the epilogue is unnecessary. */ |
| && ((!LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo) |
| && !LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)) |
| || (unsigned HOST_WIDE_INT)max_stmt_executions_int |
| (LOOP_VINFO_LOOP (loop_vinfo)) > th))) |
| LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true; |
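| |
| /* For illustration: with a vectorization factor of 4, a known trip |
| count of 10 and no peeling for alignment, 10 is not a multiple of 4, |
| so the code above sets PEELING_FOR_NITER and the two leftover |
| iterations are executed in a scalar epilogue loop. */ |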
| |
| /* If an epilogue loop is required make sure we can create one. */ |
| if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) |
| || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n"); |
| if (!vect_can_advance_ivs_p (loop_vinfo) |
| || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo), |
| single_exit (LOOP_VINFO_LOOP |
| (loop_vinfo)))) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: can't create required " |
| "epilog loop\n"); |
| return false; |
| } |
| } |
| |
| return true; |
| } |
| |
| /* Function vect_analyze_loop. |
| |
| Apply a set of analyses on LOOP, and create a loop_vec_info struct |
| for it. The different analyses will record information in the |
| loop_vec_info struct. */ |
| loop_vec_info |
| vect_analyze_loop (struct loop *loop) |
| { |
| loop_vec_info loop_vinfo; |
| unsigned int vector_sizes; |
| |
| /* Autodetect first vector size we try. */ |
| current_vector_size = 0; |
| vector_sizes = targetm.vectorize.autovectorize_vector_sizes (); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "===== analyze_loop_nest =====\n"); |
| |
| if (loop_outer (loop) |
| && loop_vec_info_for_loop (loop_outer (loop)) |
| && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop)))) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "outer-loop already vectorized.\n"); |
| return NULL; |
| } |
| |
| while (1) |
| { |
| /* Check the CFG characteristics of the loop (nesting, entry/exit). */ |
| loop_vinfo = vect_analyze_loop_form (loop); |
| if (!loop_vinfo) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad loop form.\n"); |
| return NULL; |
| } |
| |
| if (vect_analyze_loop_2 (loop_vinfo)) |
| { |
| LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1; |
| |
| return loop_vinfo; |
| } |
| |
| destroy_loop_vec_info (loop_vinfo, true); |
| |
| vector_sizes &= ~current_vector_size; |
| if (vector_sizes == 0 |
| || current_vector_size == 0) |
| return NULL; |
| |
| /* Try the largest remaining vector size. */ |
| current_vector_size = 1 << floor_log2 (vector_sizes); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "***** Re-trying analysis with " |
| "vector size %d\n", current_vector_size); |
| } |
| } |
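| |
| /* For illustration: if the target reports autovectorizable vector sizes |
| of 32 and 16 bytes (bitmask 0x30), the first attempt autodetects the |
| preferred size; when analysis fails, the bit for the size just tried is |
| cleared from VECTOR_SIZES and the analysis is retried with the largest |
| remaining size, until an attempt succeeds or no sizes are left. */ |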
| |
| |
| /* Function reduction_code_for_scalar_code |
| |
| Input: |
| CODE - tree_code of a reduction operation. |
| |
| Output: |
| REDUC_CODE - the corresponding tree-code to be used to reduce the |
| vector of partial results into a single scalar result, or ERROR_MARK |
| if the operation is a supported reduction operation, but does not have |
| such a tree-code. |
| |
| Return FALSE if CODE currently cannot be vectorized as a reduction. */ |
| |
| static bool |
| reduction_code_for_scalar_code (enum tree_code code, |
| enum tree_code *reduc_code) |
| { |
| switch (code) |
| { |
| case MAX_EXPR: |
| *reduc_code = REDUC_MAX_EXPR; |
| return true; |
| |
| case MIN_EXPR: |
| *reduc_code = REDUC_MIN_EXPR; |
| return true; |
| |
| case PLUS_EXPR: |
| *reduc_code = REDUC_PLUS_EXPR; |
| return true; |
| |
| case MULT_EXPR: |
| case MINUS_EXPR: |
| case BIT_IOR_EXPR: |
| case BIT_XOR_EXPR: |
| case BIT_AND_EXPR: |
| *reduc_code = ERROR_MARK; |
| return true; |
| |
| default: |
| return false; |
| } |
| } |
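| |
| /* For illustration: a sum reduction such as |
| |
| int sum = 0; |
| for (i = 0; i < N; i++) |
| sum += a[i]; |
| |
| uses PLUS_EXPR in the loop body, which maps to REDUC_PLUS_EXPR for |
| reducing the vector of partial sums into the final scalar after the |
| loop. A MULT_EXPR or BIT_XOR_EXPR reduction is still supported but |
| maps to ERROR_MARK, in which case the epilogue reduction is open-coded |
| (e.g. with whole-vector shifts) instead of a single reduction |
| operation. */ |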
| |
| |
| /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement |
| STMT is printed with a message MSG. */ |
| |
| static void |
| report_vect_op (int msg_type, gimple stmt, const char *msg) |
| { |
| dump_printf_loc (msg_type, vect_location, "%s", msg); |
| dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0); |
| dump_printf (msg_type, "\n"); |
| } |
| |
| |
| /* Detect SLP reduction of the form: |
| |
| #a1 = phi <a5, a0> |
| a2 = operation (a1) |
| a3 = operation (a2) |
| a4 = operation (a3) |
| a5 = operation (a4) |
| |
| #a = phi <a5> |
| |
| PHI is the reduction phi node (#a1 = phi <a5, a0> above) |
| FIRST_STMT is the first reduction stmt in the chain |
| (a2 = operation (a1)). |
| |
| Return TRUE if a reduction chain was detected. */ |
| |
| static bool |
| vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt) |
| { |
| struct loop *loop = (gimple_bb (phi))->loop_father; |
| struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info); |
| enum tree_code code; |
| gimple current_stmt = NULL, loop_use_stmt = NULL, first, next_stmt; |
| stmt_vec_info use_stmt_info, current_stmt_info; |
| tree lhs; |
| imm_use_iterator imm_iter; |
| use_operand_p use_p; |
| int nloop_uses, size = 0, n_out_of_loop_uses; |
| bool found = false; |
| |
| if (loop != vect_loop) |
| return false; |
| |
| lhs = PHI_RESULT (phi); |
| code = gimple_assign_rhs_code (first_stmt); |
| while (1) |
| { |
| nloop_uses = 0; |
| n_out_of_loop_uses = 0; |
| FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs) |
| { |
| gimple use_stmt = USE_STMT (use_p); |
| if (is_gimple_debug (use_stmt)) |
| continue; |
| |
| /* Check if we got back to the reduction phi. */ |
| if (use_stmt == phi) |
| { |
| loop_use_stmt = use_stmt; |
| found = true; |
| break; |
| } |
| |
| if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))) |
| { |
| if (vinfo_for_stmt (use_stmt) |
| && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt))) |
| { |
| loop_use_stmt = use_stmt; |
| nloop_uses++; |
| } |
| } |
| else |
| n_out_of_loop_uses++; |
| |
| /* There can be either a single use in the loop or two uses in |
| phi nodes. */ |
| if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses)) |
| return false; |
| } |
| |
| if (found) |
| break; |
| |
| /* We reached a statement with no loop uses. */ |
| if (nloop_uses == 0) |
| return false; |
| |
| /* This is a loop exit phi, and we haven't reached the reduction phi. */ |
| if (gimple_code (loop_use_stmt) == GIMPLE_PHI) |
| return false; |
| |
| if (!is_gimple_assign (loop_use_stmt) |
| || code != gimple_assign_rhs_code (loop_use_stmt) |
| || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt))) |
| return false; |
| |
| /* Insert USE_STMT into reduction chain. */ |
| use_stmt_info = vinfo_for_stmt (loop_use_stmt); |
| if (current_stmt) |
| { |
| current_stmt_info = vinfo_for_stmt (current_stmt); |
| GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt; |
| GROUP_FIRST_ELEMENT (use_stmt_info) |
| = GROUP_FIRST_ELEMENT (current_stmt_info); |
| } |
| else |
| GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt; |
| |
| lhs = gimple_assign_lhs (loop_use_stmt); |
| current_stmt = loop_use_stmt; |
| size++; |
| } |
| |
| if (!found || loop_use_stmt != phi || size < 2) |
| return false; |
| |
| /* Swap the operands, if needed, to make the reduction operand be the second |
| operand. */ |
| lhs = PHI_RESULT (phi); |
| next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt)); |
| while (next_stmt) |
| { |
| if (gimple_assign_rhs2 (next_stmt) == lhs) |
| { |
| tree op = gimple_assign_rhs1 (next_stmt); |
| gimple def_stmt = NULL; |
| |
| if (TREE_CODE (op) == SSA_NAME) |
| def_stmt = SSA_NAME_DEF_STMT (op); |
| |
| /* Check that the other def is either defined in the loop |
| ("vect_internal_def"), or it's an induction (defined by a |
| loop-header phi-node). */ |
| if (def_stmt |
| && gimple_bb (def_stmt) |
| && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) |
| && (is_gimple_assign (def_stmt) |
| || is_gimple_call (def_stmt) |
| || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) |
| == vect_induction_def |
| || (gimple_code (def_stmt) == GIMPLE_PHI |
| && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) |
| == vect_internal_def |
| && !is_loop_header_bb_p (gimple_bb (def_stmt))))) |
| { |
| lhs = gimple_assign_lhs (next_stmt); |
| next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); |
| continue; |
| } |
| |
| return false; |
| } |
| else |
| { |
| tree op = gimple_assign_rhs2 (next_stmt); |
| gimple def_stmt = NULL; |
| |
| if (TREE_CODE (op) == SSA_NAME) |
| def_stmt = SSA_NAME_DEF_STMT (op); |
| |
| /* Check that the other def is either defined in the loop |
| ("vect_internal_def"), or it's an induction (defined by a |
| loop-header phi-node). */ |
| if (def_stmt |
| && gimple_bb (def_stmt) |
| && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) |
| && (is_gimple_assign (def_stmt) |
| || is_gimple_call (def_stmt) |
| || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) |
| == vect_induction_def |
| || (gimple_code (def_stmt) == GIMPLE_PHI |
| && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) |
| == vect_internal_def |
| && !is_loop_header_bb_p (gimple_bb (def_stmt))))) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: "); |
| dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| swap_ssa_operands (next_stmt, |
| gimple_assign_rhs1_ptr (next_stmt), |
| gimple_assign_rhs2_ptr (next_stmt)); |
| update_stmt (next_stmt); |
| |
| if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt))) |
| LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true; |
| } |
| else |
| return false; |
| } |
| |
| lhs = gimple_assign_lhs (next_stmt); |
| next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); |
| } |
| |
| /* Save the chain for further analysis in SLP detection. */ |
| first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt)); |
| LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first); |
| GROUP_SIZE (vinfo_for_stmt (first)) = size; |
| |
| return true; |
| } |
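| |
| /* For illustration: a manually unrolled sum such as |
| |
| for (i = 0; i < N; i += 4) |
| sum = sum + a[i] + a[i+1] + a[i+2] + a[i+3]; |
| |
| is presented in GIMPLE as a chain of four PLUS_EXPR statements each |
| feeding the next, which matches the pattern detected above and is |
| recorded in LOOP_VINFO_REDUCTION_CHAINS with GROUP_SIZE 4. */ |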
| |
| |
| /* Function vect_is_simple_reduction_1 |
| |
| (1) Detect a cross-iteration def-use cycle that represents a simple |
| reduction computation. We look for the following pattern: |
| |
| loop_header: |
| a1 = phi < a0, a2 > |
| a3 = ... |
| a2 = operation (a3, a1) |
| |
| or |
| |
| a3 = ... |
| loop_header: |
| a1 = phi < a0, a2 > |
| a2 = operation (a3, a1) |
| |
| such that: |
| 1. operation is commutative and associative and it is safe to |
| change the order of the computation (if CHECK_REDUCTION is true) |
| 2. no uses for a2 in the loop (a2 is used out of the loop) |
| 3. no uses of a1 in the loop besides the reduction operation |
| 4. no uses of a1 outside the loop. |
| |
| Conditions 1,4 are tested here. |
| Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized. |
| |
| (2) Detect a cross-iteration def-use cycle in nested loops, i.e., |
| nested cycles, if CHECK_REDUCTION is false. |
| |
| (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double |
| reductions: |
| |
| a1 = phi < a0, a2 > |
| inner loop (def of a3) |
| a2 = phi < a3 > |
| |
| If MODIFY is true it also tries to rework the code in-place to enable |
| detection of more reduction patterns. For the time being we rewrite |
| "res -= RHS" into "res += -RHS" when it seems worthwhile. |
| */ |
| |
| static gimple |
| vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi, |
| bool check_reduction, bool *double_reduc, |
| bool modify) |
| { |
| struct loop *loop = (gimple_bb (phi))->loop_father; |
| struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info); |
| edge latch_e = loop_latch_edge (loop); |
| tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e); |
| gimple def_stmt, def1 = NULL, def2 = NULL; |
| enum tree_code orig_code, code; |
| tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE; |
| tree type; |
| int nloop_uses; |
| tree name; |
| imm_use_iterator imm_iter; |
| use_operand_p use_p; |
| bool phi_def; |
| |
| *double_reduc = false; |
| |
| /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization, |
| otherwise, we assume outer loop vectorization. */ |
| gcc_assert ((check_reduction && loop == vect_loop) |
| || (!check_reduction && flow_loop_nested_p (vect_loop, loop))); |
| |
| name = PHI_RESULT (phi); |
| /* ??? If there are no uses of the PHI result the inner loop reduction |
| won't be detected as possibly double-reduction by vectorizable_reduction |
| because that tries to walk the PHI arg from the preheader edge which |
| can be constant. See PR60382. */ |
| if (has_zero_uses (name)) |
| return NULL; |
| nloop_uses = 0; |
| FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name) |
| { |
| gimple use_stmt = USE_STMT (use_p); |
| if (is_gimple_debug (use_stmt)) |
| continue; |
| |
| if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "intermediate value used outside loop.\n"); |
| |
| return NULL; |
| } |
| |
| if (vinfo_for_stmt (use_stmt) |
| && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt))) |
| nloop_uses++; |
| if (nloop_uses > 1) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "reduction used in loop.\n"); |
| return NULL; |
| } |
| } |
| |
| if (TREE_CODE (loop_arg) != SSA_NAME) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "reduction: not ssa_name: "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| return NULL; |
| } |
| |
| def_stmt = SSA_NAME_DEF_STMT (loop_arg); |
| if (!def_stmt) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "reduction: no def_stmt.\n"); |
| return NULL; |
| } |
| |
| if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| return NULL; |
| } |
| |
| if (is_gimple_assign (def_stmt)) |
| { |
| name = gimple_assign_lhs (def_stmt); |
| phi_def = false; |
| } |
| else |
| { |
| name = PHI_RESULT (def_stmt); |
| phi_def = true; |
| } |
| |
| nloop_uses = 0; |
| FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name) |
| { |
| gimple use_stmt = USE_STMT (use_p); |
| if (is_gimple_debug (use_stmt)) |
| continue; |
| if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)) |
| && vinfo_for_stmt (use_stmt) |
| && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt))) |
| nloop_uses++; |
| if (nloop_uses > 1) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "reduction used in loop.\n"); |
| return NULL; |
| } |
| } |
| |
| /* If DEF_STMT is a phi node itself, we expect it to have a single argument |
| defined in the inner loop. */ |
| if (phi_def) |
| { |
| op1 = PHI_ARG_DEF (def_stmt, 0); |
| |
| if (gimple_phi_num_args (def_stmt) != 1 |
| || TREE_CODE (op1) != SSA_NAME) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "unsupported phi node definition.\n"); |
| |
| return NULL; |
| } |
| |
| def1 = SSA_NAME_DEF_STMT (op1); |
| if (gimple_bb (def1) |
| && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) |
| && loop->inner |
| && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1)) |
| && is_gimple_assign (def1)) |
| { |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_NOTE, def_stmt, |
| "detected double reduction: "); |
| |
| *double_reduc = true; |
| return def_stmt; |
| } |
| |
| return NULL; |
| } |
| |
| code = orig_code = gimple_assign_rhs_code (def_stmt); |
| |
| /* We can handle "res -= x[i]", which is non-associative, by |
| simply rewriting it into "res += -x[i]". Avoid changing the |
| gimple statement during the first simple tests and only do this |
| if we're allowed to change the code at all. */ |
| if (code == MINUS_EXPR |
| && modify |
| && (op1 = gimple_assign_rhs1 (def_stmt)) |
| && TREE_CODE (op1) == SSA_NAME |
| && SSA_NAME_DEF_STMT (op1) == phi) |
| code = PLUS_EXPR; |
| |
| if (check_reduction |
| && (!commutative_tree_code (code) || !associative_tree_code (code))) |
| { |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
| "reduction: not commutative/associative: "); |
| return NULL; |
| } |
| |
| if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS) |
| { |
| if (code != COND_EXPR) |
| { |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
| "reduction: not binary operation: "); |
| |
| return NULL; |
| } |
| |
| op3 = gimple_assign_rhs1 (def_stmt); |
| if (COMPARISON_CLASS_P (op3)) |
| { |
| op4 = TREE_OPERAND (op3, 1); |
| op3 = TREE_OPERAND (op3, 0); |
| } |
| |
| op1 = gimple_assign_rhs2 (def_stmt); |
| op2 = gimple_assign_rhs3 (def_stmt); |
| |
| if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME) |
| { |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
| "reduction: uses not ssa_names: "); |
| |
| return NULL; |
| } |
| } |
| else |
| { |
| op1 = gimple_assign_rhs1 (def_stmt); |
| op2 = gimple_assign_rhs2 (def_stmt); |
| |
| if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME) |
| { |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
| "reduction: uses not ssa_names: "); |
| |
| return NULL; |
| } |
| } |
| |
| type = TREE_TYPE (gimple_assign_lhs (def_stmt)); |
| if ((TREE_CODE (op1) == SSA_NAME |
| && !types_compatible_p (type,TREE_TYPE (op1))) |
| || (TREE_CODE (op2) == SSA_NAME |
| && !types_compatible_p (type, TREE_TYPE (op2))) |
| || (op3 && TREE_CODE (op3) == SSA_NAME |
| && !types_compatible_p (type, TREE_TYPE (op3))) |
| || (op4 && TREE_CODE (op4) == SSA_NAME |
| && !types_compatible_p (type, TREE_TYPE (op4)))) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "reduction: multiple types: operation type: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, type); |
| dump_printf (MSG_NOTE, ", operands types: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, |
| TREE_TYPE (op1)); |
| dump_printf (MSG_NOTE, ","); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, |
| TREE_TYPE (op2)); |
| if (op3) |
| { |
| dump_printf (MSG_NOTE, ","); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, |
| TREE_TYPE (op3)); |
| } |
| |
| if (op4) |
| { |
| dump_printf (MSG_NOTE, ","); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, |
| TREE_TYPE (op4)); |
| } |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| return NULL; |
| } |
| |
| /* Check that it's ok to change the order of the computation. |
| Generally, when vectorizing a reduction we change the order of the |
| computation. This may change the behavior of the program in some |
| cases, so we need to check that this is ok. One exception is when |
| vectorizing an outer-loop: the inner-loop is executed sequentially, |
| and therefore vectorizing reductions in the inner-loop during |
| outer-loop vectorization is safe. */ |
| |
| /* CHECKME: check for !flag_finite_math_only too? */ |
| if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math |
| && check_reduction) |
| { |
| /* Changing the order of operations changes the semantics. */ |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
| "reduction: unsafe fp math optimization: "); |
| return NULL; |
| } |
| else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type) |
| && check_reduction) |
| { |
| /* Changing the order of operations changes the semantics. */ |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
| "reduction: unsafe int math optimization: "); |
| return NULL; |
| } |
| else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction) |
| { |
| /* Changing the order of operations changes the semantics. */ |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
| "reduction: unsafe fixed-point math optimization: "); |
| return NULL; |
| } |
| |
| /* If we detected "res -= x[i]" earlier, rewrite it into |
| "res += -x[i]" now. If this turns out to be useless, reassoc |
| will clean it up again. */ |
| if (orig_code == MINUS_EXPR) |
| { |
| tree rhs = gimple_assign_rhs2 (def_stmt); |
| tree negrhs = make_ssa_name (TREE_TYPE (rhs)); |
| gimple negate_stmt = gimple_build_assign (negrhs, NEGATE_EXPR, rhs); |
| gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt); |
| set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt, |
| loop_info, NULL)); |
| gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT); |
| gimple_assign_set_rhs2 (def_stmt, negrhs); |
| gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR); |
| update_stmt (def_stmt); |
| } |
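| |
| /* For illustration: the rewrite above turns a statement like |
| res_1 = res_2 - _3; |
| into |
| negrhs_4 = -_3; |
| res_1 = res_2 + negrhs_4; |
| so that the rest of the reduction handling can treat it as an |
| ordinary PLUS_EXPR reduction. */ |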
| |
| /* Reduction is safe. We're dealing with one of the following: |
| 1) integer arithmetic and no trapv |
| 2) floating point arithmetic, and special flags permit this optimization |
| 3) nested cycle (i.e., outer loop vectorization). */ |
| if (TREE_CODE (op1) == SSA_NAME) |
| def1 = SSA_NAME_DEF_STMT (op1); |
| |
| if (TREE_CODE (op2) == SSA_NAME) |
| def2 = SSA_NAME_DEF_STMT (op2); |
| |
| if (code != COND_EXPR |
| && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2)))) |
| { |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: "); |
| return NULL; |
| } |
| |
| /* Check that one def is the reduction def, defined by PHI, |
| the other def is either defined in the loop ("vect_internal_def"), |
| or it's an induction (defined by a loop-header phi-node). */ |
| |
| if (def2 && def2 == phi |
| && (code == COND_EXPR |
| || !def1 || gimple_nop_p (def1) |
| || !flow_bb_inside_loop_p (loop, gimple_bb (def1)) |
| || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1)) |
| && (is_gimple_assign (def1) |
| || is_gimple_call (def1) |
| || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) |
| == vect_induction_def |
| || (gimple_code (def1) == GIMPLE_PHI |
| && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) |
| == vect_internal_def |
| && !is_loop_header_bb_p (gimple_bb (def1))))))) |
| { |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_NOTE, def_stmt, "detected reduction: "); |
| return def_stmt; |
| } |
| |
| if (def1 && def1 == phi |
| && (code == COND_EXPR |
| || !def2 || gimple_nop_p (def2) |
| || !flow_bb_inside_loop_p (loop, gimple_bb (def2)) |
| || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2)) |
| && (is_gimple_assign (def2) |
| || is_gimple_call (def2) |
| || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) |
| == vect_induction_def |
| || (gimple_code (def2) == GIMPLE_PHI |
| && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) |
| == vect_internal_def |
| && !is_loop_header_bb_p (gimple_bb (def2))))))) |
| { |
| if (check_reduction) |
| { |
| /* Swap operands (just for simplicity - so that the rest of the code |
| can assume that the reduction variable is always the last (second) |
| argument). */ |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_NOTE, def_stmt, |
| "detected reduction: need to swap operands: "); |
| |
| swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt), |
| gimple_assign_rhs2_ptr (def_stmt)); |
| |
| if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt))) |
| LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true; |
| } |
| else |
| { |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_NOTE, def_stmt, "detected reduction: "); |
| } |
| |
| return def_stmt; |
| } |
| |
| /* Try to find SLP reduction chain. */ |
| if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt)) |
| { |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_NOTE, def_stmt, |
| "reduction: detected reduction chain: "); |
| |
| return def_stmt; |
| } |
| |
| if (dump_enabled_p ()) |
| report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, |
| "reduction: unknown pattern: "); |
| |
| return NULL; |
| } |
| |
| /* Wrapper around vect_is_simple_reduction_1, that won't modify code |
| in-place. Arguments as there. */ |
| |
| static gimple |
| vect_is_simple_reduction (loop_vec_info loop_info, gimple phi, |
| bool check_reduction, bool *double_reduc) |
| { |
| return vect_is_simple_reduction_1 (loop_info, phi, check_reduction, |
| double_reduc, false); |
| } |
| |
| /* Wrapper around vect_is_simple_reduction_1, which will modify code |
| in-place if it enables detection of more reductions. Arguments |
| as there. */ |
| |
| gimple |
| vect_force_simple_reduction (loop_vec_info loop_info, gimple phi, |
| bool check_reduction, bool *double_reduc) |
| { |
| return vect_is_simple_reduction_1 (loop_info, phi, check_reduction, |
| double_reduc, true); |
| } |
| |
| /* Calculate the cost of one scalar iteration of the loop. */ |
| int |
| vect_get_single_scalar_iteration_cost (loop_vec_info loop_vinfo, |
| stmt_vector_for_cost *scalar_cost_vec) |
| { |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); |
| int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0; |
| int innerloop_iters, i; |
| |
| /* Count statements in scalar loop. Using this as scalar cost for a single |
| iteration for now. |
| |
| TODO: Add outer loop support. |
| |
| TODO: Consider assigning different costs to different scalar |
| statements. */ |
| |
| /* FORNOW. */ |
| innerloop_iters = 1; |
| if (loop->inner) |
| innerloop_iters = 50; /* FIXME */ |
| |
| for (i = 0; i < nbbs; i++) |
| { |
| gimple_stmt_iterator si; |
| basic_block bb = bbs[i]; |
| |
| if (bb->loop_father == loop->inner) |
| factor = innerloop_iters; |
| else |
| factor = 1; |
| |
| for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) |
| { |
| gimple stmt = gsi_stmt (si); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| |
| if (!is_gimple_assign (stmt) && !is_gimple_call (stmt)) |
| continue; |
| |
| /* Skip stmts that are not vectorized inside the loop. */ |
| if (stmt_info |
| && !STMT_VINFO_RELEVANT_P (stmt_info) |
| && (!STMT_VINFO_LIVE_P (stmt_info) |
| || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info))) |
| && !STMT_VINFO_IN_PATTERN_P (stmt_info)) |
| continue; |
| |
| vect_cost_for_stmt kind; |
| if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))) |
| { |
| if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))) |
| kind = scalar_load; |
| else |
| kind = scalar_store; |
| } |
| else |
| kind = scalar_stmt; |
| |
| scalar_single_iter_cost |
| += record_stmt_cost (scalar_cost_vec, factor, kind, |
| NULL, 0, vect_prologue); |
| } |
| } |
| return scalar_single_iter_cost; |
| } |
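| |
| /* For illustration: for a scalar loop body consisting of one load, one |
| add and one store, the function above records one scalar_load, one |
| scalar_stmt and one scalar_store per iteration (each weighted by |
| FACTOR for statements in an inner loop) and returns the sum of the |
| costs reported by record_stmt_cost. */ |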
| |
| /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */ |
| int |
| vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue, |
| int *peel_iters_epilogue, |
| stmt_vector_for_cost *scalar_cost_vec, |
| stmt_vector_for_cost *prologue_cost_vec, |
| stmt_vector_for_cost *epilogue_cost_vec) |
| { |
| int retval = 0; |
| int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| |
| if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) |
| { |
| *peel_iters_epilogue = vf/2; |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "cost model: epilogue peel iters set to vf/2 " |
| "because loop iterations are unknown.\n"); |
| |
| /* If peeled iterations are known but the number of scalar loop |
| iterations is unknown, count a taken branch per peeled loop. */ |
| retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken, |
| NULL, 0, vect_prologue); |
| retval += record_stmt_cost (epilogue_cost_vec, 1, cond_branch_taken, |
| NULL, 0, vect_epilogue); |
| } |
| else |
| { |
| int niters = LOOP_VINFO_INT_NITERS (loop_vinfo); |
| peel_iters_prologue = niters < peel_iters_prologue ? |
| niters : peel_iters_prologue; |
| *peel_iters_epilogue = (niters - peel_iters_prologue) % vf; |
| /* If we need to peel for gaps, but no epilogue peeling would otherwise |
| be required, we still have to peel VF iterations. */ |
| if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue) |
| *peel_iters_epilogue = vf; |
| } |
| |
| stmt_info_for_cost *si; |
| int j; |
| if (peel_iters_prologue) |
| FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si) |
| retval += record_stmt_cost (prologue_cost_vec, |
| si->count * peel_iters_prologue, |
| si->kind, NULL, si->misalign, |
| vect_prologue); |
| if (*peel_iters_epilogue) |
| FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si) |
| retval += record_stmt_cost (epilogue_cost_vec, |
| si->count * *peel_iters_epilogue, |
| si->kind, NULL, si->misalign, |
| vect_epilogue); |
| |
| return retval; |
| } |
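| |
| /* For illustration: with a vectorization factor of 4, a known trip |
| count of 22 and a prologue peel of 3 iterations, the epilogue peel |
| count is (22 - 3) % 4 = 3, and the scalar per-iteration costs are |
| added 3 times each to both the prologue and the epilogue cost |
| vectors. */ |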
| |
| /* Function vect_estimate_min_profitable_iters |
| |
| Return the number of iterations required for the vector version of the |
| loop to be profitable relative to the cost of the scalar version of the |
| loop. */ |
| |
| static void |
| vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo, |
| int *ret_min_profitable_niters, |
| int *ret_min_profitable_estimate) |
| { |
| int min_profitable_iters; |
| int min_profitable_estimate; |
| int peel_iters_prologue; |
| int peel_iters_epilogue; |
| unsigned vec_inside_cost = 0; |
| int vec_outside_cost = 0; |
| unsigned vec_prologue_cost = 0; |
| unsigned vec_epilogue_cost = 0; |
| int scalar_single_iter_cost = 0; |
| int scalar_outside_cost = 0; |
| int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo); |
| void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); |
| |
| /* Cost model disabled. */ |
| if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo))) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n"); |
| *ret_min_profitable_niters = 0; |
| *ret_min_profitable_estimate = 0; |
| return; |
| } |
| |
| /* Requires loop versioning tests to handle misalignment. */ |
| if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)) |
| { |
| /* FIXME: Make cost depend on complexity of individual check. */ |
| unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length (); |
| (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0, |
| vect_prologue); |
| dump_printf (MSG_NOTE, |
| "cost model: Adding cost of checks for loop " |
| "versioning to treat misalignment.\n"); |
| } |
| |
| /* Requires loop versioning with alias checks. */ |
| if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)) |
| { |
| /* FIXME: Make cost depend on complexity of individual check. */ |
| unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length (); |
| (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0, |
| vect_prologue); |
| dump_printf (MSG_NOTE, |
| "cost model: Adding cost of checks for loop " |
| "versioning aliasing.\n"); |
| } |
| |
| if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo) |
| || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)) |
| (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0, |
| vect_prologue); |
| |
| /* Get the cost of one scalar iteration of the loop; the statement |
| counting itself is done in vect_get_single_scalar_iteration_cost. */ |
| |
| auto_vec<stmt_info_for_cost> scalar_cost_vec; |
| scalar_single_iter_cost |
| = vect_get_single_scalar_iteration_cost (loop_vinfo, &scalar_cost_vec); |
| |
| /* Add additional cost for the peeled instructions in prologue and epilogue |
| loop. |
| |
| FORNOW: If we don't know the value of peel_iters for prologue or epilogue |
| at compile time, we assume it's vf/2 (the worst case would be vf-1). |
| |
| TODO: Build an expression that represents peel_iters for prologue and |
| epilogue to be used in a run-time test. */ |
| |
| if (npeel < 0) |
| { |
| peel_iters_prologue = vf/2; |
| dump_printf (MSG_NOTE, "cost model: " |
| "prologue peel iters set to vf/2.\n"); |
| |
| /* If peeling for alignment is unknown, the loop bound of the main loop |
| becomes unknown. */ |
| peel_iters_epilogue = vf/2; |
| dump_printf (MSG_NOTE, "cost model: " |
| "epilogue peel iters set to vf/2 because " |
| "peeling for alignment is unknown.\n"); |
| |
| /* If peeled iterations are unknown, count a taken branch and a not taken |
| branch per peeled loop. Even if scalar loop iterations are known, |
| vector iterations are not known since peeled prologue iterations are |
| not known. Hence guards remain the same. */ |
| (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, |
| NULL, 0, vect_prologue); |
| (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken, |
| NULL, 0, vect_prologue); |
| (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, |
| NULL, 0, vect_epilogue); |
| (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken, |
| NULL, 0, vect_epilogue); |
| stmt_info_for_cost *si; |
| int j; |
| FOR_EACH_VEC_ELT (scalar_cost_vec, j, si) |
| { |
| struct _stmt_vec_info *stmt_info |
| = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; |
| (void) add_stmt_cost (target_cost_data, |
| si->count * peel_iters_prologue, |
| si->kind, stmt_info, si->misalign, |
| vect_prologue); |
| (void) add_stmt_cost (target_cost_data, |
| si->count * peel_iters_epilogue, |
| si->kind, stmt_info, si->misalign, |
| vect_epilogue); |
| } |
| } |
| else |
| { |
| stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec; |
| stmt_info_for_cost *si; |
| int j; |
| void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); |
| |
| prologue_cost_vec.create (2); |
| epilogue_cost_vec.create (2); |
| peel_iters_prologue = npeel; |
| |
| (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue, |
| &peel_iters_epilogue, |
| &scalar_cost_vec, |
| &prologue_cost_vec, |
| &epilogue_cost_vec); |
| |
| FOR_EACH_VEC_ELT (prologue_cost_vec, j, si) |
| { |
| struct _stmt_vec_info *stmt_info |
| = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; |
| (void) add_stmt_cost (data, si->count, si->kind, stmt_info, |
| si->misalign, vect_prologue); |
| } |
| |
| FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si) |
| { |
| struct _stmt_vec_info *stmt_info |
| = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; |
| (void) add_stmt_cost (data, si->count, si->kind, stmt_info, |
| si->misalign, vect_epilogue); |
| } |
| |
| prologue_cost_vec.release (); |
| epilogue_cost_vec.release (); |
| } |
| |
| /* FORNOW: The scalar outside cost is incremented in one of the |
| following ways: |
| |
| 1. The vectorizer checks for alignment and aliasing and generates |
| a condition that allows dynamic vectorization. A cost model |
| check is ANDed with the versioning condition. Hence the scalar code |
| path now has the added cost of the versioning check. |
| |
| if (cost > th & versioning_check) |
| jmp to vector code |
| |
| Hence the run-time scalar cost is incremented by a not-taken branch cost. |
| |
| 2. The vectorizer then checks if a prologue is required. If the |
| cost model check was not done before during versioning, it has to |
| be done before the prologue check. |
| |
| if (cost <= th) |
| prologue = scalar_iters |
| if (prologue == 0) |
| jmp to vector code |
| else |
| execute prologue |
| if (prologue == num_iters) |
| go to exit |
| |
| Hence the run-time scalar cost is incremented by a taken branch, |
| plus a not-taken branch, plus a taken branch cost. |
| |
| 3. The vectorizer then checks if an epilogue is required. If the |
| cost model check was not done before during prologue check, it |
| has to be done with the epilogue check. |
| |
| if (prologue == 0) |
| jmp to vector code |
| else |
| execute prologue |
| if (prologue == num_iters) |
| go to exit |
| vector code: |
| if ((cost <= th) | (scalar_iters-prologue-epilogue == 0)) |
| jmp to epilogue |
| |
| Hence the run-time scalar cost should be incremented by 2 taken |
| branches. |
| |
| TODO: The back end may reorder the BBS's differently and reverse |
| conditions/branch directions. Change the estimates below to |
| something more reasonable. */ |
| |
| /* If the number of iterations is known and we do not do versioning, we can |
| decide whether to vectorize at compile time. Hence the scalar version |
| does not carry cost model guard costs. */ |
| if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
| || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo) |
| || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)) |
| { |
| /* Cost model check occurs at versioning. */ |
| if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo) |
| || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)) |
| scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken); |
| else |
| { |
| /* Cost model check occurs at prologue generation. */ |
| if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0) |
| scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken) |
| + vect_get_stmt_cost (cond_branch_not_taken); |
| /* Cost model check occurs at epilogue generation. */ |
| else |
| scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken); |
| } |
| } |
| |
| /* Complete the target-specific cost calculations. */ |
| finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost, |
| &vec_inside_cost, &vec_epilogue_cost); |
| |
| vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost); |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n"); |
| dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n", |
| vec_inside_cost); |
| dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", |
| vec_prologue_cost); |
| dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", |
| vec_epilogue_cost); |
| dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n", |
| scalar_single_iter_cost); |
| dump_printf (MSG_NOTE, " Scalar outside cost: %d\n", |
| scalar_outside_cost); |
| dump_printf (MSG_NOTE, " Vector outside cost: %d\n", |
| vec_outside_cost); |
| dump_printf (MSG_NOTE, " prologue iterations: %d\n", |
| peel_iters_prologue); |
| dump_printf (MSG_NOTE, " epilogue iterations: %d\n", |
| peel_iters_epilogue); |
| } |
| |
| /* Calculate number of iterations required to make the vector version |
| profitable, relative to the loop bodies only. The following condition |
| must hold true: |
| SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC |
| where |
| SIC = scalar iteration cost, VIC = vector iteration cost, |
| VOC = vector outside cost, VF = vectorization factor, |
| PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations |
| SOC = scalar outside cost for run time cost model check. */ |
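| |
| /* For illustration: with SIC = 4, VIC = 6, VF = 4, VOC = 14 and |
| SOC = PL_ITERS = EP_ITERS = 0, the condition becomes |
| 4 * niters > (6 * niters) / 4 + 14, i.e. niters > 5.6, so the |
| vector version starts to win at 6 iterations. */ |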
| |
| if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost) |
| { |
| if (vec_outside_cost <= 0) |
| min_profitable_iters = 1; |
| else |
| { |
| min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf |
| - vec_inside_cost * peel_iters_prologue |
| - vec_inside_cost * peel_iters_epilogue) |
| / ((scalar_single_iter_cost * vf) |
| - vec_inside_cost); |
| |
| if ((scalar_single_iter_cost * vf * min_profitable_iters) |
| <= (((int) vec_inside_cost * min_profitable_iters) |
| + (((int) vec_outside_cost - scalar_outside_cost) * vf))) |
| min_profitable_iters++; |
| } |
| } |
| /* vector version will never be profitable. */ |
| else |
| { |
| if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize) |
| warning_at (vect_location, OPT_Wopenmp_simd, "vectorization " |
| "did not happen for a simd loop"); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "cost model: the vector iteration cost = %d " |
| "divided by the scalar iteration cost = %d " |
| "is greater than or equal to the vectorization factor = %d" |
| ".\n", |
| vec_inside_cost, scalar_single_iter_cost, vf); |
| *ret_min_profitable_niters = -1; |
| *ret_min_profitable_estimate = -1; |
| return; |
| } |
| |
| dump_printf (MSG_NOTE, |
| " Calculated minimum iters for profitability: %d\n", |
| min_profitable_iters); |
| |
| min_profitable_iters = |
| min_profitable_iters < vf ? vf : min_profitable_iters; |
| |
| /* Because the condition we create is: |
| if (niters <= min_profitable_iters) |
| then skip the vectorized loop. */ |
| min_profitable_iters--; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| " Runtime profitability threshold = %d\n", |
| min_profitable_iters); |
| |
| *ret_min_profitable_niters = min_profitable_iters; |
| |
| /* Calculate number of iterations required to make the vector version |
| profitable, relative to the loop bodies only. |
| |
| The non-vectorized variant costs SIC * niters and it must win over the |
| vector variant on the expected loop trip count. The following condition |
| must hold true: |
| SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */ |
| |
| if (vec_outside_cost <= 0) |
| min_profitable_estimate = 1; |
| else |
| { |
| min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf |
| - vec_inside_cost * peel_iters_prologue |
| - vec_inside_cost * peel_iters_epilogue) |
| / ((scalar_single_iter_cost * vf) |
| - vec_inside_cost); |
| } |
| min_profitable_estimate--; |
| min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| " Static estimate profitability threshold = %d\n", |
| min_profitable_estimate); |
| |
| *ret_min_profitable_estimate = min_profitable_estimate; |
| } |
| |
| /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET |
| vector elements (not bits) for a vector of mode MODE. */ |
| static void |
| calc_vec_perm_mask_for_shift (enum machine_mode mode, unsigned int offset, |
| unsigned char *sel) |
| { |
| unsigned int i, nelt = GET_MODE_NUNITS (mode); |
| |
| for (i = 0; i < nelt; i++) |
| sel[i] = (i + offset) & (2*nelt - 1); |
| } |
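| |
| /* For illustration: for a 4-element vector mode and OFFSET 1 the mask |
| becomes { 1, 2, 3, 4 }, i.e. elements 1..3 of the first input vector |
| followed by element 0 of the second input vector, which is the |
| element-wise equivalent of shifting the whole vector by one |
| element. */ |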
| |
| /* Checks whether the target supports whole-vector shifts for vectors of mode |
| MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_ |
| it supports vec_perm_const with masks for all necessary shift amounts. */ |
| static bool |
| have_whole_vector_shift (enum machine_mode mode) |
| { |
| if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing) |
| return true; |
| |
| if (direct_optab_handler (vec_perm_const_optab, mode) == CODE_FOR_nothing) |
| return false; |
| |
| unsigned int i, nelt = GET_MODE_NUNITS (mode); |
| unsigned char *sel = XALLOCAVEC (unsigned char, nelt); |
| |
| for (i = nelt/2; i >= 1; i/=2) |
| { |
| calc_vec_perm_mask_for_shift (mode, i, sel); |
| if (!can_vec_perm_p (mode, false, sel)) |
| return false; |
| } |
| return true; |
| } |
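| |
| /* For illustration: for an 8-element vector the loop above checks the |
| permutation masks for shifts by 4, 2 and 1 elements, which are the |
| shift amounts needed when a reduction epilogue repeatedly halves the |
| vector. */ |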
| |
| /* TODO: Close the dependency between vect_model_*_cost and vectorizable_* |
| functions. A better design is needed to avoid maintenance issues. */ |
| |
| /* Function vect_model_reduction_cost. |
| |
| Models cost for a reduction operation, including the vector ops |
| generated within the strip-mine loop, the initial definition before |
| the loop, and the epilogue code that must be generated. */ |
| |
| static bool |
| vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code, |
| int ncopies) |
| { |
| int prologue_cost = 0, epilogue_cost = 0; |
| enum tree_code code; |
| optab optab; |
| tree vectype; |
| gimple stmt, orig_stmt; |
| tree reduction_op; |
| machine_mode mode; |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); |
| |
| /* Cost of reduction op inside loop. */ |
| unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt, |
| stmt_info, 0, vect_body); |
| stmt = STMT_VINFO_STMT (stmt_info); |
| |
| switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))) |
| { |
| case GIMPLE_SINGLE_RHS: |
| gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op); |
| reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2); |
| break; |
| case GIMPLE_UNARY_RHS: |
| reduction_op = gimple_assign_rhs1 (stmt); |
| break; |
| case GIMPLE_BINARY_RHS: |
| reduction_op = gimple_assign_rhs2 (stmt); |
| break; |
| case GIMPLE_TERNARY_RHS: |
| reduction_op = gimple_assign_rhs3 (stmt); |
| break; |
| default: |
| gcc_unreachable (); |
| } |
| |
| vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op)); |
| if (!vectype) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "unsupported data-type "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| TREE_TYPE (reduction_op)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| return false; |
| } |
| |
| mode = TYPE_MODE (vectype); |
| orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info); |
| |
| if (!orig_stmt) |
| orig_stmt = STMT_VINFO_STMT (stmt_info); |
| |
| code = gimple_assign_rhs_code (orig_stmt); |
| |
| /* Add in cost for initial definition. */ |
| prologue_cost += add_stmt_cost (target_cost_data, 1, scalar_to_vec, |
| stmt_info, 0, vect_prologue); |
| |
| /* Determine cost of epilogue code. |
| |
| We have a reduction operator that will reduce the vector in one statement. |
| It also requires a scalar extract. */ |
| |
| if (!nested_in_vect_loop_p (loop, orig_stmt)) |
| { |
| if (reduc_code != ERROR_MARK) |
| { |
| epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt, |
| stmt_info, 0, vect_epilogue); |
| epilogue_cost += add_stmt_cost (target_cost_data, 1, vec_to_scalar, |
| stmt_info, 0, vect_epilogue); |
| } |
| else |
| { |
| int |