| /* Loop Vectorization |
| Copyright (C) 2003-2021 Free Software Foundation, Inc. |
| Contributed by Dorit Naishlos <dorit@il.ibm.com> and |
| Ira Rosen <irar@il.ibm.com> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #define INCLUDE_ALGORITHM |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "cfghooks.h" |
| #include "tree-pass.h" |
| #include "ssa.h" |
| #include "optabs-tree.h" |
| #include "diagnostic-core.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "cfganal.h" |
| #include "gimplify.h" |
| #include "gimple-iterator.h" |
| #include "gimplify-me.h" |
| #include "tree-ssa-loop-ivopts.h" |
| #include "tree-ssa-loop-manip.h" |
| #include "tree-ssa-loop-niter.h" |
| #include "tree-ssa-loop.h" |
| #include "cfgloop.h" |
| #include "tree-scalar-evolution.h" |
| #include "tree-vectorizer.h" |
| #include "gimple-fold.h" |
| #include "cgraph.h" |
| #include "tree-cfg.h" |
| #include "tree-if-conv.h" |
| #include "internal-fn.h" |
| #include "tree-vector-builder.h" |
| #include "vec-perm-indices.h" |
| #include "tree-eh.h" |
| |
| /* Loop Vectorization Pass. |
| |
| This pass tries to vectorize loops. |
| |
| For example, the vectorizer transforms the following simple loop: |
| |
| short a[N]; short b[N]; short c[N]; int i; |
| |
| for (i=0; i<N; i++){ |
| a[i] = b[i] + c[i]; |
| } |
| |
| as if it had been manually vectorized by rewriting the source code into: |
| |
| typedef int __attribute__((mode(V8HI))) v8hi; |
| short a[N]; short b[N]; short c[N]; int i; |
| v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c; |
| v8hi va, vb, vc; |
| |
| for (i=0; i<N/8; i++){ |
| vb = pb[i]; |
| vc = pc[i]; |
| va = vb + vc; |
| pa[i] = va; |
| } |
| |
| The main entry to this pass is vectorize_loops(), in which |
| the vectorizer applies a set of analyses on a given set of loops, |
| followed by the actual vectorization transformation for the loops that |
| have successfully passed the analysis phase. |
| Throughout this pass we make a distinction between two types of |
| data: scalars (which are represented by SSA_NAMES), and memory references |
| ("data-refs"). These two types of data require different handling both |
| during analysis and transformation. The types of data-refs that the |
| vectorizer currently supports are ARRAY_REFS whose base is an array DECL |
| (not a pointer), and INDIRECT_REFS through pointers; both array and pointer |
| accesses are required to have a simple (consecutive) access pattern. |
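| |
| As an illustrative sketch only (not an exhaustive description of what is |
| handled), the accesses in the following loop are the kind of simple |
| consecutive data-refs described above: a[i] is an ARRAY_REF whose base is |
| the array DECL 'a', and *p is an access through a pointer; both advance |
| by one element per iteration: |
| |
| short a[N]; short *p; |
| for (i=0; i<N; i++, p++){ |
| a[i] = *p + 1; |
| } |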
| |
| Analysis phase: |
| =============== |
| The driver for the analysis phase is vect_analyze_loop(). |
| It applies a set of analyses, some of which rely on the scalar evolution |
| analyzer (scev) developed by Sebastian Pop. |
| |
| During the analysis phase the vectorizer records some information |
| per stmt in a "stmt_vec_info" struct which is attached to each stmt in the |
| loop, as well as general information about the loop as a whole, which is |
| recorded in a "loop_vec_info" struct attached to each loop. |
| |
| Transformation phase: |
| ===================== |
| The loop transformation phase scans all the stmts in the loop, and |
| creates a vector stmt (or a sequence of stmts) for each scalar stmt S in |
| the loop that needs to be vectorized. It inserts the vector code sequence |
| just before the scalar stmt S, and records a pointer to the vector code |
| in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct |
| attached to S). This pointer will be used for the vectorization of following |
| stmts which use the def of stmt S. Stmt S is removed if it writes to memory; |
| otherwise, we rely on dead code elimination for removing it. |
| |
| For example, say stmt S1 was vectorized into stmt VS1: |
| |
| VS1: vb = px[i]; |
| S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1 |
| S2: a = b; |
| |
| To vectorize stmt S2, the vectorizer first finds the stmt that defines |
| the operand 'b' (S1), and gets the relevant vector def 'vb' from the |
| vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The |
| resulting sequence would be: |
| |
| VS1: vb = px[i]; |
| S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1 |
| VS2: va = vb; |
| S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2 |
| |
| Operands that are not SSA_NAMEs are data-refs that appear in |
| load/store operations (like 'x[i]' in S1), and are handled differently. |
| |
| Target modeling: |
| ================= |
| Currently the only target-specific information that is used is the |
| size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". |
| Targets that can support different sizes of vectors will, for now, need |
| to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More |
| flexibility will be added in the future. |
| |
| Since we only vectorize operations whose vector form can be |
| expressed using existing tree codes, to verify that an operation is |
| supported, the vectorizer checks the relevant optab at the relevant |
| machine_mode (e.g., optab_handler (add_optab, V8HImode)). If |
| the value found is CODE_FOR_nothing, then there's no target support, and |
| we can't vectorize the stmt. |
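| |
| As a rough sketch of that check (for illustration only; the real checks |
| are done per statement by the vectorizer), deciding whether an addition |
| of eight shorts has target support looks like: |
| |
| if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing) |
| return false; |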
| |
| For additional information on this project see: |
| http://gcc.gnu.org/projects/tree-ssa/vectorization.html |
| */ |
| |
| static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *); |
| static stmt_vec_info vect_is_simple_reduction (loop_vec_info, stmt_vec_info, |
| bool *, bool *); |
| |
| /* Subroutine of vect_determine_vf_for_stmt that handles only one |
| statement. VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE |
| may already be set for general statements (not just data refs). */ |
| |
| static opt_result |
| vect_determine_vf_for_stmt_1 (vec_info *vinfo, stmt_vec_info stmt_info, |
| bool vectype_maybe_set_p, |
| poly_uint64 *vf) |
| { |
| gimple *stmt = stmt_info->stmt; |
| |
| if ((!STMT_VINFO_RELEVANT_P (stmt_info) |
| && !STMT_VINFO_LIVE_P (stmt_info)) |
| || gimple_clobber_p (stmt)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "skip.\n"); |
| return opt_result::success (); |
| } |
| |
| tree stmt_vectype, nunits_vectype; |
| opt_result res = vect_get_vector_types_for_stmt (vinfo, stmt_info, |
| &stmt_vectype, |
| &nunits_vectype); |
| if (!res) |
| return res; |
| |
| if (stmt_vectype) |
| { |
| if (STMT_VINFO_VECTYPE (stmt_info)) |
| /* The only cases in which a vectype has already been set are for stmts |
| that contain a data ref, or for "pattern-stmts" (stmts generated |
| by the vectorizer to represent/replace a certain idiom). */ |
| gcc_assert ((STMT_VINFO_DATA_REF (stmt_info) |
| || vectype_maybe_set_p) |
| && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype); |
| else |
| STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype; |
| } |
| |
| if (nunits_vectype) |
| vect_update_max_nunits (vf, nunits_vectype); |
| |
| return opt_result::success (); |
| } |
| |
| /* Subroutine of vect_determine_vectorization_factor. Set the vector |
| types of STMT_INFO and all attached pattern statements and update |
| the vectorization factor VF accordingly. Return true on success |
| or false if something prevented vectorization. */ |
| |
| static opt_result |
| vect_determine_vf_for_stmt (vec_info *vinfo, |
| stmt_vec_info stmt_info, poly_uint64 *vf) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G", |
| stmt_info->stmt); |
| opt_result res = vect_determine_vf_for_stmt_1 (vinfo, stmt_info, false, vf); |
| if (!res) |
| return res; |
| |
| if (STMT_VINFO_IN_PATTERN_P (stmt_info) |
| && STMT_VINFO_RELATED_STMT (stmt_info)) |
| { |
| gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info); |
| stmt_info = STMT_VINFO_RELATED_STMT (stmt_info); |
| |
| /* If a pattern statement has def stmts, analyze them too. */ |
| for (gimple_stmt_iterator si = gsi_start (pattern_def_seq); |
| !gsi_end_p (si); gsi_next (&si)) |
| { |
| stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si)); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "==> examining pattern def stmt: %G", |
| def_stmt_info->stmt); |
| res = vect_determine_vf_for_stmt_1 (vinfo, def_stmt_info, true, vf); |
| if (!res) |
| return res; |
| } |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "==> examining pattern statement: %G", |
| stmt_info->stmt); |
| res = vect_determine_vf_for_stmt_1 (vinfo, stmt_info, true, vf); |
| if (!res) |
| return res; |
| } |
| |
| return opt_result::success (); |
| } |
| |
| /* Function vect_determine_vectorization_factor |
| |
| Determine the vectorization factor (VF). VF is the number of data elements |
| that are operated upon in parallel in a single iteration of the vectorized |
| loop. For example, when vectorizing a loop that operates on 4-byte elements, |
| on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4 |
| elements can fit in a single vector register. |
| |
| We currently support vectorization of loops in which all types operated upon |
| are of the same size. Therefore this function currently sets VF according to |
| the size of the types operated upon, and fails if there are multiple sizes |
| in the loop. |
| |
| VF is also the factor by which the loop iterations are strip-mined, e.g.: |
| original loop: |
| for (i=0; i<N; i++){ |
| a[i] = b[i] + c[i]; |
| } |
| |
| vectorized loop: |
| for (i=0; i<N; i+=VF){ |
| a[i:VF] = b[i:VF] + c[i:VF]; |
| } |
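| |
| As a worked example, in the introductory loop at the top of this file the |
| elements are 2-byte shorts and the vector type is V8HI (16 bytes), so |
| VF = 16/2 = 8: each vector iteration handles 8 elements, which is why the |
| rewritten loop in the introductory example iterates N/8 times. |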
| */ |
| |
| static opt_result |
| vect_determine_vectorization_factor (loop_vec_info loop_vinfo) |
| { |
| class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); |
| unsigned nbbs = loop->num_nodes; |
| poly_uint64 vectorization_factor = 1; |
| tree scalar_type = NULL_TREE; |
| gphi *phi; |
| tree vectype; |
| stmt_vec_info stmt_info; |
| unsigned i; |
| |
| DUMP_VECT_SCOPE ("vect_determine_vectorization_factor"); |
| |
| for (i = 0; i < nbbs; i++) |
| { |
| basic_block bb = bbs[i]; |
| |
| for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); |
| gsi_next (&si)) |
| { |
| phi = si.phi (); |
| stmt_info = loop_vinfo->lookup_stmt (phi); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: %G", |
| phi); |
| |
| gcc_assert (stmt_info); |
| |
| if (STMT_VINFO_RELEVANT_P (stmt_info) |
| || STMT_VINFO_LIVE_P (stmt_info)) |
| { |
| gcc_assert (!STMT_VINFO_VECTYPE (stmt_info)); |
| scalar_type = TREE_TYPE (PHI_RESULT (phi)); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "get vectype for scalar type: %T\n", |
| scalar_type); |
| |
| vectype = get_vectype_for_scalar_type (loop_vinfo, scalar_type); |
| if (!vectype) |
| return opt_result::failure_at (phi, |
| "not vectorized: unsupported " |
| "data-type %T\n", |
| scalar_type); |
| STMT_VINFO_VECTYPE (stmt_info) = vectype; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n", |
| vectype); |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "nunits = "); |
| dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype)); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| vect_update_max_nunits (&vectorization_factor, vectype); |
| } |
| } |
| |
| for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); |
| gsi_next (&si)) |
| { |
| if (is_gimple_debug (gsi_stmt (si))) |
| continue; |
| stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si)); |
| opt_result res |
| = vect_determine_vf_for_stmt (loop_vinfo, |
| stmt_info, &vectorization_factor); |
| if (!res) |
| return res; |
| } |
| } |
| |
| /* TODO: Analyze cost. Decide if worth while to vectorize. */ |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = "); |
| dump_dec (MSG_NOTE, vectorization_factor); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| if (known_le (vectorization_factor, 1U)) |
| return opt_result::failure_at (vect_location, |
| "not vectorized: unsupported data-type\n"); |
| LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor; |
| return opt_result::success (); |
| } |
| |
| |
| /* Function vect_is_simple_iv_evolution. |
| |
| FORNOW: A simple evolution of an induction variable in the loop is a |
| degree-1 (affine) polynomial evolution whose step is invariant in the |
| loop. */ |
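| |
| /* For example (an illustrative case only), "i" in "for (i = 0; i < n; i += 4)" |
| has the simple evolution {0, +, 4}, whereas an IV whose step is itself |
| updated inside the loop yields a higher-degree chrec and is rejected. */ |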
| |
| static bool |
| vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init, |
| tree * step) |
| { |
| tree init_expr; |
| tree step_expr; |
| tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb); |
| basic_block bb; |
| |
| /* When there is no evolution in this loop, the evolution function |
| is not "simple". */ |
| if (evolution_part == NULL_TREE) |
| return false; |
| |
| /* When the evolution is a polynomial of degree >= 2 |
| the evolution function is not "simple". */ |
| if (tree_is_chrec (evolution_part)) |
| return false; |
| |
| step_expr = evolution_part; |
| init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb)); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "step: %T, init: %T\n", |
| step_expr, init_expr); |
| |
| *init = init_expr; |
| *step = step_expr; |
| |
| if (TREE_CODE (step_expr) != INTEGER_CST |
| && (TREE_CODE (step_expr) != SSA_NAME |
| || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr))) |
| && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb)) |
| || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr)) |
| && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)) |
| || !flag_associative_math))) |
| && (TREE_CODE (step_expr) != REAL_CST |
| || !flag_associative_math)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "step unknown.\n"); |
| return false; |
| } |
| |
| return true; |
| } |
| |
| /* Return true if PHI, described by STMT_INFO, is the inner PHI in |
| what we are assuming is a double reduction. For example, given |
| a structure like this: |
| |
| outer1: |
| x_1 = PHI <x_4(outer2), ...>; |
| ... |
| |
| inner: |
| x_2 = PHI <x_1(outer1), ...>; |
| ... |
| x_3 = ...; |
| ... |
| |
| outer2: |
| x_4 = PHI <x_3(inner)>; |
| ... |
| |
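| A source-level loop nest of roughly this shape (an illustrative sketch |
| only) typically gives rise to such a structure when the outer loop is |
| analyzed: |
| |
| for (i = 0; i < N; i++) <- outer1/outer2 |
| for (j = 0; j < M; j++) <- inner |
| sum += a[i][j]; <- x_3 = x_2 + a[i][j] |
| |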
| outer loop analysis would treat x_1 as a double reduction phi and |
| this function would then return true for x_2. */ |
| |
| static bool |
| vect_inner_phi_in_double_reduction_p (loop_vec_info loop_vinfo, gphi *phi) |
| { |
| use_operand_p use_p; |
| ssa_op_iter op_iter; |
| FOR_EACH_PHI_ARG (use_p, phi, op_iter, SSA_OP_USE) |
| if (stmt_vec_info def_info = loop_vinfo->lookup_def (USE_FROM_PTR (use_p))) |
| if (STMT_VINFO_DEF_TYPE (def_info) == vect_double_reduction_def) |
| return true; |
| return false; |
| } |
| |
| /* Function vect_analyze_scalar_cycles_1. |
| |
| Examine the cross iteration def-use cycles of scalar variables |
| in LOOP. LOOP_VINFO represents the loop that is now being |
| considered for vectorization (can be LOOP, or an outer-loop |
| enclosing LOOP). */ |
| |
| static void |
| vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop) |
| { |
| basic_block bb = loop->header; |
| tree init, step; |
| auto_vec<stmt_vec_info, 64> worklist; |
| gphi_iterator gsi; |
| bool double_reduc, reduc_chain; |
| |
| DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles"); |
| |
| /* First - identify all inductions. Reduction detection assumes that all the |
| inductions have been identified; therefore, this order must not be |
| changed. */ |
| for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gphi *phi = gsi.phi (); |
| tree access_fn = NULL; |
| tree def = PHI_RESULT (phi); |
| stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi); |
| |
| /* Skip virtual phi's. The data dependences that are associated with |
| virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */ |
| if (virtual_operand_p (def)) |
| continue; |
| |
| STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type; |
| |
| /* Analyze the evolution function. */ |
| access_fn = analyze_scalar_evolution (loop, def); |
| if (access_fn) |
| { |
| STRIP_NOPS (access_fn); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Access function of PHI: %T\n", access_fn); |
| STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo) |
| = initial_condition_in_loop_num (access_fn, loop->num); |
| STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) |
| = evolution_part_in_loop_num (access_fn, loop->num); |
| } |
| |
| if (!access_fn |
| || vect_inner_phi_in_double_reduction_p (loop_vinfo, phi) |
| || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step) |
| || (LOOP_VINFO_LOOP (loop_vinfo) != loop |
| && TREE_CODE (step) != INTEGER_CST)) |
| { |
| worklist.safe_push (stmt_vinfo); |
| continue; |
| } |
| |
| gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo) |
| != NULL_TREE); |
| gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n"); |
| STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def; |
| } |
| |
| |
| /* Second - identify all reductions and nested cycles. */ |
| while (worklist.length () > 0) |
| { |
| stmt_vec_info stmt_vinfo = worklist.pop (); |
| gphi *phi = as_a <gphi *> (stmt_vinfo->stmt); |
| tree def = PHI_RESULT (phi); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi); |
| |
| gcc_assert (!virtual_operand_p (def) |
| && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type); |
| |
| stmt_vec_info reduc_stmt_info |
| = vect_is_simple_reduction (loop_vinfo, stmt_vinfo, &double_reduc, |
| &reduc_chain); |
| if (reduc_stmt_info) |
| { |
| STMT_VINFO_REDUC_DEF (stmt_vinfo) = reduc_stmt_info; |
| STMT_VINFO_REDUC_DEF (reduc_stmt_info) = stmt_vinfo; |
| if (double_reduc) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Detected double reduction.\n"); |
| |
| STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def; |
| STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_double_reduction_def; |
| } |
| else |
| { |
| if (loop != LOOP_VINFO_LOOP (loop_vinfo)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Detected vectorizable nested cycle.\n"); |
| |
| STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle; |
| } |
| else |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Detected reduction.\n"); |
| |
| STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def; |
| STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def; |
| /* Store the reduction cycles for possible vectorization in |
| loop-aware SLP if it was not detected as reduction |
| chain. */ |
| if (! reduc_chain) |
| LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push |
| (reduc_stmt_info); |
| } |
| } |
| } |
| else |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Unknown def-use cycle pattern.\n"); |
| } |
| } |
| |
| |
| /* Function vect_analyze_scalar_cycles. |
| |
| Examine the cross iteration def-use cycles of scalar variables, by |
| analyzing the loop-header PHIs of scalar variables. Classify each |
| cycle as one of the following: invariant, induction, reduction, unknown. |
| We do that for the loop represented by LOOP_VINFO, and also for its |
| inner loop, if it exists. |
| Examples for scalar cycles: |
| |
| Example1: reduction: |
| |
| loop1: |
| for (i=0; i<N; i++) |
| sum += a[i]; |
| |
| Example2: induction: |
| |
| loop2: |
| for (i=0; i<N; i++) |
| a[i] = i; */ |
| |
| static void |
| vect_analyze_scalar_cycles (loop_vec_info loop_vinfo) |
| { |
| class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| vect_analyze_scalar_cycles_1 (loop_vinfo, loop); |
| |
| /* When vectorizing an outer-loop, the inner-loop is executed sequentially. |
| Reductions in such an inner loop therefore have different properties from |
| the reductions in the nest that gets vectorized: |
| 1. When vectorized, they are executed in the same order as in the original |
| scalar loop, so we can't change the order of computation when |
| vectorizing them. |
| 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the |
| current checks are too strict. */ |
| |
| if (loop->inner) |
| vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner); |
| } |
| |
| /* Transfer group and reduction information from STMT_INFO to its |
| pattern stmt. */ |
| |
| static void |
| vect_fixup_reduc_chain (stmt_vec_info stmt_info) |
| { |
| stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info); |
| stmt_vec_info stmtp; |
| gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp) |
| && REDUC_GROUP_FIRST_ELEMENT (stmt_info)); |
| REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info); |
| do |
| { |
| stmtp = STMT_VINFO_RELATED_STMT (stmt_info); |
| gcc_checking_assert (STMT_VINFO_DEF_TYPE (stmtp) |
| == STMT_VINFO_DEF_TYPE (stmt_info)); |
| REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp; |
| stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info); |
| if (stmt_info) |
| REDUC_GROUP_NEXT_ELEMENT (stmtp) |
| = STMT_VINFO_RELATED_STMT (stmt_info); |
| } |
| while (stmt_info); |
| } |
| |
| /* Fixup scalar cycles that now have their stmts detected as patterns. */ |
| |
| static void |
| vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo) |
| { |
| stmt_vec_info first; |
| unsigned i; |
| |
| FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first) |
| { |
| stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first); |
| while (next) |
| { |
| if ((STMT_VINFO_IN_PATTERN_P (next) |
| != STMT_VINFO_IN_PATTERN_P (first)) |
| || STMT_VINFO_REDUC_IDX (vect_stmt_to_vectorize (next)) == -1) |
| break; |
| next = REDUC_GROUP_NEXT_ELEMENT (next); |
| } |
| /* If all reduction chain members are well-formed patterns adjust |
| the group to group the pattern stmts instead. */ |
| if (! next |
| && STMT_VINFO_REDUC_IDX (vect_stmt_to_vectorize (first)) != -1) |
| { |
| if (STMT_VINFO_IN_PATTERN_P (first)) |
| { |
| vect_fixup_reduc_chain (first); |
| LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i] |
| = STMT_VINFO_RELATED_STMT (first); |
| } |
| } |
| /* If not all stmts in the chain are patterns, or if we failed |
| to update STMT_VINFO_REDUC_IDX dissolve the chain and handle |
| it as regular reduction instead. */ |
| else |
| { |
| stmt_vec_info vinfo = first; |
| stmt_vec_info last = NULL; |
| while (vinfo) |
| { |
| next = REDUC_GROUP_NEXT_ELEMENT (vinfo); |
| REDUC_GROUP_FIRST_ELEMENT (vinfo) = NULL; |
| REDUC_GROUP_NEXT_ELEMENT (vinfo) = NULL; |
| last = vinfo; |
| vinfo = next; |
| } |
| STMT_VINFO_DEF_TYPE (vect_stmt_to_vectorize (first)) |
| = vect_internal_def; |
| loop_vinfo->reductions.safe_push (vect_stmt_to_vectorize (last)); |
| LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).unordered_remove (i); |
| --i; |
| } |
| } |
| } |
| |
| /* Function vect_get_loop_niters. |
| |
| Determine the number of iterations the loop executes and place it |
| in NUMBER_OF_ITERATIONS. Place the number of latch iterations |
| in NUMBER_OF_ITERATIONSM1. Place the condition under which the |
| niter information holds in ASSUMPTIONS. |
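| |
| For example (illustrative only), for a loop controlled by |
| "for (i = 0; i < n; i++)" with n > 0, the latch runs n - 1 times, so |
| NUMBER_OF_ITERATIONSM1 is n - 1 and NUMBER_OF_ITERATIONS (the number of |
| loop header executions) is n. |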
| |
| Return the loop exit condition. */ |
| |
| |
| static gcond * |
| vect_get_loop_niters (class loop *loop, tree *assumptions, |
| tree *number_of_iterations, tree *number_of_iterationsm1) |
| { |
| edge exit = single_exit (loop); |
| class tree_niter_desc niter_desc; |
| tree niter_assumptions, niter, may_be_zero; |
| gcond *cond = get_loop_exit_condition (loop); |
| |
| *assumptions = boolean_true_node; |
| *number_of_iterationsm1 = chrec_dont_know; |
| *number_of_iterations = chrec_dont_know; |
| DUMP_VECT_SCOPE ("get_loop_niters"); |
| |
| if (!exit) |
| return cond; |
| |
| may_be_zero = NULL_TREE; |
| if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL) |
| || chrec_contains_undetermined (niter_desc.niter)) |
| return cond; |
| |
| niter_assumptions = niter_desc.assumptions; |
| may_be_zero = niter_desc.may_be_zero; |
| niter = niter_desc.niter; |
| |
| if (may_be_zero && integer_zerop (may_be_zero)) |
| may_be_zero = NULL_TREE; |
| |
| if (may_be_zero) |
| { |
| if (COMPARISON_CLASS_P (may_be_zero)) |
| { |
| /* Try to combine may_be_zero with assumptions; this can simplify |
| the computation of the niter expression. */ |
| if (niter_assumptions && !integer_nonzerop (niter_assumptions)) |
| niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, |
| niter_assumptions, |
| fold_build1 (TRUTH_NOT_EXPR, |
| boolean_type_node, |
| may_be_zero)); |
| else |
| niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero, |
| build_int_cst (TREE_TYPE (niter), 0), |
| rewrite_to_non_trapping_overflow (niter)); |
| |
| may_be_zero = NULL_TREE; |
| } |
| else if (integer_nonzerop (may_be_zero)) |
| { |
| *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0); |
| *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1); |
| return cond; |
| } |
| else |
| return cond; |
| } |
| |
| *assumptions = niter_assumptions; |
| *number_of_iterationsm1 = niter; |
| |
| /* We want the number of loop header executions, which is the number |
| of latch executions plus one. |
| ??? For UINT_MAX latch executions this number overflows to zero |
| for loops like do { n++; } while (n != 0); */ |
| if (niter && !chrec_contains_undetermined (niter)) |
| niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter), |
| build_int_cst (TREE_TYPE (niter), 1)); |
| *number_of_iterations = niter; |
| |
| return cond; |
| } |
| |
| /* Function bb_in_loop_p |
| |
| Used as predicate for dfs order traversal of the loop bbs. */ |
| |
| static bool |
| bb_in_loop_p (const_basic_block bb, const void *data) |
| { |
| const class loop *const loop = (const class loop *)data; |
| if (flow_bb_inside_loop_p (loop, bb)) |
| return true; |
| return false; |
| } |
| |
| |
| /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as |
| stmt_vec_info structs for all the stmts in LOOP_IN. */ |
| |
| _loop_vec_info::_loop_vec_info (class loop *loop_in, vec_info_shared *shared) |
| : vec_info (vec_info::loop, init_cost (loop_in, false), shared), |
| loop (loop_in), |
| bbs (XCNEWVEC (basic_block, loop->num_nodes)), |
| num_itersm1 (NULL_TREE), |
| num_iters (NULL_TREE), |
| num_iters_unchanged (NULL_TREE), |
| num_iters_assumptions (NULL_TREE), |
| th (0), |
| versioning_threshold (0), |
| vectorization_factor (0), |
| main_loop_edge (nullptr), |
| skip_main_loop_edge (nullptr), |
| skip_this_loop_edge (nullptr), |
| reusable_accumulators (), |
| max_vectorization_factor (0), |
| mask_skip_niters (NULL_TREE), |
| rgroup_compare_type (NULL_TREE), |
| simd_if_cond (NULL_TREE), |
| unaligned_dr (NULL), |
| peeling_for_alignment (0), |
| ptr_mask (0), |
| ivexpr_map (NULL), |
| scan_map (NULL), |
| slp_unrolling_factor (1), |
| single_scalar_iteration_cost (0), |
| vec_outside_cost (0), |
| vec_inside_cost (0), |
| inner_loop_cost_factor (param_vect_inner_loop_cost_factor), |
| vectorizable (false), |
| can_use_partial_vectors_p (param_vect_partial_vector_usage != 0), |
| using_partial_vectors_p (false), |
| epil_using_partial_vectors_p (false), |
| peeling_for_gaps (false), |
| peeling_for_niter (false), |
| no_data_dependencies (false), |
| has_mask_store (false), |
| scalar_loop_scaling (profile_probability::uninitialized ()), |
| scalar_loop (NULL), |
| orig_loop_info (NULL) |
| { |
| /* CHECKME: We want to visit all BBs before their successors (except for |
| latch blocks, for which this assertion wouldn't hold). In the simple |
| case of the loop forms we allow, a dfs order of the BBs would be the same |
| as reversed postorder traversal, so we are safe. */ |
| |
| unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p, |
| bbs, loop->num_nodes, loop); |
| gcc_assert (nbbs == loop->num_nodes); |
| |
| for (unsigned int i = 0; i < nbbs; i++) |
| { |
| basic_block bb = bbs[i]; |
| gimple_stmt_iterator si; |
| |
| for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) |
| { |
| gimple *phi = gsi_stmt (si); |
| gimple_set_uid (phi, 0); |
| add_stmt (phi); |
| } |
| |
| for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) |
| { |
| gimple *stmt = gsi_stmt (si); |
| gimple_set_uid (stmt, 0); |
| if (is_gimple_debug (stmt)) |
| continue; |
| add_stmt (stmt); |
| /* If the .GOMP_SIMD_LANE call for the current loop has 3 arguments, the |
| third argument is the #pragma omp simd if (x) condition: when it is 0, |
| the loop shouldn't be vectorized; when it is a non-zero constant, the |
| loop should be vectorized normally; otherwise the loop is versioned, |
| with the vectorized copy used if the condition is non-zero at runtime. */ |
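| /* For example (an illustrative sketch only), a loop such as |
| #pragma omp simd if (x) |
| for (int i = 0; i < n; i++) |
| a[i] += b[i]; |
| is the kind of construct whose "if (x)" condition ends up as that |
| third argument. */ |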
| if (loop_in->simduid |
| && is_gimple_call (stmt) |
| && gimple_call_internal_p (stmt) |
| && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE |
| && gimple_call_num_args (stmt) >= 3 |
| && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME |
| && (loop_in->simduid |
| == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))) |
| { |
| tree arg = gimple_call_arg (stmt, 2); |
| if (integer_zerop (arg) || TREE_CODE (arg) == SSA_NAME) |
| simd_if_cond = arg; |
| else |
| gcc_assert (integer_nonzerop (arg)); |
| } |
| } |
| } |
| |
| epilogue_vinfos.create (6); |
| } |
| |
| /* Free all levels of rgroup CONTROLS. */ |
| |
| void |
| release_vec_loop_controls (vec<rgroup_controls> *controls) |
| { |
| rgroup_controls *rgc; |
| unsigned int i; |
| FOR_EACH_VEC_ELT (*controls, i, rgc) |
| rgc->controls.release (); |
| controls->release (); |
| } |
| |
| /* Free all memory used by the _loop_vec_info, as well as all the |
| stmt_vec_info structs of all the stmts in the loop. */ |
| |
| _loop_vec_info::~_loop_vec_info () |
| { |
| free (bbs); |
| |
| release_vec_loop_controls (&masks); |
| release_vec_loop_controls (&lens); |
| delete ivexpr_map; |
| delete scan_map; |
| epilogue_vinfos.release (); |
| |
| /* When we release an epilogue vinfo that we do not intend to use |
| avoid clearing AUX of the main loop which should continue to |
| point to the main loop vinfo since otherwise we'll leak that. */ |
| if (loop->aux == this) |
| loop->aux = NULL; |
| } |
| |
| /* Return an invariant or register for EXPR and emit necessary |
| computations in the LOOP_VINFO loop preheader. */ |
| |
| tree |
| cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr) |
| { |
| if (is_gimple_reg (expr) |
| || is_gimple_min_invariant (expr)) |
| return expr; |
| |
| if (! loop_vinfo->ivexpr_map) |
| loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>; |
| tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr); |
| if (! cached) |
| { |
| gimple_seq stmts = NULL; |
| cached = force_gimple_operand (unshare_expr (expr), |
| &stmts, true, NULL_TREE); |
| if (stmts) |
| { |
| edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo)); |
| gsi_insert_seq_on_edge_immediate (e, stmts); |
| } |
| } |
| return cached; |
| } |
| |
| /* Return true if we can use CMP_TYPE as the comparison type to produce |
| all masks required to mask LOOP_VINFO. */ |
| |
| static bool |
| can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type) |
| { |
| rgroup_controls *rgm; |
| unsigned int i; |
| FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm) |
| if (rgm->type != NULL_TREE |
| && !direct_internal_fn_supported_p (IFN_WHILE_ULT, |
| cmp_type, rgm->type, |
| OPTIMIZE_FOR_SPEED)) |
| return false; |
| return true; |
| } |
| |
| /* Calculate the maximum number of scalars per iteration for every |
| rgroup in LOOP_VINFO. */ |
| |
| static unsigned int |
| vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo) |
| { |
| unsigned int res = 1; |
| unsigned int i; |
| rgroup_controls *rgm; |
| FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm) |
| res = MAX (res, rgm->max_nscalars_per_iter); |
| return res; |
| } |
| |
| /* Calculate the minimum precision necessary to represent: |
| |
| MAX_NITERS * FACTOR |
| |
| as an unsigned integer, where MAX_NITERS is the maximum number of |
| loop header iterations for the original scalar form of LOOP_VINFO. */ |
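| |
| /* As a worked illustration only: with a 32-bit unsigned counter type and |
| no better loop-iteration estimate, MAX_NITERS is 2^32; with FACTOR == 2 |
| the product is 2^33, which needs 34 bits, so 34 would be returned. */ |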
| |
| static unsigned |
| vect_min_prec_for_max_niters (loop_vec_info loop_vinfo, unsigned int factor) |
| { |
| class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| /* Get the maximum number of iterations that is representable |
| in the counter type. */ |
| tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo)); |
| widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1; |
| |
| /* Get a more refined estimate for the number of iterations. */ |
| widest_int max_back_edges; |
| if (max_loop_iterations (loop, &max_back_edges)) |
| max_ni = wi::smin (max_ni, max_back_edges + 1); |
| |
| /* Work out how many bits we need to represent the limit. */ |
| return wi::min_precision (max_ni * factor, UNSIGNED); |
| } |
| |
| /* True if the loop needs peeling or partial vectors when vectorized. */ |
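| |
| /* For example (an illustrative case only): with a known iteration count of |
| 100, a VF of 8 and no peeling for alignment or gaps, 100 is not a multiple |
| of 8, so the loop needs either an epilogue loop or partial vectors for the |
| final iterations and this function returns true. */ |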
| |
| static bool |
| vect_need_peeling_or_partial_vectors_p (loop_vec_info loop_vinfo) |
| { |
| unsigned HOST_WIDE_INT const_vf; |
| HOST_WIDE_INT max_niter |
| = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo)); |
| |
| unsigned th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo); |
| if (!th && LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo)) |
| th = LOOP_VINFO_COST_MODEL_THRESHOLD (LOOP_VINFO_ORIG_LOOP_INFO |
| (loop_vinfo)); |
| |
| if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
| && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0) |
| { |
| /* Work out the (constant) number of iterations that need to be |
| peeled for reasons other than niters. */ |
| unsigned int peel_niter = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo); |
| if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)) |
| peel_niter += 1; |
| if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo) - peel_niter, |
| LOOP_VINFO_VECT_FACTOR (loop_vinfo))) |
| return true; |
| } |
| else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) |
| /* ??? When peeling for gaps but not alignment, we could |
| try to check whether the (variable) niters is known to be |
| VF * N + 1. That's something of a niche case though. */ |
| || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) |
| || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf) |
| || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo)) |
| < (unsigned) exact_log2 (const_vf)) |
| /* In case of versioning, check if the maximum number of |
| iterations is greater than th. If they are identical, |
| the epilogue is unnecessary. */ |
| && (!LOOP_REQUIRES_VERSIONING (loop_vinfo) |
| || ((unsigned HOST_WIDE_INT) max_niter |
| > (th / const_vf) * const_vf)))) |
| return true; |
| |
| return false; |
| } |
| |
| /* Each statement in LOOP_VINFO can be masked where necessary. Check |
| whether we can actually generate the masks required. Return true if so, |
| storing the type of the scalar IV in LOOP_VINFO_RGROUP_COMPARE_TYPE. */ |
| |
| static bool |
| vect_verify_full_masking (loop_vec_info loop_vinfo) |
| { |
| unsigned int min_ni_width; |
| unsigned int max_nscalars_per_iter |
| = vect_get_max_nscalars_per_iter (loop_vinfo); |
| |
| /* Use a normal loop if there are no statements that need masking. |
| This only happens in rare degenerate cases: it means that the loop |
| has no loads, no stores, and no live-out values. */ |
| if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ()) |
| return false; |
| |
| /* Work out how many bits we need to represent the limit. */ |
| min_ni_width |
| = vect_min_prec_for_max_niters (loop_vinfo, max_nscalars_per_iter); |
| |
| /* Find a scalar mode for which WHILE_ULT is supported. */ |
| opt_scalar_int_mode cmp_mode_iter; |
| tree cmp_type = NULL_TREE; |
| tree iv_type = NULL_TREE; |
| widest_int iv_limit = vect_iv_limit_for_partial_vectors (loop_vinfo); |
| unsigned int iv_precision = UINT_MAX; |
| |
| if (iv_limit != -1) |
| iv_precision = wi::min_precision (iv_limit * max_nscalars_per_iter, |
| UNSIGNED); |
| |
| FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT) |
| { |
| unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ()); |
| if (cmp_bits >= min_ni_width |
| && targetm.scalar_mode_supported_p (cmp_mode_iter.require ())) |
| { |
| tree this_type = build_nonstandard_integer_type (cmp_bits, true); |
| if (this_type |
| && can_produce_all_loop_masks_p (loop_vinfo, this_type)) |
| { |
| /* Although we could stop as soon as we find a valid mode, |
| there are at least two reasons why that's not always the |
| best choice: |
| |
| - An IV that's Pmode or wider is more likely to be reusable |
| in address calculations than an IV that's narrower than |
| Pmode. |
| |
| - Doing the comparison in IV_PRECISION or wider allows |
| a natural 0-based IV, whereas using a narrower comparison |
| type requires mitigations against wrap-around. |
| |
| Conversely, if the IV limit is variable, doing the comparison |
| in a wider type than the original type can introduce |
| unnecessary extensions, so picking the widest valid mode |
| is not always a good choice either. |
| |
| Here we prefer the first IV type that's Pmode or wider, |
| and the first comparison type that's IV_PRECISION or wider. |
| (The comparison type must be no wider than the IV type, |
| to avoid extensions in the vector loop.) |
| |
| ??? We might want to try continuing beyond Pmode for ILP32 |
| targets if CMP_BITS < IV_PRECISION. */ |
| iv_type = this_type; |
| if (!cmp_type || iv_precision > TYPE_PRECISION (cmp_type)) |
| cmp_type = this_type; |
| if (cmp_bits >= GET_MODE_BITSIZE (Pmode)) |
| break; |
| } |
| } |
| } |
| |
| if (!cmp_type) |
| return false; |
| |
| LOOP_VINFO_RGROUP_COMPARE_TYPE (loop_vinfo) = cmp_type; |
| LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo) = iv_type; |
| return true; |
| } |
| |
| /* Check whether we can use vector accesses with length, based on a |
| comparison of precisions. So far, to keep it simple, we only allow the |
| case in which the precision of the length supported by the target is |
| larger than the precision required by the loop niters. */ |
| |
| static bool |
| vect_verify_loop_lens (loop_vec_info loop_vinfo) |
| { |
| if (LOOP_VINFO_LENS (loop_vinfo).is_empty ()) |
| return false; |
| |
| unsigned int max_nitems_per_iter = 1; |
| unsigned int i; |
| rgroup_controls *rgl; |
| /* Find the maximum number of items per iteration for every rgroup. */ |
| FOR_EACH_VEC_ELT (LOOP_VINFO_LENS (loop_vinfo), i, rgl) |
| { |
| unsigned nitems_per_iter = rgl->max_nscalars_per_iter * rgl->factor; |
| max_nitems_per_iter = MAX (max_nitems_per_iter, nitems_per_iter); |
| } |
| |
| /* Work out how many bits we need to represent the length limit. */ |
| unsigned int min_ni_prec |
| = vect_min_prec_for_max_niters (loop_vinfo, max_nitems_per_iter); |
| |
| /* Now use the maximum of the precisions below for one suitable IV type: |
| - the IV's natural precision |
| - the precision needed to hold: the maximum number of scalar |
| iterations multiplied by the scale factor (min_ni_prec above) |
| - the Pmode precision |
| |
| If min_ni_prec is less than the precision of the current niters, |
| we prefer to still use the niters type. Prefer to use a Pmode or |
| wider IV to avoid narrow conversions. */ |
| |
| unsigned int ni_prec |
| = TYPE_PRECISION (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo))); |
| min_ni_prec = MAX (min_ni_prec, ni_prec); |
| min_ni_prec = MAX (min_ni_prec, GET_MODE_BITSIZE (Pmode)); |
| |
| tree iv_type = NULL_TREE; |
| opt_scalar_int_mode tmode_iter; |
| FOR_EACH_MODE_IN_CLASS (tmode_iter, MODE_INT) |
| { |
| scalar_mode tmode = tmode_iter.require (); |
| unsigned int tbits = GET_MODE_BITSIZE (tmode); |
| |
| /* ??? Do we really want to construct one IV whose precision exceeds |
| BITS_PER_WORD? */ |
| if (tbits > BITS_PER_WORD) |
| break; |
| |
| /* Find the first available standard integral type. */ |
| if (tbits >= min_ni_prec && targetm.scalar_mode_supported_p (tmode)) |
| { |
| iv_type = build_nonstandard_integer_type (tbits, true); |
| break; |
| } |
| } |
| |
| if (!iv_type) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "can't vectorize with length-based partial vectors" |
| " because there is no suitable iv type.\n"); |
| return false; |
| } |
| |
| LOOP_VINFO_RGROUP_COMPARE_TYPE (loop_vinfo) = iv_type; |
| LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo) = iv_type; |
| |
| return true; |
| } |
| |
| /* Calculate the cost of one scalar iteration of the loop. */ |
| static void |
| vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo) |
| { |
| class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); |
| int nbbs = loop->num_nodes, factor; |
| int innerloop_iters, i; |
| |
| DUMP_VECT_SCOPE ("vect_compute_single_scalar_iteration_cost"); |
| |
| /* Gather costs for statements in the scalar loop. */ |
| |
| /* FORNOW. */ |
| innerloop_iters = 1; |
| if (loop->inner) |
| innerloop_iters = LOOP_VINFO_INNER_LOOP_COST_FACTOR (loop_vinfo); |
| |
| for (i = 0; i < nbbs; i++) |
| { |
| gimple_stmt_iterator si; |
| basic_block bb = bbs[i]; |
| |
| if (bb->loop_father == loop->inner) |
| factor = innerloop_iters; |
| else |
| factor = 1; |
| |
| for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) |
| { |
| gimple *stmt = gsi_stmt (si); |
| stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt); |
| |
| if (!is_gimple_assign (stmt) && !is_gimple_call (stmt)) |
| continue; |
| |
| /* Skip stmts that are not vectorized inside the loop. */ |
| stmt_vec_info vstmt_info = vect_stmt_to_vectorize (stmt_info); |
| if (!STMT_VINFO_RELEVANT_P (vstmt_info) |
| && (!STMT_VINFO_LIVE_P (vstmt_info) |
| || !VECTORIZABLE_CYCLE_DEF |
| (STMT_VINFO_DEF_TYPE (vstmt_info)))) |
| continue; |
| |
| vect_cost_for_stmt kind; |
| if (STMT_VINFO_DATA_REF (stmt_info)) |
| { |
| if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info))) |
| kind = scalar_load; |
| else |
| kind = scalar_store; |
| } |
| else if (vect_nop_conversion_p (stmt_info)) |
| continue; |
| else |
| kind = scalar_stmt; |
| |
| /* We are using vect_prologue here to avoid scaling twice |
| by the inner loop factor. */ |
| record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), |
| factor, kind, stmt_info, 0, vect_prologue); |
| } |
| } |
| |
| /* Now accumulate cost. */ |
| void *target_cost_data = init_cost (loop, true); |
| stmt_info_for_cost *si; |
| int j; |
| FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), |
| j, si) |
| (void) add_stmt_cost (loop_vinfo, target_cost_data, si->count, |
| si->kind, si->stmt_info, si->vectype, |
| si->misalign, si->where); |
| unsigned prologue_cost = 0, body_cost = 0, epilogue_cost = 0; |
| finish_cost (target_cost_data, &prologue_cost, &body_cost, |
| &epilogue_cost); |
| destroy_cost_data (target_cost_data); |
| LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) |
| = prologue_cost + body_cost + epilogue_cost; |
| } |
| |
| |
| /* Function vect_analyze_loop_form_1. |
| |
| Verify that certain CFG restrictions hold, including: |
| - the loop has a pre-header |
| - the loop has a single entry and exit |
| - the loop exit condition is simple enough |
| - the number of iterations can be analyzed, i.e., a countable loop. The |
| niter could be analyzed under some assumptions. */ |
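| |
| /* For instance (illustrative only), "for (i = 0; i < n; i++)" is countable, |
| whereas a search loop such as "while (*p++ != 0)" generally is not, since |
| its iteration count cannot be computed before entering the loop. */ |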
| |
| opt_result |
| vect_analyze_loop_form_1 (class loop *loop, gcond **loop_cond, |
| tree *assumptions, tree *number_of_iterationsm1, |
| tree *number_of_iterations, gcond **inner_loop_cond) |
| { |
| DUMP_VECT_SCOPE ("vect_analyze_loop_form"); |
| |
| /* Different restrictions apply when we are considering an inner-most loop, |
| vs. an outer (nested) loop. |
| (FORNOW. May want to relax some of these restrictions in the future). */ |
| |
| if (!loop->inner) |
| { |
| /* Inner-most loop. We currently require that the number of BBs is |
| exactly 2 (the header and latch). Vectorizable inner-most loops |
| look like this: |
| |
| (pre-header) |
| | |
| header <--------+ |
| | | | |
| | +--> latch --+ |
| | |
| (exit-bb) */ |
| |
| if (loop->num_nodes != 2) |
| return opt_result::failure_at (vect_location, |
| "not vectorized:" |
| " control flow in loop.\n"); |
| |
| if (empty_block_p (loop->header)) |
| return opt_result::failure_at (vect_location, |
| "not vectorized: empty loop.\n"); |
| } |
| else |
| { |
| class loop *innerloop = loop->inner; |
| edge entryedge; |
| |
| /* Nested loop. We currently require that the loop is doubly-nested, |
| contains a single inner loop, and the number of BBs is exactly 5. |
| Vectorizable outer-loops look like this: |
| |
| (pre-header) |
| | |
| header <---+ |
| | | |
| inner-loop | |
| | | |
| tail ------+ |
| | |
| (exit-bb) |
| |
| The inner-loop has the properties expected of inner-most loops |
| as described above. */ |
| |
| if ((loop->inner)->inner || (loop->inner)->next) |
| return opt_result::failure_at (vect_location, |
| "not vectorized:" |
| " multiple nested loops.\n"); |
| |
| if (loop->num_nodes != 5) |
| return opt_result::failure_at (vect_location, |
| "not vectorized:" |
| " control flow in loop.\n"); |
| |
| entryedge = loop_preheader_edge (innerloop); |
| if (entryedge->src != loop->header |
| || !single_exit (innerloop) |
| || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src) |
| return opt_result::failure_at (vect_location, |
| "not vectorized:" |
| " unsupported outerloop form.\n"); |
| |
| /* Analyze the inner-loop. */ |
| tree inner_niterm1, inner_niter, inner_assumptions; |
| opt_result res |
| = vect_analyze_loop_form_1 (loop->inner, inner_loop_cond, |
| &inner_assumptions, &inner_niterm1, |
| &inner_niter, NULL); |
| if (!res) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: Bad inner loop.\n"); |
| return res; |
| } |
| |
| /* Don't support analyzing niter under assumptions for inner |
| loop. */ |
| if (!integer_onep (inner_assumptions)) |
| return opt_result::failure_at (vect_location, |
| "not vectorized: Bad inner loop.\n"); |
| |
| if (!expr_invariant_in_loop_p (loop, inner_niter)) |
| return opt_result::failure_at (vect_location, |
| "not vectorized: inner-loop count not" |
| " invariant.\n"); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Considering outer-loop vectorization.\n"); |
| } |
| |
| if (!single_exit (loop)) |
| return opt_result::failure_at (vect_location, |
| "not vectorized: multiple exits.\n"); |
| if (EDGE_COUNT (loop->header->preds) != 2) |
| return opt_result::failure_at (vect_location, |
| "not vectorized:" |
| " too many incoming edges.\n"); |
| |
| /* We assume that the loop exit condition is at the end of the loop, i.e., |
| that the loop is represented as a do-while (with a proper if-guard |
| before the loop if needed), where the loop header contains all the |
| executable statements, and the latch is empty. */ |
| if (!empty_block_p (loop->latch) |
| || !gimple_seq_empty_p (phi_nodes (loop->latch))) |
| return opt_result::failure_at (vect_location, |
| "not vectorized: latch block not empty.\n"); |
| |
| /* Make sure the exit is not abnormal. */ |
| edge e = single_exit (loop); |
| if (e->flags & EDGE_ABNORMAL) |
| return opt_result::failure_at (vect_location, |
| "not vectorized:" |
| " abnormal loop exit edge.\n"); |
| |
| *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations, |
| number_of_iterationsm1); |
| if (!*loop_cond) |
| return opt_result::failure_at |
| (vect_location, |
| "not vectorized: complicated exit condition.\n"); |
| |
| if (integer_zerop (*assumptions) |
| || !*number_of_iterations |
| || chrec_contains_undetermined (*number_of_iterations)) |
| return opt_result::failure_at |
| (*loop_cond, |
| "not vectorized: number of iterations cannot be computed.\n"); |
| |
| if (integer_zerop (*number_of_iterations)) |
| return opt_result::failure_at |
| (*loop_cond, |
| "not vectorized: number of iterations = 0.\n"); |
| |
| return opt_result::success (); |
| } |
| |
| /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */ |
| |
| opt_loop_vec_info |
| vect_analyze_loop_form (class loop *loop, vec_info_shared *shared) |
| { |
| tree assumptions, number_of_iterations, number_of_iterationsm1; |
| gcond *loop_cond, *inner_loop_cond = NULL; |
| |
| opt_result res |
| = vect_analyze_loop_form_1 (loop, &loop_cond, |
| &assumptions, &number_of_iterationsm1, |
| &number_of_iterations, &inner_loop_cond); |
| if (!res) |
| return opt_loop_vec_info::propagate_failure (res); |
| |
| loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared); |
| LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1; |
| LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations; |
| LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations; |
| if (!integer_onep (assumptions)) |
| { |
| /* We consider vectorizing this loop by versioning it under |
| some assumptions. In order to do this, we need to clear |
| existing information computed by scev and niter analyzer. */ |
| scev_reset_htab (); |
| free_numbers_of_iterations_estimates (loop); |
| /* Also set a flag for this loop so that the following scev and niter |
| analyses are done under the assumptions. */ |
| loop_constraint_set (loop, LOOP_C_FINITE); |
| /* Also record the assumptions for versioning. */ |
| LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions; |
| } |
| |
| if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Symbolic number of iterations is "); |
| dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| } |
| |
| stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond); |
| STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type; |
| if (inner_loop_cond) |
| { |
| stmt_vec_info inner_loop_cond_info |
| = loop_vinfo->lookup_stmt (inner_loop_cond); |
| STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type; |
| /* If we have an estimate on the number of iterations of the inner |
| loop use that to limit the scale for costing, otherwise use |
| --param vect-inner-loop-cost-factor literally. */ |
| widest_int nit; |
| if (estimated_stmt_executions (loop->inner, &nit)) |
| LOOP_VINFO_INNER_LOOP_COST_FACTOR (loop_vinfo) |
| = wi::smin (nit, param_vect_inner_loop_cost_factor).to_uhwi (); |
| } |
| |
| gcc_assert (!loop->aux); |
| loop->aux = loop_vinfo; |
| return opt_loop_vec_info::success (loop_vinfo); |
| } |
| |
| |
| |
| /* Scan the loop stmts and, depending on whether there are any (non-)SLP |
| statements, update the vectorization factor. */ |
| |
| static void |
| vect_update_vf_for_slp (loop_vec_info loop_vinfo) |
| { |
| class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); |
| int nbbs = loop->num_nodes; |
| poly_uint64 vectorization_factor; |
| int i; |
| |
| DUMP_VECT_SCOPE ("vect_update_vf_for_slp"); |
| |
| vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| gcc_assert (known_ne (vectorization_factor, 0U)); |
| |
| /* If all the stmts in the loop can be SLPed, we perform only SLP, and the |
| vectorization factor of the loop is the unrolling factor required by |
| the SLP instances. If that unrolling factor is 1, we say that we |
| perform pure SLP on the loop - cross-iteration parallelism is not |
| exploited. */ |
| bool only_slp_in_loop = true; |
| for (i = 0; i < nbbs; i++) |
| { |
| basic_block bb = bbs[i]; |
| for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); |
| gsi_next (&si)) |
| { |
| stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (si.phi ()); |
| if (!stmt_info) |
| continue; |
| if ((STMT_VINFO_RELEVANT_P (stmt_info) |
| || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info))) |
| && !PURE_SLP_STMT (stmt_info)) |
| /* STMT needs both SLP and loop-based vectorization. */ |
| only_slp_in_loop = false; |
| } |
| for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); |
| gsi_next (&si)) |
| { |
| if (is_gimple_debug (gsi_stmt (si))) |
| continue; |
| stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si)); |
| stmt_info = vect_stmt_to_vectorize (stmt_info); |
| if ((STMT_VINFO_RELEVANT_P (stmt_info) |
| || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info))) |
| && !PURE_SLP_STMT (stmt_info)) |
| /* STMT needs both SLP and loop-based vectorization. */ |
| only_slp_in_loop = false; |
| } |
| } |
| |
| if (only_slp_in_loop) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Loop contains only SLP stmts\n"); |
| vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo); |
| } |
| else |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Loop contains SLP and non-SLP stmts\n"); |
| /* Both the vectorization factor and unroll factor have the form |
| GET_MODE_SIZE (loop_vinfo->vector_mode) * X for some rational X, |
| so they must have a common multiple. */ |
| vectorization_factor |
| = force_common_multiple (vectorization_factor, |
| LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo)); |
| } |
| |
| LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor; |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Updating vectorization factor to "); |
| dump_dec (MSG_NOTE, vectorization_factor); |
| dump_printf (MSG_NOTE, ".\n"); |
| } |
| } |
| |
| /* Return true if STMT_INFO describes a double reduction phi and if |
| the other phi in the reduction is also relevant for vectorization. |
| This rejects cases such as: |
| |
| outer1: |
| x_1 = PHI <x_3(outer2), ...>; |
| ... |
| |
| inner: |
| x_2 = ...; |
| ... |
| |
| outer2: |
| x_3 = PHI <x_2(inner)>; |
| |
| if nothing in x_2 or elsewhere makes x_1 relevant. */ |
| |
| static bool |
| vect_active_double_reduction_p (stmt_vec_info stmt_info) |
| { |
| if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def) |
| return false; |
| |
| return STMT_VINFO_RELEVANT_P (STMT_VINFO_REDUC_DEF (stmt_info)); |
| } |
| |
| /* Function vect_analyze_loop_operations. |
| |
| Scan the loop stmts and make sure they are all vectorizable. */ |
| |
| static opt_result |
| vect_analyze_loop_operations (loop_vec_info loop_vinfo) |
| { |
| class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); |
| int nbbs = loop->num_nodes; |
| int i; |
| stmt_vec_info stmt_info; |
| bool need_to_vectorize = false; |
| bool ok; |
| |
| DUMP_VECT_SCOPE ("vect_analyze_loop_operations"); |
| |
| auto_vec<stmt_info_for_cost> cost_vec; |
| |
| for (i = 0; i < nbbs; i++) |
| { |
| basic_block bb = bbs[i]; |
| |
| for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); |
| gsi_next (&si)) |
| { |
| gphi *phi = si.phi (); |
| ok = true; |
| |
| stmt_info = loop_vinfo->lookup_stmt (phi); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "examining phi: %G", phi); |
| if (virtual_operand_p (gimple_phi_result (phi))) |
| continue; |
| |
| /* Inner-loop loop-closed exit phi in outer-loop vectorization |
| (i.e., a phi in the tail of the outer-loop). */ |
| if (! is_loop_header_bb_p (bb)) |
| { |
| /* FORNOW: we currently don't support the case that these phis |
| are not used in the outer loop (unless it is a double reduction, |
| i.e., this phi is vect_reduction_def), because this case |
| requires us to actually do something here. */ |
| if (STMT_VINFO_LIVE_P (stmt_info) |
| && !vect_active_double_reduction_p (stmt_info)) |
| return opt_result::failure_at (phi, |
| "Unsupported loop-closed phi" |
| " in outer-loop.\n"); |
| |
| /* If PHI is used in the outer loop, we check that its operand |
| is defined in the inner loop. */ |
| if (STMT_VINFO_RELEVANT_P (stmt_info)) |
| { |
| tree phi_op; |
| |
| if (gimple_phi_num_args (phi) != 1) |
| return opt_result::failure_at (phi, "unsupported phi"); |
| |
| phi_op = PHI_ARG_DEF (phi, 0); |
| stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op); |
| if (!op_def_info) |
| return opt_result::failure_at (phi, "unsupported phi\n"); |
| |
| if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer |
| && (STMT_VINFO_RELEVANT (op_def_info) |
| != vect_used_in_outer_by_reduction)) |
| return opt_result::failure_at (phi, "unsupported phi\n"); |
| |
| if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_internal_def |
| || (STMT_VINFO_DEF_TYPE (stmt_info) |
| == vect_double_reduction_def)) |
| && !vectorizable_lc_phi (loop_vinfo, |
| stmt_info, NULL, NULL)) |
| return opt_result::failure_at (phi, "unsupported phi\n"); |
| } |
| |
| continue; |
| } |
| |
| gcc_assert (stmt_info); |
| |
| if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope |
| || STMT_VINFO_LIVE_P (stmt_info)) |
| && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def) |
| /* A scalar-dependence cycle that we don't support. */ |
| return opt_result::failure_at (phi, |
| "not vectorized:" |
| " scalar dependence cycle.\n"); |
| |
| if (STMT_VINFO_RELEVANT_P (stmt_info)) |
| { |
| need_to_vectorize = true; |
| if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def |
| && ! PURE_SLP_STMT (stmt_info)) |
| ok = vectorizable_induction (loop_vinfo, |
| stmt_info, NULL, NULL, |
| &cost_vec); |
| else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def |
| || (STMT_VINFO_DEF_TYPE (stmt_info) |
| == vect_double_reduction_def) |
| || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle) |
| && ! PURE_SLP_STMT (stmt_info)) |
| ok = vectorizable_reduction (loop_vinfo, |
| stmt_info, NULL, NULL, &cost_vec); |
| } |
| |
| /* SLP PHIs are tested by vect_slp_analyze_node_operations. */ |
| if (ok |
| && STMT_VINFO_LIVE_P (stmt_info) |
| && !PURE_SLP_STMT (stmt_info)) |
| ok = vectorizable_live_operation (loop_vinfo, |
| stmt_info, NULL, NULL, NULL, |
| -1, false, &cost_vec); |
| |
| if (!ok) |
| return opt_result::failure_at (phi, |
| "not vectorized: relevant phi not " |
| "supported: %G", |
| static_cast <gimple *> (phi)); |
| } |
| |
| for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); |
| gsi_next (&si)) |
| { |
| gimple *stmt = gsi_stmt (si); |
| if (!gimple_clobber_p (stmt) |
| && !is_gimple_debug (stmt)) |
| { |
| opt_result res |
| = vect_analyze_stmt (loop_vinfo, |
| loop_vinfo->lookup_stmt (stmt), |
| &need_to_vectorize, |
| NULL, NULL, &cost_vec); |
| if (!res) |
| return res; |
| } |
| } |
| } /* bbs */ |
| |
| add_stmt_costs (loop_vinfo, loop_vinfo->target_cost_data, &cost_vec); |
| |
| /* All operations in the loop are either irrelevant (they deal with loop |
| control, or are dead), or are only used outside the loop and can be |
| moved out of it (e.g. invariants, inductions). The loop can be |
| optimized away by scalar optimizations. We're better off not |
| touching this loop. */ |
| if (!need_to_vectorize) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "All the computation can be taken out of the loop.\n"); |
| return opt_result::failure_at |
| (vect_location, |
| "not vectorized: redundant loop. no profit to vectorize.\n"); |
| } |
| |
| return opt_result::success (); |
| } |
| |
| /* Return true if we know that the iteration count is smaller than the |
| vectorization factor. Return false if it isn't, or if we can't be sure |
| either way. */ |
| |
| static bool |
| vect_known_niters_smaller_than_vf (loop_vec_info loop_vinfo) |
| { |
| unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo); |
| |
| HOST_WIDE_INT max_niter; |
| if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) |
| max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo); |
| else |
| max_niter = max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo)); |
| |
| if (max_niter != -1 && (unsigned HOST_WIDE_INT) max_niter < assumed_vf) |
| return true; |
| |
| return false; |
| } |
| |
| /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it |
| is worthwhile to vectorize. Return 1 if definitely yes, 0 if |
| definitely no, or -1 if it's worth retrying. */ |
| |
| static int |
| vect_analyze_loop_costing (loop_vec_info loop_vinfo) |
| { |
| class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo); |
| |
| /* Only loops that can handle partially-populated vectors can have iteration |
| counts less than the vectorization factor. */ |
| if (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo)) |
| { |
| if (vect_known_niters_smaller_than_vf (loop_vinfo)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: iteration count smaller than " |
| "vectorization factor.\n"); |
| return 0; |
| } |
| } |
| |
| /* If using the "very cheap" model, reject cases in which we'd keep |
| a copy of the scalar code (even if we might be able to vectorize it). */ |
| if (loop_cost_model (loop) == VECT_COST_MODEL_VERY_CHEAP |
| && (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) |
| || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) |
| || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "some scalar iterations would need to be peeled\n"); |
| return 0; |
| } |
| |
| int min_profitable_iters, min_profitable_estimate; |
| vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters, |
| &min_profitable_estimate); |
| |
| if (min_profitable_iters < 0) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: vectorization not profitable.\n"); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: vector version will never be " |
| "profitable.\n"); |
| return -1; |
| } |
| |
| int min_scalar_loop_bound = (param_min_vect_loop_bound |
| * assumed_vf); |
| |
| /* Use the cost model only if it is more conservative than the |
| user-specified threshold. */ |
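| /* For example (purely illustrative numbers): with |
| param_min_vect_loop_bound == 2 and an assumed VF of 4, |
| min_scalar_loop_bound is 8; if min_profitable_iters is 11, the |
| threshold TH below becomes 11, the more conservative of the two. */ |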
| unsigned int th = (unsigned) MAX (min_scalar_loop_bound, |
| min_profitable_iters); |
| |
| LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th; |
| |
| if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
| && LOOP_VINFO_INT_NITERS (loop_vinfo) < th) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: vectorization not profitable.\n"); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "not vectorized: iteration count smaller than user " |
| "specified loop bound parameter or minimum profitable " |
| "iterations (whichever is more conservative).\n"); |
| return 0; |
| } |
| |
| /* The static profitability threshold min_profitable_estimate includes |
| the cost of having to check at runtime whether the scalar loop |
| should be used instead. If it turns out that we don't need or want |
| such a check, the threshold we should use for the static estimate |
| is simply the point at which the vector loop becomes more profitable |
| than the scalar loop. */ |
| if (min_profitable_estimate > min_profitable_iters |
| && !LOOP_REQUIRES_VERSIONING (loop_vinfo) |
| && !LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) |
| && !LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) |
| && !vect_apply_runtime_profitability_check_p (loop_vinfo)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "no need for a runtime" |
| " choice between the scalar and vector loops\n"); |
| min_profitable_estimate = min_profitable_iters; |
| } |
| |
| /* If the vector loop needs multiple iterations to be beneficial then |
| things are probably too close to call, and the conservative thing |
| would be to stick with the scalar code. */ |
| if (loop_cost_model (loop) == VECT_COST_MODEL_VERY_CHEAP |
| && min_profitable_estimate > (int) vect_vf_for_cost (loop_vinfo)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "one iteration of the vector loop would be" |
| " more expensive than the equivalent number of" |
| " iterations of the scalar loop\n"); |
| return 0; |
| } |
| |
| HOST_WIDE_INT estimated_niter; |
| |
| /* If we are vectorizing an epilogue then we know the maximum number of |
| scalar iterations it will cover is at least one lower than the |
| vectorization factor of the main loop. */ |
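| /* For example (illustrative): if the main loop was vectorized with a |
| vectorization factor of 8, the epilogue covers at most 7 scalar |
| iterations, and that is the estimate used below. */ |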
| if (LOOP_VINFO_EPILOGUE_P (loop_vinfo)) |
| estimated_niter |
| = vect_vf_for_cost (LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo)) - 1; |
| else |
| { |
| estimated_niter = estimated_stmt_executions_int (loop); |
| if (estimated_niter == -1) |
| estimated_niter = likely_max_stmt_executions_int (loop); |
| } |
| if (estimated_niter != -1 |
| && ((unsigned HOST_WIDE_INT) estimated_niter |
| < MAX (th, (unsigned) min_profitable_estimate))) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: estimated iteration count too " |
| "small.\n"); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "not vectorized: estimated iteration count smaller " |
| "than specified loop bound parameter or minimum " |
| "profitable iterations (whichever is more " |
| "conservative).\n"); |
| return -1; |
| } |
| |
| return 1; |
| } |
| |
| static opt_result |
| vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs, |
| vec<data_reference_p> *datarefs, |
| unsigned int *n_stmts) |
| { |
| *n_stmts = 0; |
| for (unsigned i = 0; i < loop->num_nodes; i++) |
| for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]); |
| !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| if (is_gimple_debug (stmt)) |
| continue; |
| ++(*n_stmts); |
| opt_result res = vect_find_stmt_data_reference (loop, stmt, datarefs, |
| NULL, 0); |
| if (!res) |
| { |
| if (is_gimple_call (stmt) && loop->safelen) |
| { |
| tree fndecl = gimple_call_fndecl (stmt), op; |
| if (fndecl != NULL_TREE) |
| { |
| cgraph_node *node = cgraph_node::get (fndecl); |
| if (node != NULL && node->simd_clones != NULL) |
| { |
| unsigned int j, n = gimple_call_num_args (stmt); |
| for (j = 0; j < n; j++) |
| { |
| op = gimple_call_arg (stmt, j); |
| if (DECL_P (op) |
| || (REFERENCE_CLASS_P (op) |
| && get_base_address (op))) |
| break; |
| } |
| op = gimple_call_lhs (stmt); |
| /* Ignore #pragma omp declare simd functions |
| if they don't have data references in the |
| call stmt itself. */ |
| if (j == n |
| && !(op |
| && (DECL_P (op) |
| || (REFERENCE_CLASS_P (op) |
| && get_base_address (op))))) |
| continue; |
| } |
| } |
| } |
| return res; |
| } |
| /* If dependence analysis will give up due to the limit on the |
| number of datarefs, stop here and fail fatally. */ |
| if (datarefs->length () |
| > (unsigned)param_loop_max_datarefs_for_datadeps) |
| return opt_result::failure_at (stmt, "exceeded param " |
| "loop-max-datarefs-for-datadeps\n"); |
| } |
| return opt_result::success (); |
| } |
| |
| /* Look for SLP-only access groups and turn each individual access into its own |
| group. */ |
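| /* For example (illustrative): an interleaved access group of size 4 |
| that was built only for SLP is split into four single-element groups, |
| each with DR_GROUP_SIZE 1 and, for non-strided accesses, |
| DR_GROUP_GAP 3. */ |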
| static void |
| vect_dissolve_slp_only_groups (loop_vec_info loop_vinfo) |
| { |
| unsigned int i; |
| struct data_reference *dr; |
| |
| DUMP_VECT_SCOPE ("vect_dissolve_slp_only_groups"); |
| |
| vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| { |
| gcc_assert (DR_REF (dr)); |
| stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (DR_STMT (dr)); |
| |
| /* Check if the access is part of an interleaving chain. */ |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| { |
| stmt_vec_info first_element = DR_GROUP_FIRST_ELEMENT (stmt_info); |
| dr_vec_info *dr_info = STMT_VINFO_DR_INFO (first_element); |
| unsigned int group_size = DR_GROUP_SIZE (first_element); |
| |
| /* Check if this is an SLP-only group. */ |
| if (!STMT_SLP_TYPE (stmt_info) |
| && STMT_VINFO_SLP_VECT_ONLY (first_element)) |
| { |
| /* Dissolve the group. */ |
| STMT_VINFO_SLP_VECT_ONLY (first_element) = false; |
| |
| stmt_vec_info vinfo = first_element; |
| while (vinfo) |
| { |
| stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (vinfo); |
| DR_GROUP_FIRST_ELEMENT (vinfo) = vinfo; |
| DR_GROUP_NEXT_ELEMENT (vinfo) = NULL; |
| DR_GROUP_SIZE (vinfo) = 1; |
| if (STMT_VINFO_STRIDED_P (first_element)) |
| DR_GROUP_GAP (vinfo) = 0; |
| else |
| DR_GROUP_GAP (vinfo) = group_size - 1; |
| /* Duplicate and adjust the alignment info; it needs to |
| be present on each group leader (see dr_misalignment). */ |
| if (vinfo != first_element) |
| { |
| dr_vec_info *dr_info2 = STMT_VINFO_DR_INFO (vinfo); |
| dr_info2->target_alignment = dr_info->target_alignment; |
| int misalignment = dr_info->misalignment; |
| if (misalignment != DR_MISALIGNMENT_UNKNOWN) |
| { |
| HOST_WIDE_INT diff |
| = (TREE_INT_CST_LOW (DR_INIT (dr_info2->dr)) |
| - TREE_INT_CST_LOW (DR_INIT (dr_info->dr))); |
| unsigned HOST_WIDE_INT align_c |
| = dr_info->target_alignment.to_constant (); |
| misalignment = (misalignment + diff) % align_c; |
| } |
| dr_info2->misalignment = misalignment; |
| } |
| vinfo = next; |
| } |
| } |
| } |
| } |
| } |
| |
| /* Determine if operating on full vectors for LOOP_VINFO might leave |
| some scalar iterations still to do. If so, decide how we should |
| handle those scalar iterations. The possibilities are: |
| |
| (1) Make LOOP_VINFO operate on partial vectors instead of full vectors. |
| In this case: |
| |
| LOOP_VINFO_USING_PARTIAL_VECTORS_P == true |
| LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == false |
| LOOP_VINFO_PEELING_FOR_NITER == false |
| |
| (2) Make LOOP_VINFO operate on full vectors and use an epilogue loop |
| to handle the remaining scalar iterations. In this case: |
| |
| LOOP_VINFO_USING_PARTIAL_VECTORS_P == false |
| LOOP_VINFO_PEELING_FOR_NITER == true |
| |
| There are two choices: |
| |
| (2a) Consider vectorizing the epilogue loop at the same VF as the |
| main loop, but using partial vectors instead of full vectors. |
| In this case: |
| |
| LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == true |
| |
| (2b) Consider vectorizing the epilogue loop at lower VFs only. |
| In this case: |
| |
| LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == false |
| |
| When FOR_EPILOGUE_P is true, make this determination based on the |
| assumption that LOOP_VINFO is an epilogue loop, otherwise make it |
| based on the assumption that LOOP_VINFO is the main loop. The caller |
| has made sure that the number of iterations is set appropriately for |
| this value of FOR_EPILOGUE_P. */ |
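| /* For example (purely illustrative numbers): with a vectorization |
| factor of 4 and 10 scalar iterations, choice (1) executes three |
| partial-vector iterations covering 4, 4 and 2 lanes, whereas |
| choice (2) executes two full-vector iterations and leaves 2 scalar |
| iterations to the epilogue loop. */ |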
| |
| opt_result |
| vect_determine_partial_vectors_and_peeling (loop_vec_info loop_vinfo, |
| bool for_epilogue_p) |
| { |
| /* Determine whether there would be any scalar iterations left over. */ |
| bool need_peeling_or_partial_vectors_p |
| = vect_need_peeling_or_partial_vectors_p (loop_vinfo); |
| |
| /* Decide whether to vectorize the loop with partial vectors. */ |
| LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo) = false; |
| LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P (loop_vinfo) = false; |
| if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) |
| && need_peeling_or_partial_vectors_p) |
| { |
| /* For partial-vector-usage=1, try to push the handling of partial |
| vectors to the epilogue, with the main loop continuing to operate |
| on full vectors. |
| |
| ??? We could then end up failing to use partial vectors if we |
| decide to peel iterations into a prologue, and if the main loop |
| then ends up processing fewer than VF iterations. */ |
| if (param_vect_partial_vector_usage == 1 |
| && !LOOP_VINFO_EPILOGUE_P (loop_vinfo) |
| && !vect_known_niters_smaller_than_vf (loop_vinfo)) |
| LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P (loop_vinfo) = true; |
| else |
| LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo) = true; |
| } |
| |
| if (dump_enabled_p ()) |
| { |
| if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo)) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "operating on partial vectors%s.\n", |
| for_epilogue_p ? " for epilogue loop" : ""); |
| else |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "operating only on full vectors%s.\n", |
| for_epilogue_p ? " for epilogue loop" : ""); |
| } |
| |
| if (for_epilogue_p) |
| { |
| loop_vec_info orig_loop_vinfo = LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo); |
| gcc_assert (orig_loop_vinfo); |
| if (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo)) |
| gcc_assert (known_lt (LOOP_VINFO_VECT_FACTOR (loop_vinfo), |
| LOOP_VINFO_VECT_FACTOR (orig_loop_vinfo))); |
| } |
| |
| if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
| && !LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo)) |
| { |
| /* Check that the loop processes at least one full vector. */ |
| poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| tree scalar_niters = LOOP_VINFO_NITERS (loop_vinfo); |
| if (known_lt (wi::to_widest (scalar_niters), vf)) |
| return opt_result::failure_at (vect_location, |
| "loop does not have enough iterations" |
| " to support vectorization.\n"); |
| |
| /* If we need to peel an extra epilogue iteration to handle data |
| accesses with gaps, check that there are enough scalar iterations |
| available. |
| |
| The check above is redundant with this one when peeling for gaps, |
| but the distinction is useful for diagnostics. */ |
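| /* For example (illustrative): with a VF of 4, a loop of 3 iterations |
| fails the check above, and when peeling for gaps a loop of 4 |
| iterations also fails here, since NITERSM1 is 3 < 4. */ |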
| tree scalar_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo); |
| if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) |
| && known_lt (wi::to_widest (scalar_nitersm1), vf)) |
| return opt_result::failure_at (vect_location, |
| "loop does not have enough iterations" |
| " to support peeling for gaps.\n"); |
| } |
| |
| LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) |
| = (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo) |
| && need_peeling_or_partial_vectors_p); |
| |
| return opt_result::success (); |
| } |
| |
| /* Function vect_analyze_loop_2. |
| |
| Apply a set of analyses on LOOP, and create a loop_vec_info struct |
| for it. The different analyses will record information in the |
| loop_vec_info struct. */ |
| static opt_result |
| vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, unsigned *n_stmts) |
| { |
| opt_result ok = opt_result::success (); |
| int res; |
| unsigned int max_vf = MAX_VECTORIZATION_FACTOR; |
| poly_uint64 min_vf = 2; |
| loop_vec_info orig_loop_vinfo = NULL; |
| |
| /* If we are dealing with an epilogue then orig_loop_vinfo points to the |
| loop_vec_info of the first vectorized loop. */ |
| if (LOOP_VINFO_EPILOGUE_P (loop_vinfo)) |
| orig_loop_vinfo = LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo); |
| else |
| orig_loop_vinfo = loop_vinfo; |
| gcc_assert (orig_loop_vinfo); |
| |
| /* The first group of checks is independent of the vector size. */ |
| fatal = true; |
| |
| if (LOOP_VINFO_SIMD_IF_COND (loop_vinfo) |
| && integer_zerop (LOOP_VINFO_SIMD_IF_COND (loop_vinfo))) |
| return opt_result::failure_at (vect_location, |
| "not vectorized: simd if(0)\n"); |
| |
| /* Find all data references in the loop (which correspond to vdefs/vuses) |
| and analyze their evolution in the loop. */ |
| |
| loop_p loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| /* Gather the data references and count stmts in the loop. */ |
| if (!LOOP_VINFO_DATAREFS (loop_vinfo).exists ()) |
| { |
| opt_result res |
| = vect_get_datarefs_in_loop (loop, LOOP_VINFO_BBS (loop_vinfo), |
| &LOOP_VINFO_DATAREFS (loop_vinfo), |
| n_stmts); |
| if (!res) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: loop contains function " |
| "calls or data references that cannot " |
| "be analyzed\n"); |
| return res; |
| } |
| loop_vinfo->shared->save_datarefs (); |
| } |
| else |
| loop_vinfo->shared->check_datarefs (); |
| |
| /* Analyze the data references and also adjust the minimal |
| vectorization factor according to the loads and stores. */ |
| |
| ok = vect_analyze_data_refs (loop_vinfo, &min_vf, &fatal); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad data references.\n"); |
| return ok; |
| } |
| |
| /* Classify all cross-iteration scalar data-flow cycles. |
| Cross-iteration cycles caused by virtual phis are analyzed separately. */ |
| vect_analyze_scalar_cycles (loop_vinfo); |
| |
| vect_pattern_recog (loop_vinfo); |
| |
| vect_fixup_scalar_cycles_with_patterns (loop_vinfo); |
| |
| /* Analyze the access patterns of the data-refs in the loop (consecutive, |
| complex, etc.). FORNOW: Only handle consecutive access pattern. */ |
| |
| ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad data access.\n"); |
| return ok; |
| } |
| |
| /* Data-flow analysis to detect stmts that do not need to be vectorized. */ |
| |
| ok = vect_mark_stmts_to_be_vectorized (loop_vinfo, &fatal); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "unexpected pattern.\n"); |
| return ok; |
| } |
| |
| /* The rest of the analysis below depends on the vector size in some |
| way, so from this point on failures are not necessarily fatal. */ |
| fatal = false; |
| |
| /* Analyze data dependences between the data-refs in the loop |
| and adjust the maximum vectorization factor according to |
| the dependences. |
| FORNOW: fail at the first data dependence that we encounter. */ |
| |
| ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad data dependence.\n"); |
| return ok; |
| } |
| if (max_vf != MAX_VECTORIZATION_FACTOR |
| && maybe_lt (max_vf, min_vf)) |
| return opt_result::failure_at (vect_location, "bad data dependence.\n"); |
| LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf; |
| |
| ok = vect_determine_vectorization_factor (loop_vinfo); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "can't determine vectorization factor.\n"); |
| return ok; |
| } |
| if (max_vf != MAX_VECTORIZATION_FACTOR |
| && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo))) |
| return opt_result::failure_at (vect_location, "bad data dependence.\n"); |
| |
| /* Compute the scalar iteration cost. */ |
| vect_compute_single_scalar_iteration_cost (loop_vinfo); |
| |
| poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| |
| /* Check the SLP opportunities in the loop, analyze and build SLP trees. */ |
| ok = vect_analyze_slp (loop_vinfo, *n_stmts); |
| if (!ok) |
| return ok; |
| |
| /* If there are any SLP instances mark them as pure_slp. */ |
| bool slp = vect_make_slp_decision (loop_vinfo); |
| if (slp) |
| { |
| /* Find stmts that need to be both vectorized and SLPed. */ |
| vect_detect_hybrid_slp (loop_vinfo); |
| |
| /* Update the vectorization factor based on the SLP decision. */ |
| vect_update_vf_for_slp (loop_vinfo); |
| |
| /* Optimize the SLP graph with the vectorization factor fixed. */ |
| vect_optimize_slp (loop_vinfo); |
| |
| /* Gather the loads reachable from the SLP graph entries. */ |
| vect_gather_slp_loads (loop_vinfo); |
| } |
| |
| bool saved_can_use_partial_vectors_p |
| = LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo); |
| |
| /* We don't expect to have to roll back to anything other than an empty |
| set of rgroups. */ |
| gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ()); |
| |
| /* This is the point where we can re-start analysis with SLP forced off. */ |
| start_over: |
| |
| /* Now the vectorization factor is final. */ |
| poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| gcc_assert (known_ne (vectorization_factor, 0U)); |
| |
| if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "vectorization_factor = "); |
| dump_dec (MSG_NOTE, vectorization_factor); |
| dump_printf (MSG_NOTE, ", niters = %wd\n", |
| LOOP_VINFO_INT_NITERS (loop_vinfo)); |
| } |
| |
| /* Analyze the alignment of the data-refs in the loop. |
| Fail if a data reference is found that cannot be vectorized. */ |
| |
| ok = vect_analyze_data_refs_alignment (loop_vinfo); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad data alignment.\n"); |
| return ok; |
| } |
| |
| /* Prune the list of ddrs to be tested at run-time by versioning for alias. |
| It is important to call pruning after vect_analyze_data_ref_accesses, |
| since we use grouping information gathered by interleaving analysis. */ |
| ok = vect_prune_runtime_alias_test_list (loop_vinfo); |
| if (!ok) |
| return ok; |
| |
| /* Do not invoke vect_enhance_data_refs_alignment for epilogue |
| vectorization, since we do not want to add extra peeling or |
| add versioning for alignment. */ |
| if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo)) |
| /* This pass will decide on using loop versioning and/or loop peeling in |
| order to enhance the alignment of data references in the loop. */ |
| ok = vect_enhance_data_refs_alignment (loop_vinfo); |
| if (!ok) |
| return ok; |
| |
| if (slp) |
| { |
| /* Analyze operations in the SLP instances. Note this may |
| remove unsupported SLP instances which makes the above |
| SLP kind detection invalid. */ |
| unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length (); |
| vect_slp_analyze_operations (loop_vinfo); |
| if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size) |
| { |
| ok = opt_result::failure_at (vect_location, |
| "unsupported SLP instances\n"); |
| goto again; |
| } |
| |
| /* Check whether any load in ALL SLP instances is possibly permuted. */ |
| slp_tree load_node, slp_root; |
| unsigned i, x; |
| slp_instance instance; |
| bool can_use_lanes = true; |
| FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), x, instance) |
| { |
| slp_root = SLP_INSTANCE_TREE (instance); |
| int group_size = SLP_TREE_LANES (slp_root); |
| tree vectype = SLP_TREE_VECTYPE (slp_root); |
| bool loads_permuted = false; |
| FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load_node) |
| { |
| if (!SLP_TREE_LOAD_PERMUTATION (load_node).exists ()) |
| continue; |
| unsigned j; |
| stmt_vec_info load_info; |
| FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load_info) |
| if (SLP_TREE_LOAD_PERMUTATION (load_node)[j] != j) |
| { |
| loads_permuted = true; |
| break; |
| } |
| } |
| |
| /* If the loads and stores can be handled with load/store-lane |
| instructions, record it and move on to the next instance. */ |
| if (loads_permuted |
| && SLP_INSTANCE_KIND (instance) == slp_inst_kind_store |
| && vect_store_lanes_supported (vectype, group_size, false)) |
| { |
| FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load_node) |
| { |
| stmt_vec_info stmt_vinfo = DR_GROUP_FIRST_ELEMENT |
| (SLP_TREE_SCALAR_STMTS (load_node)[0]); |
| /* Use SLP for strided accesses (or if we can't |
| use load-lanes). */ |
| if (STMT_VINFO_STRIDED_P (stmt_vinfo) |
| || ! vect_load_lanes_supported |
| (STMT_VINFO_VECTYPE (stmt_vinfo), |
| DR_GROUP_SIZE (stmt_vinfo), false)) |
| break; |
| } |
| |
| can_use_lanes |
| = can_use_lanes && i == SLP_INSTANCE_LOADS (instance).length (); |
| |
| if (can_use_lanes && dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "SLP instance %p can use load/store-lanes\n", |
| instance); |
| } |
| else |
| { |
| can_use_lanes = false; |
| break; |
| } |
| } |
| |
| /* If all SLP instances can use load/store-lanes, abort SLP and try again |
| with SLP disabled. */ |
| if (can_use_lanes) |
| { |
| ok = opt_result::failure_at (vect_location, |
| "Built SLP cancelled: can use " |
| "load/store-lanes\n"); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Built SLP cancelled: all SLP instances support " |
| "load/store-lanes\n"); |
| goto again; |
| } |
| } |
| |
| /* Dissolve SLP-only groups. */ |
| vect_dissolve_slp_only_groups (loop_vinfo); |
| |
| /* Scan all the remaining operations in the loop that are not subject |
| to SLP and make sure they are vectorizable. */ |
| ok = vect_analyze_loop_operations (loop_vinfo); |
| if (!ok) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad operation or unsupported loop bound.\n"); |
| return ok; |
| } |
| |
| /* For now, we don't expect to mix both masking and length approaches for one |
| loop, so disable the use of partial vectors if both are recorded. */ |
| if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) |
| && !LOOP_VINFO_MASKS (loop_vinfo).is_empty () |
| && !LOOP_VINFO_LENS (loop_vinfo).is_empty ()) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "can't vectorize a loop with partial vectors" |
| " because we don't expect to mix different" |
| " approaches with partial vectors for the" |
| " same loop.\n"); |
| LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false; |
| } |
| |
| /* If we still have the option of using partial vectors, |
| check whether we can generate the necessary loop controls. */ |
| if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) |
| && !vect_verify_full_masking (loop_vinfo) |
| && !vect_verify_loop_lens (loop_vinfo)) |
| LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false; |
| |
| /* If we're vectorizing an epilogue loop, the vectorized loop either needs |
| to be able to handle fewer than VF scalars, or needs to have a lower VF |
| than the main loop. */ |
| if (LOOP_VINFO_EPILOGUE_P (loop_vinfo) |
| && !LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) |
| && maybe_ge (LOOP_VINFO_VECT_FACTOR (loop_vinfo), |
| LOOP_VINFO_VECT_FACTOR (orig_loop_vinfo))) |
| return opt_result::failure_at (vect_location, |
| "Vectorization factor too high for" |
| " epilogue loop.\n"); |
| |
| /* Decide whether this loop_vinfo should use partial vectors or peeling, |
| assuming that the loop will be used as a main loop. We will redo |
| this analysis later if we instead decide to use the loop as an |
| epilogue loop. */ |
| ok = vect_determine_partial_vectors_and_peeling (loop_vinfo, false); |
| if (!ok) |
| return ok; |
| |
| /* Check the costings of the loop make vectorizing worthwhile. */ |
| res = vect_analyze_loop_costing (loop_vinfo); |
| if (res < 0) |
| { |
| ok = opt_result::failure_at (vect_location, |
| "Loop costings may not be worthwhile.\n"); |
| goto again; |
| } |
| if (!res) |
| return opt_result::failure_at (vect_location, |
| "Loop costings not worthwhile.\n"); |
| |
| /* If an epilogue loop is required make sure we can create one. */ |
| if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) |
| || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n"); |
| if (!vect_can_advance_ivs_p (loop_vinfo) |
| || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo), |
| single_exit (LOOP_VINFO_LOOP |
| (loop_vinfo)))) |
| { |
| ok = opt_result::failure_at (vect_location, |
| "not vectorized: can't create required " |
| "epilog loop\n"); |
| goto again; |
| } |
| } |
| |
| /* During peeling, we need to check whether the number of loop iterations |
| is enough for both the peeled prolog loop and the vector loop. This |
| check can be merged with the threshold check of loop versioning, so |
| increase the threshold for this case if necessary. |
| |
| If we are analyzing an epilogue we still want to check what its |
| versioning threshold would be. If we decide to vectorize the epilogues we |
| will want to use the lowest versioning threshold of all epilogues and main |
| loop. This will enable us to enter a vectorized epilogue even when |
| versioning the loop. We can't simply check whether the epilogue requires |
| versioning though since we may have skipped some versioning checks when |
| analyzing the epilogue. For instance, checks for alias versioning will be |
| skipped when dealing with epilogues as we assume we already checked them |
| for the main loop. So instead we always check the 'orig_loop_vinfo'. */ |
| if (LOOP_REQUIRES_VERSIONING (orig_loop_vinfo)) |
| { |
| poly_uint64 niters_th = 0; |
| unsigned int th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo); |
| |
| if (!vect_use_loop_mask_for_alignment_p (loop_vinfo)) |
| { |
| /* Niters for peeled prolog loop. */ |
| if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0) |
| { |
| dr_vec_info *dr_info = LOOP_VINFO_UNALIGNED_DR (loop_vinfo); |
| tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt); |
| niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1; |
| } |
| else |
| niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo); |
| } |
| |
| /* Niters for at least one iteration of the vectorized loop. */ |
| if (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo)) |
| niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| /* One additional iteration because of peeling for gap. */ |
| if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)) |
| niters_th += 1; |
| |
| /* Use the same condition as vect_transform_loop to decide when to use |
| the cost to determine a versioning threshold. */ |
| if (vect_apply_runtime_profitability_check_p (loop_vinfo) |
| && ordered_p (th, niters_th)) |
| niters_th = ordered_max (poly_uint64 (th), niters_th); |
| |
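| /* For example (purely illustrative numbers): peeling 3 prologue |
| iterations for alignment, a vectorization factor of 4 and peeling |
| for gaps give niters_th = 3 + 4 + 1 = 8, possibly raised to the |
| cost-model threshold TH by the ordered_max above. */ |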
| LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th; |
| } |
| |
| gcc_assert (known_eq (vectorization_factor, |
| LOOP_VINFO_VECT_FACTOR (loop_vinfo))); |
| |
| /* Ok to vectorize! */ |
| return opt_result::success (); |
| |
| again: |
| /* Ensure that "ok" is false (with an opt_problem if dumping is enabled). */ |
| gcc_assert (!ok); |
| |
| /* Try again with SLP forced off, but if we didn't do any SLP there is |
| no point in re-trying. */ |
| if (!slp) |
| return ok; |
| |
| /* If there are reduction chains re-trying will fail anyway. */ |
| if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ()) |
| return ok; |
| |
| /* Likewise if the grouped loads or stores in the SLP cannot be handled |
| via interleaving or lane instructions. */ |
| slp_instance instance; |
| slp_tree node; |
| unsigned i, j; |
| FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance) |
| { |
| stmt_vec_info vinfo; |
| vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]; |
| if (! STMT_VINFO_GROUPED_ACCESS (vinfo)) |
| continue; |
| vinfo = DR_GROUP_FIRST_ELEMENT (vinfo); |
| unsigned int size = DR_GROUP_SIZE (vinfo); |
| tree vectype = STMT_VINFO_VECTYPE (vinfo); |
| if (! vect_store_lanes_supported (vectype, size, false) |
| && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U) |
| && ! vect_grouped_store_supported (vectype, size)) |
| return opt_result::failure_at (vinfo->stmt, |
| "unsupported grouped store\n"); |
| FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node) |
| { |
| vinfo = SLP_TREE_SCALAR_STMTS (node)[0]; |
| vinfo = DR_GROUP_FIRST_ELEMENT (vinfo); |
| bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo); |
| size = DR_GROUP_SIZE (vinfo); |
| vectype = STMT_VINFO_VECTYPE (vinfo); |
| if (! vect_load_lanes_supported (vectype, size, false) |
| && ! vect_grouped_load_supported (vectype, single_element_p, |
| size)) |
| return opt_result::failure_at (vinfo->stmt, |
| "unsupported grouped load\n"); |
| } |
| } |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "re-trying with SLP disabled\n"); |
| |
| /* Roll back state appropriately. No SLP this time. */ |
| slp = false; |
| /* Restore the vectorization factor as it was without SLP. */ |
| LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor; |
| /* Free the SLP instances. */ |
| FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance) |
| vect_free_slp_instance (instance); |
| LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release (); |
| /* Reset SLP type to loop_vect on all stmts. */ |
| for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i) |
| { |
| basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i]; |
| for (gimple_stmt_iterator si = gsi_start_phis (bb); |
| !gsi_end_p (si); gsi_next (&si)) |
| { |
| stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si)); |
| STMT_SLP_TYPE (stmt_info) = loop_vect; |
| if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def |
| || STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def) |
| { |
| /* vectorizable_reduction adjusts reduction stmt def-types; |
| restore them to that of the PHI. */ |
| STMT_VINFO_DEF_TYPE (STMT_VINFO_REDUC_DEF (stmt_info)) |
| = STMT_VINFO_DEF_TYPE (stmt_info); |
| STMT_VINFO_DEF_TYPE (vect_stmt_to_vectorize |
| (STMT_VINFO_REDUC_DEF (stmt_info))) |
| = STMT_VINFO_DEF_TYPE (stmt_info); |
| } |
| } |
| for (gimple_stmt_iterator si = gsi_start_bb (bb); |
| !gsi_end_p (si); gsi_next (&si)) |
| { |
| if (is_gimple_debug (gsi_stmt (si))) |
| continue; |
| stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si)); |
| STMT_SLP_TYPE (stmt_info) = loop_vect; |
| if (STMT_VINFO_IN_PATTERN_P (stmt_info)) |
| { |
| stmt_vec_info pattern_stmt_info |
| = STMT_VINFO_RELATED_STMT (stmt_info); |
| if (STMT_VINFO_SLP_VECT_ONLY_PATTERN (pattern_stmt_info)) |
| STMT_VINFO_IN_PATTERN_P (stmt_info) = false; |
| |
| gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info); |
| STMT_SLP_TYPE (pattern_stmt_info) = loop_vect; |
| for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq); |
| !gsi_end_p (pi); gsi_next (&pi)) |
| STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi))) |
| = loop_vect; |
| } |
| } |
| } |
| /* Free optimized alias test DDRS. */ |
| LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0); |
| LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release (); |
| LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release (); |
| /* Reset target cost data. */ |
| destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)); |
| LOOP_VINFO_TARGET_COST_DATA (loop_vinfo) |
| = init_cost (LOOP_VINFO_LOOP (loop_vinfo), false); |
| /* Reset accumulated rgroup information. */ |
| release_vec_loop_controls (&LOOP_VINFO_MASKS (loop_vinfo)); |
| release_vec_loop_controls (&LOOP_VINFO_LENS (loop_vinfo)); |
| /* Reset assorted flags. */ |
| LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false; |
| LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false; |
| LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0; |
| LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0; |
| LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) |
| = saved_can_use_partial_vectors_p; |
| |
| goto start_over; |
| } |
| |
| /* Return true if vectorizing a loop using NEW_LOOP_VINFO appears |
| to be better than vectorizing it using OLD_LOOP_VINFO. Assume that |
| OLD_LOOP_VINFO is better unless something specifically indicates |
| otherwise. |
| |
| Note that this deliberately isn't a partial order. */ |
| |
| static bool |
| vect_better_loop_vinfo_p (loop_vec_info new_loop_vinfo, |
| loop_vec_info old_loop_vinfo) |
| { |
| struct loop *loop = LOOP_VINFO_LOOP (new_loop_vinfo); |
| gcc_assert (LOOP_VINFO_LOOP (old_loop_vinfo) == loop); |
| |
| poly_int64 new_vf = LOOP_VINFO_VECT_FACTOR (new_loop_vinfo); |
| poly_int64 old_vf = LOOP_VINFO_VECT_FACTOR (old_loop_vinfo); |
| |
| /* Always prefer a VF of loop->simdlen over any other VF. */ |
| if (loop->simdlen) |
| { |
| bool new_simdlen_p = known_eq (new_vf, loop->simdlen); |
| bool old_simdlen_p = known_eq (old_vf, loop->simdlen); |
| if (new_simdlen_p != old_simdlen_p) |
| return new_simdlen_p; |
| } |
| |
| /* Limit the VFs to what is likely to be the maximum number of iterations, |
| to handle cases in which at least one loop_vinfo is fully-masked. */ |
| HOST_WIDE_INT estimated_max_niter; |
| loop_vec_info main_loop = LOOP_VINFO_ORIG_LOOP_INFO (old_loop_vinfo); |
| unsigned HOST_WIDE_INT main_vf; |
| if (main_loop |
| && LOOP_VINFO_NITERS_KNOWN_P (main_loop) |
| && LOOP_VINFO_VECT_FACTOR (main_loop).is_constant (&main_vf)) |
| estimated_max_niter = LOOP_VINFO_INT_NITERS (main_loop) % main_vf; |
| else |
| estimated_max_niter = likely_max_stmt_executions_int (loop); |
| if (estimated_max_niter != -1) |
| { |
| if (known_le (estimated_max_niter, new_vf)) |
| new_vf = estimated_max_niter; |
| if (known_le (estimated_max_niter, old_vf)) |
| old_vf = estimated_max_niter; |
| } |
| |
| /* Check whether the (fractional) cost per scalar iteration is lower |
| or higher: new_inside_cost / new_vf vs. old_inside_cost / old_vf. */ |
| poly_int64 rel_new = new_loop_vinfo->vec_inside_cost * old_vf; |
| poly_int64 rel_old = old_loop_vinfo->vec_inside_cost * new_vf; |
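| /* For example (illustrative constant costs): a new body cost of 20 at |
| VF 8 against an old body cost of 12 at VF 4 gives rel_new = 20 * 4 = 80 |
| and rel_old = 12 * 8 = 96, i.e. 2.5 vs. 3 per scalar iteration, so the |
| new loop_vinfo wins the comparisons below. */ |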
| |
| HOST_WIDE_INT est_rel_new_min |
| = estimated_poly_value (rel_new, POLY_VALUE_MIN); |
| HOST_WIDE_INT est_rel_new_max |
| = estimated_poly_value (rel_new, POLY_VALUE_MAX); |
| |
| HOST_WIDE_INT est_rel_old_min |
| = estimated_poly_value (rel_old, POLY_VALUE_MIN); |
| HOST_WIDE_INT est_rel_old_max |
| = estimated_poly_value (rel_old, POLY_VALUE_MAX); |
| |
| /* Check first if we can make out an unambiguous total order from the minimum |
| and maximum estimates. */ |
| if (est_rel_new_min < est_rel_old_min |
| && est_rel_new_max < est_rel_old_max) |
| return true; |
| else if (est_rel_old_min < est_rel_new_min |
| && est_rel_old_max < est_rel_new_max) |
| return false; |
| /* When old_loop_vinfo uses a variable vectorization factor, |
| we know that it has a lower cost for at least one runtime VF. |
| However, we don't know how likely that VF is. |
| |
| One option would be to compare the costs for the estimated VFs. |
| The problem is that that can put too much pressure on the cost |
| model. E.g. if the estimated VF is also the lowest possible VF, |
| and if old_loop_vinfo is 1 unit worse than new_loop_vinfo |
| for the estimated VF, we'd then choose new_loop_vinfo even |
| though (a) new_loop_vinfo might not actually be better than |
| old_loop_vinfo for that VF and (b) it would be significantly |
| worse at larger VFs. |
| |
| Here we go for a hacky compromise: pick new_loop_vinfo if it is |
| no more expensive than old_loop_vinfo even after doubling the |
| estimated old_loop_vinfo VF. For all but trivial loops, this |
| ensures that we only pick new_loop_vinfo if it is significantly |
| better than old_loop_vinfo at the estimated VF. */ |
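| /* For example (illustrative): if the likely estimates are 40 for |
| rel_new and 100 for rel_old, new_loop_vinfo is preferred |
| (40 * 2 <= 100); if rel_old were only 70, old_loop_vinfo is kept. */ |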
| |
| if (est_rel_old_min != est_rel_new_min |
| || est_rel_old_max != est_rel_new_max) |
| { |
| HOST_WIDE_INT est_rel_new_likely |
| = estimated_poly_value (rel_new, POLY_VALUE_LIKELY); |
| HOST_WIDE_INT est_rel_old_likely |
| = estimated_poly_value (rel_old, POLY_VALUE_LIKELY); |
| |
| return est_rel_new_likely * 2 <= est_rel_old_likely; |
| } |
| |
| /* If there's nothing to choose between the loop bodies, see whether |
| there's a difference in the prologue and epilogue costs. */ |
| if (new_loop_vinfo->vec_outside_cost != old_loop_vinfo->vec_outside_cost) |
| return new_loop_vinfo->vec_outside_cost < old_loop_vinfo->vec_outside_cost; |
| |
| return false; |
| } |
| |
| /* Decide whether to replace OLD_LOOP_VINFO with NEW_LOOP_VINFO. Return |
| true if we should. */ |
| |
| static bool |
| vect_joust_loop_vinfos (loop_vec_info new_loop_vinfo, |
| loop_vec_info old_loop_vinfo) |
| { |
| if (!vect_better_loop_vinfo_p (new_loop_vinfo, old_loop_vinfo)) |
| return false; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "***** Preferring vector mode %s to vector mode %s\n", |
| GET_MODE_NAME (new_loop_vinfo->vector_mode), |
| GET_MODE_NAME (old_loop_vinfo->vector_mode)); |
| return true; |
| } |
| |
| /* If LOOP_VINFO is already a main loop, return it unmodified. Otherwise |
| try to reanalyze it as a main loop. Return the loop_vinfo on success |
| and null on failure. */ |
| |
| static loop_vec_info |
| vect_reanalyze_as_main_loop (loop_vec_info loop_vinfo, unsigned int *n_stmts) |
| { |
| if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo)) |
| return loop_vinfo; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "***** Reanalyzing as a main loop with vector mode %s\n", |
| GET_MODE_NAME (loop_vinfo->vector_mode)); |
| |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| vec_info_shared *shared = loop_vinfo->shared; |
| opt_loop_vec_info main_loop_vinfo = vect_analyze_loop_form (loop, shared); |
| gcc_assert (main_loop_vinfo); |
| |
| main_loop_vinfo->vector_mode = loop_vinfo->vector_mode; |
| |
| bool fatal = false; |
| bool res = vect_analyze_loop_2 (main_loop_vinfo, fatal, n_stmts); |
| loop->aux = NULL; |
| if (!res) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "***** Failed to analyze main loop with vector" |
| " mode %s\n", |
| GET_MODE_NAME (loop_vinfo->vector_mode)); |
| delete main_loop_vinfo; |
| return NULL; |
| } |
| LOOP_VINFO_VECTORIZABLE_P (main_loop_vinfo) = 1; |
| return main_loop_vinfo; |
| } |
| |
| /* Function vect_analyze_loop. |
| |
| Apply a set of analyses on LOOP, and create a loop_vec_info struct |
| for it. The different analyses will record information in the |
| loop_vec_info struct. */ |
| opt_loop_vec_info |
| vect_analyze_loop (class loop *loop, vec_info_shared *shared) |
| { |
| auto_vector_modes vector_modes; |
| |
| /* Autodetect first vector size we try. */ |
| unsigned int autovec_flags |
| = targetm.vectorize.autovectorize_vector_modes (&vector_modes, |
| loop->simdlen != 0); |
| unsigned int mode_i = 0; |
| |
| DUMP_VECT_SCOPE ("analyze_loop_nest"); |
| |
| if (loop_outer (loop) |
| && loop_vec_info_for_loop (loop_outer (loop)) |
| && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop)))) |
| return opt_loop_vec_info::failure_at (vect_location, |
| "outer-loop already vectorized.\n"); |
| |
| if (!find_loop_nest (loop, &shared->loop_nest)) |
| return opt_loop_vec_info::failure_at |
| (vect_location, |
| "not vectorized: loop nest containing two or more consecutive inner" |
| " loops cannot be vectorized\n"); |
| |
| unsigned n_stmts = 0; |
| machine_mode autodetected_vector_mode = VOIDmode; |
| opt_loop_vec_info first_loop_vinfo = opt_loop_vec_info::success (NULL); |
| machine_mode next_vector_mode = VOIDmode; |
| poly_uint64 lowest_th = 0; |
| unsigned vectorized_loops = 0; |
| bool pick_lowest_cost_p = ((autovec_flags & VECT_COMPARE_COSTS) |
| && !unlimited_cost_model (loop)); |
| |
| bool vect_epilogues = false; |
| opt_result res = opt_result::success (); |
| unsigned HOST_WIDE_INT simdlen = loop->simdlen; |
| while (1) |
| { |
| /* Check the CFG characteristics of the loop (nesting, entry/exit). */ |
| opt_loop_vec_info loop_vinfo = vect_analyze_loop_form (loop, shared); |
| if (!loop_vinfo) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad loop form.\n"); |
| gcc_checking_assert (first_loop_vinfo == NULL); |
| return loop_vinfo; |
| } |
| loop_vinfo->vector_mode = next_vector_mode; |
| |
| bool fatal = false; |
| |
| /* When pick_lowest_cost_p is true, we should in principle iterate |
| over all the loop_vec_infos that LOOP_VINFO could replace and |
| try to vectorize LOOP_VINFO under the same conditions. |
| E.g. when trying to replace an epilogue loop, we should vectorize |
| LOOP_VINFO as an epilogue loop with the same VF limit. When trying |
| to replace the main loop, we should vectorize LOOP_VINFO as a main |
| loop too. |
| |
| However, autovectorize_vector_modes is usually sorted as follows: |
| |
| - Modes that naturally produce lower VFs usually follow modes that |
| naturally produce higher VFs. |
| |
| - When modes naturally produce the same VF, maskable modes |
| usually follow unmaskable ones, so that the maskable mode |
| can be used to vectorize the epilogue of the unmaskable mode. |
| |
| This order is preferred because it leads to the maximum |
| epilogue vectorization opportunities. Targets should only use |
| a different order if they want to make wide modes available while |
| disparaging them relative to earlier, smaller modes. The assumption |
| in that case is that the wider modes are more expensive in some |
| way that isn't reflected directly in the costs. |
| |
| There should therefore be few interesting cases in which |
| LOOP_VINFO fails when treated as an epilogue loop, succeeds when |
| treated as a standalone loop, and ends up being genuinely cheaper |
| than FIRST_LOOP_VINFO. */ |
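| /* As a purely hypothetical example of that ordering: a target that |
| returns a 512-bit, a 256-bit and a 128-bit mode, in that order, lets |
| each later, narrower mode be considered both as a standalone loop |
| and as an epilogue of a loop vectorized with an earlier, wider mode. */ |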
| if (vect_epilogues) |
| LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = first_loop_vinfo; |
| |
| res = vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts); |
| if (mode_i == 0) |
| autodetected_vector_mode = loop_vinfo->vector_mode; |
| if (dump_enabled_p ()) |
| { |
| if (res) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "***** Analysis succeeded with vector mode %s\n", |
| GET_MODE_NAME (loop_vinfo->vector_mode)); |
| else |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "***** Analysis failed with vector mode %s\n", |
| GET_MODE_NAME (loop_vinfo->vector_mode)); |
| } |
| |
| loop->aux = NULL; |
| |
| if (!fatal) |
| while (mode_i < vector_modes.length () |
| && vect_chooses_same_modes_p (loop_vinfo, vector_modes[mode_i])) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "***** The result for vector mode %s would" |
| " be the same\n", |
| GET_MODE_NAME (vector_modes[mode_i])); |
| mode_i += 1; |
| } |
| |
| if (res) |
| { |
| LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1; |
| vectorized_loops++; |
| |
| /* Once we hit the desired simdlen for the first time, |
| discard any previous attempts. */ |
| if (simdlen |
| && known_eq (LOOP_VINFO_VECT_FACTOR (loop_vinfo), simdlen)) |
| { |
| delete first_loop_vinfo; |
| first_loop_vinfo = opt_loop_vec_info::success (NULL); |
| LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = NULL; |
| simdlen = 0; |
| } |
| else if (pick_lowest_cost_p && first_loop_vinfo) |
| { |
| /* Keep trying to roll back vectorization attempts while the |
| loop_vec_infos they produced were worse than this one. */ |
| vec<loop_vec_info> &vinfos = first_loop_vinfo->epilogue_vinfos; |
| while (!vinfos.is_empty () |
| && vect_joust_loop_vinfos (loop_vinfo, vinfos.last ())) |
| { |
| gcc_assert (vect_epilogues); |
| delete vinfos.pop (); |
| } |
| if (vinfos.is_empty () |
| && vect_joust_loop_vinfos (loop_vinfo, first_loop_vinfo)) |
| { |
| loop_vec_info main_loop_vinfo |
| = vect_reanalyze_as_main_loop (loop_vinfo, &n_stmts); |
| if (main_loop_vinfo == loop_vinfo) |
| { |
| delete first_loop_vinfo; |
| first_loop_vinfo = opt_loop_vec_info::success (NULL); |
| } |
| else if (main_loop_vinfo |
| && vect_joust_loop_vinfos (main_loop_vinfo, |
| first_loop_vinfo)) |
| { |
| delete first_loop_vinfo; |
| first_loop_vinfo = opt_loop_vec_info::success (NULL); |
| delete loop_vinfo; |
| loop_vinfo |
| = opt_loop_vec_info::success (main_loop_vinfo); |
| } |
| else |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "***** No longer preferring vector" |
| " mode %s after reanalyzing the loop" |
| " as a main loop\n", |
| GET_MODE_NAME |
| (main_loop_vinfo->vector_mode)); |
| delete main_loop_vinfo; |
| } |
| } |
| } |
| |
| if (first_loop_vinfo == NULL) |
| { |
| first_loop_vinfo = loop_vinfo; |
| lowest_th = LOOP_VINFO_VERSIONING_THRESHOLD (first_loop_vinfo); |
| } |
| else if (vect_epilogues |
| /* For now only allow one epilogue loop. */ |
| && first_loop_vinfo->epilogue_vinfos.is_empty ()) |
| { |
| first_loop_vinfo->epilogue_vinfos.safe_push (loop_vinfo); |
| poly_uint64 th = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo); |
| gcc_assert (!LOOP_REQUIRES_VERSIONING (loop_vinfo) |
| || maybe_ne (lowest_th, 0U)); |
| /* Keep track of the known smallest versioning |
| threshold. */ |
| if (ordered_p (lowest_th, th)) |
| lowest_th = ordered_min (lowest_th, th); |
| } |
| else |
| { |
| delete loop_vinfo; |
| loop_vinfo = opt_loop_vec_info::success (NULL); |
| } |
| |
| /* Only vectorize epilogues if PARAM_VECT_EPILOGUES_NOMASK is |
| enabled, SIMDUID is not set, it is the innermost loop and we have |
| either already found the loop's SIMDLEN or there was no SIMDLEN to |
| begin with. |
| TODO: Enable epilogue vectorization for loops with SIMDUID set. */ |
| vect_epilogues = (!simdlen |
| && loop->inner == NULL |
| && param_vect_epilogues_nomask |
| && LOOP_VINFO_PEELING_FOR_NITER (first_loop_vinfo) |
| && !loop->simduid |
| /* For now only allow one epilogue loop, but allow |
| pick_lowest_cost_p to replace it. */ |
| && (first_loop_vinfo->epilogue_vinfos.is_empty () |
| || pick_lowest_cost_p)); |
| |
| /* Commit to first_loop_vinfo if we have no reason to try |
| alternatives. */ |
| if (!simdlen && !vect_epilogues && !pick_lowest_cost_p) |
| break; |
| } |
| else |
| { |
| delete loop_vinfo; |
| loop_vinfo = opt_loop_vec_info::success (NULL); |
| if (fatal) |
| { |
| gcc_checking_assert (first_loop_vinfo == NULL); |
| break; |
| } |
| } |
| |
| /* Handle the case in which the original loop can use partial |
| vectorization, but we only want to adopt it for the epilogue loop. |
| The retry should use the same vector mode as the original loop. */ |
| if (vect_epilogues |
| && loop_vinfo |
| && LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P (loop_vinfo)) |
| { |
| gcc_assert (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) |
| && !LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo)); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "***** Re-trying analysis with same vector mode" |
| " %s for epilogue with partial vectors.\n", |
| GET_MODE_NAME (loop_vinfo->vector_mode)); |
| continue; |
| } |
| |
| if (mode_i < vector_modes.length () |
| && VECTOR_MODE_P (autodetected_vector_mode) |
| && (related_vector_mode (vector_modes[mode_i], |
| GET_MODE_INNER (autodetected_vector_mode)) |
| == autodetected_vector_mode) |
| && (related_vector_mode (autodetected_vector_mode, |
| GET_MODE_INNER (vector_modes[mode_i])) |
| == vector_modes[mode_i])) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "***** Skipping vector mode %s, which would" |
| " repeat the analysis for %s\n", |
| GET_MODE_NAME (vector_modes[mode_i]), |
| GET_MODE_NAME (autodetected_vector_mode)); |
| mode_i += 1; |
| } |
| |
| if (mode_i == vector_modes.length () |
| || autodetected_vector_mode == VOIDmode) |
| break; |
| |
| /* Try the next biggest vector size. */ |
| next_vector_mode = vector_modes[mode_i++]; |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "***** Re-trying analysis with vector mode %s\n", |
| GET_MODE_NAME (next_vector_mode)); |
| } |
| |
| if (first_loop_vinfo) |
| { |
| loop->aux = (loop_vec_info) first_loop_vinfo; |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "***** Choosing vector mode %s\n", |
| GET_MODE_NAME (first_loop_vinfo->vector_mode)); |
| LOOP_VINFO_VERSIONING_THRESHOLD (first_loop_vinfo) = lowest_th; |
| return first_loop_vinfo; |
| } |
| |
| return opt_loop_vec_info::propagate_failure (res); |
| } |
| |
| /* Return true if there is an in-order reduction function for CODE, storing |
| it in *REDUC_FN if so. */ |
| |
| static bool |
| fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn) |
| { |
| switch (code) |
| { |
| case PLUS_EXPR: |
| *reduc_fn = IFN_FOLD_LEFT_PLUS; |
| return true; |
| |
| default: |
| return false; |
| } |
| } |
| |
| /* Function reduction_fn_for_scalar_code |
| |
| Input: |
| CODE - tree_code of a reduction operation. |
| |
| Output: |
| REDUC_FN - the corresponding internal function to be used to reduce the |
| vector of partial results into a single scalar result, or IFN_LAST |
| if the operation is a supported reduction operation, but does not have |
| such an internal function. |
| |
| Return FALSE if CODE currently cannot be vectorized as a reduction. */ |
| |
| bool |
| reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn) |
| { |
|
|