| /* Analysis Utilities for Loop Vectorization. |
| Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software |
| Foundation, Inc. |
| Contributed by Dorit Naishlos <dorit@il.ibm.com> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "tm.h" |
| #include "ggc.h" |
| #include "tree.h" |
| #include "target.h" |
| #include "basic-block.h" |
| #include "diagnostic.h" |
| #include "tree-flow.h" |
| #include "tree-dump.h" |
| #include "timevar.h" |
| #include "cfgloop.h" |
| #include "expr.h" |
| #include "optabs.h" |
| #include "params.h" |
| #include "tree-chrec.h" |
| #include "tree-data-ref.h" |
| #include "tree-scalar-evolution.h" |
| #include "tree-vectorizer.h" |
| #include "toplev.h" |
| #include "recog.h" |
| |
| static bool vect_can_advance_ivs_p (loop_vec_info); |
| |
| /* Return the smallest scalar type used in STMT. |
| This is used to determine the vectype of the stmt. We generally set the |
| vectype according to the type of the result (lhs). For stmts whose |
| result-type is different than the type of the arguments (e.g., demotion, |
| promotion), vectype will be reset appropriately (later). Note that we have |
| to visit the smallest datatype in this function, because that determines the |
| VF. If the smallest datatype in the loop appeared only as the rhs of a |
| promotion operation, we'd miss it. |
| Such a case, where a variable of this datatype does not appear in the lhs |
| anywhere in the loop, can only occur if it's an invariant, e.g. |
| 'int_x = (int) short_inv', which we'd expect to have been optimized away by |
| invariant motion. However, we cannot rely on invariant motion to always take |
| invariants out of the loop, and so in the case of promotion we also have to |
| check the rhs. |
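| For example (an illustrative stmt, not from any particular loop): in |
| 'int_x = (int) short_y', the lhs type 'int' (4 bytes) differs from the |
| rhs type 'short' (2 bytes); 'short' is the smaller type and therefore |
| determines the VF. |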
| LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding |
| types. */ |
| |
| tree |
| vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit, |
| HOST_WIDE_INT *rhs_size_unit) |
| { |
| tree scalar_type = gimple_expr_type (stmt); |
| HOST_WIDE_INT lhs, rhs; |
| |
| lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); |
| |
| if (is_gimple_assign (stmt) |
| && (gimple_assign_cast_p (stmt) |
| || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR |
| || gimple_assign_rhs_code (stmt) == FLOAT_EXPR)) |
| { |
| tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); |
| |
| rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type)); |
| if (rhs < lhs) |
| scalar_type = rhs_type; |
| } |
| |
| *lhs_size_unit = lhs; |
| *rhs_size_unit = rhs; |
| return scalar_type; |
| } |
| |
| |
| /* Function vect_determine_vectorization_factor |
| |
| Determine the vectorization factor (VF). VF is the number of data elements |
| that are operated upon in parallel in a single iteration of the vectorized |
| loop. For example, when vectorizing a loop that operates on 4-byte elements, |
| on a target with a 16-byte vector size (VS), the VF is set to 4, since 4 |
| elements can fit in a single vector register. |
| |
| We currently support vectorization of loops in which all types operated upon |
| are of the same size. Therefore this function currently sets VF according to |
| the size of the types operated upon, and fails if there are multiple sizes |
| in the loop. |
| |
| VF is also the factor by which the loop iterations are strip-mined, e.g.: |
| original loop: |
| for (i=0; i<N; i++){ |
| a[i] = b[i] + c[i]; |
| } |
| |
| vectorized loop: |
| for (i=0; i<N; i+=VF){ |
| a[i:VF] = b[i:VF] + c[i:VF]; |
| } |
| */ |
| |
| static bool |
| vect_determine_vectorization_factor (loop_vec_info loop_vinfo) |
| { |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); |
| int nbbs = loop->num_nodes; |
| gimple_stmt_iterator si; |
| unsigned int vectorization_factor = 0; |
| tree scalar_type; |
| gimple phi; |
| tree vectype; |
| unsigned int nunits; |
| stmt_vec_info stmt_info; |
| int i; |
| HOST_WIDE_INT dummy; |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "=== vect_determine_vectorization_factor ==="); |
| |
| for (i = 0; i < nbbs; i++) |
| { |
| basic_block bb = bbs[i]; |
| |
| for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) |
| { |
| phi = gsi_stmt (si); |
| stmt_info = vinfo_for_stmt (phi); |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "==> examining phi: "); |
| print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM); |
| } |
| |
| gcc_assert (stmt_info); |
| |
| if (STMT_VINFO_RELEVANT_P (stmt_info)) |
| { |
| gcc_assert (!STMT_VINFO_VECTYPE (stmt_info)); |
| scalar_type = TREE_TYPE (PHI_RESULT (phi)); |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "get vectype for scalar type: "); |
| print_generic_expr (vect_dump, scalar_type, TDF_SLIM); |
| } |
| |
| vectype = get_vectype_for_scalar_type (scalar_type); |
| if (!vectype) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| { |
| fprintf (vect_dump, |
| "not vectorized: unsupported data-type "); |
| print_generic_expr (vect_dump, scalar_type, TDF_SLIM); |
| } |
| return false; |
| } |
| STMT_VINFO_VECTYPE (stmt_info) = vectype; |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "vectype: "); |
| print_generic_expr (vect_dump, vectype, TDF_SLIM); |
| } |
| |
| nunits = TYPE_VECTOR_SUBPARTS (vectype); |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "nunits = %d", nunits); |
| |
| if (!vectorization_factor |
| || (nunits > vectorization_factor)) |
| vectorization_factor = nunits; |
| } |
| } |
| |
| for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) |
| { |
| gimple stmt = gsi_stmt (si); |
| stmt_info = vinfo_for_stmt (stmt); |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "==> examining statement: "); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| |
| if (gimple_has_volatile_ops (stmt)) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| fprintf (vect_dump, "not vectorized: stmt has volatile" |
| " operands"); |
| |
| return false; |
| } |
| |
| gcc_assert (stmt_info); |
| |
| /* Skip stmts that do not need to be vectorized. */ |
| if (!STMT_VINFO_RELEVANT_P (stmt_info) |
| && !STMT_VINFO_LIVE_P (stmt_info)) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "skip."); |
| continue; |
| } |
| |
| if (gimple_get_lhs (stmt) == NULL_TREE) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| { |
| fprintf (vect_dump, "not vectorized: irregular stmt."); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| return false; |
| } |
| |
| if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt)))) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| { |
| fprintf (vect_dump, "not vectorized: vector stmt in loop:"); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| return false; |
| } |
| |
| if (STMT_VINFO_VECTYPE (stmt_info)) |
| { |
| /* The only cases in which a vectype has already been set are stmts |
| that contain a dataref, and "pattern-stmts" (stmts generated |
| by the vectorizer to represent/replace a certain idiom). */ |
| gcc_assert (STMT_VINFO_DATA_REF (stmt_info) |
| || is_pattern_stmt_p (stmt_info)); |
| vectype = STMT_VINFO_VECTYPE (stmt_info); |
| } |
| else |
| { |
| |
| gcc_assert (! STMT_VINFO_DATA_REF (stmt_info) |
| && !is_pattern_stmt_p (stmt_info)); |
| |
| scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, |
| &dummy); |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "get vectype for scalar type: "); |
| print_generic_expr (vect_dump, scalar_type, TDF_SLIM); |
| } |
| |
| vectype = get_vectype_for_scalar_type (scalar_type); |
| if (!vectype) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| { |
| fprintf (vect_dump, |
| "not vectorized: unsupported data-type "); |
| print_generic_expr (vect_dump, scalar_type, TDF_SLIM); |
| } |
| return false; |
| } |
| STMT_VINFO_VECTYPE (stmt_info) = vectype; |
| } |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "vectype: "); |
| print_generic_expr (vect_dump, vectype, TDF_SLIM); |
| } |
| |
| nunits = TYPE_VECTOR_SUBPARTS (vectype); |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "nunits = %d", nunits); |
| |
| if (!vectorization_factor |
| || (nunits > vectorization_factor)) |
| vectorization_factor = nunits; |
| |
| } |
| } |
| |
| /* TODO: Analyze cost. Decide if worthwhile to vectorize. */ |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "vectorization factor = %d", vectorization_factor); |
| if (vectorization_factor <= 1) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| fprintf (vect_dump, "not vectorized: unsupported data-type"); |
| return false; |
| } |
| LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor; |
| |
| return true; |
| } |
| |
| |
| /* SLP costs are calculated according to SLP instance unrolling factor (i.e., |
| the number of created vector stmts depends on the unrolling factor). However, |
| the actual number of vector stmts for every SLP node depends on VF, which |
| is set later in vect_analyze_operations(). Hence, SLP costs should be |
| updated. |
| In this function we assume that the inside costs calculated in |
| vect_model_xxx_cost are linear in ncopies. */ |
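| |
| /* For example (hypothetical numbers): if an SLP instance was analyzed |
| with an unrolling factor of 2 and VF is later set to 8, the instance |
| needs 8/2 = 4 times as many vector stmts as assumed at SLP analysis |
| time, so its inside-of-loop cost is multiplied by 4. */ |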
| |
| static void |
| vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo) |
| { |
| unsigned int i, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| VEC (slp_instance, heap) *slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo); |
| slp_instance instance; |
| |
| if (vect_print_dump_info (REPORT_SLP)) |
| fprintf (vect_dump, "=== vect_update_slp_costs_according_to_vf ==="); |
| |
| for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++) |
| /* We assume that costs are linear in ncopies. */ |
| SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance) *= vf |
| / SLP_INSTANCE_UNROLLING_FACTOR (instance); |
| } |
| |
| |
| /* Function vect_analyze_operations. |
| |
| Scan the loop stmts and make sure they are all vectorizable. */ |
| |
| static bool |
| vect_analyze_operations (loop_vec_info loop_vinfo) |
| { |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); |
| int nbbs = loop->num_nodes; |
| gimple_stmt_iterator si; |
| unsigned int vectorization_factor = 0; |
| int i; |
| bool ok; |
| gimple phi; |
| stmt_vec_info stmt_info; |
| bool need_to_vectorize = false; |
| int min_profitable_iters; |
| int min_scalar_loop_bound; |
| unsigned int th; |
| bool only_slp_in_loop = true; |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "=== vect_analyze_operations ==="); |
| |
| gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo)); |
| vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| |
| for (i = 0; i < nbbs; i++) |
| { |
| basic_block bb = bbs[i]; |
| |
| for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) |
| { |
| phi = gsi_stmt (si); |
| ok = true; |
| |
| stmt_info = vinfo_for_stmt (phi); |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "examining phi: "); |
| print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM); |
| } |
| |
| if (! is_loop_header_bb_p (bb)) |
| { |
| /* Inner-loop loop-closed exit phi in outer-loop vectorization |
| (i.e., a phi in the tail of the outer-loop). |
| FORNOW: we currently don't support the case that these phis |
| are not used in the outer-loop, because that case would require |
| us to actually do something here. */ |
| if (!STMT_VINFO_RELEVANT_P (stmt_info) |
| || STMT_VINFO_LIVE_P (stmt_info)) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, |
| "Unsupported loop-closed phi in outer-loop."); |
| return false; |
| } |
| continue; |
| } |
| |
| gcc_assert (stmt_info); |
| |
| if (STMT_VINFO_LIVE_P (stmt_info)) |
| { |
| /* FORNOW: not yet supported. */ |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| fprintf (vect_dump, "not vectorized: value used after loop."); |
| return false; |
| } |
| |
| if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_loop |
| && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def) |
| { |
| /* A scalar-dependence cycle that we don't support. */ |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| fprintf (vect_dump, "not vectorized: scalar dependence cycle."); |
| return false; |
| } |
| |
| if (STMT_VINFO_RELEVANT_P (stmt_info)) |
| { |
| need_to_vectorize = true; |
| if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def) |
| ok = vectorizable_induction (phi, NULL, NULL); |
| } |
| |
| if (!ok) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| { |
| fprintf (vect_dump, |
| "not vectorized: relevant phi not supported: "); |
| print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM); |
| } |
| return false; |
| } |
| } |
| |
| for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) |
| { |
| gimple stmt = gsi_stmt (si); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info); |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "==> examining statement: "); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| |
| gcc_assert (stmt_info); |
| |
| /* Skip stmts that do not need to be vectorized. |
| These are expected to include: |
| - the COND_EXPR that is the loop exit condition |
| - any LABEL_EXPRs in the loop |
| - computations that are used only for array indexing or loop |
| control. */ |
| |
| if (!STMT_VINFO_RELEVANT_P (stmt_info) |
| && !STMT_VINFO_LIVE_P (stmt_info)) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "irrelevant."); |
| continue; |
| } |
| |
| switch (STMT_VINFO_DEF_TYPE (stmt_info)) |
| { |
| case vect_loop_def: |
| break; |
| |
| case vect_reduction_def: |
| gcc_assert (relevance == vect_used_in_outer |
| || relevance == vect_used_in_outer_by_reduction |
| || relevance == vect_unused_in_loop); |
| break; |
| |
| case vect_induction_def: |
| case vect_constant_def: |
| case vect_invariant_def: |
| case vect_unknown_def_type: |
| default: |
| gcc_unreachable (); |
| } |
| |
| if (STMT_VINFO_RELEVANT_P (stmt_info)) |
| { |
| gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt)))); |
| gcc_assert (STMT_VINFO_VECTYPE (stmt_info)); |
| need_to_vectorize = true; |
| } |
| |
| ok = true; |
| if (STMT_VINFO_RELEVANT_P (stmt_info) |
| || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def) |
| ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL) |
| || vectorizable_type_demotion (stmt, NULL, NULL, NULL) |
| || vectorizable_conversion (stmt, NULL, NULL, NULL) |
| || vectorizable_operation (stmt, NULL, NULL, NULL) |
| || vectorizable_assignment (stmt, NULL, NULL, NULL) |
| || vectorizable_load (stmt, NULL, NULL, NULL, NULL) |
| || vectorizable_call (stmt, NULL, NULL) |
| || vectorizable_store (stmt, NULL, NULL, NULL) |
| || vectorizable_condition (stmt, NULL, NULL) |
| || vectorizable_reduction (stmt, NULL, NULL)); |
| |
| if (!ok) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| { |
| fprintf (vect_dump, "not vectorized: relevant stmt not "); |
| fprintf (vect_dump, "supported: "); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| return false; |
| } |
| |
| /* Stmts that are (also) "live" (i.e., used outside the loop) |
| need extra handling, except for vectorizable reductions. */ |
| if (STMT_VINFO_LIVE_P (stmt_info) |
| && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type) |
| ok = vectorizable_live_operation (stmt, NULL, NULL); |
| |
| if (!ok) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| { |
| fprintf (vect_dump, "not vectorized: live stmt not "); |
| fprintf (vect_dump, "supported: "); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| return false; |
| } |
| |
| if (!PURE_SLP_STMT (stmt_info)) |
| { |
| /* STMT needs loop-based vectorization. */ |
| only_slp_in_loop = false; |
| |
| /* Groups of strided accesses whose size is not a power of 2 cannot |
| yet be handled by loop-based vectorization. Therefore, if this |
| stmt feeds non-SLP-able stmts (i.e., this stmt has to be both |
| SLPed and loop-based vectorized), the loop cannot be vectorized. */ |
| if (STMT_VINFO_STRIDED_ACCESS (stmt_info) |
| && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt ( |
| DR_GROUP_FIRST_DR (stmt_info)))) == -1) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "not vectorized: the size of group " |
| "of strided accesses is not a power of 2"); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| return false; |
| } |
| } |
| } /* stmts in bb */ |
| } /* bbs */ |
| |
| /* All operations in the loop are either irrelevant (deal with loop |
| control, or dead), or only used outside the loop and can be moved |
| out of the loop (e.g. invariants, inductions). The loop can be |
| optimized away by scalar optimizations. We're better off not |
| touching this loop. */ |
| if (!need_to_vectorize) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, |
| "All the computation can be taken out of the loop."); |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| fprintf (vect_dump, |
| "not vectorized: redundant loop. no profit to vectorize."); |
| return false; |
| } |
| |
| /* If all the stmts in the loop can be SLPed, we perform only SLP, and the |
| vectorization factor of the loop is the unrolling factor required by the |
| SLP instances. If that unrolling factor is 1, we say that we perform |
| pure SLP on the loop - cross-iteration parallelism is not exploited. */ |
| if (only_slp_in_loop) |
| vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo); |
| else |
| vectorization_factor = least_common_multiple (vectorization_factor, |
| LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo)); |
| |
| LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor; |
| |
| if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
| && vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, |
| "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC, |
| vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo)); |
| |
| if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
| && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor)) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| fprintf (vect_dump, "not vectorized: iteration count too small."); |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump,"not vectorized: iteration count smaller than " |
| "vectorization factor."); |
| return false; |
| } |
| |
| /* Analyze cost. Decide if worthwhile to vectorize. */ |
| |
| /* Once VF is set, SLP costs should be updated since the number of created |
| vector stmts depends on VF. */ |
| vect_update_slp_costs_according_to_vf (loop_vinfo); |
| |
| min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo); |
| LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters; |
| |
| if (min_profitable_iters < 0) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| fprintf (vect_dump, "not vectorized: vectorization not profitable."); |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "not vectorized: vector version will never be " |
| "profitable."); |
| return false; |
| } |
| |
| min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND) |
| * vectorization_factor) - 1); |
| |
| /* Use the cost model only if it is more conservative than the |
| user-specified threshold. */ |
| |
| th = (unsigned) min_scalar_loop_bound; |
| if (min_profitable_iters |
| && (!min_scalar_loop_bound |
| || min_profitable_iters > min_scalar_loop_bound)) |
| th = (unsigned) min_profitable_iters; |
| |
| if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
| && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| fprintf (vect_dump, "not vectorized: vectorization not " |
| "profitable."); |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "not vectorized: iteration count smaller than " |
| "user specified loop bound parameter or minimum " |
| "profitable iterations (whichever is more conservative)."); |
| return false; |
| } |
| |
| if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) |
| || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0 |
| || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo)) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "epilog loop required."); |
| if (!vect_can_advance_ivs_p (loop_vinfo)) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| fprintf (vect_dump, |
| "not vectorized: can't create epilog loop 1."); |
| return false; |
| } |
| if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop))) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| fprintf (vect_dump, |
| "not vectorized: can't create epilog loop 2."); |
| return false; |
| } |
| } |
| |
| return true; |
| } |
| |
| |
| /* Function exist_non_indexing_operands_for_use_p |
| |
| USE is one of the uses attached to STMT. Check if USE is |
| used in STMT for anything other than indexing an array. */ |
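| |
| /* For example (illustration only): given 'a[i] = x', the use 'i' serves |
| only to index the array, so this function returns false for it, whereas |
| for the use 'x' it returns true. */ |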
| |
| static bool |
| exist_non_indexing_operands_for_use_p (tree use, gimple stmt) |
| { |
| tree operand; |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| |
| /* USE corresponds to some operand in STMT. If there is no data |
| reference in STMT, then any operand that corresponds to USE |
| is not indexing an array. */ |
| if (!STMT_VINFO_DATA_REF (stmt_info)) |
| return true; |
| |
| /* STMT has a data_ref. FORNOW this means that it is in one of |
| the following forms: |
| -1- ARRAY_REF = var |
| -2- var = ARRAY_REF |
| (This should have been verified in analyze_data_refs.) |
| |
| 'var' in the second case corresponds to a def, not a use, |
| so USE cannot correspond to any operands that are not used |
| for array indexing. |
| |
| Therefore, all we need to check is if STMT falls into the |
| first case, and whether var corresponds to USE. */ |
| |
| if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME) |
| return false; |
| |
| if (!gimple_assign_copy_p (stmt)) |
| return false; |
| operand = gimple_assign_rhs1 (stmt); |
| |
| if (TREE_CODE (operand) != SSA_NAME) |
| return false; |
| |
| if (operand == use) |
| return true; |
| |
| return false; |
| } |
| |
| |
| /* Function vect_analyze_scalar_cycles_1. |
| |
| Examine the cross iteration def-use cycles of scalar variables |
| in LOOP. LOOP_VINFO represents the loop that is now being |
| considered for vectorization (can be LOOP, or an outer-loop |
| enclosing LOOP). */ |
| |
| static void |
| vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop) |
| { |
| basic_block bb = loop->header; |
| tree dummy; |
| VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64); |
| gimple_stmt_iterator gsi; |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "=== vect_analyze_scalar_cycles ==="); |
| |
| /* First - identify all inductions. */ |
| for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple phi = gsi_stmt (gsi); |
| tree access_fn = NULL; |
| tree def = PHI_RESULT (phi); |
| stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi); |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "Analyze phi: "); |
| print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM); |
| } |
| |
| /* Skip virtual phis. The data dependences that are associated with |
| virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */ |
| if (!is_gimple_reg (SSA_NAME_VAR (def))) |
| continue; |
| |
| STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type; |
| |
| /* Analyze the evolution function. */ |
| access_fn = analyze_scalar_evolution (loop, def); |
| if (access_fn && vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "Access function of PHI: "); |
| print_generic_expr (vect_dump, access_fn, TDF_SLIM); |
| } |
| |
| if (!access_fn |
| || !vect_is_simple_iv_evolution (loop->num, access_fn, &dummy, &dummy)) |
| { |
| VEC_safe_push (gimple, heap, worklist, phi); |
| continue; |
| } |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "Detected induction."); |
| STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def; |
| } |
| |
| |
| /* Second - identify all reductions. */ |
| while (VEC_length (gimple, worklist) > 0) |
| { |
| gimple phi = VEC_pop (gimple, worklist); |
| tree def = PHI_RESULT (phi); |
| stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi); |
| gimple reduc_stmt; |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "Analyze phi: "); |
| print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM); |
| } |
| |
| gcc_assert (is_gimple_reg (SSA_NAME_VAR (def))); |
| gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type); |
| |
| reduc_stmt = vect_is_simple_reduction (loop_vinfo, phi); |
| if (reduc_stmt) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "Detected reduction."); |
| STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def; |
| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) = |
| vect_reduction_def; |
| } |
| else |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "Unknown def-use cycle pattern."); |
| } |
| |
| VEC_free (gimple, heap, worklist); |
| return; |
| } |
| |
| |
| /* Function vect_analyze_scalar_cycles. |
| |
| Examine the cross-iteration def-use cycles of scalar variables, by |
| analyzing the loop-header PHIs of scalar variables; classify each |
| cycle as one of the following: invariant, induction, reduction, unknown. |
| We do that for the loop represented by LOOP_VINFO, and also for its |
| inner-loop, if one exists. |
| Examples for scalar cycles: |
| |
| Example1: reduction: |
| |
| loop1: |
| for (i=0; i<N; i++) |
| sum += a[i]; |
| |
| Example2: induction: |
| |
| loop2: |
| for (i=0; i<N; i++) |
| a[i] = i; */ |
| |
| static void |
| vect_analyze_scalar_cycles (loop_vec_info loop_vinfo) |
| { |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| vect_analyze_scalar_cycles_1 (loop_vinfo, loop); |
| |
| /* When vectorizing an outer-loop, the inner-loop is executed sequentially. |
| Reductions in such an inner-loop therefore have different properties than |
| the reductions in the nest that gets vectorized: |
| 1. When vectorized, they are executed in the same order as in the original |
| scalar loop, so we can't change the order of computation when |
| vectorizing them. |
| 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the |
| current checks are too strict. */ |
| |
| if (loop->inner) |
| vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner); |
| } |
| |
| |
| /* Find the place of the data-ref in STMT in the interleaving chain that starts |
| from FIRST_STMT. Return -1 if the data-ref is not a part of the chain. */ |
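| |
| /* For example (hypothetical chain): if the chain starting at FIRST_STMT |
| accesses a[0], a[1], a[2] in that order, the stmt accessing a[2] is at |
| place 2, while a stmt belonging to a different chain yields -1. */ |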
| |
| static int |
| vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt) |
| { |
| gimple next_stmt = first_stmt; |
| int result = 0; |
| |
| if (first_stmt != DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt))) |
| return -1; |
| |
| while (next_stmt && next_stmt != stmt) |
| { |
| result++; |
| next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt)); |
| } |
| |
| if (next_stmt) |
| return result; |
| else |
| return -1; |
| } |
| |
| |
| /* Function vect_insert_into_interleaving_chain. |
| |
| Insert DRA into the interleaving chain of DRB according to DRA's INIT. */ |
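| |
| /* For example (hypothetical inits): inserting a data-ref with init 8 into |
| a chain whose members have inits {0, 4, 12} yields {0, 4, 8, 12}; the |
| chain is kept sorted by increasing DR_INIT. */ |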
| |
| static void |
| vect_insert_into_interleaving_chain (struct data_reference *dra, |
| struct data_reference *drb) |
| { |
| gimple prev, next; |
| tree next_init; |
| stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra)); |
| stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb)); |
| |
| prev = DR_GROUP_FIRST_DR (stmtinfo_b); |
| next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)); |
| while (next) |
| { |
| next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next))); |
| if (tree_int_cst_compare (next_init, DR_INIT (dra)) > 0) |
| { |
| /* Insert here. */ |
| DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = DR_STMT (dra); |
| DR_GROUP_NEXT_DR (stmtinfo_a) = next; |
| return; |
| } |
| prev = next; |
| next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)); |
| } |
| |
| /* We got to the end of the list. Insert here. */ |
| DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = DR_STMT (dra); |
| DR_GROUP_NEXT_DR (stmtinfo_a) = NULL; |
| } |
| |
| |
| /* Function vect_update_interleaving_chain. |
| |
| For two data-refs DRA and DRB that are part of a chain of interleaved data |
| accesses, update the interleaving chain. DRB's INIT is smaller than DRA's. |
| |
| There are four possible cases: |
| 1. New stmts - both DRA and DRB are not a part of any chain: |
| FIRST_DR = DRB |
| NEXT_DR (DRB) = DRA |
| 2. DRB is a part of a chain and DRA is not: |
| no need to update FIRST_DR |
| no need to insert DRB |
| insert DRA according to init |
| 3. DRA is a part of a chain and DRB is not: |
| if (init of FIRST_DR > init of DRB) |
| FIRST_DR = DRB |
| NEXT(FIRST_DR) = previous FIRST_DR |
| else |
| insert DRB according to its init |
| 4. Both DRA and DRB are already in interleaving chains: |
| choose the chain whose FIRST_DR has the smaller init, and |
| insert the nodes of the second chain into the first one |
| (a worked example follows below). */ |
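| |
| /* A worked example for case 4 (hypothetical inits): merging a chain with |
| inits {4, 12} into a chain with inits {0, 8} yields one chain with inits |
| {0, 4, 8, 12}; the merged-in nodes are updated to point to the first |
| stmt of the surviving chain. */ |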
| |
| static void |
| vect_update_interleaving_chain (struct data_reference *drb, |
| struct data_reference *dra) |
| { |
| stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra)); |
| stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb)); |
| tree next_init, init_dra_chain, init_drb_chain; |
| gimple first_a, first_b; |
| tree node_init; |
| gimple node, prev, next, first_stmt; |
| |
| /* 1. New stmts - both DRA and DRB are not a part of any chain. */ |
| if (!DR_GROUP_FIRST_DR (stmtinfo_a) && !DR_GROUP_FIRST_DR (stmtinfo_b)) |
| { |
| DR_GROUP_FIRST_DR (stmtinfo_a) = DR_STMT (drb); |
| DR_GROUP_FIRST_DR (stmtinfo_b) = DR_STMT (drb); |
| DR_GROUP_NEXT_DR (stmtinfo_b) = DR_STMT (dra); |
| return; |
| } |
| |
| /* 2. DRB is a part of a chain and DRA is not. */ |
| if (!DR_GROUP_FIRST_DR (stmtinfo_a) && DR_GROUP_FIRST_DR (stmtinfo_b)) |
| { |
| DR_GROUP_FIRST_DR (stmtinfo_a) = DR_GROUP_FIRST_DR (stmtinfo_b); |
| /* Insert DRA into the chain of DRB. */ |
| vect_insert_into_interleaving_chain (dra, drb); |
| return; |
| } |
| |
| /* 3. DRA is a part of a chain and DRB is not. */ |
| if (DR_GROUP_FIRST_DR (stmtinfo_a) && !DR_GROUP_FIRST_DR (stmtinfo_b)) |
| { |
| gimple old_first_stmt = DR_GROUP_FIRST_DR (stmtinfo_a); |
| tree init_old = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt ( |
| old_first_stmt))); |
| gimple tmp; |
| |
| if (tree_int_cst_compare (init_old, DR_INIT (drb)) > 0) |
| { |
| /* DRB's init is smaller than the init of the stmt previously marked |
| as the first stmt of the interleaving chain of DRA. Therefore, we |
| update FIRST_STMT and put DRB in the head of the list. */ |
| DR_GROUP_FIRST_DR (stmtinfo_b) = DR_STMT (drb); |
| DR_GROUP_NEXT_DR (stmtinfo_b) = old_first_stmt; |
| |
| /* Update all the stmts in the list to point to the new FIRST_STMT. */ |
| tmp = old_first_stmt; |
| while (tmp) |
| { |
| DR_GROUP_FIRST_DR (vinfo_for_stmt (tmp)) = DR_STMT (drb); |
| tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (tmp)); |
| } |
| } |
| else |
| { |
| /* Insert DRB in the list of DRA. */ |
| vect_insert_into_interleaving_chain (drb, dra); |
| DR_GROUP_FIRST_DR (stmtinfo_b) = DR_GROUP_FIRST_DR (stmtinfo_a); |
| } |
| return; |
| } |
| |
| /* 4. both DRA and DRB are in some interleaving chains. */ |
| first_a = DR_GROUP_FIRST_DR (stmtinfo_a); |
| first_b = DR_GROUP_FIRST_DR (stmtinfo_b); |
| if (first_a == first_b) |
| return; |
| init_dra_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_a))); |
| init_drb_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_b))); |
| |
| if (tree_int_cst_compare (init_dra_chain, init_drb_chain) > 0) |
| { |
| /* Insert the nodes of the DRA chain into the DRB chain. |
| After inserting a node, continue from this node of the DRB chain (don't |
| start from the beginning). */ |
| node = DR_GROUP_FIRST_DR (stmtinfo_a); |
| prev = DR_GROUP_FIRST_DR (stmtinfo_b); |
| first_stmt = first_b; |
| } |
| else |
| { |
| /* Insert the nodes of the DRB chain into the DRA chain. |
| After inserting a node, continue from this node of the DRA chain (don't |
| start from the beginning). */ |
| node = DR_GROUP_FIRST_DR (stmtinfo_b); |
| prev = DR_GROUP_FIRST_DR (stmtinfo_a); |
| first_stmt = first_a; |
| } |
| |
| while (node) |
| { |
| node_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (node))); |
| next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)); |
| while (next) |
| { |
| next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next))); |
| if (tree_int_cst_compare (next_init, node_init) > 0) |
| { |
| /* Insert here. */ |
| DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = node; |
| DR_GROUP_NEXT_DR (vinfo_for_stmt (node)) = next; |
| prev = node; |
| break; |
| } |
| prev = next; |
| next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)); |
| } |
| if (!next) |
| { |
| /* We got to the end of the list. Insert here. */ |
| DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = node; |
| DR_GROUP_NEXT_DR (vinfo_for_stmt (node)) = NULL; |
| prev = node; |
| } |
| DR_GROUP_FIRST_DR (vinfo_for_stmt (node)) = first_stmt; |
| node = DR_GROUP_NEXT_DR (vinfo_for_stmt (node)); |
| } |
| } |
| |
| |
| /* Function vect_equal_offsets. |
| |
| Check if OFFSET1 and OFFSET2 are identical expressions. */ |
| |
| static bool |
| vect_equal_offsets (tree offset1, tree offset2) |
| { |
| bool res0, res1; |
| |
| STRIP_NOPS (offset1); |
| STRIP_NOPS (offset2); |
| |
| if (offset1 == offset2) |
| return true; |
| |
| if (TREE_CODE (offset1) != TREE_CODE (offset2) |
| || !BINARY_CLASS_P (offset1) |
| || !BINARY_CLASS_P (offset2)) |
| return false; |
| |
| res0 = vect_equal_offsets (TREE_OPERAND (offset1, 0), |
| TREE_OPERAND (offset2, 0)); |
| res1 = vect_equal_offsets (TREE_OPERAND (offset1, 1), |
| TREE_OPERAND (offset2, 1)); |
| |
| return (res0 && res1); |
| } |
| |
| |
| /* Function vect_check_interleaving. |
| |
| Check if DRA and DRB are part of an interleaving. If they are, insert |
| DRA and DRB into an interleaving chain. */ |
| |
| static void |
| vect_check_interleaving (struct data_reference *dra, |
| struct data_reference *drb) |
| { |
| HOST_WIDE_INT type_size_a, type_size_b, diff_mod_size, step, init_a, init_b; |
| |
| /* Check that the data-refs have the same first location (except init) and |
| that both are either loads or both stores (not one load and one store). */ |
| if ((DR_BASE_ADDRESS (dra) != DR_BASE_ADDRESS (drb) |
| && (TREE_CODE (DR_BASE_ADDRESS (dra)) != ADDR_EXPR |
| || TREE_CODE (DR_BASE_ADDRESS (drb)) != ADDR_EXPR |
| || TREE_OPERAND (DR_BASE_ADDRESS (dra), 0) |
| != TREE_OPERAND (DR_BASE_ADDRESS (drb),0))) |
| || !vect_equal_offsets (DR_OFFSET (dra), DR_OFFSET (drb)) |
| || !tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) |
| || DR_IS_READ (dra) != DR_IS_READ (drb)) |
| return; |
| |
| /* Check: |
| 1. the data-refs are of the same type |
| 2. their steps are equal |
| 3. the step is at least the difference between the data-refs' inits */ |
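| |
| /* For example (illustration only): accesses a[2*i] and a[2*i+1] with |
| 4-byte elements have equal steps (8 bytes) and inits 0 and 4; |
| (4 - 0) % 4 == 0 and 4 <= 8, so the two data-refs are recognized as |
| an interleaving group of size 2. */ |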
| type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)))); |
| type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)))); |
| |
| if (type_size_a != type_size_b |
| || tree_int_cst_compare (DR_STEP (dra), DR_STEP (drb)) |
| || !types_compatible_p (TREE_TYPE (DR_REF (dra)), |
| TREE_TYPE (DR_REF (drb)))) |
| return; |
| |
| init_a = TREE_INT_CST_LOW (DR_INIT (dra)); |
| init_b = TREE_INT_CST_LOW (DR_INIT (drb)); |
| step = TREE_INT_CST_LOW (DR_STEP (dra)); |
| |
| if (init_a > init_b) |
| { |
| /* If init_a == init_b + the size of the type * k, we have an interleaving, |
| and DRB is accessed before DRA. */ |
| diff_mod_size = (init_a - init_b) % type_size_a; |
| |
| if ((init_a - init_b) > step) |
| return; |
| |
| if (diff_mod_size == 0) |
| { |
| vect_update_interleaving_chain (drb, dra); |
| if (vect_print_dump_info (REPORT_DR_DETAILS)) |
| { |
| fprintf (vect_dump, "Detected interleaving "); |
| print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM); |
| fprintf (vect_dump, " and "); |
| print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM); |
| } |
| return; |
| } |
| } |
| else |
| { |
| /* If init_b == init_a + the size of the type * k, we have an |
| interleaving, and DRA is accessed before DRB. */ |
| diff_mod_size = (init_b - init_a) % type_size_a; |
| |
| if ((init_b - init_a) > step) |
| return; |
| |
| if (diff_mod_size == 0) |
| { |
| vect_update_interleaving_chain (dra, drb); |
| if (vect_print_dump_info (REPORT_DR_DETAILS)) |
| { |
| fprintf (vect_dump, "Detected interleaving "); |
| print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM); |
| fprintf (vect_dump, " and "); |
| print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM); |
| } |
| return; |
| } |
| } |
| } |
| |
| /* Check if the data references pointed to by DR_I and DR_J are the same or |
| belong to the same interleaving group. Return FALSE if the drs are |
| different, otherwise return TRUE. */ |
| |
| static bool |
| vect_same_range_drs (data_reference_p dr_i, data_reference_p dr_j) |
| { |
| gimple stmt_i = DR_STMT (dr_i); |
| gimple stmt_j = DR_STMT (dr_j); |
| |
| if (operand_equal_p (DR_REF (dr_i), DR_REF (dr_j), 0) |
| || (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_i)) |
| && DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_j)) |
| && (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_i)) |
| == DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_j))))) |
| return true; |
| else |
| return false; |
| } |
| |
| /* If address ranges represented by DDR_I and DDR_J are equal, |
| return TRUE, otherwise return FALSE. */ |
| |
| static bool |
| vect_vfa_range_equal (ddr_p ddr_i, ddr_p ddr_j) |
| { |
| if ((vect_same_range_drs (DDR_A (ddr_i), DDR_A (ddr_j)) |
| && vect_same_range_drs (DDR_B (ddr_i), DDR_B (ddr_j))) |
| || (vect_same_range_drs (DDR_A (ddr_i), DDR_B (ddr_j)) |
| && vect_same_range_drs (DDR_B (ddr_i), DDR_A (ddr_j)))) |
| return true; |
| else |
| return false; |
| } |
| |
| /* Insert DDR into the LOOP_VINFO list of ddrs that may alias and need to be |
| tested at run-time. Return TRUE if DDR was successfully inserted. |
| Return FALSE if versioning is not supported. */ |
| |
| static bool |
| vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo) |
| { |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0) |
| return false; |
| |
| if (vect_print_dump_info (REPORT_DR_DETAILS)) |
| { |
| fprintf (vect_dump, "mark for run-time aliasing test between "); |
| print_generic_expr (vect_dump, DR_REF (DDR_A (ddr)), TDF_SLIM); |
| fprintf (vect_dump, " and "); |
| print_generic_expr (vect_dump, DR_REF (DDR_B (ddr)), TDF_SLIM); |
| } |
| |
| if (optimize_loop_nest_for_size_p (loop)) |
| { |
| if (vect_print_dump_info (REPORT_DR_DETAILS)) |
| fprintf (vect_dump, "versioning not supported when optimizing for size."); |
| return false; |
| } |
| |
| /* FORNOW: We don't support versioning with outer-loop vectorization. */ |
| if (loop->inner) |
| { |
| if (vect_print_dump_info (REPORT_DR_DETAILS)) |
| fprintf (vect_dump, "versioning not yet supported for outer-loops."); |
| return false; |
| } |
| |
| VEC_safe_push (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), ddr); |
| return true; |
| } |
| |
| /* Function vect_analyze_data_ref_dependence. |
| |
| Return TRUE if there exists (or might exist) a dependence between a |
| memory-reference DRA and a memory-reference DRB. If versioning for |
| aliasing can check the dependence at run-time, return FALSE. */ |
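| |
| /* For example (illustration only): in |
| for (i=0; i<N; i++) |
| a[i+3] = a[i] + 1; |
| the dependence distance is 3; with a vectorization factor of 4 this |
| possible dependence prevents vectorization, whereas a distance that is |
| a multiple of VF, or at least VF, would be harmless. */ |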
| |
| static bool |
| vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr, |
| loop_vec_info loop_vinfo) |
| { |
| unsigned int i; |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| struct data_reference *dra = DDR_A (ddr); |
| struct data_reference *drb = DDR_B (ddr); |
| stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra)); |
| stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb)); |
| int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra)))); |
| int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb)))); |
| lambda_vector dist_v; |
| unsigned int loop_depth; |
| |
| if (DDR_ARE_DEPENDENT (ddr) == chrec_known) |
| { |
| /* Independent data accesses. */ |
| vect_check_interleaving (dra, drb); |
| return false; |
| } |
| |
| if ((DR_IS_READ (dra) && DR_IS_READ (drb)) || dra == drb) |
| return false; |
| |
| if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) |
| { |
| if (vect_print_dump_info (REPORT_DR_DETAILS)) |
| { |
| fprintf (vect_dump, |
| "versioning for alias required: can't determine dependence between "); |
| print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM); |
| fprintf (vect_dump, " and "); |
| print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM); |
| } |
| /* Add to list of ddrs that need to be tested at run-time. */ |
| return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo); |
| } |
| |
| if (DDR_NUM_DIST_VECTS (ddr) == 0) |
| { |
| if (vect_print_dump_info (REPORT_DR_DETAILS)) |
| { |
| fprintf (vect_dump, "versioning for alias required: bad dist vector for "); |
| print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM); |
| fprintf (vect_dump, " and "); |
| print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM); |
| } |
| /* Add to list of ddrs that need to be tested at run-time. */ |
| return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo); |
| } |
| |
| loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr)); |
| for (i = 0; VEC_iterate (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v); i++) |
| { |
| int dist = dist_v[loop_depth]; |
| |
| if (vect_print_dump_info (REPORT_DR_DETAILS)) |
| fprintf (vect_dump, "dependence distance = %d.", dist); |
| |
| /* Same loop iteration. */ |
| if (dist % vectorization_factor == 0 && dra_size == drb_size) |
| { |
| /* Two references whose distance is a multiple of VF (including zero) |
| have the same alignment. */ |
| VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a), drb); |
| VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b), dra); |
| if (vect_print_dump_info (REPORT_ALIGNMENT)) |
| fprintf (vect_dump, "accesses have the same alignment."); |
| if (vect_print_dump_info (REPORT_DR_DETAILS)) |
| { |
| fprintf (vect_dump, "dependence distance modulo vf == 0 between "); |
| print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM); |
| fprintf (vect_dump, " and "); |
| print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM); |
| } |
| |
| /* For interleaving, mark that there is a read-write dependency if |
| necessary. We have already checked that one of the data-refs is a |
| store. */ |
| if (DR_IS_READ (dra)) |
| DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a) = true; |
| else |
| { |
| if (DR_IS_READ (drb)) |
| DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b) = true; |
| } |
| |
| continue; |
| } |
| |
| if (abs (dist) >= vectorization_factor |
| || (dist > 0 && DDR_REVERSED_P (ddr))) |
| { |
| /* Dependence distance does not create dependence, as far as |
| vectorization is concerned, in this case. If DDR_REVERSED_P the |
| order of the data-refs in DDR was reversed (to make distance |
| vector positive), and the actual distance is negative. */ |
| if (vect_print_dump_info (REPORT_DR_DETAILS)) |
| fprintf (vect_dump, "dependence distance >= VF or negative."); |
| continue; |
| } |
| |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| { |
| fprintf (vect_dump, |
| "not vectorized, possible dependence " |
| "between data-refs "); |
| print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM); |
| fprintf (vect_dump, " and "); |
| print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM); |
| } |
| |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Function vect_analyze_data_ref_dependences. |
| |
| Examine all the data references in the loop, and make sure there do not |
| exist any data dependences between them. */ |
| |
| static bool |
| vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo) |
| { |
| unsigned int i; |
| VEC (ddr_p, heap) * ddrs = LOOP_VINFO_DDRS (loop_vinfo); |
| struct data_dependence_relation *ddr; |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "=== vect_analyze_data_ref_dependences ==="); |
| |
| for (i = 0; VEC_iterate (ddr_p, ddrs, i, ddr); i++) |
| if (vect_analyze_data_ref_dependence (ddr, loop_vinfo)) |
| return false; |
| |
| return true; |
| } |
| |
| |
| /* Function vect_compute_data_ref_alignment |
| |
| Compute the misalignment of the data reference DR. |
| |
| Output: |
| 1. If during the misalignment computation it is found that the data reference |
| cannot be vectorized then false is returned. |
| 2. DR_MISALIGNMENT (DR) is defined. |
| |
| FOR NOW: No analysis is actually performed. Misalignment is calculated |
| only for trivial cases. TODO. */ |
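| |
| /* For example (hypothetical numbers): for a 16-byte vector type, a |
| data-ref with a 16-byte-aligned base and DR_INIT == 20 gets a computed |
| misalignment of 20 % 16 == 4 bytes. */ |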
| |
| static bool |
| vect_compute_data_ref_alignment (struct data_reference *dr) |
| { |
| gimple stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| tree ref = DR_REF (dr); |
| tree vectype; |
| tree base, base_addr; |
| bool base_aligned; |
| tree misalign; |
| tree aligned_to, alignment; |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "vect_compute_data_ref_alignment:"); |
| |
| /* Initialize misalignment to unknown. */ |
| SET_DR_MISALIGNMENT (dr, -1); |
| |
| misalign = DR_INIT (dr); |
| aligned_to = DR_ALIGNED_TO (dr); |
| base_addr = DR_BASE_ADDRESS (dr); |
| vectype = STMT_VINFO_VECTYPE (stmt_info); |
| |
| /* In case the dataref is in an inner-loop of the loop that is being |
| vectorized (LOOP), we use the base and misalignment information |
| relative to the outer-loop (LOOP). This is ok only if the misalignment |
| stays the same throughout the execution of the inner-loop, which is why |
| we have to check that the stride of the dataref in the inner-loop is a |
| multiple of the vector size. */ |
| if (nested_in_vect_loop_p (loop, stmt)) |
| { |
| tree step = DR_STEP (dr); |
| HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); |
| |
| if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0) |
| { |
| if (vect_print_dump_info (REPORT_ALIGNMENT)) |
| fprintf (vect_dump, "inner step divides the vector-size."); |
| misalign = STMT_VINFO_DR_INIT (stmt_info); |
| aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info); |
| base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info); |
| } |
| else |
| { |
| if (vect_print_dump_info (REPORT_ALIGNMENT)) |
| fprintf (vect_dump, "inner step doesn't divide the vector-size."); |
| misalign = NULL_TREE; |
| } |
| } |
| |
| base = build_fold_indirect_ref (base_addr); |
| alignment = ssize_int (TYPE_ALIGN (vectype)/BITS_PER_UNIT); |
| |
| if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0) |
| || !misalign) |
| { |
| if (vect_print_dump_info (REPORT_ALIGNMENT)) |
| { |
| fprintf (vect_dump, "Unknown alignment for access: "); |
| print_generic_expr (vect_dump, base, TDF_SLIM); |
| } |
| return true; |
| } |
| |
| if ((DECL_P (base) |
| && tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base)), |
| alignment) >= 0) |
| || (TREE_CODE (base_addr) == SSA_NAME |
| && tree_int_cst_compare (ssize_int (TYPE_ALIGN_UNIT (TREE_TYPE ( |
| TREE_TYPE (base_addr)))), |
| alignment) >= 0)) |
| base_aligned = true; |
| else |
| base_aligned = false; |
| |
| if (!base_aligned) |
| { |
| /* Do not change the alignment of global variables if |
| flag_section_anchors is enabled. */ |
| if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype)) |
| || (TREE_STATIC (base) && flag_section_anchors)) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "can't force alignment of ref: "); |
| print_generic_expr (vect_dump, ref, TDF_SLIM); |
| } |
| return true; |
| } |
| |
| /* Force the alignment of the decl. |
| NOTE: This is the only change to the code we make during |
| the analysis phase, before deciding to vectorize the loop. */ |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "force alignment"); |
| DECL_ALIGN (base) = TYPE_ALIGN (vectype); |
| DECL_USER_ALIGN (base) = 1; |
| } |
| |
| /* At this point we assume that the base is aligned. */ |
| gcc_assert (base_aligned |
| || (TREE_CODE (base) == VAR_DECL |
| && DECL_ALIGN (base) >= TYPE_ALIGN (vectype))); |
| |
| /* Modulo alignment. */ |
| misalign = size_binop (TRUNC_MOD_EXPR, misalign, alignment); |
| |
| if (!host_integerp (misalign, 1)) |
| { |
| /* Negative or overflowed misalignment value. */ |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "unexpected misalign value"); |
| return false; |
| } |
| |
| SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign)); |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr)); |
| print_generic_expr (vect_dump, ref, TDF_SLIM); |
| } |
| |
| return true; |
| } |
| |
| |
| /* Function vect_compute_data_refs_alignment |
| |
| Compute the misalignment of data references in the loop. |
| Return FALSE if a data reference is found that cannot be vectorized. */ |
| |
| static bool |
| vect_compute_data_refs_alignment (loop_vec_info loop_vinfo) |
| { |
| VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
| struct data_reference *dr; |
| unsigned int i; |
| |
| for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++) |
| if (!vect_compute_data_ref_alignment (dr)) |
| return false; |
| |
| return true; |
| } |
| |
| |
| /* Function vect_update_misalignment_for_peel |
| |
| DR - the data reference whose misalignment is to be adjusted. |
| DR_PEEL - the data reference whose misalignment is being made |
| zero in the vector loop by the peel. |
| NPEEL - the number of iterations in the peel loop if the misalignment |
| of DR_PEEL is known at compile time. */ |
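| |
| /* For example (hypothetical numbers): with 4-byte elements, a 16-byte |
| vector type, DR_MISALIGNMENT (DR) == 8 and NPEEL == 3, the updated |
| misalignment is (8 + 3*4) % 16 == 4. */ |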
| |
| static void |
| vect_update_misalignment_for_peel (struct data_reference *dr, |
| struct data_reference *dr_peel, int npeel) |
| { |
| unsigned int i; |
| VEC(dr_p,heap) *same_align_drs; |
| struct data_reference *current_dr; |
| int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr)))); |
| int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel)))); |
| stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr)); |
| stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel)); |
| |
| /* For interleaved data accesses the step in the loop must be multiplied by |
| the size of the interleaving group. */ |
| if (STMT_VINFO_STRIDED_ACCESS (stmt_info)) |
| dr_size *= DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_DR (stmt_info))); |
| if (STMT_VINFO_STRIDED_ACCESS (peel_stmt_info)) |
| dr_peel_size *= DR_GROUP_SIZE (peel_stmt_info); |
| |
| /* It can be assumed that the data refs with the same alignment as dr_peel |
| are aligned in the vector loop. */ |
| same_align_drs |
| = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel))); |
| for (i = 0; VEC_iterate (dr_p, same_align_drs, i, current_dr); i++) |
| { |
| if (current_dr != dr) |
| continue; |
| gcc_assert (DR_MISALIGNMENT (dr) / dr_size == |
| DR_MISALIGNMENT (dr_peel) / dr_peel_size); |
| SET_DR_MISALIGNMENT (dr, 0); |
| return; |
| } |
| |
| if (known_alignment_for_access_p (dr) |
| && known_alignment_for_access_p (dr_peel)) |
| { |
| int misal = DR_MISALIGNMENT (dr); |
| tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
| misal += npeel * dr_size; |
| misal %= GET_MODE_SIZE (TYPE_MODE (vectype)); |
| SET_DR_MISALIGNMENT (dr, misal); |
| return; |
| } |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "Setting misalignment to -1."); |
| SET_DR_MISALIGNMENT (dr, -1); |
| } |
| |
| |
| /* Function vect_verify_datarefs_alignment |
| |
| Return TRUE if all data references in the loop can be |
| handled with respect to alignment. */ |
| |
| static bool |
| vect_verify_datarefs_alignment (loop_vec_info loop_vinfo) |
| { |
| VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
| struct data_reference *dr; |
| enum dr_alignment_support supportable_dr_alignment; |
| unsigned int i; |
| |
| for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++) |
| { |
| gimple stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| |
| /* For interleaving, only the alignment of the first access matters. */ |
| if (STMT_VINFO_STRIDED_ACCESS (stmt_info) |
| && DR_GROUP_FIRST_DR (stmt_info) != stmt) |
| continue; |
| |
| supportable_dr_alignment = vect_supportable_dr_alignment (dr); |
| if (!supportable_dr_alignment) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| { |
| if (DR_IS_READ (dr)) |
| fprintf (vect_dump, |
| "not vectorized: unsupported unaligned load."); |
| else |
| fprintf (vect_dump, |
| "not vectorized: unsupported unaligned store."); |
| } |
| return false; |
| } |
| if (supportable_dr_alignment != dr_aligned |
| && vect_print_dump_info (REPORT_ALIGNMENT)) |
| fprintf (vect_dump, "Vectorizing an unaligned access."); |
| } |
| return true; |
| } |
| |
| |
| /* Function vector_alignment_reachable_p |
| |
| Return true if vector alignment for DR is reachable by peeling |
| a few loop iterations. Return false otherwise. */ |
| |
| static bool |
| vector_alignment_reachable_p (struct data_reference *dr) |
| { |
| gimple stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
| |
| if (STMT_VINFO_STRIDED_ACCESS (stmt_info)) |
| { |
| /* For interleaved accesses we peel only if the number of iterations in |
| the prolog loop (VF - misalignment) is a multiple of the number of |
| interleaved accesses. */ |
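| |
| /* For example (hypothetical numbers): with 4 elements per vector, a |
| misalignment of 1 element, and an interleaving group of size 2, the |
| prolog would need 4 - 1 = 3 iterations, which is not a multiple of 2, |
| so peeling cannot reach alignment. */ |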
| int elem_size, mis_in_elements; |
| int nelements = TYPE_VECTOR_SUBPARTS (vectype); |
| |
| /* FORNOW: handle only known alignment. */ |
| if (!known_alignment_for_access_p (dr)) |
| return false; |
| |
| elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements; |
| mis_in_elements = DR_MISALIGNMENT (dr) / elem_size; |
| |
| if ((nelements - mis_in_elements) % DR_GROUP_SIZE (stmt_info)) |
| return false; |
| } |
| |
| /* If the misalignment is known at compile time then allow peeling |
| only if natural alignment is reachable through peeling. */ |
| if (known_alignment_for_access_p (dr) && !aligned_access_p (dr)) |
| { |
| HOST_WIDE_INT elmsize = |
| int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize); |
| fprintf (vect_dump, ". misalignment = %d. ", DR_MISALIGNMENT (dr)); |
| } |
| if (DR_MISALIGNMENT (dr) % elmsize) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "data size does not divide the misalignment.\n"); |
| return false; |
| } |
| } |
| |
| if (!known_alignment_for_access_p (dr)) |
| { |
| tree type = (TREE_TYPE (DR_REF (dr))); |
| tree ba = DR_BASE_OBJECT (dr); |
| bool is_packed = false; |
| |
| if (ba) |
| is_packed = contains_packed_reference (ba); |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "Unknown misalignment, is_packed = %d",is_packed); |
| if (targetm.vectorize.vector_alignment_reachable (type, is_packed)) |
| return true; |
| else |
| return false; |
| } |
| |
| return true; |
| } |
| |
| /* Function vect_enhance_data_refs_alignment |
| |
| This pass will use loop versioning and loop peeling in order to enhance |
| the alignment of data references in the loop. |
| |
| FOR NOW: we assume that whatever versioning/peeling takes place, only the |
| original loop is to be vectorized; any other loops that are created by |
| the transformations performed in this pass are not supposed to be |
| vectorized. This restriction will be relaxed. |
| |
| This pass will require a cost model to guide it whether to apply peeling |
| or versioning or a combination of the two. For example, the scheme that |
| Intel uses when given a loop with several memory accesses is as follows: |
| choose one memory access ('p') whose alignment you want to force by doing |
| peeling. Then, either (1) generate a loop in which 'p' is aligned and all |
| other accesses are not necessarily aligned, or (2) use loop versioning to |
| generate one loop in which all accesses are aligned, and another loop in |
| which only 'p' is necessarily aligned. |
| |
| ("Automatic Intra-Register Vectorization for the Intel Architecture", |
| Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International |
| Journal of Parallel Programming, Vol. 30, No. 2, April 2002.) |
| |
| Devising a cost model is the most critical aspect of this work. It will |
| guide us on which access to peel for, whether to use loop versioning, how |
| many versions to create, etc. The cost model will probably consist of |
| generic considerations as well as target specific considerations (on |
| powerpc for example, misaligned stores are more painful than misaligned |
| loads). |
| |
| Here are the general steps involved in alignment enhancements: |
| |
| -- original loop, before alignment analysis: |
| for (i=0; i<N; i++){ |
| x = q[i]; # DR_MISALIGNMENT(q) = unknown |
| p[i] = y; # DR_MISALIGNMENT(p) = unknown |
| } |
| |
| -- After vect_compute_data_refs_alignment: |
| for (i=0; i<N; i++){ |
| x = q[i]; # DR_MISALIGNMENT(q) = 3 |
| p[i] = y; # DR_MISALIGNMENT(p) = unknown |
| } |
| |
| -- Possibility 1: we do loop versioning: |
| if (p is aligned) { |
| for (i=0; i<N; i++){ # loop 1A |
| x = q[i]; # DR_MISALIGNMENT(q) = 3 |
| p[i] = y; # DR_MISALIGNMENT(p) = 0 |
| } |
| } |
| else { |
| for (i=0; i<N; i++){ # loop 1B |
| x = q[i]; # DR_MISALIGNMENT(q) = 3 |
| p[i] = y; # DR_MISALIGNMENT(p) = unaligned |
| } |
| } |
| |
| -- Possibility 2: we do loop peeling: |
| for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized). |
| x = q[i]; |
| p[i] = y; |
| } |
| for (i = 3; i < N; i++){ # loop 2A |
| x = q[i]; # DR_MISALIGNMENT(q) = 0 |
| p[i] = y; # DR_MISALIGNMENT(p) = unknown |
| } |
| |
| -- Possibility 3: combination of loop peeling and versioning: |
| for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized). |
| x = q[i]; |
| p[i] = y; |
| } |
| if (p is aligned) { |
| for (i = 3; i<N; i++){ # loop 3A |
| x = q[i]; # DR_MISALIGNMENT(q) = 0 |
| p[i] = y; # DR_MISALIGNMENT(p) = 0 |
| } |
| } |
| else { |
| for (i = 3; i<N; i++){ # loop 3B |
| x = q[i]; # DR_MISALIGNMENT(q) = 0 |
| p[i] = y; # DR_MISALIGNMENT(p) = unaligned |
| } |
| } |
| |
| These loops are later passed to loop_transform to be vectorized. The |
| vectorizer will use the alignment information to guide the transformation |
| (whether to generate regular loads/stores, or with special handling for |
| misalignment). */ |
| |
| static bool |
| vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo) |
| { |
| VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| enum dr_alignment_support supportable_dr_alignment; |
| struct data_reference *dr0 = NULL; |
| struct data_reference *dr; |
| unsigned int i; |
| bool do_peeling = false; |
| bool do_versioning = false; |
| bool stat; |
| gimple stmt; |
| stmt_vec_info stmt_info; |
| int vect_versioning_for_alias_required; |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "=== vect_enhance_data_refs_alignment ==="); |
| |
| /* While cost model enhancements are expected in the future, the high level |
| view of the code at this time is as follows: |
| |
| A) If there is a misaligned write then see if peeling to align this write |
| can make all data references satisfy vect_supportable_dr_alignment. |
| If so, update data structures as needed and return true. Note that |
| at this time vect_supportable_dr_alignment is known to return false |
| for a misaligned write. |
| |
| B) If peeling wasn't possible and there is a data reference with an |
| unknown misalignment that does not satisfy vect_supportable_dr_alignment |
| then see if loop versioning checks can be used to make all data |
| references satisfy vect_supportable_dr_alignment. If so, update |
| data structures as needed and return true. |
| |
| C) If neither peeling nor versioning were successful then return false if |
| any data reference does not satisfy vect_supportable_dr_alignment. |
| |
| D) Return true (all data references satisfy vect_supportable_dr_alignment). |
| |
| Note, Possibility 3 above (which is peeling and versioning together) is not |
| being done at this time. */ |
| |
| /* (1) Peeling to force alignment. */ |
| |
| /* (1.1) Decide whether to perform peeling, and how many iterations to peel: |
| Considerations: |
| + How many accesses will become aligned due to the peeling |
| - How many accesses will become unaligned due to the peeling, |
| and the cost of misaligned accesses. |
| - The cost of peeling (the extra runtime checks, the increase |
| in code size). |
| |
| The scheme we use FORNOW: peel to force the alignment of the first |
| misaligned store in the loop. |
| Rationale: misaligned stores are not yet supported. |
| |
| TODO: Use a cost model. */ |
| |
| for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++) |
| { |
| stmt = DR_STMT (dr); |
| stmt_info = vinfo_for_stmt (stmt); |
| |
| /* For interleaving, only the alignment of the first access |
| matters. */ |
| if (STMT_VINFO_STRIDED_ACCESS (stmt_info) |
| && DR_GROUP_FIRST_DR (stmt_info) != stmt) |
| continue; |
| |
| if (!DR_IS_READ (dr) && !aligned_access_p (dr)) |
| { |
| do_peeling = vector_alignment_reachable_p (dr); |
| if (do_peeling) |
| dr0 = dr; |
| if (!do_peeling && vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "vector alignment may not be reachable"); |
| break; |
| } |
| } |
| |
| vect_versioning_for_alias_required = |
| (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)) > 0); |
| |
| /* Temporarily, if versioning for alias is required, we disable peeling |
| until we support peeling and versioning. Often peeling for alignment |
| will require peeling for loop-bound, which in turn requires that we |
| know how to adjust the loop ivs after the loop. */ |
| if (vect_versioning_for_alias_required |
| || !vect_can_advance_ivs_p (loop_vinfo) |
| || !slpeel_can_duplicate_loop_p (loop, single_exit (loop))) |
| do_peeling = false; |
| |
| if (do_peeling) |
| { |
| int mis; |
| int npeel = 0; |
| gimple stmt = DR_STMT (dr0); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
| int nelements = TYPE_VECTOR_SUBPARTS (vectype); |
| |
| if (known_alignment_for_access_p (dr0)) |
| { |
| /* Since it's known at compile time, compute the number of iterations |
| in the peeled loop (the peeling factor) for use in updating |
| DR_MISALIGNMENT values. The peeling factor is the vectorization |
| factor minus the misalignment as an element count. */ |
| mis = DR_MISALIGNMENT (dr0); |
| mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0)))); |
| npeel = nelements - mis; |
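| /* For example, for a four-element vector type with 4-byte elements |
| and a known misalignment of 8 bytes: mis = 8 / 4 = 2 elements, so |
| npeel = 4 - 2 = 2 scalar iterations are peeled to reach alignment. */ |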
| |
| /* For interleaved data accesses, every iteration accesses all the |
| members of the group; therefore we divide the number of iterations |
| by the group size. */ |
| stmt_info = vinfo_for_stmt (DR_STMT (dr0)); |
| if (STMT_VINFO_STRIDED_ACCESS (stmt_info)) |
| npeel /= DR_GROUP_SIZE (stmt_info); |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "Try peeling by %d", npeel); |
| } |
| |
| /* Ensure that all data refs can be vectorized after the peel. */ |
| for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++) |
| { |
| int save_misalignment; |
| |
| if (dr == dr0) |
| continue; |
| |
| stmt = DR_STMT (dr); |
| stmt_info = vinfo_for_stmt (stmt); |
| /* For interleaving, only the alignment of the first access |
| matters. */ |
| if (STMT_VINFO_STRIDED_ACCESS (stmt_info) |
| && DR_GROUP_FIRST_DR (stmt_info) != stmt) |
| continue; |
| |
| save_misalignment = DR_MISALIGNMENT (dr); |
| vect_update_misalignment_for_peel (dr, dr0, npeel); |
| supportable_dr_alignment = vect_supportable_dr_alignment (dr); |
| SET_DR_MISALIGNMENT (dr, save_misalignment); |
| |
| if (!supportable_dr_alignment) |
| { |
| do_peeling = false; |
| break; |
| } |
| } |
| |
| if (do_peeling) |
| { |
| /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i. |
| If the misalignment of DR_i is identical to that of dr0 then set |
| DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and |
| dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i) |
| by the peeling factor times the element size of DR_i (MOD the |
| vectorization factor times the size). Otherwise, the |
| misalignment of DR_i must be set to unknown. */ |
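| /* A sketch of the update rule with 4-byte elements, npeel = 2 and a |
| 16-byte vector type: a DR_i whose known misalignment is 4 becomes |
| (4 + 2 * 4) % 16 = 12 after the peel, while a DR_i with the same |
| misalignment as DR0 becomes 0. */ |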
| for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++) |
| if (dr != dr0) |
| vect_update_misalignment_for_peel (dr, dr0, npeel); |
| |
| LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0; |
| LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = DR_MISALIGNMENT (dr0); |
| SET_DR_MISALIGNMENT (dr0, 0); |
| if (vect_print_dump_info (REPORT_ALIGNMENT)) |
| fprintf (vect_dump, "Alignment of access forced using peeling."); |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "Peeling for alignment will be applied."); |
| |
| stat = vect_verify_datarefs_alignment (loop_vinfo); |
| gcc_assert (stat); |
| return stat; |
| } |
| } |
| |
| |
| /* (2) Versioning to force alignment. */ |
| |
| /* Try versioning if: |
| 1) flag_tree_vect_loop_version is TRUE |
| 2) optimize loop for speed |
| 3) there is at least one unsupported misaligned data ref with an unknown |
| misalignment, and |
| 4) all misaligned data refs with a known misalignment are supported, and |
| 5) the number of runtime alignment checks is within reason. */ |
| |
| do_versioning = |
| flag_tree_vect_loop_version |
| && optimize_loop_nest_for_speed_p (loop) |
| && (!loop->inner); /* FORNOW */ |
| |
| if (do_versioning) |
| { |
| for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++) |
| { |
| stmt = DR_STMT (dr); |
| stmt_info = vinfo_for_stmt (stmt); |
| |
| /* For interleaving, only the alignment of the first access |
| matters. */ |
| if (aligned_access_p (dr) |
| || (STMT_VINFO_STRIDED_ACCESS (stmt_info) |
| && DR_GROUP_FIRST_DR (stmt_info) != stmt)) |
| continue; |
| |
| supportable_dr_alignment = vect_supportable_dr_alignment (dr); |
| |
| if (!supportable_dr_alignment) |
| { |
| gimple stmt; |
| int mask; |
| tree vectype; |
| |
| if (known_alignment_for_access_p (dr) |
| || VEC_length (gimple, |
| LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) |
| >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS)) |
| { |
| do_versioning = false; |
| break; |
| } |
| |
| stmt = DR_STMT (dr); |
| vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)); |
| gcc_assert (vectype); |
| |
| /* The rightmost bits of an aligned address must be zeros. |
| Construct the mask needed for this test. For example, |
| GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the |
| mask must be 15 = 0xf. */ |
| mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1; |
| |
| /* FORNOW: use the same mask to test all potentially unaligned |
| references in the loop. The vectorizer currently supports |
| a single vector size, see the reference to |
| GET_MODE_NUNITS (TYPE_MODE (vectype)) where the |
| vectorization factor is computed. */ |
| gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo) |
| || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask); |
| LOOP_VINFO_PTR_MASK (loop_vinfo) = mask; |
| VEC_safe_push (gimple, heap, |
| LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo), |
| DR_STMT (dr)); |
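| /* Conceptually, the runtime check built later from the collected |
| stmts tests all their addresses at once: |
| |
| if (((addr_1 | addr_2 | ... | addr_n) & mask) == 0) |
| ... aligned, vectorized loop version ... |
| else |
| ... scalar loop version ... |
| |
| This is an illustration; the actual condition is constructed when |
| the loop versions are generated. */ |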
| } |
| } |
| |
| /* Versioning requires at least one misaligned data reference. */ |
| if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) == 0) |
| do_versioning = false; |
| else if (!do_versioning) |
| VEC_truncate (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo), 0); |
| } |
| |
| if (do_versioning) |
| { |
| VEC(gimple,heap) *may_misalign_stmts |
| = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo); |
| gimple stmt; |
| |
| /* It can now be assumed that the data references in the statements |
| in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version |
| of the loop being vectorized. */ |
| for (i = 0; VEC_iterate (gimple, may_misalign_stmts, i, stmt); i++) |
| { |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| dr = STMT_VINFO_DATA_REF (stmt_info); |
| SET_DR_MISALIGNMENT (dr, 0); |
| if (vect_print_dump_info (REPORT_ALIGNMENT)) |
| fprintf (vect_dump, "Alignment of access forced using versioning."); |
| } |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "Versioning for alignment will be applied."); |
| |
| /* Peeling and versioning can't be done together at this time. */ |
| gcc_assert (! (do_peeling && do_versioning)); |
| |
| stat = vect_verify_datarefs_alignment (loop_vinfo); |
| gcc_assert (stat); |
| return stat; |
| } |
| |
| /* This point is reached if neither peeling nor versioning is being done. */ |
| gcc_assert (! (do_peeling || do_versioning)); |
| |
| stat = vect_verify_datarefs_alignment (loop_vinfo); |
| return stat; |
| } |
| |
| |
| /* Function vect_analyze_data_refs_alignment |
| |
| Analyze the alignment of the data-references in the loop. |
| Return FALSE if a data reference is found that cannot be vectorized. */ |
| |
| static bool |
| vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "=== vect_analyze_data_refs_alignment ==="); |
| |
| if (!vect_compute_data_refs_alignment (loop_vinfo)) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| fprintf (vect_dump, |
| "not vectorized: can't calculate alignment for data ref."); |
| return false; |
| } |
| |
| return true; |
| } |
| |
| |
| /* Analyze groups of strided accesses: check that DR belongs to a group of |
| strided accesses of legal size, step, etc. Detect gaps, single element |
| interleaving, and other special cases. Set strided access info. |
| Collect groups of strided stores for further use in SLP analysis. */ |
| |
| static bool |
| vect_analyze_group_access (struct data_reference *dr) |
| { |
| tree step = DR_STEP (dr); |
| tree scalar_type = TREE_TYPE (DR_REF (dr)); |
| HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); |
| gimple stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); |
| HOST_WIDE_INT stride; |
| bool slp_impossible = false; |
| |
| /* For interleaving, STRIDE is STEP counted in elements, i.e., the size of the |
| interleaving group (including gaps). */ |
| stride = dr_step / type_size; |
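| /* E.g., a STEP of 16 bytes over 4-byte elements gives STRIDE = 4: |
| each iteration covers a group of four interleaved elements. */ |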
| |
| /* A non-consecutive access is possible only if it is part of an |
| interleaving group. */ |
| if (!DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt))) |
| { |
| /* Check whether this DR is part of an interleaving group of which |
| it is the single element accessed in the loop. */ |
| |
| /* Gaps are supported only for loads. STEP must be a multiple of the type |
| size. The size of the group must be a power of 2. */ |
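| /* For example, a loop that reads only 'a[4*i]' accesses one element |
| out of every four: it forms a single-element interleaving group of |
| size 4 (stride 4), with a gap of three elements after each load. */ |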
| if (DR_IS_READ (dr) |
| && (dr_step % type_size) == 0 |
| && stride > 0 |
| && exact_log2 (stride) != -1) |
| { |
| DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) = stmt; |
| DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = stride; |
| if (vect_print_dump_info (REPORT_DR_DETAILS)) |
| { |
| fprintf (vect_dump, "Detected single element interleaving %d ", |
| DR_GROUP_SIZE (vinfo_for_stmt (stmt))); |
| print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM); |
| fprintf (vect_dump, " step "); |
| print_generic_expr (vect_dump, step, TDF_SLIM); |
| } |
| return true; |
| } |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "not consecutive access"); |
| return false; |
| } |
| |
| if (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) == stmt) |
| { |
| /* First stmt in the interleaving chain. Check the chain. */ |
| gimple next = DR_GROUP_NEXT_DR (vinfo_for_stmt (stmt)); |
| struct data_reference *data_ref = dr; |
| unsigned int count = 1; |
| tree next_step; |
| tree prev_init = DR_INIT (data_ref); |
| gimple prev = stmt; |
| HOST_WIDE_INT diff, count_in_bytes, gaps = 0; |
| |
| while (next) |
| { |
| /* Skip same data-refs. In case two or more stmts share a data-ref |
| (supported only for loads), we vectorize only the first stmt, and |
| the rest get their vectorized loads from the first one. */ |
| if (!tree_int_cst_compare (DR_INIT (data_ref), |
| DR_INIT (STMT_VINFO_DATA_REF ( |
| vinfo_for_stmt (next))))) |
| { |
| if (!DR_IS_READ (data_ref)) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "Two store stmts share the same dr."); |
| return false; |
| } |
| |
| /* Check that there are no load-store dependences for these loads, |
| to prevent a load-store-load sequence to the same location. */ |
| if (DR_GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next)) |
| || DR_GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev))) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, |
| "READ_WRITE dependence in interleaving."); |
| return false; |
| } |
| |
| /* For loads, reuse the data-ref load of the earlier stmt. */ |
| DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev; |
| |
| prev = next; |
| next = DR_GROUP_NEXT_DR (vinfo_for_stmt (next)); |
| continue; |
| } |
| prev = next; |
| |
| /* Check that all the accesses have the same STEP. */ |
| next_step = DR_STEP (STMT_VINFO_DATA_REF (vinfo_for_stmt (next))); |
| if (tree_int_cst_compare (step, next_step)) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "not consecutive access in interleaving"); |
| return false; |
| } |
| |
| data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next)); |
| /* Check that the distance between two accesses is equal to the type |
| size. Otherwise, we have gaps. */ |
| diff = (TREE_INT_CST_LOW (DR_INIT (data_ref)) |
| - TREE_INT_CST_LOW (prev_init)) / type_size; |
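| /* E.g., with 4-byte elements, inits of 0 and 8 bytes give DIFF = 2: |
| there is a one-element gap between the two group members. */ |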
| if (diff != 1) |
| { |
| /* FORNOW: SLP of accesses with gaps is not supported. */ |
| slp_impossible = true; |
| if (!DR_IS_READ (data_ref)) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "interleaved store with gaps"); |
| return false; |
| } |
| |
| gaps += diff - 1; |
| } |
| |
| /* Store the gap from the previous member of the group. If there is no |
| gap in the access, DR_GROUP_GAP is always 1. */ |
| DR_GROUP_GAP (vinfo_for_stmt (next)) = diff; |
| |
| prev_init = DR_INIT (data_ref); |
| next = DR_GROUP_NEXT_DR (vinfo_for_stmt (next)); |
| /* Count the number of data-refs in the chain. */ |
| count++; |
| } |
| |
| /* COUNT is the number of accesses found; we multiply it by the size |
| of the type to get COUNT_IN_BYTES. */ |
| count_in_bytes = type_size * count; |
| |
| /* Check that the size of the interleaving (including gaps) is not greater |
| than STEP. */ |
| if (dr_step && dr_step < count_in_bytes + gaps * type_size) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "interleaving size is greater than step for "); |
| print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM); |
| } |
| return false; |
| } |
| |
| /* Check that the size of the interleaving is equal to STEP for stores, |
| i.e., that there are no gaps. */ |
| if (dr_step != count_in_bytes) |
| { |
| if (DR_IS_READ (dr)) |
| { |
| slp_impossible = true; |
| /* There is a gap after the last load in the group. This gap is |
| the difference between the stride and the number of elements. |
| When there is no gap, this difference is 0. */ |
| DR_GROUP_GAP (vinfo_for_stmt (stmt)) = stride - count; |
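| /* E.g., a chain that loads only a[4*i] and a[4*i + 1] with stride |
| 4 has count = 2, so the first stmt records a trailing gap of |
| 4 - 2 = 2 elements. */ |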
| } |
| else |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "interleaved store with gaps"); |
| return false; |
| } |
| } |
| |
| /* Check that STEP is a multiple of type size. */ |
| if ((dr_step % type_size) != 0) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "step is not a multiple of type size: step "); |
| print_generic_expr (vect_dump, step, TDF_SLIM); |
| fprintf (vect_dump, " size "); |
| print_generic_expr (vect_dump, TYPE_SIZE_UNIT (scalar_type), |
| TDF_SLIM); |
| } |
| return false; |
| } |
| |
| /* FORNOW: we handle only interleaving that is a power of 2. |
| We don't fail here if it may still be possible to vectorize the |
| group using SLP. If not, the size of the group will be checked in |
| vect_analyze_operations, and the vectorization will fail. */ |
| if (exact_log2 (stride) == -1) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "interleaving is not a power of 2"); |
| |
| if (slp_impossible) |
| return false; |
| } |
| DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = stride; |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "Detected interleaving of size %d", (int)stride); |
| |
| /* SLP: create an SLP data structure for every interleaving group of |
| stores for further analysis in vect_analyze_slp. */ |
| if (!DR_IS_READ (dr) && !slp_impossible) |
| VEC_safe_push (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo), stmt); |
| } |
| |
| return true; |
| } |
| |
| |
| /* Analyze the access pattern of the data-reference DR. |
| In case of non-consecutive accesses call vect_analyze_group_access() to |
| analyze groups of strided accesses. */ |
| |
| static bool |
| vect_analyze_data_ref_access (struct data_reference *dr) |
| { |
| tree step = DR_STEP (dr); |
| tree scalar_type = TREE_TYPE (DR_REF (dr)); |
| gimple stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| HOST_WIDE_INT dr_step; |
| |
| if (!step) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "bad data-ref access"); |
| return false; |
| } |
| |
| /* STEP is non-NULL here, so it is safe to read its value. */ |
| dr_step = TREE_INT_CST_LOW (step); |
| |
| /* Don't allow invariant accesses. */ |
| if (dr_step == 0) |
| return false; |
| |
| if (nested_in_vect_loop_p (loop, stmt)) |
| { |
| /* Interleaved accesses are not yet supported within outer-loop |
| vectorization for references in the inner-loop. */ |
| DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) = NULL; |
| |
| /* For the rest of the analysis we use the outer-loop step. */ |
| step = STMT_VINFO_DR_STEP (stmt_info); |
| dr_step = TREE_INT_CST_LOW (step); |
| |
| if (dr_step == 0) |
| { |
| if (vect_print_dump_info (REPORT_ALIGNMENT)) |
| fprintf (vect_dump, "zero step in outer loop."); |
| if (DR_IS_READ (dr)) |
| return true; |
| else |
| return false; |
| } |
| } |
| |
| /* Consecutive? */ |
| if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))) |
| { |
| /* Mark that it is not interleaving. */ |
| DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) = NULL; |
| return true; |
| } |
| |
| if (nested_in_vect_loop_p (loop, stmt)) |
| { |
| if (vect_print_dump_info (REPORT_ALIGNMENT)) |
| fprintf (vect_dump, "strided access in outer loop."); |
| return false; |
| } |
| |
| /* Non-consecutive access - check if it is part of an interleaving group. */ |
| return vect_analyze_group_access (dr); |
| } |
| |
| |
| /* Function vect_analyze_data_ref_accesses. |
| |
| Analyze the access pattern of all the data references in the loop. |
| |
| FORNOW: the access patterns that are considered vectorizable are a |
| simple step 1 (consecutive) access, and interleaved (strided) accesses |
| analyzed by vect_analyze_group_access. |
| |
| FORNOW: handle only arrays and pointer accesses. */ |
| |
| static bool |
| vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo) |
| { |
| unsigned int i; |
| VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
| struct data_reference *dr; |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "=== vect_analyze_data_ref_accesses ==="); |
| |
| for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++) |
| if (!vect_analyze_data_ref_access (dr)) |
| { |
| if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)) |
| fprintf (vect_dump, "not vectorized: complicated access pattern."); |
| return false; |
| } |
| |
| return true; |
| } |
| |
| /* Function vect_prune_runtime_alias_test_list. |
| |
| Prune a list of ddrs to be tested at run-time by versioning for alias. |
| Return FALSE if the resulting list of ddrs is longer than allowed by |
| PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS; otherwise return TRUE. */ |
| |
| static bool |
| vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo) |
| { |
| VEC (ddr_p, heap) * ddrs = |
| LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo); |
| unsigned i, j; |
| |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "=== vect_prune_runtime_alias_test_list ==="); |
| |
| for (i = 0; i < VEC_length (ddr_p, ddrs); ) |
| { |
| bool found; |
| ddr_p ddr_i; |
| |
| ddr_i = VEC_index (ddr_p, ddrs, i); |
| found = false; |
| |
| for (j = 0; j < i; j++) |
| { |
| ddr_p ddr_j = VEC_index (ddr_p, ddrs, j); |
| |
| if (vect_vfa_range_equal (ddr_i, ddr_j)) |
| { |
| if (vect_print_dump_info (REPORT_DR_DETAILS)) |
| { |
| fprintf (vect_dump, "found equal ranges "); |
| print_generic_expr (vect_dump, DR_REF (DDR_A (ddr_i)), TDF_SLIM); |
| fprintf (vect_dump, ", "); |
| print_generic_expr (vect_dump, DR_REF (DDR_B (ddr_i)), TDF_SLIM); |
| fprintf (vect_dump, " and "); |
| print_generic_expr (vect_dump, DR_REF (DDR_A (ddr_j)), TDF_SLIM); |
| fprintf (vect_dump, ", "); |
| print_generic_expr (vect_dump, DR_REF (DDR_B (ddr_j)), TDF_SLIM); |
| } |
| found = true; |
| break; |
| } |
| } |
| |
| if (found) |
| { |
| VEC_ordered_remove (ddr_p, ddrs, i); |
| continue; |
| } |
| i++; |
| } |
| |
| if (VEC_length (ddr_p, ddrs) > |
| (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS)) |
| { |
| if (vect_print_dump_info (REPORT_DR_DETAILS)) |
| { |
| fprintf (vect_dump, |
| "disable versioning for alias - max number of generated " |
| "checks exceeded."); |
| } |
| |
| VEC_truncate (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), 0); |
| |
| return false; |
| } |
| |
| return true; |
| } |
| |
| /* Recursively free the memory allocated for the SLP tree rooted at NODE. */ |
| |
| static void |
| vect_free_slp_tree (slp_tree node) |
| { |
| if (!node) |
| return; |
| |
| if (SLP_TREE_LEFT (node)) |
| vect_free_slp_tree (SLP_TREE_LEFT (node)); |
| |
| if (SLP_TREE_RIGHT (node)) |
| vect_free_slp_tree (SLP_TREE_RIGHT (node)); |
| |
| VEC_free (gimple, heap, SLP_TREE_SCALAR_STMTS (node)); |
| |
| if (SLP_TREE_VEC_STMTS (node)) |
| VEC_free (gimple, heap, SLP_TREE_VEC_STMTS (node)); |
| |
| free (node); |
| } |
| |
| |
| /* Free the memory allocated for the SLP instance. */ |
| |
| void |
| vect_free_slp_instance (slp_instance instance) |
| { |
| vect_free_slp_tree (SLP_INSTANCE_TREE (instance)); |
| VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (instance)); |
| VEC_free (slp_tree, heap, SLP_INSTANCE_LOADS (instance)); |
| } |
| |
| |
| /* Get the defs for the rhs of STMT (collect them in DEF_STMTS0/1), check that |
| they are of a legal type and that they match the defs of the first stmt of |
| the SLP group (stored in FIRST_STMT_...). */ |
| |
| static bool |
| vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, slp_tree slp_node, |
| gimple stmt, VEC (gimple, heap) **def_stmts0, |
| VEC (gimple, heap) **def_stmts1, |
| enum vect_def_type *first_stmt_dt0, |
| enum vect_def_type *first_stmt_dt1, |
| tree *first_stmt_def0_type, |
| tree *first_stmt_def1_type, |
| tree *first_stmt_const_oprnd, |
| int ncopies_for_cost, |
| bool *pattern0, bool *pattern1) |
| { |
| tree oprnd; |
| unsigned int i, number_of_oprnds; |
| tree def; |
| gimple def_stmt; |
| enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; |
| stmt_vec_info stmt_info = |
| vinfo_for_stmt (VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0)); |
| enum gimple_rhs_class rhs_class; |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| rhs_class = get_gimple_rhs_class (gimple_assign_rhs_code (stmt)); |
| number_of_oprnds = gimple_num_ops (stmt) - 1; /* RHS only */ |
| |
| for (i = 0; i < number_of_oprnds; i++) |
| { |
| oprnd = gimple_op (stmt, i + 1); |
| |
| if (!vect_is_simple_use (oprnd, loop_vinfo, &def_stmt, &def, &dt[i]) |
| || (!def_stmt && dt[i] != vect_constant_def)) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| { |
| fprintf (vect_dump, "Build SLP failed: can't find def for "); |
| print_generic_expr (vect_dump, oprnd, TDF_SLIM); |
| } |
| |
| return false; |
| } |
| |
| /* Check if DEF_STMT is a part of a pattern and get the def stmt from |
| the pattern. Check that all the stmts of the node are in the |
| pattern. */ |
| if (def_stmt && gimple_bb (def_stmt) |
| && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) |
| && vinfo_for_stmt (def_stmt) |
| && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))) |
| { |
| if (!*first_stmt_dt0) |
| *pattern0 = true; |
| else |
| { |
| if (i == 1 && !*first_stmt_dt1) |
| *pattern1 = true; |
| else if ((i == 0 && !*pattern0) || (i == 1 && !*pattern1)) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "Build SLP failed: some of the stmts" |
| " are in a pattern, and others are not "); |
| print_generic_expr (vect_dump, oprnd, TDF_SLIM); |
| } |
| |
| return false; |
| } |
| } |
| |
| def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)); |
| dt[i] = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)); |
| |
| if (dt[i] == vect_unknown_def_type) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "Unsupported pattern."); |
| return false; |
| } |
| |
| switch (gimple_code (def_stmt)) |
| { |
| case GIMPLE_PHI: |
| def = gimple_phi_result (def_stmt); |
| break; |
| |
| case GIMPLE_ASSIGN: |
| def = gimple_assign_lhs (def_stmt); |
| break; |
| |
| default: |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| fprintf (vect_dump, "unsupported defining stmt: "); |
| return false; |
| } |
| } |
| |
| if (!*first_stmt_dt0) |
| { |
| /* op0 of the first stmt of the group - store its info. */ |
| *first_stmt_dt0 = dt[i]; |
| if (def) |
| *first_stmt_def0_type = TREE_TYPE (def); |
| else |
| *first_stmt_const_oprnd = oprnd; |
| |
| /* Analyze costs (for the first stmt of the group only). */ |
| if (rhs_class != GIMPLE_SINGLE_RHS) |
| /* Not a memory operation (this function is not called for loads). */ |
| vect_model_simple_cost (stmt_info, ncopies_for_cost, dt, slp_node); |
| else |
| /* Store. */ |
| vect_model_store_cost (stmt_info, ncopies_for_cost, dt[0], slp_node); |
| } |
| |
| else |
| { |
| if (!*first_stmt_dt1 && i == 1) |
| { |
| /* op1 of the first stmt of the group - store its info. */ |
| *first_stmt_dt1 = dt[i]; |
| if (def) |
| *first_stmt_def1_type = TREE_TYPE (def); |
| else |
| { |
| /* We assume that the stmt contains only one constant |
| operand. We fail otherwise, to be on the safe side. */ |
| if (*first_stmt_const_oprnd) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| fprintf (vect_dump, "Build SLP failed: two constant " |
| "oprnds in stmt"); |
| return false; |
| } |
| *first_stmt_const_oprnd = oprnd; |
| } |
| } |
| else |
| { |
| /* Not the first stmt of the group; check that the def-stmt/s |
| match the def-stmt/s of the first stmt. */ |
| if ((i == 0 |
| && (*first_stmt_dt0 != dt[i] |
| || (*first_stmt_def0_type && def |
| && *first_stmt_def0_type != TREE_TYPE (def)))) |
| || (i == 1 |
| && (*first_stmt_dt1 != dt[i] |
| || (*first_stmt_def1_type && def |
| && *first_stmt_def1_type != TREE_TYPE (def)))) |
| || (!def |
| && TREE_TYPE (*first_stmt_const_oprnd) |
| != TREE_TYPE (oprnd))) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| fprintf (vect_dump, "Build SLP failed: different types "); |
| |
| return false; |
| } |
| } |
| } |
| |
| /* Check the types of the definitions. */ |
| switch (dt[i]) |
| { |
| case vect_constant_def: |
| case vect_invariant_def: |
| break; |
| |
| case vect_loop_def: |
| if (i == 0) |
| VEC_safe_push (gimple, heap, *def_stmts0, def_stmt); |
| else |
| VEC_safe_push (gimple, heap, *def_stmts1, def_stmt); |
| break; |
| |
| default: |
| /* FORNOW: Not supported. */ |
| if (vect_print_dump_info (REPORT_SLP)) |
| { |
| fprintf (vect_dump, "Build SLP failed: illegal type of def "); |
| print_generic_expr (vect_dump, def, TDF_SLIM); |
| } |
| |
| return false; |
| } |
| } |
| |
| return true; |
| } |
| |
| |
| /* Recursively build an SLP tree starting from NODE. |
| Fail (and return FALSE) if def-stmts are not isomorphic, require data |
| permutation, or are of unsupported operation types. Otherwise, return |
| TRUE. */ |
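| |
| As an illustration, an interleaved store group of size two: |
| |
| a[2*i] = b[2*i] + c[2*i]; |
| a[2*i + 1] = b[2*i + 1] + c[2*i + 1]; |
| |
| yields a root node holding the two stores, one child node holding the |
| two additions, and two grandchild nodes holding the interleaved load |
| groups of 'b' and 'c', where the recursion stops. |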
| |
| static bool |
| vect_build_slp_tree (loop_vec_info loop_vinfo, slp_tree *node, |
| unsigned int group_size, |
| int *inside_cost, int *outside_cost, |
| int ncopies_for_cost, unsigned int *max_nunits, |
| VEC (int, heap) **load_permutation, |
| VEC (slp_tree, heap) **loads) |
| { |
| VEC (gimple, heap) *def_stmts0 = VEC_alloc (gimple, heap, group_size); |
| VEC (gimple, heap) *def_stmts1 = VEC_alloc (gimple, heap, group_size); |
| unsigned int i; |
| VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (*node); |
| gimple stmt = VEC_index (gimple, stmts, 0); |
| enum vect_def_type first_stmt_dt0 = 0, first_stmt_dt1 = 0; |
| enum tree_code first_stmt_code = 0, rhs_code; |
| tree first_stmt_def1_type = NULL_TREE, first_stmt_def0_type = NULL_TREE; |
| tree lhs; |
| bool stop_recursion = false, need_same_oprnds = false; |
| tree vectype, scalar_type, first_op1 = NULL_TREE; |
| unsigned int vectorization_factor = 0, ncopies; |
| optab optab; |
| int icode; |
| enum machine_mode optab_op2_mode; |
| enum machine_mode vec_mode; |
| tree first_stmt_const_oprnd = NULL_TREE; |
| struct data_reference *first_dr; |
| bool pattern0 = false, pattern1 = false; |
| HOST_WIDE_INT dummy; |
| bool permutation = false; |
| unsigned int load_place; |
| gimple first_load; |
| |
| /* For every stmt in NODE find its def stmt/s. */ |
| for (i = 0; VEC_iterate (gimple, stmts, i, stmt); i++) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| { |
| fprintf (vect_dump, "Build SLP for "); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| |
| lhs = gimple_get_lhs (stmt); |
| if (lhs == NULL_TREE) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| { |
| fprintf (vect_dump, |
| "Build SLP failed: not GIMPLE_ASSIGN nor GIMPLE_CALL"); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| |
| return false; |
| } |
| |
| scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy); |
| vectype = get_vectype_for_scalar_type (scalar_type); |
| if (!vectype) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| { |
| fprintf (vect_dump, "Build SLP failed: unsupported data-type "); |
| print_generic_expr (vect_dump, scalar_type, TDF_SLIM); |
| } |
| return false; |
| } |
| |
| gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo)); |
| vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| ncopies = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype); |
| if (ncopies > 1 && vect_print_dump_info (REPORT_SLP)) |
| fprintf (vect_dump, "SLP with multiple types "); |
| |
| /* In case of multiple types we need to detect the smallest type. */ |
| if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype)) |
| *max_nunits = TYPE_VECTOR_SUBPARTS (vectype); |
| |
| if (is_gimple_call (stmt)) |
| rhs_code = CALL_EXPR; |
| else |
| rhs_code = gimple_assign_rhs_code (stmt); |
| |
| /* Check the operation. */ |
| if (i == 0) |
| { |
| first_stmt_code = rhs_code; |
| |
| /* Shift arguments should be equal in all the packed stmts for a |
| vector shift with scalar shift operand. */ |
| if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR |
| || rhs_code == LROTATE_EXPR |
| || rhs_code == RROTATE_EXPR) |
| { |
| vec_mode = TYPE_MODE (vectype); |
| |
| /* First see if we have a vector/vector shift. */ |
| optab = optab_for_tree_code (rhs_code, vectype, |
| optab_vector); |
| |
| if (!optab |
| || (optab->handlers[(int) vec_mode].insn_code |
| == CODE_FOR_nothing)) |
| { |
| /* No vector/vector shift, try for a vector/scalar shift. */ |
| optab = optab_for_tree_code (rhs_code, vectype, |
| optab_scalar); |
| |
| if (!optab) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| fprintf (vect_dump, "Build SLP failed: no optab."); |
| return false; |
| } |
| icode = (int) optab->handlers[(int) vec_mode].insn_code; |
| if (icode == CODE_FOR_nothing) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| fprintf (vect_dump, "Build SLP failed: " |
| "op not supported by target."); |
| return false; |
| } |
| optab_op2_mode = insn_data[icode].operand[2].mode; |
| if (!VECTOR_MODE_P (optab_op2_mode)) |
| { |
| need_same_oprnds = true; |
| first_op1 = gimple_assign_rhs2 (stmt); |
| } |
| } |
| } |
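| /* E.g., if the target provides only a vector/scalar shift, |
| { x0 >> 3, x1 >> 3 } can be SLPed, whereas { x0 >> 3, x1 >> 5 } |
| cannot, because a single scalar shift amount must feed the whole |
| vector shift. */ |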
| } |
| else |
| { |
| if (first_stmt_code != rhs_code |
| && (first_stmt_code != IMAGPART_EXPR |
| || rhs_code != REALPART_EXPR) |
| && (first_stmt_code != REALPART_EXPR |
| || rhs_code != IMAGPART_EXPR)) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| { |
| fprintf (vect_dump, |
| "Build SLP failed: different operation in stmt "); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| |
| return false; |
| } |
| |
| if (need_same_oprnds |
| && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0)) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| { |
| fprintf (vect_dump, |
| "Build SLP failed: different shift arguments in "); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| |
| return false; |
| } |
| } |
| |
| /* Strided store or load. */ |
| if (STMT_VINFO_STRIDED_ACCESS (vinfo_for_stmt (stmt))) |
| { |
| if (REFERENCE_CLASS_P (lhs)) |
| { |
| /* Store. */ |
| if (!vect_get_and_check_slp_defs (loop_vinfo, *node, stmt, |
| &def_stmts0, &def_stmts1, |
| &first_stmt_dt0, |
| &first_stmt_dt1, |
| &first_stmt_def0_type, |
| &first_stmt_def1_type, |
| &first_stmt_const_oprnd, |
| ncopies_for_cost, |
| &pattern0, &pattern1)) |
| return false; |
| } |
| else |
| { |
| /* Load. */ |
| /* FORNOW: Check that there is no gap between the loads. */ |
| if ((DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) == stmt |
| && DR_GROUP_GAP (vinfo_for_stmt (stmt)) != 0) |
| || (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) != stmt |
| && DR_GROUP_GAP (vinfo_for_stmt (stmt)) != 1)) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| { |
| fprintf (vect_dump, "Build SLP failed: strided " |
| "loads have gaps "); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| |
| return false; |
| } |
| |
| /* Check that the size of the interleaved loads group is not |
| greater than the SLP group size. */ |
| if (DR_GROUP_SIZE (vinfo_for_stmt (stmt)) |
| > ncopies * group_size) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| { |
| fprintf (vect_dump, "Build SLP failed: the number of " |
| "interleaved loads is greater than" |
| " the SLP group size "); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| |
| return false; |
| } |
| |
| first_load = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)); |
| |
| if (first_load == stmt) |
| { |
| first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)); |
| if (vect_supportable_dr_alignment (first_dr) |
| == dr_unaligned_unsupported) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| { |
| fprintf (vect_dump, "Build SLP failed: unsupported " |
| "unaligned load "); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| |
| return false; |
| } |
| |
| /* Analyze costs (for the first stmt in the group). */ |
| vect_model_load_cost (vinfo_for_stmt (stmt), |
| ncopies_for_cost, *node); |
| } |
| |
| /* Store the place of this load in the interleaving chain. In |
| case a permutation is needed, we later decide whether the |
| specific permutation is supported. */ |
| load_place = vect_get_place_in_interleaving_chain (stmt, |
| first_load); |
| if (load_place != i) |
| permutation = true; |
| |
| VEC_safe_push (int, heap, *load_permutation, load_place); |
| |
| /* We stop the tree when we reach a group of loads. */ |
| stop_recursion = true; |
| continue; |
| } |
| } /* Strided access. */ |
| else |
| { |
| if (TREE_CODE_CLASS (rhs_code) == tcc_reference) |
| { |
| /* Non-strided load. */ |
| if (vect_print_dump_info (REPORT_SLP)) |
| { |
| fprintf (vect_dump, "Build SLP failed: not strided load "); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| |
| /* FORNOW: non-strided loads are not supported. */ |
| return false; |
| } |
| |
| /* Not a memory operation. */ |
| if (TREE_CODE_CLASS (rhs_code) != tcc_binary |
| && TREE_CODE_CLASS (rhs_code) != tcc_unary) |
| { |
| if (vect_print_dump_info (REPORT_SLP)) |
| { |
| fprintf (vect_dump, "Build SLP failed: operation"); |
| fprintf (vect_dump, " unsupported "); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| |
| return false; |
| } |
| |
| /* Find the def-stmts. */ |
| if (!vect_get_and_check_slp_defs (loop_vinfo, *node, stmt, |
| &def_stmts0, &def_stmts1, |
| &first_stmt_dt0, &first_stmt_dt1, |
| &first_stmt_def0_type, |
| &first_stmt_def1_type, |
| &first_stmt_const_oprnd, |
| ncopies_for_cost, |
| &pattern0, &pattern1)) |
| return false; |
| } |
| } |
| |
| /* Add the costs of the node to the overall instance costs. */ |
| *inside_cost += SLP_TREE_INSIDE_OF_LOOP_COST (*node); |
| *outside_cost += SLP_TREE_OUTSIDE_OF_LOOP_COST (*node); |
| |
| /* Strided loads were reached - stop the recursion. */ |
| if (stop_recursion) |
| { |
| if (permutation) |
| { |
| VEC_safe_push (slp_tree, heap, *loads, *node); |
| *inside_cost += TARG_VEC_PERMUTE_COST * group_size; |
| } |
| |
| return true; |
| } |
| |
| /* Create SLP_TREE nodes for the definition node/s. */ |
| if (first_stmt_dt0 == vect_loop_def) |
| { |
| slp_tree left_node = XNEW (struct _slp_tree); |
| SLP_TREE_SCALAR_STMTS (left_node) = def_stmts0; |
| SLP_TREE_VEC_STMTS (left_node) = NULL; |
| SLP_TREE_LEFT (left_node) = NULL; |
| SLP_TREE_RIGHT (left_node) = NULL; |
| SLP_TREE_OUTSIDE_OF_LOOP_COST (left_node) = 0; |
| SLP_TREE_INSIDE_OF_LOOP_COST (left_node) = 0; |
| if (!vect_build_slp_tree (loop_vinfo, &left_node, group_size, |
| inside_cost, outside_cost, ncopies_for_cost, |
| max_nunits, load_permutation, loads)) |
| return false; |
| |
| SLP_TREE_LEFT (*node) = left_node; |
| } |
| |
| if (first_stmt_dt1 == vect_loop_def) |
| { |
| slp_tree right_node = XNEW (struct _slp_tree); |
| SLP_TREE_SCALAR_STMTS (right_node) = def_stmts1; |
| SLP_TREE_VEC_STMTS (right_node) = NULL; |
| SLP_TREE_LEFT (right_node) = NULL; |
| SLP_TREE_RIGHT (right_node) = NULL; |
| SLP_TREE_OUTSIDE_OF_LOOP_COST (right_node) = 0; |
| SLP_TREE_INSIDE_OF_LOOP_COST (right_node) = 0; |
| if (!vect_build_slp_tree (loop_vinfo, &right_node, group_size, |
| inside_cost, outside_cost, ncopies_for_cost, |
| max_nunits, load_permutation, loads)) |
| return false; |
| |
| SLP_TREE_RIGHT (*node) = right_node; |
| } |
| |
| return true; |
| } |
| |
| |
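| /* Dump the SLP tree rooted at NODE to VECT_DUMP: one line per scalar |
| stmt, recursing into the left and right children. */ |
| |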
| static void |
| vect_print_slp_tree (slp_tree node) |
| { |
| int i; |
| gimple stmt; |
| |
| if (!node) |
| return; |
| |
| fprintf (vect_dump, "node "); |
| for (i = 0; VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt); i++) |
| { |
| fprintf (vect_dump, "\n\tstmt %d ", i); |
| print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM); |
| } |
| fprintf (vect_dump, "\n"); |
| |
| vect_print_slp_tree (SLP_TREE_LEFT (node)); |
| vect_print_slp_tree (SLP_TREE_RIGHT (node)); |
| } |
| |
| |
| /* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID). |
| If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index |
| J). Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the |
| stmts in NODE are to be marked. */ |
| |
| static void |
| vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j) |
| { |
| int i; |
| gimple stmt; |
| |
| if (!node) |
| return; |
| |
| for (i = 0; VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt); i++) |
| if (j < 0 || i == j) |
| STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark; |
| |
| vect_mark_slp_stmts (SLP_TREE_LEFT (node), mark, j); |
| vect_mark_slp_stmts (SLP_TREE_RIGHT (node), mark, j); |
| } |
| |
| |
| /* Check if the permutation required by the SLP INSTANCE is supported. |
| Reorganize the SLP nodes stored in SLP_INSTANCE_LOADS if needed. */ |
| |
| static bool |
| vect_supported_slp_permutation_p (slp_instance instance) |
| { |
| slp_tree node = VEC_index (slp_tree, SLP_INSTANCE_LOADS (instance), 0); |
| gimple stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0); |
| gimple first_load = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)); |
| VEC (slp_tree, heap) *sorted_loads = NULL; |
| int index; |
| slp_tree *tmp_loads = NULL; |
| int group_size = SLP_INSTANCE_GROUP_SIZE (instance), i, j; |
| slp_tree load; |
| |
| /* FORNOW: The only supported load permutation is one in which all the |
| loads in a node access the same location, and the data-refs in the |
| nodes of LOADS constitute an interleaving chain. |
| Sort the nodes according to the order of the accesses in the chain. */ |
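| /* E.g., with GROUP_SIZE 2 and two load nodes whose LOAD_PERMUTATION |
| is {1, 1, 0, 0} (each node reads a single element of a two-element |
| interleaving chain), the nodes are reordered so that the node |
| loading element 0 comes first. */ |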
| tmp_loads = (slp_tree *) xmalloc (sizeof (slp_tree) * group_size); |
| for (i = 0, j = 0; |
| VEC_iterate (int, SLP_INSTANCE_LOAD_PERMUTATION (instance), i, index) |
| && VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (instance), j, load); |
| i += group_size, j++) |
| { |
| gimple scalar_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (load), 0); |
| /* Check that the loads are all in the same interleaving chain. */ |
| if (DR_GROUP_FIRST_DR (vinfo_for_stmt (scalar_stmt)) != first_load) |
| { |
| if (vect_print_dump_info (REPORT_DETAILS)) |
| { |
| fprintf (vect_dump, "Build SLP failed: unsupported data " |
| "permutation "); |
| print_gimple_stmt (vect_dump, scalar_stmt, 0, TDF_SLIM); |
| } |
| |
| free (tmp_loads); |
| return false; |
| } |
| |
| tmp_loads[index] = load; |
| } |
| |
| sorted_loads = VEC_alloc (slp_tree, heap, group_size); |
| for (i = 0; i < group_size; i++) |
| VEC_safe_push (slp_tree, heap, sorted_loads, tmp_loads[i]); |
| |
|