| /* Data References Analysis and Manipulation Utilities for Vectorization. |
| Copyright (C) 2003-2017 Free Software Foundation, Inc. |
| Contributed by Dorit Naishlos <dorit@il.ibm.com> |
| and Ira Rosen <irar@il.ibm.com> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "predict.h" |
| #include "memmodel.h" |
| #include "tm_p.h" |
| #include "ssa.h" |
| #include "optabs-tree.h" |
| #include "cgraph.h" |
| #include "dumpfile.h" |
| #include "alias.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "tree-eh.h" |
| #include "gimplify.h" |
| #include "gimple-iterator.h" |
| #include "gimplify-me.h" |
| #include "tree-ssa-loop-ivopts.h" |
| #include "tree-ssa-loop-manip.h" |
| #include "tree-ssa-loop.h" |
| #include "cfgloop.h" |
| #include "tree-scalar-evolution.h" |
| #include "tree-vectorizer.h" |
| #include "expr.h" |
| #include "builtins.h" |
| #include "params.h" |
| #include "tree-cfg.h" |
| #include "tree-hash-traits.h" |
| |
| /* Return true if load- or store-lanes optab OPTAB is implemented for |
| COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */ |
| |
| static bool |
| vect_lanes_optab_supported_p (const char *name, convert_optab optab, |
| tree vectype, unsigned HOST_WIDE_INT count) |
| { |
| machine_mode mode; |
| scalar_int_mode array_mode; |
| bool limit_p; |
| |
| mode = TYPE_MODE (vectype); |
| limit_p = !targetm.array_mode_supported_p (mode, count); |
| if (!int_mode_for_size (count * GET_MODE_BITSIZE (mode), |
| limit_p).exists (&array_mode)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n", |
| GET_MODE_NAME (mode), count); |
| return false; |
| } |
| |
| if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "cannot use %s<%s><%s>\n", name, |
| GET_MODE_NAME (array_mode), GET_MODE_NAME (mode)); |
| return false; |
| } |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode), |
| GET_MODE_NAME (mode)); |
| |
| return true; |
| } |
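| |
| /* As an illustration of the check above (illustrative example, assuming an |
| AArch64-like target): for VECTYPE V4SI, TYPE_MODE is the 128-bit mode |
| V4SImode, so with COUNT == 2 the array mode must be a 256-bit integer |
| mode such as OImode. vect_load_lanes_optab is then usable only if the |
| target provides a vec_load_lanes pattern converting between that array |
| mode and V4SImode, as AArch64 does via its LD2 instruction. */ |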
| |
| |
| /* Return the smallest scalar part of STMT. |
| This is used to determine the vectype of the stmt. We generally set the |
| vectype according to the type of the result (lhs). For stmts whose |
| result-type is different from the type of the arguments (e.g., demotion, |
| promotion), vectype will be reset appropriately (later). Note that we have |
| to visit the smallest datatype in this function, because that determines the |
| VF. If the smallest datatype in the loop is present only as the rhs of a |
| promotion operation - we'd miss it. |
| Such a case, where a variable of this datatype does not appear in the lhs |
| anywhere in the loop, can only occur if it's an invariant: e.g.: |
| 'int_x = (int) short_inv', which we'd expect to have been optimized away by |
| invariant motion. However, we cannot rely on invariant motion to always |
| take invariants out of the loop, and so in the case of promotion we also |
| have to check the rhs. |
| LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding |
| types. */ |
| |
| tree |
| vect_get_smallest_scalar_type (gimple *stmt, HOST_WIDE_INT *lhs_size_unit, |
| HOST_WIDE_INT *rhs_size_unit) |
| { |
| tree scalar_type = gimple_expr_type (stmt); |
| HOST_WIDE_INT lhs, rhs; |
| |
| /* During the analysis phase, this function is called on arbitrary |
| statements that might not have scalar results. */ |
| if (!tree_fits_uhwi_p (TYPE_SIZE_UNIT (scalar_type))) |
| return scalar_type; |
| |
| lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); |
| |
| if (is_gimple_assign (stmt) |
| && (gimple_assign_cast_p (stmt) |
| || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR |
| || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR |
| || gimple_assign_rhs_code (stmt) == FLOAT_EXPR)) |
| { |
| tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); |
| |
| rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type)); |
| if (rhs < lhs) |
| scalar_type = rhs_type; |
| } |
| |
| *lhs_size_unit = lhs; |
| *rhs_size_unit = rhs; |
| return scalar_type; |
| } |
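| |
| /* For example, for a widening statement such as |
| |
| int_x = (int) short_y; |
| |
| the lhs size is 4 bytes and the rhs size is 2 bytes, so the function |
| returns the short type. With 128-bit vectors that makes the |
| vectorization factor 8, rather than the 4 that the lhs type alone |
| would suggest. */ |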
| |
| |
| /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be |
| tested at run-time. Return TRUE if DDR was successfully inserted. |
| Return FALSE if versioning is not supported. */ |
| |
| static bool |
| vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo) |
| { |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0) |
| return false; |
| |
| if (!runtime_alias_check_p (ddr, loop, |
| optimize_loop_nest_for_speed_p (loop))) |
| return false; |
| |
| LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr); |
| return true; |
| } |
| |
| |
| /* A subroutine of vect_analyze_data_ref_dependence. Handle |
| DDR_COULD_BE_INDEPENDENT_P ddr DDR that has a known set of dependence |
| distances. These distances are conservatively correct but they don't |
| reflect a guaranteed dependence. |
| |
| Return true if this function does all the work necessary to avoid |
| an alias or false if the caller should use the dependence distances |
| to limit the vectorization factor in the usual way. LOOP_DEPTH is |
| the depth of the loop described by LOOP_VINFO and the other arguments |
| are as for vect_analyze_data_ref_dependence. */ |
| |
| static bool |
| vect_analyze_possibly_independent_ddr (data_dependence_relation *ddr, |
| loop_vec_info loop_vinfo, |
| int loop_depth, int *max_vf) |
| { |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| lambda_vector dist_v; |
| unsigned int i; |
| FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v) |
| { |
| int dist = dist_v[loop_depth]; |
| if (dist != 0 && !(dist > 0 && DDR_REVERSED_P (ddr))) |
| { |
| /* If the user asserted safelen >= DIST consecutive iterations |
| can be executed concurrently, assume independence. |
| |
| ??? An alternative would be to add the alias check even |
| in this case, and vectorize the fallback loop with the |
| maximum VF set to safelen. However, if the user has |
| explicitly given a length, it's less likely that that |
| would be a win. */ |
| if (loop->safelen >= 2 && abs_hwi (dist) <= loop->safelen) |
| { |
| if (loop->safelen < *max_vf) |
| *max_vf = loop->safelen; |
| LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false; |
| continue; |
| } |
| |
| /* For dependence distances of 2 or more, we have the option |
| of limiting VF or checking for an alias at runtime. |
| Prefer to check at runtime if we can, to avoid limiting |
| the VF unnecessarily when the bases are in fact independent. |
| |
| Note that the alias checks will be removed if the VF ends up |
| being small enough. */ |
| return vect_mark_for_runtime_alias_test (ddr, loop_vinfo); |
| } |
| } |
| return true; |
| } |
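| |
| /* A hypothetical example of the situation handled above: |
| |
| struct s { int x[100]; }; |
| void f (struct s *a, struct s *b) |
| { |
| for (int i = 0; i < 96; i++) |
| a->x[i + 4] = b->x[i] + 1; |
| } |
| |
| If A and B point to the same object the dependence distance is 4, which |
| would limit the VF to 4; if they point to distinct objects the accesses |
| are independent. Rather than limiting the VF we prefer a run-time alias |
| check, unless the user-provided safelen already covers the distance. */ |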
| |
| |
| /* Function vect_analyze_data_ref_dependence. |
| |
| Return TRUE if there (might) exist a dependence between a memory-reference |
| DRA and a memory-reference DRB. If the dependence can instead be handled |
| by a run-time alias check (versioning for alias), record the DDR for that |
| check and return FALSE. Adjust *MAX_VF according to the data dependence. */ |
| |
| static bool |
| vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr, |
| loop_vec_info loop_vinfo, int *max_vf) |
| { |
| unsigned int i; |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| struct data_reference *dra = DDR_A (ddr); |
| struct data_reference *drb = DDR_B (ddr); |
| stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra)); |
| stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb)); |
| lambda_vector dist_v; |
| unsigned int loop_depth; |
| |
| /* In loop analysis all data references should be vectorizable. */ |
| if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a) |
| || !STMT_VINFO_VECTORIZABLE (stmtinfo_b)) |
| gcc_unreachable (); |
| |
| /* Independent data accesses. */ |
| if (DDR_ARE_DEPENDENT (ddr) == chrec_known) |
| return false; |
| |
| if (dra == drb |
| || (DR_IS_READ (dra) && DR_IS_READ (drb))) |
| return false; |
| |
| /* We do not have to consider dependences between accesses that belong |
| to the same group. */ |
| if (GROUP_FIRST_ELEMENT (stmtinfo_a) |
| && GROUP_FIRST_ELEMENT (stmtinfo_a) == GROUP_FIRST_ELEMENT (stmtinfo_b)) |
| return false; |
| |
| /* Even if we have an anti-dependence then, as the vectorized loop covers at |
| least two scalar iterations, there is always also a true dependence. |
| As the vectorizer does not re-order loads and stores we can ignore |
| the anti-dependence if TBAA can disambiguate both DRs similar to the |
| case with known negative distance anti-dependences (positive |
| distance anti-dependences would violate TBAA constraints). */ |
| if (((DR_IS_READ (dra) && DR_IS_WRITE (drb)) |
| || (DR_IS_WRITE (dra) && DR_IS_READ (drb))) |
| && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)), |
| get_alias_set (DR_REF (drb)))) |
| return false; |
| |
| /* Unknown data dependence. */ |
| if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) |
| { |
| /* If user asserted safelen consecutive iterations can be |
| executed concurrently, assume independence. */ |
| if (loop->safelen >= 2) |
| { |
| if (loop->safelen < *max_vf) |
| *max_vf = loop->safelen; |
| LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false; |
| return false; |
| } |
| |
| if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a) |
| || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b)) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "versioning for alias not supported for: " |
| "can't determine dependence between "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| DR_REF (dra)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, " and "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| DR_REF (drb)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| return true; |
| } |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "versioning for alias required: " |
| "can't determine dependence between "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| DR_REF (dra)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, " and "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| DR_REF (drb)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| |
| /* Add to list of ddrs that need to be tested at run-time. */ |
| return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo); |
| } |
| |
| /* Known data dependence. */ |
| if (DDR_NUM_DIST_VECTS (ddr) == 0) |
| { |
| /* If user asserted safelen consecutive iterations can be |
| executed concurrently, assume independence. */ |
| if (loop->safelen >= 2) |
| { |
| if (loop->safelen < *max_vf) |
| *max_vf = loop->safelen; |
| LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false; |
| return false; |
| } |
| |
| if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a) |
| || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b)) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "versioning for alias not supported for: " |
| "bad dist vector for "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| DR_REF (dra)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, " and "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| DR_REF (drb)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| return true; |
| } |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "versioning for alias required: " |
| "bad dist vector for "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, " and "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| /* Add to list of ddrs that need to be tested at run-time. */ |
| return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo); |
| } |
| |
| loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr)); |
| |
| if (DDR_COULD_BE_INDEPENDENT_P (ddr) |
| && vect_analyze_possibly_independent_ddr (ddr, loop_vinfo, |
| loop_depth, max_vf)) |
| return false; |
| |
| FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v) |
| { |
| int dist = dist_v[loop_depth]; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "dependence distance = %d.\n", dist); |
| |
| if (dist == 0) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "dependence distance == 0 between "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| /* When we perform grouped accesses and perform implicit CSE |
| by detecting equal accesses and doing disambiguation with |
| runtime alias tests like for |
| .. = a[i]; |
| .. = a[i+1]; |
| a[i] = ..; |
| a[i+1] = ..; |
| *p = ..; |
| .. = a[i]; |
| .. = a[i+1]; |
| where we will end up loading { a[i], a[i+1] } once, make |
| sure that inserting group loads before the first load and |
| stores after the last store will do the right thing. |
| Similar for groups like |
| a[i] = ...; |
| ... = a[i]; |
| a[i+1] = ...; |
| where loads from the group interleave with the store. */ |
| if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a) |
| || STMT_VINFO_GROUPED_ACCESS (stmtinfo_b)) |
| { |
| gimple *earlier_stmt; |
| earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb)); |
| if (DR_IS_WRITE |
| (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt)))) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "READ_WRITE dependence in interleaving." |
| "\n"); |
| return true; |
| } |
| } |
| |
| continue; |
| } |
| |
| if (dist > 0 && DDR_REVERSED_P (ddr)) |
| { |
| /* If DDR_REVERSED_P the order of the data-refs in DDR was |
| reversed (to make distance vector positive), and the actual |
| distance is negative. */ |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "dependence distance negative.\n"); |
| /* Record a negative dependence distance to later limit the |
| amount of stmt copying / unrolling we can perform. |
| Only need to handle read-after-write dependence. */ |
| if (DR_IS_READ (drb) |
| && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0 |
| || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist)) |
| STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist; |
| continue; |
| } |
| |
| if (abs (dist) >= 2 |
| && abs (dist) < *max_vf) |
| { |
| /* The dependence distance requires reduction of the maximal |
| vectorization factor. */ |
| *max_vf = abs (dist); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "adjusting maximal vectorization factor to %i\n", |
| *max_vf); |
| } |
| |
| if (abs (dist) >= *max_vf) |
| { |
| /* Dependence distance does not create dependence, as far as |
| vectorization is concerned, in this case. */ |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "dependence distance >= VF.\n"); |
| continue; |
| } |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized, possible dependence " |
| "between data-refs "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, " and "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| |
| return true; |
| } |
| |
| return false; |
| } |
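| |
| /* As a concrete example of the distance handling above (hypothetical |
| loop): |
| |
| for (i = 0; i < n; i++) |
| a[i + 3] = a[i] + 1; |
| |
| the read a[i] uses the value stored three iterations earlier, so the |
| dependence distance is 3. The loop is still vectorizable, but *MAX_VF |
| is reduced to 3; with a larger VF the vector load would read a[i + 3] |
| before the vector store that provides its value. */ |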
| |
| /* Function vect_analyze_data_ref_dependences. |
| |
| Examine all the data references in the loop, and make sure there do not |
| exist any data dependences between them. Set *MAX_VF according to |
| the maximum vectorization factor the data dependences allow. */ |
| |
| bool |
| vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf) |
| { |
| unsigned int i; |
| struct data_dependence_relation *ddr; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_analyze_data_ref_dependences ===\n"); |
| |
| LOOP_VINFO_DDRS (loop_vinfo) |
| .create (LOOP_VINFO_DATAREFS (loop_vinfo).length () |
| * LOOP_VINFO_DATAREFS (loop_vinfo).length ()); |
| LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true; |
| /* We need read-read dependences to compute STMT_VINFO_SAME_ALIGN_REFS. */ |
| if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo), |
| &LOOP_VINFO_DDRS (loop_vinfo), |
| LOOP_VINFO_LOOP_NEST (loop_vinfo), true)) |
| return false; |
| |
| /* For epilogues we either have no aliases or alias versioning |
| was applied to the original loop. Therefore we may just take MAX_VF |
| from the original loop's VF. */ |
| if (LOOP_VINFO_EPILOGUE_P (loop_vinfo)) |
| *max_vf = LOOP_VINFO_ORIG_MAX_VECT_FACTOR (loop_vinfo); |
| else |
| FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr) |
| if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf)) |
| return false; |
| |
| return true; |
| } |
| |
| |
| /* Function vect_slp_analyze_data_ref_dependence. |
| |
| Return TRUE if there (might) exist a dependence between a memory-reference |
| DRA and a memory-reference DRB. Unlike the loop variant above, there is |
| no versioning for alias and no *MAX_VF to adjust here: if the references |
| are not known to be independent, the dependence is simply reported. */ |
| |
| static bool |
| vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr) |
| { |
| struct data_reference *dra = DDR_A (ddr); |
| struct data_reference *drb = DDR_B (ddr); |
| |
| /* We need to check dependences of statements marked as unvectorizable |
| as well; they can still prohibit vectorization. */ |
| |
| /* Independent data accesses. */ |
| if (DDR_ARE_DEPENDENT (ddr) == chrec_known) |
| return false; |
| |
| if (dra == drb) |
| return false; |
| |
| /* Read-read is OK. */ |
| if (DR_IS_READ (dra) && DR_IS_READ (drb)) |
| return false; |
| |
| /* If dra and drb are part of the same interleaving chain consider |
| them independent. */ |
| if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra))) |
| && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra))) |
| == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb))))) |
| return false; |
| |
| /* Unknown data dependence. */ |
| if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "can't determine dependence between "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, " and "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| } |
| else if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "determined dependence between "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| return true; |
| } |
| |
| |
| /* Analyze dependences involved in the transform of SLP NODE. STORES |
| contains the vector of scalar stores of this instance if we are |
| disambiguating the loads; LAST_STORE is the last of those stores. */ |
| |
| static bool |
| vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node, |
| vec<gimple *> stores, gimple *last_store) |
| { |
| /* This walks over all stmts involved in the SLP load/store done |
| in NODE verifying we can sink them up to the last stmt in the |
| group. */ |
| gimple *last_access = vect_find_last_scalar_stmt_in_slp (node); |
| for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k) |
| { |
| gimple *access = SLP_TREE_SCALAR_STMTS (node)[k]; |
| if (access == last_access) |
| continue; |
| data_reference *dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (access)); |
| for (gimple_stmt_iterator gsi = gsi_for_stmt (access); |
| gsi_stmt (gsi) != last_access; gsi_next (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| if (! gimple_vuse (stmt) |
| || (DR_IS_READ (dr_a) && ! gimple_vdef (stmt))) |
| continue; |
| |
| /* If we couldn't record a (single) data reference for this |
| stmt we have to give up. */ |
| /* ??? Here and below if dependence analysis fails we can resort |
| to the alias oracle which can handle more kinds of stmts. */ |
| data_reference *dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)); |
| if (!dr_b) |
| return false; |
| |
| bool dependent = false; |
| /* If we run into a store of this same instance (we've just |
| marked those) then delay dependence checking until we run |
| into the last store because this is where it will have |
| been sunk to (and we verify if we can do that as well). */ |
| if (gimple_visited_p (stmt)) |
| { |
| if (stmt != last_store) |
| continue; |
| unsigned i; |
| gimple *store; |
| FOR_EACH_VEC_ELT (stores, i, store) |
| { |
| data_reference *store_dr |
| = STMT_VINFO_DATA_REF (vinfo_for_stmt (store)); |
| ddr_p ddr = initialize_data_dependence_relation |
| (dr_a, store_dr, vNULL); |
| dependent = vect_slp_analyze_data_ref_dependence (ddr); |
| free_dependence_relation (ddr); |
| if (dependent) |
| break; |
| } |
| } |
| else |
| { |
| ddr_p ddr = initialize_data_dependence_relation (dr_a, |
| dr_b, vNULL); |
| dependent = vect_slp_analyze_data_ref_dependence (ddr); |
| free_dependence_relation (ddr); |
| } |
| if (dependent) |
| return false; |
| } |
| } |
| return true; |
| } |
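| |
| /* A sketch of what the walk above checks (hypothetical basic-block |
| code): |
| |
| a[0] = x; |
| *p = 1; |
| a[1] = y; |
| |
| The two stores to a[] form one SLP group and the vector store will be |
| emitted at the position of the last group member (a[1] = y), so |
| a[0] = x must be sinkable past *p = 1. That is only possible if |
| dependence analysis proves that *p does not alias a[0]. */ |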
| |
| |
| /* Function vect_slp_analyze_instance_dependence. |
| |
| Examine all the data references in the SLP instance INSTANCE and make |
| sure that no data dependences prevent moving its loads and stores to |
| the insertion points chosen for the vectorized statements. */ |
| |
| bool |
| vect_slp_analyze_instance_dependence (slp_instance instance) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_slp_analyze_instance_dependence ===\n"); |
| |
| /* The stores of this instance are at the root of the SLP tree. */ |
| slp_tree store = SLP_INSTANCE_TREE (instance); |
| if (! STMT_VINFO_DATA_REF (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (store)[0]))) |
| store = NULL; |
| |
| /* Verify we can sink stores to the vectorized stmt insert location. */ |
| gimple *last_store = NULL; |
| if (store) |
| { |
| if (! vect_slp_analyze_node_dependences (instance, store, vNULL, NULL)) |
| return false; |
| |
| /* Mark stores in this instance and remember the last one. */ |
| last_store = vect_find_last_scalar_stmt_in_slp (store); |
| for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k) |
| gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k], true); |
| } |
| |
| bool res = true; |
| |
| /* Verify we can sink loads to the vectorized stmt insert location, |
| special-casing stores of this instance. */ |
| slp_tree load; |
| unsigned int i; |
| FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load) |
| if (! vect_slp_analyze_node_dependences (instance, load, |
| store |
| ? SLP_TREE_SCALAR_STMTS (store) |
| : vNULL, last_store)) |
| { |
| res = false; |
| break; |
| } |
| |
| /* Unset the visited flag. */ |
| if (store) |
| for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k) |
| gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k], false); |
| |
| return res; |
| } |
| |
| /* Record in VINFO the base alignment guarantee given by DRB. STMT is |
| the statement that contains DRB, which is useful for recording in the |
| dump file. */ |
| |
| static void |
| vect_record_base_alignment (vec_info *vinfo, gimple *stmt, |
| innermost_loop_behavior *drb) |
| { |
| bool existed; |
| innermost_loop_behavior *&entry |
| = vinfo->base_alignments.get_or_insert (drb->base_address, &existed); |
| if (!existed || entry->base_alignment < drb->base_alignment) |
| { |
| entry = drb; |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "recording new base alignment for "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, drb->base_address); |
| dump_printf (MSG_NOTE, "\n"); |
| dump_printf_loc (MSG_NOTE, vect_location, |
| " alignment: %d\n", drb->base_alignment); |
| dump_printf_loc (MSG_NOTE, vect_location, |
| " misalignment: %d\n", drb->base_misalignment); |
| dump_printf_loc (MSG_NOTE, vect_location, |
| " based on: "); |
| dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); |
| } |
| } |
| } |
| |
| /* If the region we're going to vectorize is reached, all unconditional |
| data references occur at least once. We can therefore pool the base |
| alignment guarantees from each unconditional reference. Do this by |
| going through all the data references in VINFO and checking whether |
| the containing statement makes the reference unconditionally. If so, |
| record the alignment of the base address in VINFO so that it can be |
| used for all other references with the same base. */ |
| |
| void |
| vect_record_base_alignments (vec_info *vinfo) |
| { |
| loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); |
| struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL; |
| data_reference *dr; |
| unsigned int i; |
| FOR_EACH_VEC_ELT (vinfo->datarefs, i, dr) |
| if (!DR_IS_CONDITIONAL_IN_STMT (dr)) |
| { |
| gimple *stmt = DR_STMT (dr); |
| vect_record_base_alignment (vinfo, stmt, &DR_INNERMOST (dr)); |
| |
| /* If DR is nested in the loop that is being vectorized, we can also |
| record the alignment of the base wrt the outer loop. */ |
| if (loop && nested_in_vect_loop_p (loop, stmt)) |
| { |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| vect_record_base_alignment |
| (vinfo, stmt, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info)); |
| } |
| } |
| } |
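| |
| /* As an illustration (hypothetical situation): if two data references in |
| the region share the same DRB->base_address and the unconditional one |
| carries the stronger base_alignment guarantee (say 16 bytes versus 4), |
| the 16-byte guarantee is recorded here and picked up again in |
| vect_compute_data_ref_alignment through the base_alignments map, so the |
| weaker reference also benefits from it. */ |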
| |
| /* Return the target alignment for the vectorized form of DR. */ |
| |
| static unsigned int |
| vect_calculate_target_alignment (struct data_reference *dr) |
| { |
| gimple *stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
| return targetm.vectorize.preferred_vector_alignment (vectype); |
| } |
| |
| /* Function vect_compute_data_ref_alignment |
| |
| Compute the misalignment of the data reference DR. |
| |
| Output: |
| 1. If during the misalignment computation it is found that the data reference |
| cannot be vectorized then false is returned. |
| 2. DR_MISALIGNMENT (DR) is defined. |
| |
| FOR NOW: No analysis is actually performed. Misalignment is calculated |
| only for trivial cases. TODO. */ |
| |
| bool |
| vect_compute_data_ref_alignment (struct data_reference *dr) |
| { |
| gimple *stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| vec_base_alignments *base_alignments = &stmt_info->vinfo->base_alignments; |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| struct loop *loop = NULL; |
| tree ref = DR_REF (dr); |
| tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "vect_compute_data_ref_alignment:\n"); |
| |
| if (loop_vinfo) |
| loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| /* Initialize misalignment to unknown. */ |
| SET_DR_MISALIGNMENT (dr, DR_MISALIGNMENT_UNKNOWN); |
| |
| innermost_loop_behavior *drb = vect_dr_behavior (dr); |
| bool step_preserves_misalignment_p; |
| |
| unsigned HOST_WIDE_INT vector_alignment |
| = vect_calculate_target_alignment (dr) / BITS_PER_UNIT; |
| DR_TARGET_ALIGNMENT (dr) = vector_alignment; |
| |
| /* No step for BB vectorization. */ |
| if (!loop) |
| { |
| gcc_assert (integer_zerop (drb->step)); |
| step_preserves_misalignment_p = true; |
| } |
| |
| /* In case the dataref is in an inner-loop of the loop that is being |
| vectorized (LOOP), we use the base and misalignment information |
| relative to the outer-loop (LOOP). This is ok only if the misalignment |
| stays the same throughout the execution of the inner-loop, which is why |
| we have to check that the stride of the dataref in the inner-loop is |
| divisible by the vector alignment. */ |
| else if (nested_in_vect_loop_p (loop, stmt)) |
| { |
| step_preserves_misalignment_p |
| = (DR_STEP_ALIGNMENT (dr) % vector_alignment) == 0; |
| |
| if (dump_enabled_p ()) |
| { |
| if (step_preserves_misalignment_p) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "inner step divides the vector alignment.\n"); |
| else |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "inner step doesn't divide the vector" |
| " alignment.\n"); |
| } |
| } |
| |
| /* Similarly we can only use base and misalignment information relative to |
| an innermost loop if the misalignment stays the same throughout the |
| execution of the loop. This is the case if the stride of the dataref, |
| multiplied by the vectorization factor, is divisible by the vector |
| alignment. */ |
| else |
| { |
| unsigned vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| step_preserves_misalignment_p |
| = ((DR_STEP_ALIGNMENT (dr) * vf) % vector_alignment) == 0; |
| |
| if (!step_preserves_misalignment_p && dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "step doesn't divide the vector alignment.\n"); |
| } |
| |
| unsigned int base_alignment = drb->base_alignment; |
| unsigned int base_misalignment = drb->base_misalignment; |
| |
| /* Calculate the maximum of the pooled base address alignment and the |
| alignment that we can compute for DR itself. */ |
| innermost_loop_behavior **entry = base_alignments->get (drb->base_address); |
| if (entry && base_alignment < (*entry)->base_alignment) |
| { |
| base_alignment = (*entry)->base_alignment; |
| base_misalignment = (*entry)->base_misalignment; |
| } |
| |
| if (drb->offset_alignment < vector_alignment |
| || !step_preserves_misalignment_p |
| /* We need to know whether the step wrt the vectorized loop is |
| negative when computing the starting misalignment below. */ |
| || TREE_CODE (drb->step) != INTEGER_CST) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Unknown alignment for access: "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| return true; |
| } |
| |
| if (base_alignment < vector_alignment) |
| { |
| tree base = drb->base_address; |
| if (TREE_CODE (base) == ADDR_EXPR) |
| base = TREE_OPERAND (base, 0); |
| if (!vect_can_force_dr_alignment_p (base, |
| vector_alignment * BITS_PER_UNIT)) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "can't force alignment of ref: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| return true; |
| } |
| |
| if (DECL_USER_ALIGN (base)) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "not forcing alignment of user-aligned " |
| "variable: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, base); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| return true; |
| } |
| |
| /* Force the alignment of the decl. |
| NOTE: This is the only change to the code we make during |
| the analysis phase, before deciding to vectorize the loop. */ |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "force alignment of "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| DR_VECT_AUX (dr)->base_decl = base; |
| DR_VECT_AUX (dr)->base_misaligned = true; |
| base_misalignment = 0; |
| } |
| unsigned int misalignment = (base_misalignment |
| + TREE_INT_CST_LOW (drb->init)); |
| |
| /* If this is a backward running DR then the first access in the larger |
| vectype is actually N-1 elements before the address in the DR. |
| Adjust misalign accordingly. */ |
| if (tree_int_cst_sgn (drb->step) < 0) |
| /* PLUS because STEP is negative. */ |
| misalignment += ((TYPE_VECTOR_SUBPARTS (vectype) - 1) |
| * TREE_INT_CST_LOW (drb->step)); |
| |
| SET_DR_MISALIGNMENT (dr, misalignment & (vector_alignment - 1)); |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr)); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| |
| return true; |
| } |
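| |
| /* Worked example with hypothetical numbers: for a forward access whose |
| pooled base alignment is 16 bytes with base_misalignment 0, DRB->init 4 |
| and a target vector alignment of 16 bytes, the code above computes |
| misalignment = (0 + 4) & (16 - 1) = 4, i.e. the first access is 4 bytes |
| past a 16-byte boundary. A backward-running reference to a four-element |
| vector would additionally add 3 * step (a negative value) before the |
| masking. */ |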
| |
| /* Function vect_update_misalignment_for_peel. |
| Sets DR's misalignment |
| - to 0 if it has the same alignment as DR_PEEL, |
| - to the misalignment computed using NPEEL if DR's alignment is known, |
| - to -1 (unknown) otherwise. |
| |
| DR - the data reference whose misalignment is to be adjusted. |
| DR_PEEL - the data reference whose misalignment is being made |
| zero in the vector loop by the peel. |
| NPEEL - the number of iterations in the peel loop if the misalignment |
| of DR_PEEL is known at compile time. */ |
| |
| static void |
| vect_update_misalignment_for_peel (struct data_reference *dr, |
| struct data_reference *dr_peel, int npeel) |
| { |
| unsigned int i; |
| vec<dr_p> same_aligned_drs; |
| struct data_reference *current_dr; |
| int dr_size = vect_get_scalar_dr_size (dr); |
| int dr_peel_size = vect_get_scalar_dr_size (dr_peel); |
| stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr)); |
| stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel)); |
| |
| /* For interleaved data accesses the step in the loop must be multiplied by |
| the size of the interleaving group. */ |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info))); |
| if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info)) |
| dr_peel_size *= GROUP_SIZE (peel_stmt_info); |
| |
| /* It can be assumed that the data refs with the same alignment as dr_peel |
| are aligned in the vector loop. */ |
| same_aligned_drs |
| = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel))); |
| FOR_EACH_VEC_ELT (same_aligned_drs, i, current_dr) |
| { |
| if (current_dr != dr) |
| continue; |
| gcc_assert (!known_alignment_for_access_p (dr) |
| || !known_alignment_for_access_p (dr_peel) |
| || (DR_MISALIGNMENT (dr) / dr_size |
| == DR_MISALIGNMENT (dr_peel) / dr_peel_size)); |
| SET_DR_MISALIGNMENT (dr, 0); |
| return; |
| } |
| |
| if (known_alignment_for_access_p (dr) |
| && known_alignment_for_access_p (dr_peel)) |
| { |
| bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0; |
| int misal = DR_MISALIGNMENT (dr); |
| misal += negative ? -npeel * dr_size : npeel * dr_size; |
| misal &= DR_TARGET_ALIGNMENT (dr) - 1; |
| SET_DR_MISALIGNMENT (dr, misal); |
| return; |
| } |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment " \ |
| "to unknown (-1).\n"); |
| SET_DR_MISALIGNMENT (dr, DR_MISALIGNMENT_UNKNOWN); |
| } |
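| |
| /* Worked example with hypothetical numbers: with DR_TARGET_ALIGNMENT 16, |
| a 4-byte element, a forward step and a current misalignment of 4 bytes, |
| peeling NPEEL = 3 scalar iterations advances the first access by |
| 3 * 4 = 12 bytes, so the new misalignment is (4 + 12) & 15 = 0 and DR |
| becomes aligned; with NPEEL = 1 it would instead become 8. */ |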
| |
| |
| /* Function verify_data_ref_alignment |
| |
| Return TRUE if DR can be handled with respect to alignment. */ |
| |
| static bool |
| verify_data_ref_alignment (data_reference_p dr) |
| { |
| enum dr_alignment_support supportable_dr_alignment |
| = vect_supportable_dr_alignment (dr, false); |
| if (!supportable_dr_alignment) |
| { |
| if (dump_enabled_p ()) |
| { |
| if (DR_IS_READ (dr)) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: unsupported unaligned load."); |
| else |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: unsupported unaligned " |
| "store."); |
| |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| DR_REF (dr)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
| } |
| return false; |
| } |
| |
| if (supportable_dr_alignment != dr_aligned && dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Vectorizing an unaligned access.\n"); |
| |
| return true; |
| } |
| |
| /* Function vect_verify_datarefs_alignment |
| |
| Return TRUE if all data references in the loop can be |
| handled with respect to alignment. */ |
| |
| bool |
| vect_verify_datarefs_alignment (loop_vec_info vinfo) |
| { |
| vec<data_reference_p> datarefs = vinfo->datarefs; |
| struct data_reference *dr; |
| unsigned int i; |
| |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| { |
| gimple *stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| |
| if (!STMT_VINFO_RELEVANT_P (stmt_info)) |
| continue; |
| |
| /* For interleaving, only the alignment of the first access matters. */ |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
| && GROUP_FIRST_ELEMENT (stmt_info) != stmt) |
| continue; |
| |
| /* Strided accesses perform only component accesses, alignment is |
| irrelevant for them. */ |
| if (STMT_VINFO_STRIDED_P (stmt_info) |
| && !STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| continue; |
| |
| if (! verify_data_ref_alignment (dr)) |
| return false; |
| } |
| |
| return true; |
| } |
| |
| /* Given a memory reference EXP, return whether its alignment is less |
| than its size. */ |
| |
| static bool |
| not_size_aligned (tree exp) |
| { |
| if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp)))) |
| return true; |
| |
| return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp))) |
| > get_object_alignment (exp)); |
| } |
| |
| /* Function vector_alignment_reachable_p |
| |
| Return true if vector alignment for DR is reachable by peeling |
| a few loop iterations. Return false otherwise. */ |
| |
| static bool |
| vector_alignment_reachable_p (struct data_reference *dr) |
| { |
| gimple *stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
| |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| { |
| /* For interleaved access we peel only if the number of iterations in |
| the prolog loop ({VF - misalignment}) is a multiple of the |
| number of interleaved accesses. */ |
| int elem_size, mis_in_elements; |
| int nelements = TYPE_VECTOR_SUBPARTS (vectype); |
| |
| /* FORNOW: handle only known alignment. */ |
| if (!known_alignment_for_access_p (dr)) |
| return false; |
| |
| elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements; |
| mis_in_elements = DR_MISALIGNMENT (dr) / elem_size; |
| |
| if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info)) |
| return false; |
| } |
| |
| /* If misalignment is known at compile time then allow peeling |
| only if natural alignment is reachable through peeling. */ |
| if (known_alignment_for_access_p (dr) && !aligned_access_p (dr)) |
| { |
| HOST_WIDE_INT elmsize = |
| int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize); |
| dump_printf (MSG_NOTE, |
| ". misalignment = %d.\n", DR_MISALIGNMENT (dr)); |
| } |
| if (DR_MISALIGNMENT (dr) % elmsize) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "data size does not divide the misalignment.\n"); |
| return false; |
| } |
| } |
| |
| if (!known_alignment_for_access_p (dr)) |
| { |
| tree type = TREE_TYPE (DR_REF (dr)); |
| bool is_packed = not_size_aligned (DR_REF (dr)); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Unknown misalignment, %snaturally aligned\n", |
| is_packed ? "not " : ""); |
| return targetm.vectorize.vector_alignment_reachable (type, is_packed); |
| } |
| |
| return true; |
| } |
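| |
| /* For example (hypothetical numbers): with 4-byte elements, a known |
| misalignment of 6 bytes can never be removed by peeling whole scalar |
| iterations, because each peeled iteration moves the first access by a |
| multiple of 4 bytes; the DR_MISALIGNMENT % elmsize check above rejects |
| that case. */ |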
| |
| |
| /* Calculate the cost of the memory access represented by DR. */ |
| |
| static void |
| vect_get_data_access_cost (struct data_reference *dr, |
| unsigned int *inside_cost, |
| unsigned int *outside_cost, |
| stmt_vector_for_cost *body_cost_vec) |
| { |
| gimple *stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| int ncopies; |
| |
| if (PURE_SLP_STMT (stmt_info)) |
| ncopies = 1; |
| else |
| ncopies = vect_get_num_copies (loop_vinfo, STMT_VINFO_VECTYPE (stmt_info)); |
| |
| if (DR_IS_READ (dr)) |
| vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost, |
| NULL, body_cost_vec, false); |
| else |
| vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "vect_get_data_access_cost: inside_cost = %d, " |
| "outside_cost = %d.\n", *inside_cost, *outside_cost); |
| } |
| |
| |
| typedef struct _vect_peel_info |
| { |
| struct data_reference *dr; |
| int npeel; |
| unsigned int count; |
| } *vect_peel_info; |
| |
| typedef struct _vect_peel_extended_info |
| { |
| struct _vect_peel_info peel_info; |
| unsigned int inside_cost; |
| unsigned int outside_cost; |
| } *vect_peel_extended_info; |
| |
| |
| /* Peeling hashtable helpers. */ |
| |
| struct peel_info_hasher : free_ptr_hash <_vect_peel_info> |
| { |
| static inline hashval_t hash (const _vect_peel_info *); |
| static inline bool equal (const _vect_peel_info *, const _vect_peel_info *); |
| }; |
| |
| inline hashval_t |
| peel_info_hasher::hash (const _vect_peel_info *peel_info) |
| { |
| return (hashval_t) peel_info->npeel; |
| } |
| |
| inline bool |
| peel_info_hasher::equal (const _vect_peel_info *a, const _vect_peel_info *b) |
| { |
| return (a->npeel == b->npeel); |
| } |
| |
| |
| /* Insert DR into peeling hash table with NPEEL as key. */ |
| |
| static void |
| vect_peeling_hash_insert (hash_table<peel_info_hasher> *peeling_htab, |
| loop_vec_info loop_vinfo, struct data_reference *dr, |
| int npeel) |
| { |
| struct _vect_peel_info elem, *slot; |
| _vect_peel_info **new_slot; |
| bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true); |
| |
| elem.npeel = npeel; |
| slot = peeling_htab->find (&elem); |
| if (slot) |
| slot->count++; |
| else |
| { |
| slot = XNEW (struct _vect_peel_info); |
| slot->npeel = npeel; |
| slot->dr = dr; |
| slot->count = 1; |
| new_slot = peeling_htab->find_slot (slot, INSERT); |
| *new_slot = slot; |
| } |
| |
| if (!supportable_dr_alignment |
| && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo))) |
| slot->count += VECT_MAX_COST; |
| } |
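| |
| /* For example (hypothetical situation), if three data references all |
| become aligned after peeling 2 scalar iterations, the entry with key |
| NPEEL == 2 ends up with COUNT == 3, making it a likely winner when |
| vect_peeling_hash_get_most_frequent traverses the table; entries for |
| unsupportable unaligned accesses are further weighted by VECT_MAX_COST |
| under the unlimited cost model. */ |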
| |
| |
| /* Traverse the peeling hash table to find the peeling option that aligns |
| the maximum number of data accesses. */ |
| |
| int |
| vect_peeling_hash_get_most_frequent (_vect_peel_info **slot, |
| _vect_peel_extended_info *max) |
| { |
| vect_peel_info elem = *slot; |
| |
| if (elem->count > max->peel_info.count |
| || (elem->count == max->peel_info.count |
| && max->peel_info.npeel > elem->npeel)) |
| { |
| max->peel_info.npeel = elem->npeel; |
| max->peel_info.count = elem->count; |
| max->peel_info.dr = elem->dr; |
| } |
| |
| return 1; |
| } |
| |
| /* Get the costs of peeling NPEEL iterations checking data access costs |
| for all data refs. If UNKNOWN_MISALIGNMENT is true, we assume DR0's |
| misalignment will be zero after peeling. */ |
| |
| static void |
| vect_get_peeling_costs_all_drs (vec<data_reference_p> datarefs, |
| struct data_reference *dr0, |
| unsigned int *inside_cost, |
| unsigned int *outside_cost, |
| stmt_vector_for_cost *body_cost_vec, |
| unsigned int npeel, |
| bool unknown_misalignment) |
| { |
| unsigned i; |
| data_reference *dr; |
| |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| { |
| gimple *stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| if (!STMT_VINFO_RELEVANT_P (stmt_info)) |
| continue; |
| |
| /* For interleaving, only the alignment of the first access |
| matters. */ |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
| && GROUP_FIRST_ELEMENT (stmt_info) != stmt) |
| continue; |
| |
| /* Strided accesses perform only component accesses, alignment is |
| irrelevant for them. */ |
| if (STMT_VINFO_STRIDED_P (stmt_info) |
| && !STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| continue; |
| |
| int save_misalignment; |
| save_misalignment = DR_MISALIGNMENT (dr); |
| if (npeel == 0) |
| ; |
| else if (unknown_misalignment && dr == dr0) |
| SET_DR_MISALIGNMENT (dr, 0); |
| else |
| vect_update_misalignment_for_peel (dr, dr0, npeel); |
| vect_get_data_access_cost (dr, inside_cost, outside_cost, |
| body_cost_vec); |
| SET_DR_MISALIGNMENT (dr, save_misalignment); |
| } |
| } |
| |
| /* Traverse peeling hash table and calculate cost for each peeling option. |
| Find the one with the lowest cost. */ |
| |
| int |
| vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot, |
| _vect_peel_extended_info *min) |
| { |
| vect_peel_info elem = *slot; |
| int dummy; |
| unsigned int inside_cost = 0, outside_cost = 0; |
| gimple *stmt = DR_STMT (elem->dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| stmt_vector_for_cost prologue_cost_vec, body_cost_vec, |
| epilogue_cost_vec; |
| |
| prologue_cost_vec.create (2); |
| body_cost_vec.create (2); |
| epilogue_cost_vec.create (2); |
| |
| vect_get_peeling_costs_all_drs (LOOP_VINFO_DATAREFS (loop_vinfo), |
| elem->dr, &inside_cost, &outside_cost, |
| &body_cost_vec, elem->npeel, false); |
| |
| body_cost_vec.release (); |
| |
| outside_cost += vect_get_known_peeling_cost |
| (loop_vinfo, elem->npeel, &dummy, |
| &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), |
| &prologue_cost_vec, &epilogue_cost_vec); |
| |
| /* Prologue and epilogue costs are added to the target model later. |
| These costs depend only on the scalar iteration cost, the |
| number of peeling iterations finally chosen, and the number of |
| misaligned statements. So discard the information found here. */ |
| prologue_cost_vec.release (); |
| epilogue_cost_vec.release (); |
| |
| if (inside_cost < min->inside_cost |
| || (inside_cost == min->inside_cost |
| && outside_cost < min->outside_cost)) |
| { |
| min->inside_cost = inside_cost; |
| min->outside_cost = outside_cost; |
| min->peel_info.dr = elem->dr; |
| min->peel_info.npeel = elem->npeel; |
| min->peel_info.count = elem->count; |
| } |
| |
| return 1; |
| } |
| |
| |
| /* Choose best peeling option by traversing peeling hash table and either |
| choosing an option with the lowest cost (if cost model is enabled) or the |
| option that aligns as many accesses as possible. */ |
| |
| static struct _vect_peel_extended_info |
| vect_peeling_hash_choose_best_peeling (hash_table<peel_info_hasher> *peeling_htab, |
| loop_vec_info loop_vinfo) |
| { |
| struct _vect_peel_extended_info res; |
| |
| res.peel_info.dr = NULL; |
| |
| if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo))) |
| { |
| res.inside_cost = INT_MAX; |
| res.outside_cost = INT_MAX; |
| peeling_htab->traverse <_vect_peel_extended_info *, |
| vect_peeling_hash_get_lowest_cost> (&res); |
| } |
| else |
| { |
| res.peel_info.count = 0; |
| peeling_htab->traverse <_vect_peel_extended_info *, |
| vect_peeling_hash_get_most_frequent> (&res); |
| res.inside_cost = 0; |
| res.outside_cost = 0; |
| } |
| |
| return res; |
| } |
| |
| /* Return true if peeling by NPEEL scalar iterations is supported. */ |
| |
| static bool |
| vect_peeling_supportable (loop_vec_info loop_vinfo, struct data_reference *dr0, |
| unsigned npeel) |
| { |
| unsigned i; |
| struct data_reference *dr = NULL; |
| vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
| gimple *stmt; |
| stmt_vec_info stmt_info; |
| enum dr_alignment_support supportable_dr_alignment; |
| |
| /* Ensure that all data refs can be vectorized after the peel. */ |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| { |
| int save_misalignment; |
| |
| if (dr == dr0) |
| continue; |
| |
| stmt = DR_STMT (dr); |
| stmt_info = vinfo_for_stmt (stmt); |
| /* For interleaving, only the alignment of the first access |
| matters. */ |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
| && GROUP_FIRST_ELEMENT (stmt_info) != stmt) |
| continue; |
| |
| /* Strided accesses perform only component accesses, alignment is |
| irrelevant for them. */ |
| if (STMT_VINFO_STRIDED_P (stmt_info) |
| && !STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| continue; |
| |
| save_misalignment = DR_MISALIGNMENT (dr); |
| vect_update_misalignment_for_peel (dr, dr0, npeel); |
| supportable_dr_alignment = vect_supportable_dr_alignment (dr, false); |
| SET_DR_MISALIGNMENT (dr, save_misalignment); |
| |
| if (!supportable_dr_alignment) |
| return false; |
| } |
| |
| return true; |
| } |
| |
| /* Function vect_enhance_data_refs_alignment |
| |
| This pass will use loop versioning and loop peeling in order to enhance |
| the alignment of data references in the loop. |
| |
| FOR NOW: we assume that whatever versioning/peeling takes place, only the |
| original loop is to be vectorized. Any other loops that are created by |
| the transformations performed in this pass are not supposed to be |
| vectorized. This restriction will be relaxed. |
| |
| This pass will require a cost model to guide it whether to apply peeling |
| or versioning or a combination of the two. For example, the scheme that |
| intel uses when given a loop with several memory accesses, is as follows: |
| choose one memory access ('p') which alignment you want to force by doing |
| peeling. Then, either (1) generate a loop in which 'p' is aligned and all |
| other accesses are not necessarily aligned, or (2) use loop versioning to |
| generate one loop in which all accesses are aligned, and another loop in |
| which only 'p' is necessarily aligned. |
| |
| ("Automatic Intra-Register Vectorization for the Intel Architecture", |
| Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International |
| Journal of Parallel Programming, Vol. 30, No. 2, April 2002.) |
| |
| Devising a cost model is the most critical aspect of this work. It will |
| guide us on which access to peel for, whether to use loop versioning, how |
| many versions to create, etc. The cost model will probably consist of |
| generic considerations as well as target specific considerations (on |
| powerpc for example, misaligned stores are more painful than misaligned |
| loads). |
| |
| Here are the general steps involved in alignment enhancements: |
| |
| -- original loop, before alignment analysis: |
| for (i=0; i<N; i++){ |
| x = q[i]; # DR_MISALIGNMENT(q) = unknown |
| p[i] = y; # DR_MISALIGNMENT(p) = unknown |
| } |
| |
| -- After vect_compute_data_refs_alignment: |
| for (i=0; i<N; i++){ |
| x = q[i]; # DR_MISALIGNMENT(q) = 3 |
| p[i] = y; # DR_MISALIGNMENT(p) = unknown |
| } |
| |
| -- Possibility 1: we do loop versioning: |
| if (p is aligned) { |
| for (i=0; i<N; i++){ # loop 1A |
| x = q[i]; # DR_MISALIGNMENT(q) = 3 |
| p[i] = y; # DR_MISALIGNMENT(p) = 0 |
| } |
| } |
| else { |
| for (i=0; i<N; i++){ # loop 1B |
| x = q[i]; # DR_MISALIGNMENT(q) = 3 |
| p[i] = y; # DR_MISALIGNMENT(p) = unaligned |
| } |
| } |
| |
| -- Possibility 2: we do loop peeling: |
| for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized). |
| x = q[i]; |
| p[i] = y; |
| } |
| for (i = 3; i < N; i++){ # loop 2A |
| x = q[i]; # DR_MISALIGNMENT(q) = 0 |
| p[i] = y; # DR_MISALIGNMENT(p) = unknown |
| } |
| |
| -- Possibility 3: combination of loop peeling and versioning: |
| for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized). |
| x = q[i]; |
| p[i] = y; |
| } |
| if (p is aligned) { |
| for (i = 3; i<N; i++){ # loop 3A |
| x = q[i]; # DR_MISALIGNMENT(q) = 0 |
| p[i] = y; # DR_MISALIGNMENT(p) = 0 |
| } |
| } |
| else { |
| for (i = 3; i<N; i++){ # loop 3B |
| x = q[i]; # DR_MISALIGNMENT(q) = 0 |
| p[i] = y; # DR_MISALIGNMENT(p) = unaligned |
| } |
| } |
| |
| These loops are later passed to loop_transform to be vectorized. The |
| vectorizer will use the alignment information to guide the transformation |
| (whether to generate regular loads/stores, or with special handling for |
| misalignment). */ |
| |
| bool |
| vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo) |
| { |
| vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| enum dr_alignment_support supportable_dr_alignment; |
| struct data_reference *dr0 = NULL, *first_store = NULL; |
| struct data_reference *dr; |
| unsigned int i, j; |
| bool do_peeling = false; |
| bool do_versioning = false; |
| bool stat; |
| gimple *stmt; |
| stmt_vec_info stmt_info; |
| unsigned int npeel = 0; |
| bool one_misalignment_known = false; |
| bool one_misalignment_unknown = false; |
| bool one_dr_unsupportable = false; |
| struct data_reference *unsupportable_dr = NULL; |
| unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| unsigned possible_npeel_number = 1; |
| tree vectype; |
| unsigned int nelements, mis, same_align_drs_max = 0; |
| hash_table<peel_info_hasher> peeling_htab (1); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_enhance_data_refs_alignment ===\n"); |
| |
| /* Reset data so we can safely be called multiple times. */ |
| LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0); |
| LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = 0; |
| |
| /* While cost model enhancements are expected in the future, the high level |
| view of the code at this time is as follows: |
| |
| A) If there is a misaligned access then see if peeling to align |
| this access can make all data references satisfy |
| vect_supportable_dr_alignment. If so, update data structures |
| as needed and return true. |
| |
| B) If peeling wasn't possible and there is a data reference with an |
| unknown misalignment that does not satisfy vect_supportable_dr_alignment |
| then see if loop versioning checks can be used to make all data |
| references satisfy vect_supportable_dr_alignment. If so, update |
| data structures as needed and return true. |
| |
| C) If neither peeling nor versioning were successful then return false if |
| any data reference does not satisfy vect_supportable_dr_alignment. |
| |
| D) Return true (all data references satisfy vect_supportable_dr_alignment). |
| |
| Note, Possibility 3 above (which is peeling and versioning together) is not |
| being done at this time. */ |
| |
| /* (1) Peeling to force alignment. */ |
| |
| /* (1.1) Decide whether to perform peeling, and how many iterations to peel: |
| Considerations: |
| + How many accesses will become aligned due to the peeling |
| - How many accesses will become unaligned due to the peeling, |
| and the cost of misaligned accesses. |
| - The cost of peeling (the extra runtime checks, the increase |
| in code size). */ |
| |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| { |
| stmt = DR_STMT (dr); |
| stmt_info = vinfo_for_stmt (stmt); |
| |
| if (!STMT_VINFO_RELEVANT_P (stmt_info)) |
| continue; |
| |
| /* For interleaving, only the alignment of the first access |
| matters. */ |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
| && GROUP_FIRST_ELEMENT (stmt_info) != stmt) |
| continue; |
| |
| /* For invariant accesses there is nothing to enhance. */ |
| if (integer_zerop (DR_STEP (dr))) |
| continue; |
| |
| /* Strided accesses perform only component accesses, alignment is |
| irrelevant for them. */ |
| if (STMT_VINFO_STRIDED_P (stmt_info) |
| && !STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| continue; |
| |
| supportable_dr_alignment = vect_supportable_dr_alignment (dr, true); |
| do_peeling = vector_alignment_reachable_p (dr); |
| if (do_peeling) |
| { |
| if (known_alignment_for_access_p (dr)) |
| { |
| unsigned int npeel_tmp = 0; |
| bool negative = tree_int_cst_compare (DR_STEP (dr), |
| size_zero_node) < 0; |
| |
| vectype = STMT_VINFO_VECTYPE (stmt_info); |
| nelements = TYPE_VECTOR_SUBPARTS (vectype); |
| unsigned int target_align = DR_TARGET_ALIGNMENT (dr); |
| unsigned int dr_size = vect_get_scalar_dr_size (dr); |
| mis = (negative ? DR_MISALIGNMENT (dr) : -DR_MISALIGNMENT (dr)); |
| if (DR_MISALIGNMENT (dr) != 0) |
| npeel_tmp = (mis & (target_align - 1)) / dr_size; |
| |
| /* For multiple types, it is possible that the bigger type access |
| will have more than one peeling option. E.g., a loop with two |
| types: one of size (vector size / 4), and the other one of |
| size (vector size / 8). The vectorization factor will be 8. If both |
| accesses are misaligned by 3, the first one needs one scalar |
| iteration to be aligned, and the second one needs 5. But the |
| first one will be aligned also by peeling 5 scalar |
| iterations, and in that case both accesses will be aligned. |
| Hence, except for the immediate peeling amount, we also want |
| to try adding a full vector size, as long as we don't exceed the |
| vectorization factor. |
| We do this automatically for the cost model, since we calculate |
| the cost for every peeling option. */ |
| if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo))) |
| { |
| if (STMT_SLP_TYPE (stmt_info)) |
| possible_npeel_number |
| = (vf * GROUP_SIZE (stmt_info)) / nelements; |
| else |
| possible_npeel_number = vf / nelements; |
| |
| /* NPEEL_TMP is 0 when there is no misalignment, but in that case |
| we also want to allow peeling NELEMENTS. */ |
| if (DR_MISALIGNMENT (dr) == 0) |
| possible_npeel_number++; |
| } |
| |
| /* Save info about DR in the hash table. Also include peeling |
| amounts according to the explanation above. */ |
| for (j = 0; j < possible_npeel_number; j++) |
| { |
| vect_peeling_hash_insert (&peeling_htab, loop_vinfo, |
| dr, npeel_tmp); |
| npeel_tmp += target_align / dr_size; |
| } |
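| |
| /* Illustrative example (assumed values, continuing the one above): |
| with VF == 8, NELEMENTS == 4, a non-SLP access and the unlimited |
| cost model, POSSIBLE_NPEEL_NUMBER == 2, so the candidates recorded |
| in the hash table are npeel == 3 and npeel == 3 + 16/4 == 7. */ |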
| |
| one_misalignment_known = true; |
| } |
| else |
| { |
| /* If we don't know any misalignment values, we prefer |
| peeling for the data-ref that has the maximum number of data-refs |
| with the same alignment, unless the target prefers to align |
| stores over loads. */ |
| unsigned same_align_drs |
| = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length (); |
| if (!dr0 |
| || same_align_drs_max < same_align_drs) |
| { |
| same_align_drs_max = same_align_drs; |
| dr0 = dr; |
| } |
| /* For data-refs with the same number of related |
| accesses prefer the one where the misalign |
| computation will be invariant in the outermost loop. */ |
| else if (same_align_drs_max == same_align_drs) |
| { |
| struct loop *ivloop0, *ivloop; |
| ivloop0 = outermost_invariant_loop_for_expr |
| (loop, DR_BASE_ADDRESS (dr0)); |
| ivloop = outermost_invariant_loop_for_expr |
| (loop, DR_BASE_ADDRESS (dr)); |
| if ((ivloop && !ivloop0) |
| || (ivloop && ivloop0 |
| && flow_loop_nested_p (ivloop, ivloop0))) |
| dr0 = dr; |
| } |
| |
| one_misalignment_unknown = true; |
| |
| /* Check for data refs with unsupportable alignment that |
| can be peeled. */ |
| if (!supportable_dr_alignment) |
| { |
| one_dr_unsupportable = true; |
| unsupportable_dr = dr; |
| } |
| |
| if (!first_store && DR_IS_WRITE (dr)) |
| first_store = dr; |
| } |
| } |
| else |
| { |
| if (!aligned_access_p (dr)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "vector alignment may not be reachable\n"); |
| break; |
| } |
| } |
| } |
| |
| /* Check if we can possibly peel the loop. */ |
| if (!vect_can_advance_ivs_p (loop_vinfo) |
| || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)) |
| || loop->inner) |
| do_peeling = false; |
| |
| struct _vect_peel_extended_info peel_for_known_alignment; |
| struct _vect_peel_extended_info peel_for_unknown_alignment; |
| struct _vect_peel_extended_info best_peel; |
| |
| peel_for_unknown_alignment.inside_cost = INT_MAX; |
| peel_for_unknown_alignment.outside_cost = INT_MAX; |
| peel_for_unknown_alignment.peel_info.count = 0; |
| |
| if (do_peeling |
| && one_misalignment_unknown) |
| { |
| /* Check if the target requires us to prefer stores over loads, i.e., if |
| misaligned stores are more expensive than misaligned loads (taking |
| drs with the same alignment into account). */ |
| unsigned int load_inside_cost = 0; |
| unsigned int load_outside_cost = 0; |
| unsigned int store_inside_cost = 0; |
| unsigned int store_outside_cost = 0; |
| |
| stmt_vector_for_cost dummy; |
| dummy.create (2); |
| vect_get_peeling_costs_all_drs (datarefs, dr0, |
| &load_inside_cost, |
| &load_outside_cost, |
| &dummy, vf / 2, true); |
| dummy.release (); |
| |
| if (first_store) |
| { |
| dummy.create (2); |
| vect_get_peeling_costs_all_drs (datarefs, first_store, |
| &store_inside_cost, |
| &store_outside_cost, |
| &dummy, vf / 2, true); |
| dummy.release (); |
| } |
| else |
| { |
| store_inside_cost = INT_MAX; |
| store_outside_cost = INT_MAX; |
| } |
| |
| if (load_inside_cost > store_inside_cost |
| || (load_inside_cost == store_inside_cost |
| && load_outside_cost > store_outside_cost)) |
| { |
| dr0 = first_store; |
| peel_for_unknown_alignment.inside_cost = store_inside_cost; |
| peel_for_unknown_alignment.outside_cost = store_outside_cost; |
| } |
| else |
| { |
| peel_for_unknown_alignment.inside_cost = load_inside_cost; |
| peel_for_unknown_alignment.outside_cost = load_outside_cost; |
| } |
| |
| stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec; |
| prologue_cost_vec.create (2); |
| epilogue_cost_vec.create (2); |
| |
| int dummy2; |
| peel_for_unknown_alignment.outside_cost += vect_get_known_peeling_cost |
| (loop_vinfo, vf / 2, &dummy2, |
| &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), |
| &prologue_cost_vec, &epilogue_cost_vec); |
| |
| prologue_cost_vec.release (); |
| epilogue_cost_vec.release (); |
| |
| peel_for_unknown_alignment.peel_info.count = 1 |
| + STMT_VINFO_SAME_ALIGN_REFS |
| (vinfo_for_stmt (DR_STMT (dr0))).length (); |
| } |
| |
| peel_for_unknown_alignment.peel_info.npeel = 0; |
| peel_for_unknown_alignment.peel_info.dr = dr0; |
| |
| best_peel = peel_for_unknown_alignment; |
| |
| peel_for_known_alignment.inside_cost = INT_MAX; |
| peel_for_known_alignment.outside_cost = INT_MAX; |
| peel_for_known_alignment.peel_info.count = 0; |
| peel_for_known_alignment.peel_info.dr = NULL; |
| |
| if (do_peeling && one_misalignment_known) |
| { |
| /* Peeling is possible, but there is no data access that is not supported |
| unless aligned. So we try to choose the best possible peeling from |
| the hash table. */ |
| peel_for_known_alignment = vect_peeling_hash_choose_best_peeling |
| (&peeling_htab, loop_vinfo); |
| } |
| |
| /* Compare costs of peeling for known and unknown alignment. */ |
| if (peel_for_known_alignment.peel_info.dr != NULL |
| && peel_for_unknown_alignment.inside_cost |
| >= peel_for_known_alignment.inside_cost) |
| { |
| best_peel = peel_for_known_alignment; |
| |
| /* If the best peeling for known alignment has NPEEL == 0, perform no |
| peeling at all except if there is an unsupportable dr that we can |
| align. */ |
| if (best_peel.peel_info.npeel == 0 && !one_dr_unsupportable) |
| do_peeling = false; |
| } |
| |
| /* If there is an unsupportable data ref, prefer this over all choices so far |
| since we'd have to discard a chosen peeling except when it accidentally |
| aligned the unsupportable data ref. */ |
| if (one_dr_unsupportable) |
| dr0 = unsupportable_dr; |
| else if (do_peeling) |
| { |
| /* Calculate the penalty for no peeling, i.e. leaving everything as-is. |
| TODO: Use nopeel_outside_cost or get rid of it? */ |
| unsigned nopeel_inside_cost = 0; |
| unsigned nopeel_outside_cost = 0; |
| |
| stmt_vector_for_cost dummy; |
| dummy.create (2); |
| vect_get_peeling_costs_all_drs (datarefs, NULL, &nopeel_inside_cost, |
| &nopeel_outside_cost, &dummy, 0, false); |
| dummy.release (); |
| |
| /* Add epilogue costs. As we do not peel for alignment here, no prologue |
| costs will be recorded. */ |
| stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec; |
| prologue_cost_vec.create (2); |
| epilogue_cost_vec.create (2); |
| |
| int dummy2; |
| nopeel_outside_cost += vect_get_known_peeling_cost |
| (loop_vinfo, 0, &dummy2, |
| &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), |
| &prologue_cost_vec, &epilogue_cost_vec); |
| |
| prologue_cost_vec.release (); |
| epilogue_cost_vec.release (); |
| |
| npeel = best_peel.peel_info.npeel; |
| dr0 = best_peel.peel_info.dr; |
| |
| /* If doing no peeling is at most as expensive as the best peeling we |
| have found so far, don't peel at all. */ |
| if (nopeel_inside_cost <= best_peel.inside_cost) |
| do_peeling = false; |
| } |
| |
| if (do_peeling) |
| { |
| stmt = DR_STMT (dr0); |
| stmt_info = vinfo_for_stmt (stmt); |
| vectype = STMT_VINFO_VECTYPE (stmt_info); |
| |
| if (known_alignment_for_access_p (dr0)) |
| { |
| bool negative = tree_int_cst_compare (DR_STEP (dr0), |
| size_zero_node) < 0; |
| if (!npeel) |
| { |
| /* Since it's known at compile time, compute the number of |
| iterations in the peeled loop (the peeling factor) for use in |
| updating DR_MISALIGNMENT values. The peeling factor is the |
| vectorization factor minus the misalignment as an element |
| count. */ |
| mis = negative ? DR_MISALIGNMENT (dr0) : -DR_MISALIGNMENT (dr0); |
| unsigned int target_align = DR_TARGET_ALIGNMENT (dr0); |
| npeel = ((mis & (target_align - 1)) |
| / vect_get_scalar_dr_size (dr0)); |
| } |
| |
| /* For interleaved data access every iteration accesses all the |
| members of the group, therefore we divide the number of iterations |
| by the group size. */ |
| stmt_info = vinfo_for_stmt (DR_STMT (dr0)); |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| npeel /= GROUP_SIZE (stmt_info); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Try peeling by %d\n", npeel); |
| } |
| |
| /* Ensure that all datarefs can be vectorized after the peel. */ |
| if (!vect_peeling_supportable (loop_vinfo, dr0, npeel)) |
| do_peeling = false; |
| |
| /* Check if all datarefs are supportable and log. */ |
| if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0) |
| { |
| stat = vect_verify_datarefs_alignment (loop_vinfo); |
| if (!stat) |
| do_peeling = false; |
| else |
| return stat; |
| } |
| |
| /* Cost model #1 - honor --param vect-max-peeling-for-alignment. */ |
| if (do_peeling) |
| { |
| unsigned max_allowed_peel |
| = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT); |
| if (max_allowed_peel != (unsigned)-1) |
| { |
| unsigned max_peel = npeel; |
| if (max_peel == 0) |
| { |
| unsigned int target_align = DR_TARGET_ALIGNMENT (dr0); |
| max_peel = target_align / vect_get_scalar_dr_size (dr0) - 1; |
| } |
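| /* For illustration (assumed values): with a 16-byte target |
| alignment and 4-byte elements the worst case is peeling |
| 16 / 4 - 1 == 3 scalar iterations. */ |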
| if (max_peel > max_allowed_peel) |
| { |
| do_peeling = false; |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Disable peeling, max peels reached: %d\n", max_peel); |
| } |
| } |
| } |
| |
| /* Cost model #2 - if peeling may result in a remaining loop not |
| iterating enough to be vectorized then do not peel. */ |
| if (do_peeling |
| && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) |
| { |
| unsigned max_peel |
| = npeel == 0 ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1 : npeel; |
| if (LOOP_VINFO_INT_NITERS (loop_vinfo) |
| < LOOP_VINFO_VECT_FACTOR (loop_vinfo) + max_peel) |
| do_peeling = false; |
| } |
| |
| if (do_peeling) |
| { |
| /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i. |
| If the misalignment of DR_i is identical to that of dr0 then set |
| DR_MISALIGNMENT (DR_i) to zero. If the misalignments of DR_i and |
| dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i) |
| by the peeling factor times the element size of DR_i (MOD the |
| vectorization factor times the size). Otherwise, the |
| misalignment of DR_i must be set to unknown. */ |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| if (dr != dr0) |
| { |
| /* Strided accesses perform only component accesses, alignment |
| is irrelevant for them. */ |
| stmt_info = vinfo_for_stmt (DR_STMT (dr)); |
| if (STMT_VINFO_STRIDED_P (stmt_info) |
| && !STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| continue; |
| |
| vect_update_misalignment_for_peel (dr, dr0, npeel); |
| } |
| |
| LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0; |
| if (npeel) |
| LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel; |
| else |
| LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) |
| = DR_MISALIGNMENT (dr0); |
| SET_DR_MISALIGNMENT (dr0, 0); |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Alignment of access forced using peeling.\n"); |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Peeling for alignment will be applied.\n"); |
| } |
| |
| /* The inside-loop cost will be accounted for in vectorizable_load |
| and vectorizable_store correctly with adjusted alignments. |
| Drop the body_cst_vec on the floor here. */ |
| stat = vect_verify_datarefs_alignment (loop_vinfo); |
| gcc_assert (stat); |
| return stat; |
| } |
| } |
| |
| /* (2) Versioning to force alignment. */ |
| |
| /* Try versioning if: |
| 1) the loop is optimized for speed, |
| 2) there is at least one unsupported misaligned data ref with an unknown |
| misalignment, and |
| 3) all misaligned data refs with a known misalignment are supported, and |
| 4) the number of runtime alignment checks is within reason. */ |
| |
| do_versioning = |
| optimize_loop_nest_for_speed_p (loop) |
| && (!loop->inner); /* FORNOW */ |
| |
| if (do_versioning) |
| { |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| { |
| stmt = DR_STMT (dr); |
| stmt_info = vinfo_for_stmt (stmt); |
| |
| /* For interleaving, only the alignment of the first access |
| matters. */ |
| if (aligned_access_p (dr) |
| || (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
| && GROUP_FIRST_ELEMENT (stmt_info) != stmt)) |
| continue; |
| |
| if (STMT_VINFO_STRIDED_P (stmt_info)) |
| { |
| /* Strided loads perform only component accesses, alignment is |
| irrelevant for them. */ |
| if (!STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| continue; |
| do_versioning = false; |
| break; |
| } |
| |
| supportable_dr_alignment = vect_supportable_dr_alignment (dr, false); |
| |
| if (!supportable_dr_alignment) |
| { |
| gimple *stmt; |
| int mask; |
| tree vectype; |
| |
| if (known_alignment_for_access_p (dr) |
| || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length () |
| >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS)) |
| { |
| do_versioning = false; |
| break; |
| } |
| |
| stmt = DR_STMT (dr); |
| vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)); |
| gcc_assert (vectype); |
| |
| /* The rightmost bits of an aligned address must be zeros. |
| Construct the mask needed for this test. For example, |
| GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the |
| mask must be 15 = 0xf. */ |
| mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1; |
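| |
| /* For illustration: with 16-byte V4SI vectors MASK == 15, and the |
| runtime check built from LOOP_VINFO_PTR_MASK requires each |
| potentially misaligned address ADDR to satisfy (ADDR & 15) == 0. */ |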
| |
| /* FORNOW: use the same mask to test all potentially unaligned |
| references in the loop. The vectorizer currently supports |
| a single vector size, see the reference to |
| GET_MODE_NUNITS (TYPE_MODE (vectype)) where the |
| vectorization factor is computed. */ |
| gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo) |
| || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask); |
| LOOP_VINFO_PTR_MASK (loop_vinfo) = mask; |
| LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push ( |
| DR_STMT (dr)); |
| } |
| } |
| |
| /* Versioning requires at least one misaligned data reference. */ |
| if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)) |
| do_versioning = false; |
| else if (!do_versioning) |
| LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0); |
| } |
| |
| if (do_versioning) |
| { |
| vec<gimple *> may_misalign_stmts |
| = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo); |
| gimple *stmt; |
| |
| /* It can now be assumed that the data references in the statements |
| in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version |
| of the loop being vectorized. */ |
| FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt) |
| { |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| dr = STMT_VINFO_DATA_REF (stmt_info); |
| SET_DR_MISALIGNMENT (dr, 0); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Alignment of access forced using versioning.\n"); |
| } |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Versioning for alignment will be applied.\n"); |
| |
| /* Peeling and versioning can't be done together at this time. */ |
| gcc_assert (! (do_peeling && do_versioning)); |
| |
| stat = vect_verify_datarefs_alignment (loop_vinfo); |
| gcc_assert (stat); |
| return stat; |
| } |
| |
| /* This point is reached if neither peeling nor versioning is being done. */ |
| gcc_assert (! (do_peeling || do_versioning)); |
| |
| stat = vect_verify_datarefs_alignment (loop_vinfo); |
| return stat; |
| } |
| |
| |
| /* Function vect_find_same_alignment_drs. |
| |
| Update group and alignment relations according to the chosen |
| vectorization factor. */ |
| |
| static void |
| vect_find_same_alignment_drs (struct data_dependence_relation *ddr) |
| { |
| struct data_reference *dra = DDR_A (ddr); |
| struct data_reference *drb = DDR_B (ddr); |
| stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra)); |
| stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb)); |
| |
| if (DDR_ARE_DEPENDENT (ddr) == chrec_known) |
| return; |
| |
| if (dra == drb) |
| return; |
| |
| if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0) |
| || !operand_equal_p (DR_OFFSET (dra), DR_OFFSET (drb), 0) |
| || !operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0)) |
| return; |
| |
| /* Two references with distance zero have the same alignment. */ |
| offset_int diff = (wi::to_offset (DR_INIT (dra)) |
| - wi::to_offset (DR_INIT (drb))); |
| if (diff != 0) |
| { |
| /* Get the wider of the two alignments. */ |
| unsigned int align_a = (vect_calculate_target_alignment (dra) |
| / BITS_PER_UNIT); |
| unsigned int align_b = (vect_calculate_target_alignment (drb) |
| / BITS_PER_UNIT); |
| unsigned int max_align = MAX (align_a, align_b); |
| |
| /* Require the gap to be a multiple of the larger vector alignment. */ |
| if (!wi::multiple_of_p (diff, max_align, SIGNED)) |
| return; |
| } |
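| |
| /* For illustration (assumed values): two refs off the same base with |
| DR_INIT 4 and 36 have DIFF == -32; with target alignments of 16 |
| bytes each, MAX_ALIGN == 16 divides 32, so the refs always share |
| the same misalignment and are recorded below as same-aligned. */ |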
| |
| STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb); |
| STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra); |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "accesses have the same alignment: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| } |
| |
| |
| /* Function vect_analyze_data_refs_alignment |
| |
| Analyze the alignment of the data-references in the loop. |
| Return FALSE if a data reference is found that cannot be vectorized. */ |
| |
| bool |
| vect_analyze_data_refs_alignment (loop_vec_info vinfo) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_analyze_data_refs_alignment ===\n"); |
| |
| /* Mark groups of data references with same alignment using |
| data dependence information. */ |
| vec<ddr_p> ddrs = vinfo->ddrs; |
| struct data_dependence_relation *ddr; |
| unsigned int i; |
| |
| FOR_EACH_VEC_ELT (ddrs, i, ddr) |
| vect_find_same_alignment_drs (ddr); |
| |
| vec<data_reference_p> datarefs = vinfo->datarefs; |
| struct data_reference *dr; |
| |
| vect_record_base_alignments (vinfo); |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| { |
| stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr)); |
| if (STMT_VINFO_VECTORIZABLE (stmt_info) |
| && !vect_compute_data_ref_alignment (dr)) |
| { |
| /* Strided accesses perform only component accesses, misalignment |
| information is irrelevant for them. */ |
| if (STMT_VINFO_STRIDED_P (stmt_info) |
| && !STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| continue; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: can't calculate alignment " |
| "for data ref.\n"); |
| |
| return false; |
| } |
| } |
| |
| return true; |
| } |
| |
| |
| /* Analyze alignment of DRs of stmts in NODE. */ |
| |
| static bool |
| vect_slp_analyze_and_verify_node_alignment (slp_tree node) |
| { |
| /* We vectorize from the first scalar stmt in the node unless |
| the node is permuted in which case we start from the first |
| element in the group. */ |
| gimple *first_stmt = SLP_TREE_SCALAR_STMTS (node)[0]; |
| data_reference_p first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); |
| if (SLP_TREE_LOAD_PERMUTATION (node).exists ()) |
| first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)); |
| |
| data_reference_p dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)); |
| if (! vect_compute_data_ref_alignment (dr) |
| /* For creating the data-ref pointer we need alignment of the |
| first element anyway. */ |
| || (dr != first_dr |
| && ! vect_compute_data_ref_alignment (first_dr)) |
| || ! verify_data_ref_alignment (dr)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: bad data alignment in basic " |
| "block.\n"); |
| return false; |
| } |
| |
| return true; |
| } |
| |
| /* Function vect_slp_analyze_and_verify_instance_alignment |
| |
| Analyze the alignment of the data-references in the SLP instance. |
| Return FALSE if a data reference is found that cannot be vectorized. */ |
| |
| bool |
| vect_slp_analyze_and_verify_instance_alignment (slp_instance instance) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_slp_analyze_and_verify_instance_alignment ===\n"); |
| |
| slp_tree node; |
| unsigned i; |
| FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, node) |
| if (! vect_slp_analyze_and_verify_node_alignment (node)) |
| return false; |
| |
| node = SLP_INSTANCE_TREE (instance); |
| if (STMT_VINFO_DATA_REF (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0])) |
| && ! vect_slp_analyze_and_verify_node_alignment |
| (SLP_INSTANCE_TREE (instance))) |
| return false; |
| |
| return true; |
| } |
| |
| |
| /* Analyze groups of accesses: check that DR belongs to a group of |
| accesses of legal size, step, etc. Detect gaps, single element |
| interleaving, and other special cases. Set grouped access info. |
| Collect groups of strided stores for further use in SLP analysis. |
| Worker for vect_analyze_group_access. */ |
| |
| static bool |
| vect_analyze_group_access_1 (struct data_reference *dr) |
| { |
| tree step = DR_STEP (dr); |
| tree scalar_type = TREE_TYPE (DR_REF (dr)); |
| HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); |
| gimple *stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
| HOST_WIDE_INT dr_step = -1; |
| HOST_WIDE_INT groupsize, last_accessed_element = 1; |
| bool slp_impossible = false; |
| |
| /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the |
| size of the interleaving group (including gaps). */ |
| if (tree_fits_shwi_p (step)) |
| { |
| dr_step = tree_to_shwi (step); |
| /* Check that STEP is a multiple of type size. Otherwise there is |
| a non-element-sized gap at the end of the group which we |
| cannot represent in GROUP_GAP or GROUP_SIZE. |
| ??? As we can handle non-constant step fine here we should |
| simply remove uses of GROUP_GAP between the last and first |
| element and instead rely on DR_STEP. GROUP_SIZE then would |
| simply not include that gap. */ |
| if ((dr_step % type_size) != 0) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Step "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, step); |
| dump_printf (MSG_NOTE, |
| " is not a multiple of the element size for "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr)); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| return false; |
| } |
| groupsize = absu_hwi (dr_step) / type_size; |
| } |
| else |
| groupsize = 0; |
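| |
| /* For illustration (assumed values): a constant DR_STEP of 32 bytes |
| over 4-byte elements gives GROUPSIZE == 8, i.e. each iteration |
| covers a window of 8 elements, gaps included. */ |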
| |
| /* A non-consecutive access is possible only as part of an interleaving group. */ |
| if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) |
| { |
| /* Check if this DR is part of an interleaving group, and is a single |
| element of the group that is accessed in the loop. */ |
| |
| /* Gaps are supported only for loads. STEP must be a multiple of the type |
| size. The size of the group must be a power of 2. */ |
| if (DR_IS_READ (dr) |
| && (dr_step % type_size) == 0 |
| && groupsize > 0 |
| && pow2p_hwi (groupsize)) |
| { |
| GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt; |
| GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize; |
| GROUP_GAP (stmt_info) = groupsize - 1; |
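| |
| /* For illustration (assumed values): a load of a[4*i] with 4-byte |
| elements has DR_STEP == 16, so GROUPSIZE == 4 and GROUP_GAP == 3, |
| the three elements of each group the loop never touches. */ |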
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Detected single element interleaving "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr)); |
| dump_printf (MSG_NOTE, " step "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, step); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| return true; |
| } |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not consecutive access "); |
| dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); |
| } |
| |
| if (bb_vinfo) |
| { |
| /* Mark the statement as unvectorizable. */ |
| STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false; |
| return true; |
| } |
| |
| dump_printf_loc (MSG_NOTE, vect_location, "using strided accesses\n"); |
| STMT_VINFO_STRIDED_P (stmt_info) = true; |
| return true; |
| } |
| |
| if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt) |
| { |
| /* First stmt in the interleaving chain. Check the chain. */ |
| gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)); |
| struct data_reference *data_ref = dr; |
| unsigned int count = 1; |
| tree prev_init = DR_INIT (data_ref); |
| gimple *prev = stmt; |
| HOST_WIDE_INT diff, gaps = 0; |
| |
| while (next) |
| { |
| /* Skip same data-refs. In case two or more stmts share a |
| data-ref (supported only for loads), we vectorize only the first |
| stmt, and the rest get their vectorized loads from the first |
| one. */ |
| if (!tree_int_cst_compare (DR_INIT (data_ref), |
| DR_INIT (STMT_VINFO_DATA_REF ( |
| vinfo_for_stmt (next))))) |
| { |
| if (DR_IS_WRITE (data_ref)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Two store stmts share the same dr.\n"); |
| return false; |
| } |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Two or more load stmts share the same dr.\n"); |
| |
| /* For loads use the same data-ref load. */ |
| GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev; |
| |
| prev = next; |
| next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next)); |
| continue; |
| } |
| |
| prev = next; |
| data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next)); |
| |
| /* All group members have the same STEP by construction. */ |
| gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0)); |
| |
| /* Check that the distance between two accesses is equal to the type |
| size. Otherwise, we have gaps. */ |
| diff = (TREE_INT_CST_LOW (DR_INIT (data_ref)) |
| - TREE_INT_CST_LOW (prev_init)) / type_size; |
| if (diff != 1) |
| { |
| /* FORNOW: SLP of accesses with gaps is not supported. */ |
| slp_impossible = true; |
| if (DR_IS_WRITE (data_ref)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "interleaved store with gaps\n"); |
| return false; |
| } |
| |
| gaps += diff - 1; |
| } |
| |
| last_accessed_element += diff; |
| |
| /* Store the gap from the previous member of the group. If there is no |
| gap in the access, GROUP_GAP is always 1. */ |
| GROUP_GAP (vinfo_for_stmt (next)) = diff; |
| |
| prev_init = DR_INIT (data_ref); |
| next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next)); |
| /* Count the number of data-refs in the chain. */ |
| count++; |
| } |
| |
| if (groupsize == 0) |
| groupsize = count + gaps; |
| |
| /* This could be UINT_MAX but as we are generating code in a very |
| inefficient way we have to cap earlier. See PR78699 for example. */ |
| if (groupsize > 4096) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "group is too large\n"); |
| return false; |
| } |
| |
| /* Check that the size of the interleaving is equal to count for stores, |
| i.e., that there are no gaps. */ |
| if (groupsize != count |
| && !DR_IS_READ (dr)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "interleaved store with gaps\n"); |
| return false; |
| } |
| |
| /* If there is a gap after the last load in the group it is the |
| difference between the groupsize and the last accessed |
| element. |
| When there is no gap, this difference should be 0. */ |
| GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element; |
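| |
| /* For illustration (assumed values): a chain of loads a[3*i] and |
| a[3*i+1] of 4-byte elements has DR_STEP == 12, so GROUPSIZE == 3; |
| COUNT == 2, the member-to-member DIFF is 1 (so no inner gaps and |
| GROUP_GAP of the second member is 1), LAST_ACCESSED_ELEMENT == 2, |
| and the trailing gap recorded here is 3 - 2 == 1 because a[3*i+2] |
| is never read. */ |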
| |
| GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize; |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Detected interleaving "); |
| if (DR_IS_READ (dr)) |
| dump_printf (MSG_NOTE, "load "); |
| else |
| dump_printf (MSG_NOTE, "store "); |
| dump_printf (MSG_NOTE, "of size %u starting with ", |
| (unsigned)groupsize); |
| dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); |
| if (GROUP_GAP (vinfo_for_stmt (stmt)) != 0) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "There is a gap of %u elements after the group\n", |
| GROUP_GAP (vinfo_for_stmt (stmt))); |
| } |
| |
| /* SLP: create an SLP data structure for every interleaving group of |
| stores for further analysis in vect_analyze_slp. */ |
| if (DR_IS_WRITE (dr) && !slp_impossible) |
| { |
| if (loop_vinfo) |
| LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt); |
| if (bb_vinfo) |
| BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt); |
| } |
| } |
| |
| return true; |
| } |
| |
| /* Analyze groups of accesses: check that DR belongs to a group of |
| accesses of legal size, step, etc. Detect gaps, single element |
| interleaving, and other special cases. Set grouped access info. |
| Collect groups of strided stores for further use in SLP analysis. */ |
| |
| static bool |
| vect_analyze_group_access (struct data_reference *dr) |
| { |
| if (!vect_analyze_group_access_1 (dr)) |
| { |
| /* Dissolve the group if present. */ |
| gimple *next; |
| gimple *stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dr))); |
| while (stmt) |
| { |
| stmt_vec_info vinfo = vinfo_for_stmt (stmt); |
| next = GROUP_NEXT_ELEMENT (vinfo); |
| GROUP_FIRST_ELEMENT (vinfo) = NULL; |
| GROUP_NEXT_ELEMENT (vinfo) = NULL; |
| stmt = next; |
| } |
| return false; |
| } |
| return true; |
| } |
| |
| /* Analyze the access pattern of the data-reference DR. |
| In case of non-consecutive accesses call vect_analyze_group_access() to |
| analyze groups of accesses. */ |
| |
| static bool |
| vect_analyze_data_ref_access (struct data_reference *dr) |
| { |
| tree step = DR_STEP (dr); |
| tree scalar_type = TREE_TYPE (DR_REF (dr)); |
| gimple *stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| struct loop *loop = NULL; |
| |
| if (loop_vinfo) |
| loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| if (loop_vinfo && !step) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad data-ref access in loop\n"); |
| return false; |
| } |
| |
| /* Allow loads with zero step in inner-loop vectorization. */ |
| if (loop_vinfo && integer_zerop (step)) |
| { |
| GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL; |
| if (!nested_in_vect_loop_p (loop, stmt)) |
| return DR_IS_READ (dr); |
| /* Allow references with zero step for outer loops marked |
| with pragma omp simd only - it guarantees absence of |
| loop-carried dependencies between inner loop iterations. */ |
| if (!loop->force_vectorize) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "zero step in inner loop of nest\n"); |
| return false; |
| } |
| } |
| |
| if (loop && nested_in_vect_loop_p (loop, stmt)) |
| { |
| /* Interleaved accesses are not yet supported within outer-loop |
| vectorization for references in the inner-loop. */ |
| GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL; |
| |
| /* For the rest of the analysis we use the outer-loop step. */ |
| step = STMT_VINFO_DR_STEP (stmt_info); |
| if (integer_zerop (step)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "zero step in outer loop.\n"); |
| return DR_IS_READ (dr); |
| } |
| } |
| |
| /* Consecutive? */ |
| if (TREE_CODE (step) == INTEGER_CST) |
| { |
| HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); |
| if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type)) |
| || (dr_step < 0 |
| && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step))) |
| { |
| /* Mark that it is not interleaving. */ |
| GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL; |
| return true; |
| } |
| } |
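| |
| /* For illustration (assumed values): with 4-byte elements a constant |
| DR_STEP of 4 is a consecutive forward access and a DR_STEP of -4 a |
| consecutive backward access; both are handled above without |
| forming an interleaving group. */ |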
| |
| if (loop && nested_in_vect_loop_p (loop, stmt)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "grouped access in outer loop.\n"); |
| return false; |
| } |
| |
| |
| /* Assume this is a DR handled by the non-constant strided load case. */ |
| if (TREE_CODE (step) != INTEGER_CST) |
| return (STMT_VINFO_STRIDED_P (stmt_info) |
| && (!STMT_VINFO_GROUPED_ACCESS (stmt_info) |
| || vect_analyze_group_access (dr))); |
| |
| /* Not a consecutive access - check if it's part of an interleaving group. */ |
| return vect_analyze_group_access (dr); |
| } |
| |
| /* Compare two data-references DRA and DRB, ordering them so that |
| data-refs suitable for grouping end up adjacent. */ |
| |
| static int |
| dr_group_sort_cmp (const void *dra_, const void *drb_) |
| { |
| data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_); |
| data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_); |
| int cmp; |
| |
| /* Stabilize sort. */ |
| if (dra == drb) |
| return 0; |
| |
| /* DRs in different loops never belong to the same group. */ |
| loop_p loopa = gimple_bb (DR_STMT (dra))->loop_father; |
| loop_p loopb = gimple_bb (DR_STMT (drb))->loop_father; |
| if (loopa != loopb) |
| return loopa->num < loopb->num ? -1 : 1; |
| |
| /* Ordering of DRs according to base. */ |
| cmp = data_ref_compare_tree (DR_BASE_ADDRESS (dra), |
| DR_BASE_ADDRESS (drb)); |
| if (cmp != 0) |
| return cmp; |
| |
| /* And according to DR_OFFSET. */ |
| cmp = data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb)); |
| if (cmp != 0) |
| return cmp; |
| |
| /* Put reads before writes. */ |
| if (DR_IS_READ (dra) != DR_IS_READ (drb)) |
| return DR_IS_READ (dra) ? -1 : 1; |
| |
| /* Then sort by access size. */ |
| cmp = data_ref_compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))), |
| TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)))); |
| if (cmp != 0) |
| return cmp; |
| |
| /* And by step. */ |
| cmp = data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb)); |
| if (cmp != 0) |
| return cmp; |
| |
| /* Then sort by DR_INIT. In case of identical DRs, sort by stmt UID. */ |
| cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)); |
| if (cmp == 0) |
| return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1; |
| return cmp; |
| } |
| |
| /* Function vect_analyze_data_ref_accesses. |
| |
| Analyze the access pattern of all the data references in the loop. |
| |
| FORNOW: the only access pattern that is considered vectorizable is a |
| simple step 1 (consecutive) access. |
| |
| FORNOW: handle only arrays and pointer accesses. */ |
| |
| bool |
| vect_analyze_data_ref_accesses (vec_info *vinfo) |
| { |
| unsigned int i; |
| vec<data_reference_p> datarefs = vinfo->datarefs; |
| struct data_reference *dr; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_analyze_data_ref_accesses ===\n"); |
| |
| if (datarefs.is_empty ()) |
| return true; |
| |
| /* Sort the array of datarefs to make building the interleaving chains |
| linear. Don't modify the original vector's order, it is needed for |
| determining what dependencies are reversed. */ |
| vec<data_reference_p> datarefs_copy = datarefs.copy (); |
| datarefs_copy.qsort (dr_group_sort_cmp); |
| |
| /* Build the interleaving chains. */ |
| for (i = 0; i < datarefs_copy.length () - 1;) |
| { |
| data_reference_p dra = datarefs_copy[i]; |
| stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra)); |
| stmt_vec_info lastinfo = NULL; |
| if (! STMT_VINFO_VECTORIZABLE (stmtinfo_a)) |
| { |
| ++i; |
| continue; |
| } |
| for (i = i + 1; i < datarefs_copy.length (); ++i) |
| { |
| data_reference_p drb = datarefs_copy[i]; |
| stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb)); |
| if (! STMT_VINFO_VECTORIZABLE (stmtinfo_b)) |
| break; |
| |
| /* ??? Imperfect sorting (non-compatible types, non-modulo |
| accesses, same accesses) can lead to a group being artificially |
| split here as we don't just skip over those. If it really |
| matters we can push those to a worklist and re-iterate |
| over them. Then we can just skip ahead to the next DR here. */ |
| |
| /* DRs in a different loop should not be put into the same |
| interleaving group. */ |
| if (gimple_bb (DR_STMT (dra))->loop_father |
| != gimple_bb (DR_STMT (drb))->loop_father) |
| break; |
| |
| /* Check that the data-refs have the same first location (except for |
| the init) and that they are both either stores or loads (not a load |
| and a store, and not masked loads or stores). */ |
| if (DR_IS_READ (dra) != DR_IS_READ (drb) |
| || data_ref_compare_tree (DR_BASE_ADDRESS (dra), |
| DR_BASE_ADDRESS (drb)) != 0 |
| || data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb)) != 0 |
| || !gimple_assign_single_p (DR_STMT (dra)) |
| || !gimple_assign_single_p (DR_STMT (drb))) |
| break; |
| |
| /* Check that the data-refs have the same constant size. */ |
| tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))); |
| tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))); |
| if (!tree_fits_uhwi_p (sza) |
| || !tree_fits_uhwi_p (szb) |
| || !tree_int_cst_equal (sza, szb)) |
| break; |
| |
| /* Check that the data-refs have the same step. */ |
| if (data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb)) != 0) |
| break; |
| |
| /* Do not place the same access in the interleaving chain twice. */ |
| if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0) |
| break; |
| |
| /* Check the types are compatible. |
| ??? We don't distinguish this during sorting. */ |
| if (!types_compatible_p (TREE_TYPE (DR_REF (dra)), |
| TREE_TYPE (DR_REF (drb)))) |
| break; |
| |
| /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */ |
| HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra)); |
| HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb)); |
| gcc_assert (init_a <= init_b); |
| |
| /* If init_b == init_a + the size of the type * k, we have an |
| interleaving, and DRA is accessed before DRB. */ |
| HOST_WIDE_INT type_size_a = tree_to_uhwi (sza); |
| if (type_size_a == 0 |
| || (init_b - init_a) % type_size_a != 0) |
| break; |
| |
| /* If we have a store, the accesses must be adjacent. This splits |
| groups into chunks we support (we don't support vectorization |
| of stores with gaps). */ |
| if (!DR_IS_READ (dra) |
| && (init_b - (HOST_WIDE_INT) TREE_INT_CST_LOW |
| (DR_INIT (datarefs_copy[i-1])) |
| != type_size_a)) |
| break; |
| |
| /* If the step (when constant and nonzero) is not greater than the |
| difference between the data-refs' inits, split the group here; |
| this keeps group sizes suitable. */ |
| if (tree_fits_shwi_p (DR_STEP (dra))) |
| { |
| HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra)); |
| if (step != 0 && step <= (init_b - init_a)) |
| break; |
| } |
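| |
| /* For illustration (assumed values): with 4-byte elements and a |
| constant DR_STEP of 8, refs with inits 0 and 16 are not grouped |
| here since the second location is already covered by a later |
| iteration of the first ref. */ |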
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Detected interleaving "); |
| if (DR_IS_READ (dra)) |
| dump_printf (MSG_NOTE, "load "); |
| else |
| dump_printf (MSG_NOTE, "store "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); |
| dump_printf (MSG_NOTE, "\n"); |
| } |
| |
| /* Link the found element into the group list. */ |
| if (!GROUP_FIRST_ELEMENT (stmtinfo_a)) |
| { |
| GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra); |
| lastinfo = stmtinfo_a; |
| } |
| GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra); |
| GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb); |
| lastinfo = stmtinfo_b; |
| } |
| } |
| |
| FOR_EACH_VEC_ELT (datarefs_copy, i, dr) |
| if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) |
| && !vect_analyze_data_ref_access (dr)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: complicated access pattern.\n"); |
| |
| if (is_a <bb_vec_info> (vinfo)) |
| { |
| /* Mark the statement as not vectorizable. */ |
| STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false; |
| continue; |
| } |
| else |
| { |
| datarefs_copy.release (); |
| return false; |
| } |
| } |
| |
| datarefs_copy.release (); |
| return true; |
| } |
| |
| /* Function vect_vfa_segment_size. |
| |
| Create an expression that computes the size of the segment |
| that will be accessed for a data reference. The function takes into |
| account that realignment loads may access one more vector. |
| |
| Input: |
| DR: The data reference. |
| LENGTH_FACTOR: segment length to consider. |
| |
| Return an expression whose value is the size of segment which will be |
| accessed by DR. */ |
| |
| static tree |
| vect_vfa_segment_size (struct data_reference *dr, tree length_factor) |
| { |
| tree segment_length; |
| |
| if (integer_zerop (DR_STEP (dr))) |
| segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr))); |
| else |
| segment_length = size_binop (MULT_EXPR, |
| fold_convert (sizetype, DR_STEP (dr)), |
| fold_convert (sizetype, length_factor)); |
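| |
| /* For illustration (assumed values): with DR_STEP == 4 and |
| LENGTH_FACTOR == 8 (the VF) the segment is 32 bytes; if the access |
| below uses dr_explicit_realign_optimized with 16-byte vectors the |
| segment grows to 48 bytes. */ |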
| |
| if (vect_supportable_dr_alignment (dr, false) |
| == dr_explicit_realign_optimized) |
| { |
| tree vector_size = TYPE_SIZE_UNIT |
| (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)))); |
| |
| segment_length = size_binop (PLUS_EXPR, segment_length, vector_size); |
| } |
| return segment_length; |
| } |
| |
| /* Function vect_no_alias_p. |
| |
| Given data references A and B with equal base and offset, for which |
| the alias relation can be decided at compile time, return TRUE if they |
| do not alias each other; return FALSE otherwise. SEGMENT_LENGTH_A |
| and SEGMENT_LENGTH_B are the memory lengths accessed by A and B |
| respectively. |
| |
| static bool |
| vect_no_alias_p (struct data_reference *a, struct data_reference *b, |
| tree segment_length_a, tree segment_length_b) |
| { |
| gcc_assert (TREE_CODE (DR_INIT (a)) == INTEGER_CST |
| && TREE_CODE (DR_INIT (b)) == INTEGER_CST); |
| if (tree_int_cst_equal (DR_INIT (a), DR_INIT (b))) |
| return false; |
| |
| tree seg_a_min = DR_INIT (a); |
| tree seg_a_max = fold_build2 (PLUS_EXPR, TREE_TYPE (seg_a_min), |
| seg_a_min, segment_length_a); |
| /* For negative step, we need to adjust address range by TYPE_SIZE_UNIT |
| bytes, e.g., int a[3] -> a[1] range is [a+4, a+16) instead of |
| [a, a+12) */ |
| if (tree_int_cst_compare (DR_STEP (a), size_zero_node) < 0) |
| { |
| tree unit_size = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (a))); |
| seg_a_min = fold_build2 (PLUS_EXPR, TREE_TYPE (seg_a_max), |
| seg_a_max, unit_size); |
| seg_a_max = fold_build2 (PLUS_EXPR, TREE_TYPE (DR_INIT (a)), |
| DR_INIT (a), unit_size); |
| } |
| tree seg_b_min = DR_INIT (b); |
| tree seg_b_max = fold_build2 (PLUS_EXPR, TREE_TYPE (seg_b_min), |
| seg_b_min, segment_length_b); |
| if (tree_int_cst_compare (DR_STEP (b), size_zero_node) < 0) |
| { |
| tree unit_size = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (b))); |
| seg_b_min = fold_build2 (PLUS_EXPR, TREE_TYPE (seg_b_max), |
| seg_b_max, unit_size); |
| seg_b_max = fold_build2 (PLUS_EXPR, TREE_TYPE (DR_INIT (b)), |
| DR_INIT (b), unit_size); |
| } |
| |
| if (tree_int_cst_le (seg_a_max, seg_b_min) |
| || tree_int_cst_le (seg_b_max, seg_a_min)) |
| return true; |
| |
| return false; |
| } |
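| |
| /* A minimal illustrative sketch (hypothetical helper, not used by the |
| vectorizer): the disjointness test above restated on plain integer |
| bounds. Two half-open segments [A_MIN, A_MAX) and [B_MIN, B_MAX) do |
| not overlap iff one of them ends no later than the other begins. */ |
| |
| static inline bool |
| vect_illustrative_segments_disjoint_p (HOST_WIDE_INT a_min, |
| HOST_WIDE_INT a_max, |
| HOST_WIDE_INT b_min, |
| HOST_WIDE_INT b_max) |
| { |
| return a_max <= b_min || b_max <= a_min; |
| } |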
| |
| /* Return true if the minimum nonzero dependence distance for loop LOOP_DEPTH |
| in DDR is >= VF. */ |
| |
| static bool |
| dependence_distance_ge_vf (data_dependence_relation *ddr, |
| unsigned int loop_depth, unsigned HOST_WIDE_INT vf) |
| { |
| if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE |
| || DDR_NUM_DIST_VECTS (ddr) == 0) |
| return false; |
| |
| /* If the dependence is exact, we should have limited the VF instead. */ |
| gcc_checking_assert (DDR_COULD_BE_INDEPENDENT_P (ddr)); |
| |
| unsigned int i; |
| lambda_vector dist_v; |
| FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v) |
| { |
| HOST_WIDE_INT dist = dist_v[loop_depth]; |
| if (dist != 0 |
| && !(dist > 0 && DDR_REVERSED_P (ddr)) |
| && (unsigned HOST_WIDE_INT) abs_hwi (dist) < vf) |
| return false; |
| } |
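| |
| /* For illustration (assumed values): with VF == 4, a dependence |
| distance of 8 passes the test above, while a distance of 2 would |
| make us return false and keep the runtime alias check. */ |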
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "dependence distance between "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr))); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr))); |
| dump_printf (MSG_NOTE, " is >= VF\n"); |
| } |
| |
| return true; |
| } |
| |
| /* Function vect_prune_runtime_alias_test_list. |
| |
| Prune a list of ddrs to be tested at run-time by versioning for alias. |
| Merge several alias checks into one if possible. |
| Return FALSE if the resulting list of ddrs is longer than allowed by |
| PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. |
| |
| bool |
| vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo) |
| { |
| typedef pair_hash <tree_operand_hash, tree_operand_hash> tree_pair_hash; |
| hash_set <tree_pair_hash> compared_objects; |
| |
| vec<ddr_p> may_alias_ddrs = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo); |
| vec<dr_with_seg_len_pair_t> &comp_alias_ddrs |
| = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo); |
| vec<vec_object_pair> &check_unequal_addrs |
| = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo); |
| int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo); |
| |
| ddr_p ddr; |
| unsigned int i; |
| tree length_factor; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_prune_runtime_alias_test_list ===\n"); |
| |
| if (may_alias_ddrs.is_empty ()) |
| return true; |
| |
| comp_alias_ddrs.create (may_alias_ddrs.length ()); |
| |
| unsigned int loop_depth |
| = index_in_loop_nest (LOOP_VINFO_LOOP (loop_vinfo)->num, |
| LOOP_VINFO_LOOP_NEST (loop_vinfo)); |
| |
| /* First, we collect all data ref pairs for aliasing checks. */ |
| FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr) |
| { |
| int comp_res; |
| struct data_reference *dr_a, *dr_b; |
| gimple *dr_group_first_a, *dr_group_first_b; |
| tree segment_length_a, segment_length_b; |
| gimple *stmt_a, *stmt_b; |
| |
| /* Ignore the alias if the VF we chose ended up being no greater |
| than the dependence distance. */ |
| if (dependence_distance_ge_vf (ddr, loop_depth, vect_factor)) |
| continue; |
| |
| if (DDR_OBJECT_A (ddr)) |
| { |
| vec_object_pair new_pair (DDR_OBJECT_A (ddr), DDR_OBJECT_B (ddr)); |
| if (!compared_objects.add (new_pair)) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "checking that "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, new_pair.first); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, new_pair.second); |
| dump_printf (MSG_NOTE, " have different addresses\n"); |
| } |
| LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).safe_push (new_pair); |
| } |
| continue; |
| } |
| |
| dr_a = DDR_A (ddr); |
| stmt_a = DR_STMT (DDR_A (ddr)); |
| dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a)); |
| if (dr_group_first_a) |
| { |
| stmt_a = dr_group_first_a; |
| dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a)); |
| } |
| |
| dr_b = DDR_B (ddr); |
| stmt_b = DR_STMT (DDR_B (ddr)); |
| dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b)); |
| if (dr_group_first_b) |
| { |
| stmt_b = dr_group_first_b; |
| dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b)); |
| } |
| |
| if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0)) |
| length_factor = scalar_loop_iters; |
| else |
| length_factor = size_int (vect_factor); |
| segment_length_a = vect_vfa_segment_size (dr_a, length_factor); |
| segment_length_b = vect_vfa_segment_size (dr_b, length_factor); |
| |
| comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (dr_a), |
| DR_BASE_ADDRESS (dr_b)); |
| if (comp_res == 0) |
| comp_res = data_ref_compare_tree (DR_OFFSET (dr_a), |
| DR_OFFSET (dr_b)); |
| |
| /* Alias is known at compilation time. */ |
| if (comp_res == 0 |
| && TREE_CODE (DR_STEP (dr_a)) == INTEGER_CST |
| && TREE_CODE (DR_STEP (dr_b)) == INTEGER_CST |
| && TREE_CODE (segment_length_a) == INTEGER_CST |
| && TREE_CODE (segment_length_b) == INTEGER_CST) |
| { |
| if (vect_no_alias_p (dr_a, dr_b, segment_length_a, segment_length_b)) |
| continue; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "not vectorized: compilation time alias.\n"); |
| |
| return false; |
| } |
| |
| dr_with_seg_len_pair_t dr_with_seg_len_pair |
| (dr_with_seg_len (dr_a, segment_length_a), |
| dr_with_seg_len (dr_b, segment_length_b)); |
| |
| /* Canonicalize pairs by sorting the two DR members. */ |
| if (comp_res > 0) |
| std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second); |
| |
| comp_alias_ddrs.safe_push (dr_with_seg_len_pair); |
| } |
| |
| prune_runtime_alias_test_list (&comp_alias_ddrs, |
| (unsigned HOST_WIDE_INT) vect_factor); |
| |
| unsigned int count = (comp_alias_ddrs.length () |
| + check_unequal_addrs.length ()); |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "improved number of alias checks from %d to %d\n", |
| may_alias_ddrs.length (), count); |
| if ((int) count > PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "number of versioning for alias " |
| "run-time tests exceeds %d " |
| "(--param vect-max-version-for-alias-checks)\n", |
| PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS)); |
| return false; |
| } |
| |
| return true; |
| } |
| |
| /* Return true if a non-affine read or write in STMT is suitable for a |
| gather load or scatter store. Describe the operation in *INFO if so. */ |
| |
| bool |
| vect_check_gather_scatter (gimple *stmt, loop_vec_info loop_vinfo, |
| gather_scatter_info *info) |
| { |
| HOST_WIDE_INT scale = 1, pbitpos, pbitsize; |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); |
| tree offtype = NULL_TREE; |
| tree decl, |