/* Data References Analysis and Manipulation Utilities for Vectorization.
Copyright (C) 2003-2015 Free Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com>
and Ira Rosen <irar@il.ibm.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tm_p.h"
#include "target.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "diagnostic-core.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
/* Need to include rtl.h, expr.h, etc. for optabs. */
#include "hashtab.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "builtins.h"
/* Return true if load- or store-lanes optab OPTAB is implemented for
COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */
static bool
vect_lanes_optab_supported_p (const char *name, convert_optab optab,
tree vectype, unsigned HOST_WIDE_INT count)
{
machine_mode mode, array_mode;
bool limit_p;
mode = TYPE_MODE (vectype);
limit_p = !targetm.array_mode_supported_p (mode, count);
array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
MODE_INT, limit_p);
if (array_mode == BLKmode)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
GET_MODE_NAME (mode), count);
return false;
}
if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cannot use %s<%s><%s>\n", name,
GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
return false;
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
GET_MODE_NAME (mode));
return true;
}
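/* For illustration (hypothetical values, not tied to any particular
   target): with VECTYPE of mode V4SImode (128 bits) and COUNT == 3, the
   call

     vect_lanes_optab_supported_p ("vec_load_lanes", vec_load_lanes_optab,
                                   vectype, 3)

   looks for a 384-bit integer array mode; it returns true only if the
   target provides such a mode and vec_load_lanes_optab has a handler
   converting between it and V4SImode.  */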
/* Return the smallest scalar part of STMT.
This is used to determine the vectype of the stmt. We generally set the
vectype according to the type of the result (lhs). For stmts whose
result-type is different than the type of the arguments (e.g., demotion,
promotion), vectype will be reset appropriately (later). Note that we have
to visit the smallest datatype in this function, because that determines the
VF. If the smallest datatype in the loop is present only as the rhs of a
promotion operation, we would miss it.
Such a case, where a variable of this datatype does not appear in the lhs
anywhere in the loop, can only occur if it's an invariant: e.g.:
'int_x = (int) short_inv', which we'd expect to have been optimized away by
invariant motion. However, we cannot rely on invariant motion to always
take invariants out of the loop, and so in the case of promotion we also
have to check the rhs.
LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
types. */
tree
vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
HOST_WIDE_INT *rhs_size_unit)
{
tree scalar_type = gimple_expr_type (stmt);
HOST_WIDE_INT lhs, rhs;
lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
if (is_gimple_assign (stmt)
&& (gimple_assign_cast_p (stmt)
|| gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
|| gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
|| gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
{
tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
if (rhs < lhs)
scalar_type = rhs_type;
}
*lhs_size_unit = lhs;
*rhs_size_unit = rhs;
return scalar_type;
}
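/* For example (illustrative statement, not from this file), for the
   promotion

     int_x = (int) short_y;

   the lhs size is 4 bytes and the rhs size is 2 bytes, so the short type
   is returned and the vectorization factor is derived from the smaller,
   2-byte element (sizes assume a typical ILP32/LP64 target).  */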
/* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
tested at run-time. Return TRUE if DDR was successfully inserted.
Return false if versioning is not supported. */
static bool
vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
return false;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"mark for run-time aliasing test between ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
dump_printf (MSG_NOTE, "\n");
}
if (optimize_loop_nest_for_size_p (loop))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not supported when optimizing"
" for size.\n");
return false;
}
/* FORNOW: We don't support versioning with outer-loop vectorization. */
if (loop->inner)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not yet supported for outer-loops.\n");
return false;
}
/* FORNOW: We don't support creating runtime alias tests for non-constant
step. */
if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
|| TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not yet supported for non-constant "
"step\n");
return false;
}
LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
return true;
}
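/* A typical loop that ends up on this list (illustrative source, not
   from this file) is

     void f (int *p, int *q, int n)
     {
       int i;
       for (i = 0; i < n; i++)
         p[i] = q[i] + 1;
     }

   where P and Q may alias.  The loop is then versioned and the
   vectorized copy is guarded by a run-time check that the ranges
   accessed through P and Q do not overlap.  */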
/* Function vect_analyze_data_ref_dependence.
Return TRUE if there (might) exist a dependence between a memory-reference
DRA and a memory-reference DRB. When the dependence can instead be
checked at run time by versioning for alias, return FALSE. Adjust *MAX_VF
according to
the data dependence. */
static bool
vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
loop_vec_info loop_vinfo, int *max_vf)
{
unsigned int i;
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
lambda_vector dist_v;
unsigned int loop_depth;
/* In loop analysis all data references should be vectorizable. */
if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
|| !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
gcc_unreachable ();
/* Independent data accesses. */
if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
return false;
if (dra == drb
|| (DR_IS_READ (dra) && DR_IS_READ (drb)))
return false;
/* Even if we have an anti-dependence then, as the vectorized loop covers at
least two scalar iterations, there is always also a true dependence.
As the vectorizer does not re-order loads and stores we can ignore
the anti-dependence if TBAA can disambiguate both DRs similar to the
case with known negative distance anti-dependences (positive
distance anti-dependences would violate TBAA constraints). */
if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
|| (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
&& !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
get_alias_set (DR_REF (drb))))
return false;
/* Unknown data dependence. */
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
{
/* If user asserted safelen consecutive iterations can be
executed concurrently, assume independence. */
if (loop->safelen >= 2)
{
if (loop->safelen < *max_vf)
*max_vf = loop->safelen;
LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
return false;
}
if (STMT_VINFO_GATHER_P (stmtinfo_a)
|| STMT_VINFO_GATHER_P (stmtinfo_b))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias not supported for: "
"can't determine dependence between ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias required: "
"can't determine dependence between ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Add to list of ddrs that need to be tested at run-time. */
return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
}
/* Known data dependence. */
if (DDR_NUM_DIST_VECTS (ddr) == 0)
{
/* If user asserted safelen consecutive iterations can be
executed concurrently, assume independence. */
if (loop->safelen >= 2)
{
if (loop->safelen < *max_vf)
*max_vf = loop->safelen;
LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
return false;
}
if (STMT_VINFO_GATHER_P (stmtinfo_a)
|| STMT_VINFO_GATHER_P (stmtinfo_b))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias not supported for: "
"bad dist vector for ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias required: "
"bad dist vector for ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Add to list of ddrs that need to be tested at run-time. */
return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
}
loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
{
int dist = dist_v[loop_depth];
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance = %d.\n", dist);
if (dist == 0)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance == 0 between ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_NOTE, "\n");
}
/* When we perform grouped accesses and perform implicit CSE
by detecting equal accesses and doing disambiguation with
runtime alias tests like for
.. = a[i];
.. = a[i+1];
a[i] = ..;
a[i+1] = ..;
*p = ..;
.. = a[i];
.. = a[i+1];
where we will end up loading { a[i], a[i+1] } once, make
sure that inserting group loads before the first load and
stores after the last store will do the right thing.
Similar for groups like
a[i] = ...;
... = a[i];
a[i+1] = ...;
where loads from the group interleave with the store. */
if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
|| STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
{
gimple earlier_stmt;
earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
if (DR_IS_WRITE
(STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"READ_WRITE dependence in interleaving."
"\n");
return true;
}
}
continue;
}
if (dist > 0 && DDR_REVERSED_P (ddr))
{
/* If DDR_REVERSED_P the order of the data-refs in DDR was
reversed (to make distance vector positive), and the actual
distance is negative. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"dependence distance negative.\n");
/* Record a negative dependence distance to later limit the
amount of stmt copying / unrolling we can perform.
Only need to handle read-after-write dependence. */
if (DR_IS_READ (drb)
&& (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
|| STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
continue;
}
if (abs (dist) >= 2
&& abs (dist) < *max_vf)
{
/* The dependence distance requires reduction of the maximal
vectorization factor. */
*max_vf = abs (dist);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"adjusting maximal vectorization factor to %i\n",
*max_vf);
}
if (abs (dist) >= *max_vf)
{
/* In this case the dependence distance does not create a dependence,
as far as vectorization is concerned. */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance >= VF.\n");
continue;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized, possible dependence "
"between data-refs ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
}
return false;
}
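/* As an example of adjusting *MAX_VF (illustrative loop, not from this
   file), in

     for (i = 0; i < n; i++)
       a[i + 3] = a[i] + 1;

   the dependence distance is 3, so *MAX_VF is lowered to 3: with a
   larger vectorization factor the vector load of { a[i], ..., a[i+VF-1] }
   would read elements that are only stored later in the same vector
   iteration.  */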
/* Function vect_analyze_data_ref_dependences.
Examine all the data references in the loop, and make sure there do not
exist any data dependences between them. Set *MAX_VF according to
the maximum vectorization factor the data dependences allow. */
bool
vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
{
unsigned int i;
struct data_dependence_relation *ddr;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_ref_dependences ===\n");
LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
&LOOP_VINFO_DDRS (loop_vinfo),
LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
return false;
FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
return false;
return true;
}
/* Function vect_slp_analyze_data_ref_dependence.
Return TRUE if there (might) exist a dependence between a memory-reference
DRA and a memory-reference DRB that prevents vectorization of the
basic block. */
static bool
vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
{
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
/* We need to check dependences of statements marked as unvectorizable
as well; they can still prohibit vectorization. */
/* Independent data accesses. */
if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
return false;
if (dra == drb)
return false;
/* Read-read is OK. */
if (DR_IS_READ (dra) && DR_IS_READ (drb))
return false;
/* If dra and drb are part of the same interleaving chain consider
them independent. */
if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
&& (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
== GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
return false;
/* Unknown data dependence. */
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't determine dependence between ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
}
else if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"determined dependence between ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_NOTE, "\n");
}
/* We do not vectorize basic blocks with write-write dependencies. */
if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
return true;
/* If we have a read-write dependence, check that the load is before the store.
When we vectorize basic blocks, a vector load can only be placed before the
corresponding scalar load, and a vector store can only be placed after its
corresponding scalar store. So the order of the accesses is preserved in
case the load is before the store. */
gimple earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
{
/* That only holds for load-store pairs taking part in vectorization. */
if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dra)))
&& STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (drb))))
return false;
}
return true;
}
/* Function vect_slp_analyze_data_ref_dependences.
Examine all the data references in the basic-block, and make sure there
do not exist any data dependences between them. */
bool
vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
{
struct data_dependence_relation *ddr;
unsigned int i;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_slp_analyze_data_ref_dependences ===\n");
if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
&BB_VINFO_DDRS (bb_vinfo),
vNULL, true))
return false;
FOR_EACH_VEC_ELT (BB_VINFO_DDRS (bb_vinfo), i, ddr)
if (vect_slp_analyze_data_ref_dependence (ddr))
return false;
return true;
}
/* Function vect_compute_data_ref_alignment
Compute the misalignment of the data reference DR.
Output:
1. If during the misalignment computation it is found that the data reference
cannot be vectorized then false is returned.
2. DR_MISALIGNMENT (DR) is defined.
FOR NOW: No analysis is actually performed. Misalignment is calculated
only for trivial cases. TODO. */
static bool
vect_compute_data_ref_alignment (struct data_reference *dr)
{
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
tree ref = DR_REF (dr);
tree vectype;
tree base, base_addr;
tree misalign;
tree aligned_to;
unsigned HOST_WIDE_INT alignment;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_compute_data_ref_alignment:\n");
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
/* Initialize misalignment to unknown. */
SET_DR_MISALIGNMENT (dr, -1);
/* Strided loads perform only component accesses, misalignment information
is irrelevant for them. */
if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
return true;
misalign = DR_INIT (dr);
aligned_to = DR_ALIGNED_TO (dr);
base_addr = DR_BASE_ADDRESS (dr);
vectype = STMT_VINFO_VECTYPE (stmt_info);
/* In case the dataref is in an inner-loop of the loop that is being
vectorized (LOOP), we use the base and misalignment information
relative to the outer-loop (LOOP). This is ok only if the misalignment
stays the same throughout the execution of the inner-loop, which is why
we have to check that the stride of the dataref in the inner-loop is a
multiple of the vector size. */
if (loop && nested_in_vect_loop_p (loop, stmt))
{
tree step = DR_STEP (dr);
HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"inner step divides the vector-size.\n");
misalign = STMT_VINFO_DR_INIT (stmt_info);
aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
}
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"inner step doesn't divide the vector-size.\n");
misalign = NULL_TREE;
}
}
/* Similarly, if we're doing basic-block vectorization, we can only use
base and misalignment information relative to an innermost loop if the
misalignment stays the same throughout the execution of the loop.
As above, this is the case if the stride of the dataref is a multiple
of the vector size. */
if (!loop)
{
tree step = DR_STEP (dr);
HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"SLP: step doesn't divide the vector-size.\n");
misalign = NULL_TREE;
}
}
/* To look at alignment of the base we have to preserve an inner MEM_REF
as that carries alignment information of the actual access. */
base = ref;
while (handled_component_p (base))
base = TREE_OPERAND (base, 0);
if (TREE_CODE (base) == MEM_REF)
base = build2 (MEM_REF, TREE_TYPE (base), base_addr,
build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)), 0));
unsigned int base_alignment = get_object_alignment (base);
if (base_alignment >= TYPE_ALIGN (TREE_TYPE (vectype)))
DR_VECT_AUX (dr)->base_element_aligned = true;
alignment = TYPE_ALIGN_UNIT (vectype);
if ((compare_tree_int (aligned_to, alignment) < 0)
|| !misalign)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown alignment for access: ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
}
if (base_alignment < TYPE_ALIGN (vectype))
{
/* Strip an inner MEM_REF to a bare decl if possible. */
if (TREE_CODE (base) == MEM_REF
&& integer_zerop (TREE_OPERAND (base, 1))
&& TREE_CODE (TREE_OPERAND (base, 0)) == ADDR_EXPR)
base = TREE_OPERAND (TREE_OPERAND (base, 0), 0);
if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype)))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"can't force alignment of ref: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
dump_printf (MSG_NOTE, "\n");
}
return true;
}
/* Force the alignment of the decl.
NOTE: This is the only change to the code we make during
the analysis phase, before deciding to vectorize the loop. */
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
dump_printf (MSG_NOTE, "\n");
}
DR_VECT_AUX (dr)->base_decl = base;
DR_VECT_AUX (dr)->base_misaligned = true;
DR_VECT_AUX (dr)->base_element_aligned = true;
}
/* If this is a backward running DR then the first access in the larger
vectype actually is N-1 elements before the address in the DR.
Adjust misalign accordingly. */
if (tree_int_cst_sgn (DR_STEP (dr)) < 0)
{
tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
/* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
otherwise we wouldn't be here. */
offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
/* PLUS because DR_STEP was negative. */
misalign = size_binop (PLUS_EXPR, misalign, offset);
}
SET_DR_MISALIGNMENT (dr,
wi::mod_floor (misalign, alignment, SIGNED).to_uhwi ());
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
}
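/* As a concrete example (hypothetical numbers): for a V4SI vector type
   the required alignment is 16 bytes.  If the base address is known to
   be 16-byte aligned and DR_INIT is 20, the recorded misalignment is
   20 mod 16 == 4 bytes; for a backward running access the offset of the
   N-1 trailing elements is first folded into MISALIGN as shown above.  */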
/* Function vect_compute_data_refs_alignment
Compute the misalignment of data references in the loop.
Return FALSE if a data reference is found that cannot be vectorized. */
static bool
vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
vec<data_reference_p> datarefs;
struct data_reference *dr;
unsigned int i;
if (loop_vinfo)
datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
FOR_EACH_VEC_ELT (datarefs, i, dr)
if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
&& !vect_compute_data_ref_alignment (dr))
{
if (bb_vinfo)
{
/* Mark unsupported statement as unvectorizable. */
STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
continue;
}
else
return false;
}
return true;
}
/* Function vect_update_misalignment_for_peel
DR - the data reference whose misalignment is to be adjusted.
DR_PEEL - the data reference whose misalignment is being made
zero in the vector loop by the peel.
NPEEL - the number of iterations in the peel loop if the misalignment
of DR_PEEL is known at compile time. */
static void
vect_update_misalignment_for_peel (struct data_reference *dr,
struct data_reference *dr_peel, int npeel)
{
unsigned int i;
vec<dr_p> same_align_drs;
struct data_reference *current_dr;
int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
/* For interleaved data accesses the step in the loop must be multiplied by
the size of the interleaving group. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
dr_peel_size *= GROUP_SIZE (peel_stmt_info);
/* It can be assumed that the data refs with the same alignment as dr_peel
are aligned in the vector loop. */
same_align_drs
= STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
{
if (current_dr != dr)
continue;
gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
DR_MISALIGNMENT (dr_peel) / dr_peel_size);
SET_DR_MISALIGNMENT (dr, 0);
return;
}
if (known_alignment_for_access_p (dr)
&& known_alignment_for_access_p (dr_peel))
{
bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
int misal = DR_MISALIGNMENT (dr);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
misal += negative ? -npeel * dr_size : npeel * dr_size;
misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
SET_DR_MISALIGNMENT (dr, misal);
return;
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
SET_DR_MISALIGNMENT (dr, -1);
}
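/* For example (hypothetical numbers): a 4-byte access with
   DR_MISALIGNMENT == 8, in a loop peeled by NPEEL == 3 iterations for a
   forward running DR_PEEL and a 16-byte vector alignment, becomes
   (8 + 3 * 4) & 15 == 4 bytes misaligned after the peel.  */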
/* Function vect_verify_datarefs_alignment
Return TRUE if all data references in the loop can be
handled with respect to alignment. */
bool
vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
vec<data_reference_p> datarefs;
struct data_reference *dr;
enum dr_alignment_support supportable_dr_alignment;
unsigned int i;
if (loop_vinfo)
datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
if (!STMT_VINFO_RELEVANT_P (stmt_info))
continue;
/* For interleaving, only the alignment of the first access matters.
Skip statements marked as not vectorizable. */
if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
|| !STMT_VINFO_VECTORIZABLE (stmt_info))
continue;
/* Strided loads perform only component accesses, alignment is
irrelevant for them. */
if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
continue;
supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
if (!supportable_dr_alignment)
{
if (dump_enabled_p ())
{
if (DR_IS_READ (dr))
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported unaligned load.");
else
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported unaligned "
"store.");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (dr));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Vectorizing an unaligned access.\n");
}
return true;
}
/* Given a memory reference EXP, return whether its alignment is less
than its size. */
static bool
not_size_aligned (tree exp)
{
if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
return true;
return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
> get_object_alignment (exp));
}
/* Function vector_alignment_reachable_p
Return true if vector alignment for DR is reachable by peeling
a few loop iterations. Return false otherwise. */
static bool
vector_alignment_reachable_p (struct data_reference *dr)
{
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
/* For interleaved accesses we peel only if the number of iterations in
the prolog loop (VF - misalignment) is a multiple of the
number of interleaved accesses. */
int elem_size, mis_in_elements;
int nelements = TYPE_VECTOR_SUBPARTS (vectype);
/* FORNOW: handle only known alignment. */
if (!known_alignment_for_access_p (dr))
return false;
elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
return false;
}
/* If the misalignment is known at compile time then allow peeling
only if natural alignment is reachable through peeling. */
if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
{
HOST_WIDE_INT elmsize =
int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
dump_printf (MSG_NOTE,
". misalignment = %d.\n", DR_MISALIGNMENT (dr));
}
if (DR_MISALIGNMENT (dr) % elmsize)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"data size does not divide the misalignment.\n");
return false;
}
}
if (!known_alignment_for_access_p (dr))
{
tree type = TREE_TYPE (DR_REF (dr));
bool is_packed = not_size_aligned (DR_REF (dr));
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown misalignment, is_packed = %d\n",is_packed);
if ((TYPE_USER_ALIGN (type) && !is_packed)
|| targetm.vectorize.vector_alignment_reachable (type, is_packed))
return true;
else
return false;
}
return true;
}
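/* E.g. (hypothetical numbers): a V4SI access misaligned by 8 bytes has
   a misalignment of 2 elements, so peeling 4 - 2 == 2 iterations reaches
   vector alignment; a misalignment of 6 bytes is not a multiple of the
   4-byte element size, so peeling whole scalar iterations can never
   align it and the function returns false.  */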
/* Calculate the cost of the memory access represented by DR. */
static void
vect_get_data_access_cost (struct data_reference *dr,
unsigned int *inside_cost,
unsigned int *outside_cost,
stmt_vector_for_cost *body_cost_vec)
{
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
int ncopies = vf / nunits;
if (DR_IS_READ (dr))
vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
NULL, body_cost_vec, false);
else
vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_get_data_access_cost: inside_cost = %d, "
"outside_cost = %d.\n", *inside_cost, *outside_cost);
}
/* Insert DR into peeling hash table with NPEEL as key. */
static void
vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
int npeel)
{
struct _vect_peel_info elem, *slot;
_vect_peel_info **new_slot;
bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
elem.npeel = npeel;
slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find (&elem);
if (slot)
slot->count++;
else
{
slot = XNEW (struct _vect_peel_info);
slot->npeel = npeel;
slot->dr = dr;
slot->count = 1;
new_slot
= LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find_slot (slot, INSERT);
*new_slot = slot;
}
if (!supportable_dr_alignment
&& unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
slot->count += VECT_MAX_COST;
}
/* Traverse the peeling hash table to find the peeling option that aligns the
maximum number of data accesses. */
int
vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
_vect_peel_extended_info *max)
{
vect_peel_info elem = *slot;
if (elem->count > max->peel_info.count
|| (elem->count == max->peel_info.count
&& max->peel_info.npeel > elem->npeel))
{
max->peel_info.npeel = elem->npeel;
max->peel_info.count = elem->count;
max->peel_info.dr = elem->dr;
}
return 1;
}
/* Traverse the peeling hash table and calculate the cost for each peeling
option. Find the one with the lowest cost. */
int
vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
_vect_peel_extended_info *min)
{
vect_peel_info elem = *slot;
int save_misalignment, dummy;
unsigned int inside_cost = 0, outside_cost = 0, i;
gimple stmt = DR_STMT (elem->dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
struct data_reference *dr;
stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;
prologue_cost_vec.create (2);
body_cost_vec.create (2);
epilogue_cost_vec.create (2);
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
save_misalignment = DR_MISALIGNMENT (dr);
vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
&body_cost_vec);
SET_DR_MISALIGNMENT (dr, save_misalignment);
}
auto_vec<stmt_info_for_cost> scalar_cost_vec;
vect_get_single_scalar_iteration_cost (loop_vinfo, &scalar_cost_vec);
outside_cost += vect_get_known_peeling_cost
(loop_vinfo, elem->npeel, &dummy,
&scalar_cost_vec, &prologue_cost_vec, &epilogue_cost_vec);
/* Prologue and epilogue costs are added to the target model later.
These costs depend only on the scalar iteration cost, the
number of peeling iterations finally chosen, and the number of
misaligned statements. So discard the information found here. */
prologue_cost_vec.release ();
epilogue_cost_vec.release ();
if (inside_cost < min->inside_cost
|| (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
{
min->inside_cost = inside_cost;
min->outside_cost = outside_cost;
min->body_cost_vec.release ();
min->body_cost_vec = body_cost_vec;
min->peel_info.dr = elem->dr;
min->peel_info.npeel = elem->npeel;
}
else
body_cost_vec.release ();
return 1;
}
/* Choose best peeling option by traversing peeling hash table and either
choosing an option with the lowest cost (if cost model is enabled) or the
option that aligns as many accesses as possible. */
static struct data_reference *
vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
unsigned int *npeel,
stmt_vector_for_cost *body_cost_vec)
{
struct _vect_peel_extended_info res;
res.peel_info.dr = NULL;
res.body_cost_vec = stmt_vector_for_cost ();
if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
{
res.inside_cost = INT_MAX;
res.outside_cost = INT_MAX;
LOOP_VINFO_PEELING_HTAB (loop_vinfo)
->traverse <_vect_peel_extended_info *,
vect_peeling_hash_get_lowest_cost> (&res);
}
else
{
res.peel_info.count = 0;
LOOP_VINFO_PEELING_HTAB (loop_vinfo)
->traverse <_vect_peel_extended_info *,
vect_peeling_hash_get_most_frequent> (&res);
}
*npeel = res.peel_info.npeel;
*body_cost_vec = res.body_cost_vec;
return res.peel_info.dr;
}
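/* For instance (hypothetical numbers): if peeling 1 iteration aligns two
   accesses at inside cost 10 while peeling 5 iterations aligns three
   accesses at inside cost 8, the cost model picks NPEEL == 5 as the
   cheaper option; with an unlimited cost model the entry that aligns the
   most accesses is picked directly.  */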
/* Function vect_enhance_data_refs_alignment
This pass will use loop versioning and loop peeling in order to enhance
the alignment of data references in the loop.
FOR NOW: we assume that whatever versioning/peeling takes place, only the
original loop is to be vectorized. Any other loops that are created by
the transformations performed in this pass are not supposed to be
vectorized. This restriction will be relaxed.
This pass will require a cost model to guide it whether to apply peeling
or versioning or a combination of the two. For example, the scheme that
Intel uses when given a loop with several memory accesses is as follows:
choose one memory access ('p') whose alignment you want to force by doing
peeling. Then, either (1) generate a loop in which 'p' is aligned and all
other accesses are not necessarily aligned, or (2) use loop versioning to
generate one loop in which all accesses are aligned, and another loop in
which only 'p' is necessarily aligned.
("Automatic Intra-Register Vectorization for the Intel Architecture",
Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
Devising a cost model is the most critical aspect of this work. It will
guide us on which access to peel for, whether to use loop versioning, how
many versions to create, etc. The cost model will probably consist of
generic considerations as well as target specific considerations (on
powerpc for example, misaligned stores are more painful than misaligned
loads).
Here are the general steps involved in alignment enhancements:
-- original loop, before alignment analysis:
for (i=0; i<N; i++){
x = q[i]; # DR_MISALIGNMENT(q) = unknown
p[i] = y; # DR_MISALIGNMENT(p) = unknown
}
-- After vect_compute_data_refs_alignment:
for (i=0; i<N; i++){
x = q[i]; # DR_MISALIGNMENT(q) = 3
p[i] = y; # DR_MISALIGNMENT(p) = unknown
}
-- Possibility 1: we do loop versioning:
if (p is aligned) {
for (i=0; i<N; i++){ # loop 1A
x = q[i]; # DR_MISALIGNMENT(q) = 3
p[i] = y; # DR_MISALIGNMENT(p) = 0
}
}
else {
for (i=0; i<N; i++){ # loop 1B
x = q[i]; # DR_MISALIGNMENT(q) = 3
p[i] = y; # DR_MISALIGNMENT(p) = unaligned
}
}
-- Possibility 2: we do loop peeling:
for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
x = q[i];
p[i] = y;
}
for (i = 3; i < N; i++){ # loop 2A
x = q[i]; # DR_MISALIGNMENT(q) = 0
p[i] = y; # DR_MISALIGNMENT(p) = unknown
}
-- Possibility 3: combination of loop peeling and versioning:
for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
x = q[i];
p[i] = y;
}
if (p is aligned) {
for (i = 3; i<N; i++){ # loop 3A
x = q[i]; # DR_MISALIGNMENT(q) = 0
p[i] = y; # DR_MISALIGNMENT(p) = 0
}
}
else {
for (i = 3; i<N; i++){ # loop 3B
x = q[i]; # DR_MISALIGNMENT(q) = 0
p[i] = y; # DR_MISALIGNMENT(p) = unaligned
}
}
These loops are later passed to loop_transform to be vectorized. The
vectorizer will use the alignment information to guide the transformation
(whether to generate regular loads/stores, or with special handling for
misalignment). */
bool
vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum dr_alignment_support supportable_dr_alignment;
struct data_reference *dr0 = NULL, *first_store = NULL;
struct data_reference *dr;
unsigned int i, j;
bool do_peeling = false;
bool do_versioning = false;
bool stat;
gimple stmt;
stmt_vec_info stmt_info;
unsigned int npeel = 0;
bool all_misalignments_unknown = true;
unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
unsigned possible_npeel_number = 1;
tree vectype;
unsigned int nelements, mis, same_align_drs_max = 0;
stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost ();
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_enhance_data_refs_alignment ===\n");
/* While cost model enhancements are expected in the future, the high level
view of the code at this time is as follows:
A) If there is a misaligned access then see if peeling to align
this access can make all data references satisfy
vect_supportable_dr_alignment. If so, update data structures
as needed and return true.
B) If peeling wasn't possible and there is a data reference with an
unknown misalignment that does not satisfy vect_supportable_dr_alignment
then see if loop versioning checks can be used to make all data
references satisfy vect_supportable_dr_alignment. If so, update
data structures as needed and return true.
C) If neither peeling nor versioning were successful then return false if
any data reference does not satisfy vect_supportable_dr_alignment.
D) Return true (all data references satisfy vect_supportable_dr_alignment).
Note, Possibility 3 above (which is peeling and versioning together) is not
being done at this time. */
/* (1) Peeling to force alignment. */
/* (1.1) Decide whether to perform peeling, and how many iterations to peel:
Considerations:
+ How many accesses will become aligned due to the peeling
- How many accesses will become unaligned due to the peeling,
and the cost of misaligned accesses.
- The cost of peeling (the extra runtime checks, the increase
in code size). */
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
if (!STMT_VINFO_RELEVANT_P (stmt_info))
continue;
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
/* For invariant accesses there is nothing to enhance. */
if (integer_zerop (DR_STEP (dr)))
continue;
/* Strided loads perform only component accesses, alignment is
irrelevant for them. */
if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
continue;
supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
do_peeling = vector_alignment_reachable_p (dr);
if (do_peeling)
{
if (known_alignment_for_access_p (dr))
{
unsigned int npeel_tmp;
bool negative = tree_int_cst_compare (DR_STEP (dr),
size_zero_node) < 0;
/* Save info about DR in the hash table. */
if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
LOOP_VINFO_PEELING_HTAB (loop_vinfo)
= new hash_table<peel_info_hasher> (1);
vectype = STMT_VINFO_VECTYPE (stmt_info);
nelements = TYPE_VECTOR_SUBPARTS (vectype);
mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
TREE_TYPE (DR_REF (dr))));
npeel_tmp = (negative
? (mis - nelements) : (nelements - mis))
& (nelements - 1);
/* For multiple types, it is possible that the bigger type access
will have more than one peeling option. E.g., a loop with two
types: one of size (vector size / 4), and the other one of
size (vector size / 8). The vectorization factor will be 8. If both
accesses are misaligned by 3, the first one needs one scalar
iteration to be aligned, and the second one needs 5. But the
first one will also be aligned by peeling 5 scalar
iterations, and in that case both accesses will be aligned.
Hence, except for the immediate peeling amount, we also want
to try to add a full vector size, while we don't exceed the
vectorization factor.
We do this automatically for the cost model, since we calculate
the cost for every peeling option. */
if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
possible_npeel_number = vf / nelements;
/* Handle the aligned case. We may decide to align some other
access, making DR unaligned. */
if (DR_MISALIGNMENT (dr) == 0)
{
npeel_tmp = 0;
if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
possible_npeel_number++;
}
for (j = 0; j < possible_npeel_number; j++)
{
gcc_assert (npeel_tmp <= vf);
vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
npeel_tmp += nelements;
}
all_misalignments_unknown = false;
/* The data-ref that was chosen for the case in which all the
misalignments are unknown is not relevant anymore, since we
now have a data-ref with known alignment. */
dr0 = NULL;
}
else
{
/* If we don't know any misalignment values, we prefer
peeling for the data-ref that has the maximum number of data-refs
with the same alignment, unless the target prefers to align
stores over loads. */
if (all_misalignments_unknown)
{
unsigned same_align_drs
= STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
if (!dr0
|| same_align_drs_max < same_align_drs)
{
same_align_drs_max = same_align_drs;
dr0 = dr;
}
/* For data-refs with the same number of related
accesses prefer the one where the misalign
computation will be invariant in the outermost loop. */
else if (same_align_drs_max == same_align_drs)
{
struct loop *ivloop0, *ivloop;
ivloop0 = outermost_invariant_loop_for_expr
(loop, DR_BASE_ADDRESS (dr0));
ivloop = outermost_invariant_loop_for_expr
(loop, DR_BASE_ADDRESS (dr));
if ((ivloop && !ivloop0)
|| (ivloop && ivloop0
&& flow_loop_nested_p (ivloop, ivloop0)))
dr0 = dr;
}
if (!first_store && DR_IS_WRITE (dr))
first_store = dr;
}
/* If there are both known and unknown misaligned accesses in the
loop, we choose the peeling amount according to the known
accesses. */
if (!supportable_dr_alignment)
{
dr0 = dr;
if (!first_store && DR_IS_WRITE (dr))
first_store = dr;
}
}
}
else
{
if (!aligned_access_p (dr))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vector alignment may not be reachable\n");
break;
}
}
}
/* Check if we can possibly peel the loop. */
if (!vect_can_advance_ivs_p (loop_vinfo)
|| !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
do_peeling = false;
/* If we don't know how many times the peeling loop will run
assume it will run VF-1 times and disable peeling if the remaining
iters are less than the vectorization factor. */
if (do_peeling
&& all_misalignments_unknown
&& LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& (LOOP_VINFO_INT_NITERS (loop_vinfo)
< 2 * (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1))
do_peeling = false;
if (do_peeling
&& all_misalignments_unknown
&& vect_supportable_dr_alignment (dr0, false))
{
/* Check whether the target prefers to align stores over loads, i.e., whether
misaligned stores are more expensive than misaligned loads (taking
drs with the same alignment into account). */
if (first_store && DR_IS_READ (dr0))
{
unsigned int load_inside_cost = 0, load_outside_cost = 0;
unsigned int store_inside_cost = 0, store_outside_cost = 0;
unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
stmt_vector_for_cost dummy;
dummy.create (2);
vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost,
&dummy);
vect_get_data_access_cost (first_store, &store_inside_cost,
&store_outside_cost, &dummy);
dummy.release ();
/* Calculate the penalty for leaving FIRST_STORE unaligned (by
aligning the load DR0). */
load_inside_penalty = store_inside_cost;
load_outside_penalty = store_outside_cost;
for (i = 0;
STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
DR_STMT (first_store))).iterate (i, &dr);
i++)
if (DR_IS_READ (dr))
{
load_inside_penalty += load_inside_cost;
load_outside_penalty += load_outside_cost;
}
else
{
load_inside_penalty += store_inside_cost;
load_outside_penalty += store_outside_cost;
}
/* Calculate the penalty for leaving DR0 unaligned (by
aligning the FIRST_STORE). */
store_inside_penalty = load_inside_cost;
store_outside_penalty = load_outside_cost;
for (i = 0;
STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
DR_STMT (dr0))).iterate (i, &dr);
i++)
if (DR_IS_READ (dr))
{
store_inside_penalty += load_inside_cost;
store_outside_penalty += load_outside_cost;
}
else
{
store_inside_penalty += store_inside_cost;
store_outside_penalty += store_outside_cost;
}
if (load_inside_penalty > store_inside_penalty
|| (load_inside_penalty == store_inside_penalty
&& load_outside_penalty > store_outside_penalty))
dr0 = first_store;
}
/* In case there are only loads with different unknown misalignments, use
peeling only if it may help to align other accesses in the loop. */
if (!first_store
&& !STMT_VINFO_SAME_ALIGN_REFS (
vinfo_for_stmt (DR_STMT (dr0))).length ()
&& vect_supportable_dr_alignment (dr0, false)
!= dr_unaligned_supported)
do_peeling = false;
}
if (do_peeling && !dr0)
{
/* Peeling is possible, but no data access requires alignment in order
to be supported. So we try to choose the best possible peeling. */
/* We should get here only if there are drs with known misalignment. */
gcc_assert (!all_misalignments_unknown);
/* Choose the best peeling from the hash table. */
dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel,
&body_cost_vec);
if (!dr0 || !npeel)
do_peeling = false;
/* If peeling by npeel will result in a remaining loop not iterating
enough to be vectorized then do not peel. */
if (do_peeling
&& LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& (LOOP_VINFO_INT_NITERS (loop_vinfo)
< LOOP_VINFO_VECT_FACTOR (loop_vinfo) + npeel))
do_peeling = false;
}
if (do_peeling)
{
stmt = DR_STMT (dr0);
stmt_info = vinfo_for_stmt (stmt);
vectype = STMT_VINFO_VECTYPE (stmt_info);
nelements = TYPE_VECTOR_SUBPARTS (vectype);
if (known_alignment_for_access_p (dr0))
{
bool negative = tree_int_cst_compare (DR_STEP (dr0),
size_zero_node) < 0;
if (!npeel)
{
/* Since it's known at compile time, compute the number of
iterations in the peeled loop (the peeling factor) for use in
updating DR_MISALIGNMENT values. The peeling factor is the
vectorization factor minus the misalignment as an element
count. */
mis = DR_MISALIGNMENT (dr0);
mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
npeel = ((negative ? mis - nelements : nelements - mis)
& (nelements - 1));
}
/* For interleaved data access every iteration accesses all the
members of the group, therefore we divide the number of iterations
by the group size. */
stmt_info = vinfo_for_stmt (DR_STMT (dr0));
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
npeel /= GROUP_SIZE (stmt_info);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Try peeling by %d\n", npeel);
}
/* Ensure that all data refs can be vectorized after the peel. */
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
int save_misalignment;
if (dr == dr0)
continue;
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
/* Strided loads perform only component accesses, alignment is
irrelevant for them. */
if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
continue;
save_misalignment = DR_MISALIGNMENT (dr);
vect_update_misalignment_for_peel (dr, dr0, npeel);
supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
SET_DR_MISALIGNMENT (dr, save_misalignment);
if (!supportable_dr_alignment)
{
do_peeling = false;
break;
}
}
if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
{
stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
if (!stat)
do_peeling = false;
else
{
body_cost_vec.release ();
return stat;
}
}
if (do_peeling)
{
unsigned max_allowed_peel
= PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
if (max_allowed_peel != (unsigned)-1)
{
unsigned max_peel = npeel;
if (max_peel == 0)
{
gimple dr_stmt = DR_STMT (dr0);
stmt_vec_info vinfo = vinfo_for_stmt (dr_stmt);
tree vtype = STMT_VINFO_VECTYPE (vinfo);
max_peel = TYPE_VECTOR_SUBPARTS (vtype) - 1;
}
if (max_peel > max_allowed_peel)
{
do_peeling = false;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Disable peeling, max peels reached: %d\n", max_peel);
}
}
}
if (do_peeling)
{
/* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
If the misalignment of DR_i is identical to that of dr0 then set
DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
by the peeling factor times the element size of DR_i (MOD the
vectorization factor times the size). Otherwise, the
misalignment of DR_i must be set to unknown. */
FOR_EACH_VEC_ELT (datarefs, i, dr)
if (dr != dr0)
vect_update_misalignment_for_peel (dr, dr0, npeel);
LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
if (npeel)
LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
else
LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
= DR_MISALIGNMENT (dr0);
SET_DR_MISALIGNMENT (dr0, 0);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Alignment of access forced using peeling.\n");
dump_printf_loc (MSG_NOTE, vect_location,
"Peeling for alignment will be applied.\n");
}
/* The inside-loop cost will be accounted for in vectorizable_load
and vectorizable_store correctly with adjusted alignments.
Drop the body_cost_vec on the floor here. */
body_cost_vec.release ();
stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
gcc_assert (stat);
return stat;
}
}
body_cost_vec.release ();
/* (2) Versioning to force alignment. */
/* Try versioning if:
1) optimize loop for speed
2) there is at least one unsupported misaligned data ref with an unknown
misalignment, and
3) all misaligned data refs with a known misalignment are supported, and
4) the number of runtime alignment checks is within reason. */
do_versioning =
optimize_loop_nest_for_speed_p (loop)
&& (!loop->inner); /* FORNOW */
if (do_versioning)
{
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
/* For interleaving, only the alignment of the first access
matters. */
if (aligned_access_p (dr)
|| (STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt))
continue;
/* Strided loads perform only component accesses, alignment is
irrelevant for them. */
if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
continue;
supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
if (!supportable_dr_alignment)
{
gimple stmt;
int mask;
tree vectype;
if (known_alignment_for_access_p (dr)
|| LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
>= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
{
do_versioning = false;
break;
}
stmt = DR_STMT (dr);
vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
gcc_assert (vectype);
/* The rightmost bits of an aligned address must be zeros.
Construct the mask needed for this test. For example,
GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
mask must be 15 = 0xf. */
mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
/* FORNOW: use the same mask to test all potentially unaligned
references in the loop. The vectorizer currently supports
a single vector size, see the reference to
GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
vectorization factor is computed. */
gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
|| LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
DR_STMT (dr));
}
}
/* Versioning requires at least one misaligned data reference. */
if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
do_versioning = false;
else if (!do_versioning)
LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
}
if (do_versioning)
{
vec<gimple> may_misalign_stmts
= LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
gimple stmt;
/* It can now be assumed that the data references in the statements
in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
of the loop being vectorized. */
FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
dr = STMT_VINFO_DATA_REF (stmt_info);
SET_DR_MISALIGNMENT (dr, 0);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Alignment of access forced using versioning.\n");
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Versioning for alignment will be applied.\n");
/* Peeling and versioning can't be done together at this time. */
gcc_assert (! (do_peeling && do_versioning));
stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
gcc_assert (stat);
return stat;
}
/* This point is reached if neither peeling nor versioning is being done. */
gcc_assert (! (do_peeling || do_versioning));
stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
return stat;
}
/* Function vect_find_same_alignment_drs.
Update group and alignment relations according to the chosen
vectorization factor. */
static void
vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
loop_vec_info loop_vinfo)
{
unsigned int i;
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
lambda_vector dist_v;
unsigned int loop_depth;
if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
return;
if (dra == drb)
return;
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
return;
/* Loop-based vectorization and known data dependence. */
if (DDR_NUM_DIST_VECTS (ddr) == 0)
return;
/* Data-dependence analysis reports a distance vector of zero
for data-references that overlap only in the first iteration
but have steps of different sign (see PR45764).
So as a sanity check require equal DR_STEP. */
if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
return;
loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
{
int dist = dist_v[loop_depth];
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance = %d.\n", dist);
/* Same loop iteration, or a distance that is a multiple of the
vectorization factor for accesses of the same size.  */
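/* For example (illustrative), with a vectorization factor of 4 and 4-byte
accesses a[i] and a[i + 4], the distance is 4, a multiple of VF, so both
references have the same misalignment relative to the vector size.  */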
if (dist == 0
|| (dist % vectorization_factor == 0 && dra_size == drb_size))
{
/* References whose distance is zero modulo the vectorization factor
have the same alignment.  */
STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"accesses have the same alignment.\n");
dump_printf (MSG_NOTE,
"dependence distance modulo vf == 0 between ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_NOTE, "\n");
}
}
}
}
/* Function vect_analyze_data_refs_alignment
Analyze the alignment of the data-references in the loop.
Return FALSE if a data reference is found that cannot be vectorized. */
bool
vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_refs_alignment ===\n");
/* Mark groups of data references with same alignment using
data dependence information. */
if (loop_vinfo)
{
vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo);
struct data_dependence_relation *ddr;
unsigned int i;
FOR_EACH_VEC_ELT (ddrs, i, ddr)
vect_find_same_alignment_drs (ddr, loop_vinfo);
}
if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't calculate alignment "
"for data ref.\n");
return false;
}
return true;
}
/* Analyze groups of accesses: check that DR belongs to a group of
accesses of legal size, step, etc. Detect gaps, single element
interleaving, and other special cases. Set grouped access info.
Collect groups of strided stores for further use in SLP analysis. */
static bool
vect_analyze_group_access (struct data_reference *dr)
{
tree step = DR_STEP (dr);
tree scalar_type = TREE_TYPE (DR_REF (dr));
HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
HOST_WIDE_INT groupsize, last_accessed_element = 1;
bool slp_impossible = false;
struct loop *loop = NULL;
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
/* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
size of the interleaving group (including gaps). */
groupsize = absu_hwi (dr_step) / type_size;
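/* For example (illustrative), if the loop accesses a[3*i] and a[3*i + 1]
with 4-byte elements, DR_STEP is 12, the element size is 4 and GROUPSIZE
is 3 (two members plus one gap element).  */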
/* A non-consecutive access is possible only as part of an interleaving group.  */
if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
/* Check whether this DR is part of an interleaving group of which it is
the single element accessed in the loop.  */
/* Gaps are supported only for loads. STEP must be a multiple of the type
size. The size of the group must be a power of 2. */
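/* For example (illustrative), a loop that only reads a[4*i] forms a
single element group of size 4 with three gap elements; the gap at the
end of each group is handled by peeling for gaps, i.e. a scalar
epilogue loop.  */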
if (DR_IS_READ (dr)
&& (dr_step % type_size) == 0
&& groupsize > 0
&& exact_log2 (groupsize) != -1)
{
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected single element interleaving ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
dump_printf (MSG_NOTE, " step ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
dump_printf (MSG_NOTE, "\n");
}
if (loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Data access with gaps requires scalar "
"epilogue loop\n");
if (loop->inner)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Peeling for outer loop is not"
" supported\n");
return false;
}
LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
}
return true;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not consecutive access ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
{
/* Mark the statement as unvectorizable. */
STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
return true;
}
return false;
}
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
{
/* First stmt in the interleaving chain. Check the chain. */
gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
struct data_reference *data_ref = dr;
unsigned int count = 1;
tree prev_init = DR_INIT (data_ref);
gimple prev = stmt;
HOST_WIDE_INT diff, gaps = 0;
unsigned HOST_WIDE_INT count_in_bytes;
while (next)
{
/* Skip identical data-refs.  If two or more stmts share the same
data-ref (supported only for loads), we vectorize only the first
stmt and the rest reuse its vectorized load.  */
if (!tree_int_cst_compare (DR_INIT (data_ref),
DR_INIT (STMT_VINFO_DATA_REF (
vinfo_for_stmt (next)))))
{
if (DR_IS_WRITE (data_ref))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Two store stmts share the same dr.\n");
return false;
}
/* For loads, reuse the vectorized load of the first stmt.  */
GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
prev = next;
next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
continue;
}
prev = next;
data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
/* All group members have the same STEP by construction. */
gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
/* Check that the distance between two accesses is equal to the type
size. Otherwise, we have gaps. */
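/* For example (illustrative), with group members at a[4*i], a[4*i + 1]
and a[4*i + 3], the DIFF between the last two members is 2, so
GROUP_GAP of the last member becomes 2, one gap element is counted and
SLP of the group is not possible.  */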
diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
- TREE_INT_CST_LOW (prev_init)) / type_size;
if (diff != 1)
{
/* FORNOW: SLP of accesses with gaps is not supported. */
slp_impossible = true;
if (DR_IS_WRITE (data_ref))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps\n");
return false;
}
gaps += diff - 1;
}
last_accessed_element += diff;
/* Store the gap from the previous member of the group. If there is no
gap in the access, GROUP_GAP is always 1. */
GROUP_GAP (vinfo_for_stmt (next)) = diff;
prev_init = DR_INIT (data_ref);
next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
/* Count the number of data-refs in the chain. */
count++;
}
/* COUNT is the number of accesses found; multiply it by the type size
to get COUNT_IN_BYTES.  */
count_in_bytes = type_size * count;
/* Check that the size of the interleaving (including gaps) is not
greater than STEP. */
if (dr_step != 0
&& absu_hwi (dr_step) < count_in_bytes + gaps * type_size)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaving size is greater than step for ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (dr));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
/* Check that the size of the interleaving is equal to STEP for stores,
i.e., that there are no gaps. */
if (dr_step != 0
&& absu_hwi (dr_step) != count_in_bytes)
{
if (DR_IS_READ (dr))
{
slp_impossible = true;
/* There is a gap after the last load in the group.  This gap is the
difference between the groupsize and the number of elements; when
there is no gap, this difference is 0.  */
GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - count;
}
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps\n");
return false;
}
}
/* Check that STEP is a multiple of type size. */
if (dr_step != 0
&& (dr_step % type_size) != 0)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"step is not a multiple of type size: step ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, step);
dump_printf (MSG_MISSED_OPTIMIZATION, " size ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
TYPE_SIZE_UNIT (scalar_type));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
if (groupsize == 0)
groupsize = count;
GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving of size %d\n", (int)groupsize);
/* SLP: create an SLP data structure for every interleaving group of
stores for further analysis in vect_analyze_slp.  */
if (DR_IS_WRITE (dr) && !slp_impossible)
{
if (loop_vinfo)
LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
if (bb_vinfo)
BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
}
/* There is a gap at the end of the group.  */
if (groupsize - last_accessed_element > 0 && loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Data access with gaps requires scalar "
"epilogue loop\n");
if (loop->inner)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Peeling for outer loop is not supported\n");
return false;
}
LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
}
}
return true;
}
/* Analyze the access pattern of the data-reference DR.
In case of non-consecutive accesses call vect_analyze_group_access() to
analyze groups of accesses. */
static bool
vect_analyze_data_ref_access (struct data_reference *dr)
{
tree step = DR_STEP (dr);
tree scalar_type = TREE_TYPE (DR_REF (dr));
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
if (loop_vinfo && !step)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data-ref access in loop\n");
return false;
}
/* Allow invariant loads in loops that are not nested.  */
if (loop_vinfo && integer_zerop (step))
{
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
if (nested_in_vect_loop_p (loop, stmt))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"zero step in inner loop of nest\n");
return false;
}
return DR_IS_READ (dr);
}
if (loop && nested_in_vect_loop_p (loop, stmt))
{
/* Interleaved accesses are not yet supported within outer-loop
vectorization for references in the inner-loop. */
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
/* For the rest of the analysis we use the outer-loop step. */
step = STMT_VINFO_DR_STEP (stmt_info);
if (integer_zerop (step))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"zero step in outer loop.\n");
if (DR_IS_READ (dr))
return true;
else
return false;
}
}
/* Consecutive? */
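/* A step equal to the type size is a forward consecutive access; a
negative step whose absolute value equals the type size (for example,
a[n - i] with 4-byte elements and step -4) is a backward consecutive
access.  Neither needs grouping.  */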
if (TREE_CODE (step) == INTEGER_CST)
{
HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
|| (dr_step < 0
&& !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
{
/* Mark that it is not interleaving. */
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
return true;
}
}
if (loop && nested_in_vect_loop_p (loop, stmt))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"grouped access in outer loop.\n");
return false;
}
/* Assume this is a DR handled by the non-constant strided load case.  */
if (TREE_CODE (step) != INTEGER_CST)
return STMT_VINFO_STRIDE_LOAD_P (stmt_info);
/* Not a consecutive access - check if it is part of an interleaving group.  */
return vect_analyze_group_access (dr);
}
/* A helper function used in the comparator function to sort data
references.  T1 and T2 are two trees (typically parts of data
references) to be compared.  The function returns -1, 0, or 1.  */
static int
compare_tree (tree t1, tree t2)
{
int i, cmp;
enum tree_code code;
char tclass;
if (t1 == t2)
return 0;
if (t1 == NULL)
return -1;
if (t2 == NULL)
return 1;
STRIP_NOPS (t1);
STRIP_NOPS (t2);
if (TREE_CODE (t1) != TREE_CODE (t2))
return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;
code = TREE_CODE (t1);
switch (code)
{
/* For const values, we can just use hash values for comparisons. */
case INTEGER_CST:
case REAL_CST:
case FIXED_CST:
case STRING_CST:
case COMPLEX_CST:
case VECTOR_CST:
{
hashval_t h1 = iterative_hash_expr (t1, 0);
hashval_t h2 = iterative_hash_expr (t2, 0);
if (h1 != h2)
return h1 < h2 ? -1 : 1;
break;
}
case SSA_NAME:
cmp = compare_tree (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
if (cmp != 0)
return cmp;
if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
break;
default:
tclass = TREE_CODE_CLASS (code);
/* For declarations, compare their UIDs.  */
if (tclass == tcc_declaration)
{
if (DECL_UID (t1) != DECL_UID (t2))
return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
break;
}
/* For expressions with operands, compare their operands recursively. */
for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
{
cmp = compare_tree (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i));
if (cmp != 0)
return cmp;
}
}
return 0;
}
/* Compare two data-references DRA and DRB to sort them into chunks
suitable for grouping.  */
static int
dr_group_sort_cmp (const void *dra_, const void *drb_)
{
data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
int cmp;
/* Stabilize sort. */
if (dra == drb)
return 0;
/* Ordering of DRs according to base. */
if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0))
{
cmp = compare_tree (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb));
if (cmp != 0)
return cmp;
}
/* And according to DR_OFFSET. */
if (!dr_equal_offsets_p (dra, drb))
{
cmp = compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
if (cmp != 0)
return cmp;
}
/* Put reads before writes. */
if (DR_IS_READ (dra) != DR_IS_READ (drb))
return DR_IS_READ (dra) ? -1 : 1;
/* Then sort by access size.  */
if (!operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))), 0))
{
cmp = compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
if (cmp != 0)
return cmp;
}
/* And by step.  */
if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
{
cmp = compare_tree (DR_STEP (dra), DR_STEP (drb));
if (cmp != 0)
return cmp;
}
/* Then sort by DR_INIT.  For identical DRs, sort by stmt UID.  */
cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb));
if (cmp == 0)
return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
return cmp;
}
/* Function vect_analyze_data_ref_accesses.
Analyze the access pattern of all the data references in the loop.
FORNOW: the only access pattern that is considered vectorizable is a
simple step 1 (consecutive) access.
FORNOW: handle only arrays and pointer accesses. */
bool
vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
unsigned int i;
vec<data_reference_p> datarefs;
struct data_reference *dr;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_ref_accesses ===\n");
if (loop_vinfo)
datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
if (datarefs.is_empty ())
return true;
/* Sort the array of datarefs to make building the interleaving chains
linear. Don't modify the original vector's order, it is needed for
determining what dependencies are reversed. */
vec<data_reference_p> datarefs_copy = datarefs.copy ();
datarefs_copy.qsort (dr_group_sort_cmp);
/* Build the interleaving chains. */
for (i = 0; i < datarefs_copy.length () - 1;)
{
data_reference_p dra = datarefs_copy[i];
stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
stmt_vec_info lastinfo = NULL;
for (i = i + 1; i < datarefs_copy.length (); ++i)
{
data_reference_p drb = datarefs_copy[i];
stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
/* ??? Imperfect sorting (non-compatible types, non-modulo
accesses, same accesses) can lead to a group being artificially
split here, as we do not just skip over those.  If it really
matters we can push them to a worklist and re-iterate
over them.  Then we can just skip ahead to the next DR here.  */
/* Check that the data-refs have same first location (except init)
and they are both either store or load (not load and store,
not masked loads or stores). */
if (DR_IS_READ (dra) != DR_IS_READ (drb)
|| !operand_equal_p (DR_BASE_ADDRESS (dra),
DR_BASE_ADDRESS (drb), 0)
|| !dr_equal_offsets_p (dra, drb)
|| !gimple_assign_single_p (DR_STMT (dra))
|| !gimple_assign_single_p (DR_STMT (drb)))
break;
/* Check that the data-refs have the same constant size and step. */
tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
if (!tree_fits_uhwi_p (sza)
|| !tree_fits_uhwi_p (szb)
|| !tree_int_cst_equal (sza, szb)
|| !tree_fits_shwi_p (DR_STEP (dra))
|| !tree_fits_shwi_p (DR_STEP (drb))
|| !tree_int_cst_equal (DR_STEP (dra), DR_STEP (drb)))
break;
/* Do not place the same access in the interleaving chain twice. */
if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0)
break;
/* Check the types are compatible.
??? We don't distinguish this during sorting. */
if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
TREE_TYPE (DR_REF (drb))))
break;
/* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
gcc_assert (init_a <= init_b);
/* If init_b == init_a + the size of the type * k, we have an
interleaving, and DRA is accessed before DRB. */
HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
if ((init_b - init_a) % type_size_a != 0)
break;
/* The step (if not zero) is greater than the difference between
data-refs' inits. This splits groups into suitable sizes. */
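/* For example (illustrative), 4-byte accesses a[2*i] and a[2*i + 1]
have inits 0 and 4 and step 8; since 8 > 4 they stay in one group,
whereas an access whose init differs by 8 or more would start a new
group.  */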
HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
if (step != 0 && step <= (init_b - init_a))
break;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_NOTE, "\n");
}
/* Link the found element into the group list. */
if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
{
GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
lastinfo = stmtinfo_a;
}
GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
lastinfo = stmtinfo_b;
}
}
FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
&& !vect_analyze_data_ref_access (dr))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: complicated access pattern.\n");
if (bb_vinfo)
{
/* Mark the statement as not vectorizable. */
STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
continue;
}
else
{
datarefs_copy.release ();
return false;
}
}
datarefs_copy.release ();
return true;
}
/* Operator == between two dr_with_seg_len objects.
This equality operator is used to check whether two dr_with_seg_len
objects refer to the same data reference, so that we can consider
combining the aliasing checks of the two pairs of data-dependent
data refs.  */
static bool
operator == (const dr_with_seg_len& d1,
const dr_with_seg_len& d2)
{
return operand_equal_p (DR_BASE_ADDRESS (d1.dr),
DR_BASE_ADDRESS (d2.dr), 0)
&& compare_tree (d1.offset, d2.offset) == 0
&& compare_tree (d1.seg_len, d2.seg_len) == 0;
}
/* Function comp_dr_with_seg_len_pair.
Comparison function for sorting objects of dr_with_seg_len_pair_t
so that we can combine aliasing checks in one scan. */
static int
comp_dr_with_seg_len_pair (const void *p1_, const void *p2_)
{
const dr_with_seg_len_pair_t* p1 = (const dr_with_seg_len_pair_t *) p1_;
const dr_with_seg_len_pair_t* p2 = (const dr_with_seg_len_pair_t *) p2_;
const dr_with_seg_len &p11 = p1->first,
&p12 = p1->second,
&p21 = p2->first,
&p22 = p2->second;
/* For DR pairs (a, b) and (c, d), we only consider merging the alias checks
if a and c have the same base address and step, and b and d have the same
base address and step.  Therefore, if either a and c or b and d do not have
the same address and step, we do not care about the order of those two
pairs after sorting.  */
int comp_res;
if ((comp_res = compare_tree (DR_BASE_ADDRESS (p11.dr),
DR_BASE_ADDRESS (p21.dr))) != 0)
return comp_res;
if ((comp_res = compare_tree (DR_BASE_ADDRESS (p12.dr),
DR_BASE_ADDRESS (p22.dr))) != 0)
return comp_res;
if ((comp_res = compare_tree (DR_STEP (p11.dr), DR_STEP (p21.dr))) != 0)
return comp_res;
if ((comp_res = compare_tree (DR_STEP (p12.dr), DR_STEP (p22.dr))) != 0)
return comp_res;
if ((comp_res = compare_tree (p11.offset, p21.offset)) != 0)
return comp_res;
if ((comp_res = compare_tree (p12.offset, p22.offset)) != 0)
return comp_res;
return 0;
}
/* Function vect_vfa_segment_size.
Create an expression that computes the size of segment
that will be accessed for a data reference.  The function takes into
account that realignment loads may access one more vector.
Input:
DR: The data reference.
LENGTH_FACTOR: segment length to consider.
Return an expression whose value is the size of segment which will be
accessed by DR. */
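/* For example (illustrative), for a reference with a 4-byte step and
LENGTH_FACTOR equal to a vectorization factor of 4, the segment is
16 bytes; for an invariant (zero-step) reference it is just the size
of the accessed type.  */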
static tree
vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
{
tree segment_length;
if (integer_zerop (DR_STEP (dr)))
segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
else
segment_length = size_binop (MULT_EXPR,
fold_convert (sizetype, DR_STEP (dr)),
fold_convert (sizetype, length_factor));
if (vect_supportable_dr_alignment (dr, false)
== dr_explicit_realign_optimized)
{
tree vector_size = TYPE_SIZE_UNIT
(STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
}
return segment_length;
}
/* Function vect_prune_runtime_alias_test_list.
Prune a list of ddrs to be tested at run-time by versioning for alias.
Merge several alias checks into one if possible.
Return FALSE if the resulting list of ddrs is longer than allowed by
PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
bool
vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
{
vec<ddr_p> may_alias_ddrs =
LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
vec<dr_with_seg_len_pair_t>& comp_alias_ddrs =
LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
ddr_p ddr;
unsigned int i;
tree length_factor;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_prune_runtime_alias_test_list ===\n");
if (may_alias_ddrs.is_empty ())
return true;
/* Basically, for each pair of dependent data refs store_ptr_0
and load_ptr_0, we create an expression:
((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
|| ((load_ptr_0 + load_segment_length_0) <= store_ptr_0)
for aliasing checks. However, in some cases we can decrease
the number of checks by combining two checks into one. For
example, suppose we have another pair of data refs store_ptr_0
and load_ptr_1, and if the following condition is satisfied:
load_ptr_0 < load_ptr_1 &&
load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0
(this condition means that, in each iteration of the vectorized loop,
the memory accessed through store_ptr_0 cannot lie between the memory
of load_ptr_0 and load_ptr_1.)
we can then use only the following expression to finish the
aliasing checks between store_ptr_0 & load_ptr_0 and
store_ptr_0 & load_ptr_1:
((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
|| ((load_ptr_1 + load_segment_length_1) <= store_ptr_0)
Note that we only consider the case where load_ptr_0 and load_ptr_1 have
the same base address.  */
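/* For example (illustrative), with 4-byte loads q[i] and q[i + 4] each
checked against a store through p and a vectorization factor of 4, the
segments [q, q + 16) and [q + 16, q + 32) can be merged and tested
against the store's segment with a single check over [q, q + 32).  */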
comp_alias_ddrs.create (may_alias_ddrs.length ());
/* First, we collect all data ref pairs for aliasing checks. */
FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
{
struct data_reference *dr_a, *dr_b;
gimple dr_group_first_a, dr_group_first_b;
tree segment_length_a, segment_length_b;
gimple stmt_a, stmt_b;
dr_a = DDR_A (ddr);
stmt_a = DR_STMT (DDR_A (ddr));
dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
if (dr_group_first_a)
{
stmt_a = dr_group_first_a;
dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
}
dr_b = DDR_B (ddr);
stmt_b = DR_STMT (DDR_B (ddr));
dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
if (dr_group_first_b)
{
stmt_b = dr_group_first_b;
dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
}
if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
length_factor = scalar_loop_iters;
else
length_factor = size_int (vect_factor);
segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
dr_with_seg_len_pair_t dr_with_seg_len_pair
(dr_with_seg_len (dr_a, segment_length_a),
dr_with_seg_len (dr_b, segment_length_b));
if (compare_tree (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b)) > 0)
std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);
comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
}
/* Second, we sort the collected data ref pairs so that we can scan
them once to combine all possible aliasing checks. */
comp_alias_ddrs.qsort (comp_dr_with_seg_len_pair);
/* Third, we scan the sorted dr pairs and check if we can combine
alias checks of two neighbouring dr pairs. */
for (size_t i = 1; i < comp_alias_ddrs.length (); ++i)
{
/* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */
dr_with_seg_len *dr_a1 = &comp_alias_ddrs[i-1].first,
*dr_b1 = &comp_alias_ddrs[i-1].second,
*dr_a2 = &comp_alias_ddrs[i].first,
*dr_b2 = &comp_alias_ddrs[i].second;
/* Remove duplicate data ref pairs. */
if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"found equal ranges ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (dr_a1->dr));
dump_printf (MSG_NOTE, ", ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (dr_b1->dr));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (dr_a2->dr));
dump_printf (MSG_NOTE, ", ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (dr_b2->dr));
dump_printf (MSG_NOTE, "\n");
}
comp_alias_ddrs.ordered_remove (i--);
continue;
}
if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
{
/* We consider the case where DR_B1 and DR_B2 are the same memref,
and DR_A1 and DR_A2 are two consecutive memrefs.  */
if (*dr_a1 == *dr_a2)
{
std::swap (dr_a1, dr_b1);
std::swap (dr_a2, dr_b2);
}
if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
DR_BASE_ADDRESS (dr_a2->dr),
0)
|| !tree_fits_shwi_p (dr_a1->offset)
|| !tree_fits_shwi_p (dr_a2->offset))
continue;
/* Make sure dr_a1 starts left of dr_a2. */
if (tree_int_cst_lt (dr_a2->offset, dr_a1->offset))
std::swap (*dr_a1, *dr_a2);
unsigned HOST_WIDE_INT diff
= tree_to_shwi (dr_a2->offset) - tree_to_shwi (dr_a1->offset);
bool do_remove = false;
/* If the left segment does not extend beyond the start of the
right segment the new segment length is that of the right
plus the segment distance. */
if (tree_fits_uhwi_p (dr_a1->seg_len)
&& compare_tree_int (dr_a1->seg_len, diff) <= 0)
{
dr_a1->seg_len = size_binop (PLUS_EXPR, dr_a2->seg_len,
size_int (diff));
do_remove = true;
}
/* Generally the new segment length is the maximum of the
left segment size and the right segment size plus the distance.
??? We can also build tree MAX_EXPR here but it's not clear this
is profitable. */
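/* For example (illustrative), a left segment of 32 bytes, a right
segment of 16 bytes and a distance of 8 bytes give a merged segment
of MAX (32, 8 + 16) = 32 bytes.  */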
else if (tree_fits_uhwi_p (dr_a1->seg_len)
&& tree_fits_uhwi_p (dr_a2->seg_len))
{
unsigned HOST_WIDE_INT seg_len_a1 = tree_to_uhwi (dr_a1->seg_len);
unsigned HOST_WIDE_INT seg_len_a2 = tree_to_uhwi (dr_a2->seg_len);
dr_a1->seg_len = size_int (MAX (seg_len_a1, diff + seg_len_a2));
do_remove = true;
}
/* Now we check if the following condition is satisfied:
DIFF - SEGMENT_LENGTH_A < SEGMENT_LENGTH_B
where DIFF = DR_A2->OFFSET - DR_A1->OFFSET. However,
SEGMENT_LENGTH_A or SEGMENT_LENGTH_B may not be constant so we
have to make a best estimation. We can get the minimum value
of SEGMENT_LENGTH_B as a constant, represented by MIN_SEG_LEN_B,
then either of the following two conditions can guarantee the
one above:
1: DIFF <= MIN_SEG_LEN_B
2: DIFF - SEGMENT_LENGTH_A < MIN_SEG_LEN_B */
else
{
unsigned HOST_WIDE_INT min_seg_len_b
= (tree_fits_uhwi_p (dr_b1->seg_len)
? tree_to_uhwi (dr_b1->seg_len)
: vect_factor);
if (diff <= min_seg_len_b
|| (tree_fits_uhwi_p (dr_a1->seg_len)
&& diff - tree_to_uhwi (dr_a1->seg_len) < min_seg_len_b))
{
dr_a1->seg_len = size_binop (PLUS_EXPR,
dr_a2->seg_len, size_int (diff));
do_remove = true;
}
}
if (do_remove)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"merging ranges for ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a1->dr));
dump_printf (MSG_NOTE, ", ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b1->dr));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a2->dr));
dump_printf (MSG_NOTE, ", ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b2->dr));
dump_printf (MSG_NOTE, "\n");
}
comp_alias_ddrs.ordered_remove (i--);
}
}
}
dump_printf_loc (MSG_NOTE, vect_location,
"improved number of alias checks from %d to %d\n",
may_alias_ddrs.length (), comp_alias_ddrs.length ());
if ((int) comp_alias_ddrs.length () >
PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
return false;
return true;
}
/* Check whether a non-affine read in stmt is suitable for gather load
and if so, return a builtin decl for that operation. */
tree
vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
tree *offp, int *scalep)
{
HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
tree offtype = NULL_TREE;
tree decl, base, off;
machine_mode pmode;
int punsignedp, pvolatilep;
base = DR_REF (dr);
/* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
see if we can use the def stmt of the address. */
if (is_gimple_call (stmt)
&& gimple_call_internal_p (stmt)
&& (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
|| gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
&& TREE_CODE (base) == MEM_REF
&& TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
&& integer_zerop (TREE_OPERAND (base, 1))
&& !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
{
gimple def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
if (is_gimple_assign (def_stmt)
&& gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
}
/* The gather builtins need address of the form
loop_invariant + vector * {1, 2, 4, 8}
or
loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
of loop invariants/SSA_NAMEs defined in the loop, with casts,
multiplications and additions in it. To get a vector, we need
a single SSA_NAME that will be defined in the loop and will
contain everything that is not loop invariant and that can be
vectorized.  The following code attempts to find such a preexisting
SSA_NAME OFF and put the loop invariants into a tree BASE
that can be gimplified before the loop. */
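/* For example (illustrative), for a read a[b[i]] with 4-byte elements,
the loop invariant BASE becomes &a, OFF becomes the SSA_NAME holding
b[i] (possibly sign-extended) and SCALE becomes 4, matching the
loop_invariant + sign_extend (vector) * 4 form above.  */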
base = get_inner_reference (base, &pbitsize, &pbitpos, &off,
&pmode, &punsignedp, &pvolatilep, false);
gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
if (TREE_CODE (base) == MEM_REF)
{
if (!integer_zerop (TREE_OPERAND (base, 1)))
{
if (off == NULL_TREE)
{
offset_int moff = mem_ref_offset (base);
off = wide_int_to_tree (sizetype, moff);
}
else
off = size_binop (PLUS_EXPR, off,
fold_convert (sizetype, TREE_OPERAND (base, 1)));
}
base = TREE_OPERAND (base, 0);
}
else
base = build_fold_addr_expr (base);
if (off == NULL_TREE)
off = size_zero_node;
/* If BASE is not loop invariant, then, if OFF is 0, we start with just
the constant offset in the loop invariant BASE and continue with BASE
as OFF; otherwise give up.
We could handle that case by gimplifying the addition of BASE + OFF
into some SSA_NAME and using that as OFF, but for now punt.  */
if (!expr_invariant_in_loop_p (loop, base))
{
if (!integer_zerop (off))
return NULL_TREE;
off = base;
base = size_int (pbitpos / BITS_PER_UNIT);
}
/* Otherwise put base + constant offset into the loop invariant BASE
and continue with OFF. */
else
{
base = fold_convert (sizetype, base);
base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
}
/* OFF at this point may be either a SSA_NAME or some tree expression
from get_inner_reference. Try to peel off loop invariants from it
into BASE as long as possible. */
STRIP_NOPS (off);
while (offtype == NULL_TREE)
{
enum tree_code code;
tree op0, op1, add = NULL_TREE;
if (TREE_CODE (off) == SSA_NAME)
{
gimple def_stmt = SSA_NAME_DEF_STMT (off);
if (expr_invariant_in_loop_p (loop, off))
return NULL_TREE;
if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
break;
op0 = gimple_assign_rhs1 (def_stmt);
code = gimple_assign_rhs_code (def_stmt);
op1 = gimple_assign_rhs2 (def_stmt);
}
else
{
if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
return NULL_TREE;
code = TREE_CODE (off);
extract_ops_from_tree (off, &code, &op0, &op1);
}
switch (code)
{
case POINTER_PLUS_EXPR:
case PLUS_EXPR:
if (expr_invariant_in_loop_p (loop, op0))
{
add = op0;
off = op1;
do_add:
add = fold_convert (sizetype, add);
if (scale != 1)
add = size_binop (MULT_EXPR, add, size_int (scale));
base = size_binop (PLUS_EXPR, base, add);
continue;
}
if (expr_invariant_in_loop_p (loop, op1))
{
add = op1;
off = op0;
goto do_add;
}
break;
case MINUS_EXPR:
if (expr_invariant_in_loop_p (loop, op1))
{
add = fold_convert (sizetype, op1);
add = size_binop (MINUS_EXPR, size_zero_node, add);
off = op0;
goto do_add;
}
break;
case MULT_EXPR:
if (scale == 1 && tree_fits_shwi_p (op1))
{
scale = tree_to_shwi (op1);
off = op0;
continue;
}
break;
case SSA_NAME:
off = op0;
continue;
CASE_CONVERT:
if (!POINTER_TYPE_P (TREE_TYPE (op0))
&& !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
break;
if (TYPE_PRECISION (TREE_TYPE (op0))
== TYPE_PRECISION (TREE_TYPE (off)))
{
off = op0;
continue;
}
if (TYPE_PRECISION (TREE_TYPE (op0))
< TYPE_PRECISION (TREE_TYPE (off)))
{
off = op0;
offtype = TREE_TYPE (off);
STRIP_NOPS (off);
continue;
}
break;
default:
break;
}
break;
}
/* If at the end OFF still isn't a SSA_NAME or isn't
defined in the loop, punt. */
if (TREE_CODE (off) != SSA_NAME
|| expr_invariant_in_loop_p (loop, off))
return NULL_TREE;
if (offtype == NULL_TREE)
offtype = TREE_TYPE (off);
decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
offtype, scale);
if (decl == NULL_TREE)
return NULL_TREE;
if (basep)