/* Loop Vectorization
Copyright (C) 2003-2018 Free Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com> and
Ira Rosen <irar@il.ibm.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "params.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "gimple-fold.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-if-conv.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
#include "tree-eh.h"
/* Loop Vectorization Pass.
This pass tries to vectorize loops.
For example, the vectorizer transforms the following simple loop:
short a[N]; short b[N]; short c[N]; int i;
for (i=0; i<N; i++){
a[i] = b[i] + c[i];
}
as if it was manually vectorized by rewriting the source code into:
typedef int __attribute__((mode(V8HI))) v8hi;
short a[N]; short b[N]; short c[N]; int i;
v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
v8hi va, vb, vc;
for (i=0; i<N/8; i++){
vb = pb[i];
vc = pc[i];
va = vb + vc;
pa[i] = va;
}
The main entry to this pass is vectorize_loops(), in which
the vectorizer applies a set of analyses on a given set of loops,
followed by the actual vectorization transformation for the loops that
had successfully passed the analysis phase.
Throughout this pass we make a distinction between two types of
data: scalars (which are represented by SSA_NAMES), and memory references
("data-refs"). These two types of data require different handling both
during analysis and transformation. The types of data-refs that the
vectorizer currently supports are ARRAY_REFS whose base is an array DECL
(not a pointer), and INDIRECT_REFS through pointers; both array and pointer
accesses are required to have a simple (consecutive) access pattern.
Analysis phase:
===============
The driver for the analysis phase is vect_analyze_loop().
It applies a set of analyses, some of which rely on the scalar evolution
analyzer (scev) developed by Sebastian Pop.
During the analysis phase the vectorizer records some information
per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
loop, as well as general information about the loop as a whole, which is
recorded in a "loop_vec_info" struct attached to each loop.
Transformation phase:
=====================
The loop transformation phase scans all the stmts in the loop, and
creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
the loop that needs to be vectorized. It inserts the vector code sequence
just before the scalar stmt S, and records a pointer to the vector code
in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
attached to S). This pointer will be used for the vectorization of following
stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
otherwise, we rely on dead code elimination for removing it.
For example, say stmt S1 was vectorized into stmt VS1:
VS1: vb = px[i];
S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
S2: a = b;
To vectorize stmt S2, the vectorizer first finds the stmt that defines
the operand 'b' (S1), and gets the relevant vector def 'vb' from the
vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
resulting sequence would be:
VS1: vb = px[i];
S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
VS2: va = vb;
S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
Operands that are not SSA_NAMEs are data-refs that appear in
load/store operations (like 'x[i]' in S1), and are handled differently.
Target modeling:
=================
Currently the only target specific information that is used is the
size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
Targets that can support different vector sizes will, for now, need
to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
flexibility will be added in the future.
Since we only vectorize operations whose vector form can be
expressed using existing tree codes, to verify that an operation is
supported, the vectorizer checks the relevant optab at the relevant
machine_mode (e.g, optab_handler (add_optab, V8HImode)). If
the value found is CODE_FOR_nothing, then there's no target support, and
we can't vectorize the stmt.
For additional information on this project see:
http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
/* Function vect_determine_vectorization_factor
Determine the vectorization factor (VF). VF is the number of data elements
that are operated upon in parallel in a single iteration of the vectorized
loop. For example, when vectorizing a loop that operates on 4-byte elements,
on a target with a 16-byte vector size (VS), the VF is set to 4, since 4
elements can fit in a single vector register.
We currently support vectorization of loops in which all types operated upon
are of the same size. Therefore this function currently sets VF according to
the size of the types operated upon, and fails if there are multiple sizes
in the loop.
VF is also the factor by which the loop iterations are strip-mined, e.g.:
original loop:
for (i=0; i<N; i++){
a[i] = b[i] + c[i];
}
vectorized loop:
for (i=0; i<N; i+=VF){
a[i:VF] = b[i:VF] + c[i:VF];
}
*/
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
unsigned nbbs = loop->num_nodes;
poly_uint64 vectorization_factor = 1;
tree scalar_type = NULL_TREE;
gphi *phi;
tree vectype;
stmt_vec_info stmt_info;
unsigned i;
HOST_WIDE_INT dummy;
gimple *stmt, *pattern_stmt = NULL;
gimple_seq pattern_def_seq = NULL;
gimple_stmt_iterator pattern_def_si = gsi_none ();
bool analyze_pattern_stmt = false;
bool bool_result;
auto_vec<stmt_vec_info> mask_producers;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_determine_vectorization_factor ===\n");
for (i = 0; i < nbbs; i++)
{
basic_block bb = bbs[i];
for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
gsi_next (&si))
{
phi = si.phi ();
stmt_info = vinfo_for_stmt (phi);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
}
gcc_assert (stmt_info);
if (STMT_VINFO_RELEVANT_P (stmt_info)
|| STMT_VINFO_LIVE_P (stmt_info))
{
gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
scalar_type = TREE_TYPE (PHI_RESULT (phi));
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
dump_printf (MSG_NOTE, "\n");
}
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported "
"data-type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
STMT_VINFO_VECTYPE (stmt_info) = vectype;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
dump_printf (MSG_NOTE, "\n");
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
dump_printf (MSG_NOTE, "\n");
}
vect_update_max_nunits (&vectorization_factor, vectype);
}
}
for (gimple_stmt_iterator si = gsi_start_bb (bb);
!gsi_end_p (si) || analyze_pattern_stmt;)
{
tree vf_vectype;
if (analyze_pattern_stmt)
stmt = pattern_stmt;
else
stmt = gsi_stmt (si);
stmt_info = vinfo_for_stmt (stmt);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
}
gcc_assert (stmt_info);
/* Skip stmts which do not need to be vectorized. */
if ((!STMT_VINFO_RELEVANT_P (stmt_info)
&& !STMT_VINFO_LIVE_P (stmt_info))
|| gimple_clobber_p (stmt))
{
if (STMT_VINFO_IN_PATTERN_P (stmt_info)
&& (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
&& (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
|| STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
{
stmt = pattern_stmt;
stmt_info = vinfo_for_stmt (pattern_stmt);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
}
}
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
gsi_next (&si);
continue;
}
}
else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
&& (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
&& (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
|| STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
analyze_pattern_stmt = true;
/* If a pattern statement has def stmts, analyze them too. */
if (is_pattern_stmt_p (stmt_info))
{
if (pattern_def_seq == NULL)
{
pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
pattern_def_si = gsi_start (pattern_def_seq);
}
else if (!gsi_end_p (pattern_def_si))
gsi_next (&pattern_def_si);
if (pattern_def_seq != NULL)
{
gimple *pattern_def_stmt = NULL;
stmt_vec_info pattern_def_stmt_info = NULL;
while (!gsi_end_p (pattern_def_si))
{
pattern_def_stmt = gsi_stmt (pattern_def_si);
pattern_def_stmt_info
= vinfo_for_stmt (pattern_def_stmt);
if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
|| STMT_VINFO_LIVE_P (pattern_def_stmt_info))
break;
gsi_next (&pattern_def_si);
}
if (!gsi_end_p (pattern_def_si))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern def stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
pattern_def_stmt, 0);
}
stmt = pattern_def_stmt;
stmt_info = pattern_def_stmt_info;
}
else
{
pattern_def_si = gsi_none ();
analyze_pattern_stmt = false;
}
}
else
analyze_pattern_stmt = false;
}
if (gimple_get_lhs (stmt) == NULL_TREE
/* MASK_STORE has no lhs, but is ok. */
&& (!is_gimple_call (stmt)
|| !gimple_call_internal_p (stmt)
|| gimple_call_internal_fn (stmt) != IFN_MASK_STORE))
{
if (is_gimple_call (stmt))
{
/* Ignore calls with no lhs. These must be calls to
#pragma omp simd functions, and the vectorization factor
they really need can't be determined until
vectorizable_simd_clone_call. */
if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
{
pattern_def_seq = NULL;
gsi_next (&si);
}
continue;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: irregular stmt.");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
0);
}
return false;
}
if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vector stmt in loop:");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
}
return false;
}
bool_result = false;
if (STMT_VINFO_VECTYPE (stmt_info))
{
/* The only case when a vectype had been already set is for stmts
that contain a dataref, or for "pattern-stmts" (stmts
generated by the vectorizer to represent/replace a certain
idiom). */
gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
|| is_pattern_stmt_p (stmt_info)
|| !gsi_end_p (pattern_def_si));
vectype = STMT_VINFO_VECTYPE (stmt_info);
}
else
{
gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
else
scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
/* Bool ops don't participate in vectorization factor
computation. For comparisons, use the compared types to
compute a factor. */
if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
&& is_gimple_assign (stmt)
&& gimple_assign_rhs_code (stmt) != COND_EXPR)
{
if (STMT_VINFO_RELEVANT_P (stmt_info)
|| STMT_VINFO_LIVE_P (stmt_info))
mask_producers.safe_push (stmt_info);
bool_result = true;
if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
== tcc_comparison
&& !VECT_SCALAR_BOOLEAN_TYPE_P
(TREE_TYPE (gimple_assign_rhs1 (stmt))))
scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
else
{
if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
{
pattern_def_seq = NULL;
gsi_next (&si);
}
continue;
}
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
dump_printf (MSG_NOTE, "\n");
}
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported "
"data-type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
if (!bool_result)
STMT_VINFO_VECTYPE (stmt_info) = vectype;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
dump_printf (MSG_NOTE, "\n");
}
}
/* Don't try to compute VF out of scalar types if the stmt
produces a boolean vector. Use the result vectype instead. */
if (VECTOR_BOOLEAN_TYPE_P (vectype))
vf_vectype = vectype;
else
{
/* The vectorization factor is according to the smallest
scalar type (or the largest vector size, but we only
support one vector size per loop). */
if (!bool_result)
scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
&dummy);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
dump_printf (MSG_NOTE, "\n");
}
vf_vectype = get_vectype_for_scalar_type (scalar_type);
}
if (!vf_vectype)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported data-type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: different sized vector "
"types in statement, ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
vectype);
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
vf_vectype);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
dump_printf (MSG_NOTE, "\n");
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vf_vectype));
dump_printf (MSG_NOTE, "\n");
}
vect_update_max_nunits (&vectorization_factor, vf_vectype);
if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
{
pattern_def_seq = NULL;
gsi_next (&si);
}
}
}
/* TODO: Analyze cost. Decide if worth while to vectorize. */
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
dump_dec (MSG_NOTE, vectorization_factor);
dump_printf (MSG_NOTE, "\n");
}
if (known_le (vectorization_factor, 1U))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported data-type\n");
return false;
}
LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
for (i = 0; i < mask_producers.length (); i++)
{
tree mask_type = NULL;
stmt = STMT_VINFO_STMT (mask_producers[i]);
if (is_gimple_assign (stmt)
&& TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
&& !VECT_SCALAR_BOOLEAN_TYPE_P
(TREE_TYPE (gimple_assign_rhs1 (stmt))))
{
scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
mask_type = get_mask_type_for_scalar_type (scalar_type);
if (!mask_type)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported mask\n");
return false;
}
}
else
{
tree rhs;
ssa_op_iter iter;
gimple *def_stmt;
enum vect_def_type dt;
FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
{
if (!vect_is_simple_use (rhs, mask_producers[i]->vinfo,
&def_stmt, &dt, &vectype))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't compute mask type "
"for statement, ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
0);
}
return false;
}
/* No vectype probably means external definition.
Allow it in case there is another operand that
allows the mask type to be determined. */
if (!vectype)
continue;
if (!mask_type)
mask_type = vectype;
else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
TYPE_VECTOR_SUBPARTS (vectype)))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: different sized masks "
"types in statement, ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
mask_type);
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
vectype);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
!= VECTOR_BOOLEAN_TYPE_P (vectype))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: mixed mask and "
"nonmask vector types in statement, ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
mask_type);
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
vectype);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
}
/* We may compare boolean value loaded as vector of integers.
Fix mask_type in such case. */
if (mask_type
&& !VECTOR_BOOLEAN_TYPE_P (mask_type)
&& gimple_code (stmt) == GIMPLE_ASSIGN
&& TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
mask_type = build_same_sized_truth_vector_type (mask_type);
}
/* No mask_type should mean loop invariant predicate.
This is probably a subject for optimization in
if-conversion. */
if (!mask_type)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't compute mask type "
"for statement, ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
0);
}
return false;
}
STMT_VINFO_VECTYPE (mask_producers[i]) = mask_type;
}
return true;
}
/* Function vect_is_simple_iv_evolution.
FORNOW: A simple evolution of an induction variable in the loop is
considered a polynomial evolution. */
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
tree * step)
{
tree init_expr;
tree step_expr;
tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
basic_block bb;
/* When there is no evolution in this loop, the evolution function
is not "simple". */
if (evolution_part == NULL_TREE)
return false;
/* When the evolution is a polynomial of degree >= 2
the evolution function is not "simple". */
if (tree_is_chrec (evolution_part))
return false;
step_expr = evolution_part;
init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "step: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
dump_printf (MSG_NOTE, ", init: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
dump_printf (MSG_NOTE, "\n");
}
*init = init_expr;
*step = step_expr;
if (TREE_CODE (step_expr) != INTEGER_CST
&& (TREE_CODE (step_expr) != SSA_NAME
|| ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
&& flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
|| (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
&& (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
|| !flag_associative_math)))
&& (TREE_CODE (step_expr) != REAL_CST
|| !flag_associative_math))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"step unknown.\n");
return false;
}
return true;
}
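/* As an illustration (hypothetical loop, assuming it has loop number 1):
for (i = 0; i < n; i++)
a[i] = 0;
the access function of the PHI for 'i' is the chrec {0, +, 1}_1, so the
evolution is "simple": *INIT is set to 0 and *STEP to 1. A chrec of
degree two such as {{0, +, 1}_1, +, 1}_1 would be rejected above. */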
/* Function vect_analyze_scalar_cycles_1.
Examine the cross iteration def-use cycles of scalar variables
in LOOP. LOOP_VINFO represents the loop that is now being
considered for vectorization (can be LOOP, or an outer-loop
enclosing LOOP). */
static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
basic_block bb = loop->header;
tree init, step;
auto_vec<gimple *, 64> worklist;
gphi_iterator gsi;
bool double_reduc;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_scalar_cycles ===\n");
/* First - identify all inductions. Reduction detection assumes that all the
inductions have been identified, therefore, this order must not be
changed. */
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gphi *phi = gsi.phi ();
tree access_fn = NULL;
tree def = PHI_RESULT (phi);
stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
}
/* Skip virtual PHIs. The data dependences that are associated with
virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
if (virtual_operand_p (def))
continue;
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
/* Analyze the evolution function. */
access_fn = analyze_scalar_evolution (loop, def);
if (access_fn)
{
STRIP_NOPS (access_fn);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Access function of PHI: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
dump_printf (MSG_NOTE, "\n");
}
STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
= initial_condition_in_loop_num (access_fn, loop->num);
STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
= evolution_part_in_loop_num (access_fn, loop->num);
}
if (!access_fn
|| !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
|| (LOOP_VINFO_LOOP (loop_vinfo) != loop
&& TREE_CODE (step) != INTEGER_CST))
{
worklist.safe_push (phi);
continue;
}
gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
!= NULL_TREE);
gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
}
/* Second - identify all reductions and nested cycles. */
while (worklist.length () > 0)
{
gimple *phi = worklist.pop ();
tree def = PHI_RESULT (phi);
stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
gimple *reduc_stmt;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
}
gcc_assert (!virtual_operand_p (def)
&& STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi,
&double_reduc, false);
if (reduc_stmt)
{
if (double_reduc)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected double reduction.\n");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
vect_double_reduction_def;
}
else
{
if (loop != LOOP_VINFO_LOOP (loop_vinfo))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected vectorizable nested cycle.\n");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
vect_nested_cycle;
}
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected reduction.\n");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
vect_reduction_def;
/* Store the reduction cycles for possible vectorization in
loop-aware SLP if it was not detected as reduction
chain. */
if (! GROUP_FIRST_ELEMENT (vinfo_for_stmt (reduc_stmt)))
LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
}
}
}
else
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown def-use cycle pattern.\n");
}
}
/* Function vect_analyze_scalar_cycles.
Examine the cross iteration def-use cycles of scalar variables, by
analyzing the loop-header PHIs of scalar variables. Classify each
cycle as one of the following: invariant, induction, reduction, unknown.
We do that for the loop represented by LOOP_VINFO, and also for its
inner-loop, if it exists.
Examples for scalar cycles:
Example1: reduction:
loop1:
for (i=0; i<N; i++)
sum += a[i];
Example2: induction:
loop2:
for (i=0; i<N; i++)
a[i] = i; */
static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
/* When vectorizing an outer-loop, the inner-loop is executed sequentially.
Reductions in such inner-loop therefore have different properties than
the reductions in the nest that gets vectorized:
1. When vectorized, they are executed in the same order as in the original
scalar loop, so we can't change the order of computation when
vectorizing them.
2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
current checks are too strict. */
if (loop->inner)
vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
/* Transfer group and reduction information from STMT to its pattern stmt. */
static void
vect_fixup_reduc_chain (gimple *stmt)
{
gimple *firstp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
gimple *stmtp;
gcc_assert (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp))
&& GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
GROUP_SIZE (vinfo_for_stmt (firstp)) = GROUP_SIZE (vinfo_for_stmt (stmt));
do
{
stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp;
stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
if (stmt)
GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp))
= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
}
while (stmt);
STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmtp)) = vect_reduction_def;
}
/* Fixup scalar cycles that now have their stmts detected as patterns. */
static void
vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
{
gimple *first;
unsigned i;
FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first)))
{
gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
while (next)
{
if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
break;
next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
}
/* If not all stmts in the chain are patterns, try to handle
the chain without patterns. */
if (! next)
{
vect_fixup_reduc_chain (first);
LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first));
}
}
}
/* Function vect_get_loop_niters.
Determine how many iterations the loop executes and place it
in NUMBER_OF_ITERATIONS. Place the number of latch iterations
in NUMBER_OF_ITERATIONSM1. Place the condition under which the
niter information holds in ASSUMPTIONS.
Return the loop exit condition. */
static gcond *
vect_get_loop_niters (struct loop *loop, tree *assumptions,
tree *number_of_iterations, tree *number_of_iterationsm1)
{
edge exit = single_exit (loop);
struct tree_niter_desc niter_desc;
tree niter_assumptions, niter, may_be_zero;
gcond *cond = get_loop_exit_condition (loop);
*assumptions = boolean_true_node;
*number_of_iterationsm1 = chrec_dont_know;
*number_of_iterations = chrec_dont_know;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== get_loop_niters ===\n");
if (!exit)
return cond;
niter = chrec_dont_know;
may_be_zero = NULL_TREE;
niter_assumptions = boolean_true_node;
if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
|| chrec_contains_undetermined (niter_desc.niter))
return cond;
niter_assumptions = niter_desc.assumptions;
may_be_zero = niter_desc.may_be_zero;
niter = niter_desc.niter;
if (may_be_zero && integer_zerop (may_be_zero))
may_be_zero = NULL_TREE;
if (may_be_zero)
{
if (COMPARISON_CLASS_P (may_be_zero))
{
/* Try to combine may_be_zero with assumptions, this can simplify
computation of niter expression. */
if (niter_assumptions && !integer_nonzerop (niter_assumptions))
niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
niter_assumptions,
fold_build1 (TRUTH_NOT_EXPR,
boolean_type_node,
may_be_zero));
else
niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
build_int_cst (TREE_TYPE (niter), 0),
rewrite_to_non_trapping_overflow (niter));
may_be_zero = NULL_TREE;
}
else if (integer_nonzerop (may_be_zero))
{
*number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
*number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
return cond;
}
else
return cond;
}
*assumptions = niter_assumptions;
*number_of_iterationsm1 = niter;
/* We want the number of loop header executions which is the number
of latch executions plus one.
??? For UINT_MAX latch executions this number overflows to zero
for loops like do { n++; } while (n != 0); */
if (niter && !chrec_contains_undetermined (niter))
niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
build_int_cst (TREE_TYPE (niter), 1));
*number_of_iterations = niter;
return cond;
}
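/* For illustration (hypothetical loop): given
for (i = 0; i < n; i++)
a[i] = 0;
in the canonical do-while form the vectorizer expects (with n > 0
guaranteed by a guard), the latch executes n - 1 times, so
*NUMBER_OF_ITERATIONSM1 is set to n - 1 and *NUMBER_OF_ITERATIONS,
the number of header executions, to n. */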
/* Function bb_in_loop_p
Used as predicate for dfs order traversal of the loop bbs. */
static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
const struct loop *const loop = (const struct loop *)data;
if (flow_bb_inside_loop_p (loop, bb))
return true;
return false;
}
/* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
stmt_vec_info structs for all the stmts in LOOP_IN. */
_loop_vec_info::_loop_vec_info (struct loop *loop_in)
: vec_info (vec_info::loop, init_cost (loop_in)),
loop (loop_in),
bbs (XCNEWVEC (basic_block, loop->num_nodes)),
num_itersm1 (NULL_TREE),
num_iters (NULL_TREE),
num_iters_unchanged (NULL_TREE),
num_iters_assumptions (NULL_TREE),
th (0),
versioning_threshold (0),
vectorization_factor (0),
max_vectorization_factor (0),
mask_skip_niters (NULL_TREE),
mask_compare_type (NULL_TREE),
unaligned_dr (NULL),
peeling_for_alignment (0),
ptr_mask (0),
ivexpr_map (NULL),
slp_unrolling_factor (1),
single_scalar_iteration_cost (0),
vectorizable (false),
can_fully_mask_p (true),
fully_masked_p (false),
peeling_for_gaps (false),
peeling_for_niter (false),
operands_swapped (false),
no_data_dependencies (false),
has_mask_store (false),
scalar_loop (NULL),
orig_loop_info (NULL)
{
/* Create/Update stmt_info for all stmts in the loop. */
basic_block *body = get_loop_body (loop);
for (unsigned int i = 0; i < loop->num_nodes; i++)
{
basic_block bb = body[i];
gimple_stmt_iterator si;
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
{
gimple *phi = gsi_stmt (si);
gimple_set_uid (phi, 0);
set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, this));
}
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
{
gimple *stmt = gsi_stmt (si);
gimple_set_uid (stmt, 0);
set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, this));
}
}
free (body);
/* CHECKME: We want to visit all BBs before their successors (except for
latch blocks, for which this assertion wouldn't hold). In the simple
case of the loop forms we allow, a dfs order of the BBs would be the same
as reversed postorder traversal, so we are safe. */
unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
bbs, loop->num_nodes, loop);
gcc_assert (nbbs == loop->num_nodes);
}
/* Free all levels of MASKS. */
void
release_vec_loop_masks (vec_loop_masks *masks)
{
rgroup_masks *rgm;
unsigned int i;
FOR_EACH_VEC_ELT (*masks, i, rgm)
rgm->masks.release ();
masks->release ();
}
/* Free all memory used by the _loop_vec_info, as well as all the
stmt_vec_info structs of all the stmts in the loop. */
_loop_vec_info::~_loop_vec_info ()
{
int nbbs;
gimple_stmt_iterator si;
int j;
nbbs = loop->num_nodes;
for (j = 0; j < nbbs; j++)
{
basic_block bb = bbs[j];
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
free_stmt_vec_info (gsi_stmt (si));
for (si = gsi_start_bb (bb); !gsi_end_p (si); )
{
gimple *stmt = gsi_stmt (si);
/* We may have broken canonical form by moving a constant
into RHS1 of a commutative op. Fix such occurrences. */
if (operands_swapped && is_gimple_assign (stmt))
{
enum tree_code code = gimple_assign_rhs_code (stmt);
if ((code == PLUS_EXPR
|| code == POINTER_PLUS_EXPR
|| code == MULT_EXPR)
&& CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
swap_ssa_operands (stmt,
gimple_assign_rhs1_ptr (stmt),
gimple_assign_rhs2_ptr (stmt));
else if (code == COND_EXPR
&& CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
{
tree cond_expr = gimple_assign_rhs1 (stmt);
enum tree_code cond_code = TREE_CODE (cond_expr);
if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
{
bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
0));
cond_code = invert_tree_comparison (cond_code,
honor_nans);
if (cond_code != ERROR_MARK)
{
TREE_SET_CODE (cond_expr, cond_code);
swap_ssa_operands (stmt,
gimple_assign_rhs2_ptr (stmt),
gimple_assign_rhs3_ptr (stmt));
}
}
}
}
/* Free stmt_vec_info. */
free_stmt_vec_info (stmt);
gsi_next (&si);
}
}
free (bbs);
release_vec_loop_masks (&masks);
delete ivexpr_map;
loop->aux = NULL;
}
/* Return an invariant or register for EXPR and emit necessary
computations in the LOOP_VINFO loop preheader. */
tree
cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
{
if (is_gimple_reg (expr)
|| is_gimple_min_invariant (expr))
return expr;
if (! loop_vinfo->ivexpr_map)
loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
if (! cached)
{
gimple_seq stmts = NULL;
cached = force_gimple_operand (unshare_expr (expr),
&stmts, true, NULL_TREE);
if (stmts)
{
edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
gsi_insert_seq_on_edge_immediate (e, stmts);
}
}
return cached;
}
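/* For example (hypothetical SSA names): asked for the expression n_5 + 7,
the first call gimplifies it to a fresh SSA name, say _42 = n_5 + 7,
emits that computation on the preheader edge and caches it in
ivexpr_map; later calls with the same expression return _42 instead
of emitting the computation again. */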
/* Return true if we can use CMP_TYPE as the comparison type to produce
all masks required to mask LOOP_VINFO. */
static bool
can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
{
rgroup_masks *rgm;
unsigned int i;
FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
if (rgm->mask_type != NULL_TREE
&& !direct_internal_fn_supported_p (IFN_WHILE_ULT,
cmp_type, rgm->mask_type,
OPTIMIZE_FOR_SPEED))
return false;
return true;
}
/* Calculate the maximum number of scalars per iteration for every
rgroup in LOOP_VINFO. */
static unsigned int
vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
{
unsigned int res = 1;
unsigned int i;
rgroup_masks *rgm;
FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
res = MAX (res, rgm->max_nscalars_per_iter);
return res;
}
/* Each statement in LOOP_VINFO can be masked where necessary. Check
whether we can actually generate the masks required. Return true if so,
storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE. */
static bool
vect_verify_full_masking (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned int min_ni_width;
/* Use a normal loop if there are no statements that need masking.
This only happens in rare degenerate cases: it means that the loop
has no loads, no stores, and no live-out values. */
if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
return false;
/* Get the maximum number of iterations that is representable
in the counter type. */
tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;
/* Get a more refined estimate for the number of iterations. */
widest_int max_back_edges;
if (max_loop_iterations (loop, &max_back_edges))
max_ni = wi::smin (max_ni, max_back_edges + 1);
/* Account for rgroup masks, in which each bit is replicated N times. */
max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo);
/* Work out how many bits we need to represent the limit. */
min_ni_width = wi::min_precision (max_ni, UNSIGNED);
/* Find a scalar mode for which WHILE_ULT is supported. */
opt_scalar_int_mode cmp_mode_iter;
tree cmp_type = NULL_TREE;
FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
{
unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
if (cmp_bits >= min_ni_width
&& targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
{
tree this_type = build_nonstandard_integer_type (cmp_bits, true);
if (this_type
&& can_produce_all_loop_masks_p (loop_vinfo, this_type))
{
/* Although we could stop as soon as we find a valid mode,
it's often better to continue until we hit Pmode, since the
operands to the WHILE are more likely to be reusable in
address calculations. */
cmp_type = this_type;
if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
break;
}
}
}
if (!cmp_type)
return false;
LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
return true;
}
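/* Worked example (hypothetical numbers): if the type of NITERSM1 is a
32-bit unsigned type but max_loop_iterations proves at most 999 back
edges, max_ni is refined to 1000; with at most 2 scalars per iteration
across the rgroups it becomes 2000, so min_ni_width is 11 bits. Any
integer mode of at least 11 bits for which WHILE_ULT is supported for
every required mask type can then serve as the comparison type, while
the search prefers to continue up to Pmode. */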
/* Calculate the cost of one scalar iteration of the loop. */
static void
vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes, factor;
int innerloop_iters, i;
/* Gather costs for statements in the scalar loop. */
/* FORNOW. */
innerloop_iters = 1;
if (loop->inner)
innerloop_iters = 50; /* FIXME */
for (i = 0; i < nbbs; i++)
{
gimple_stmt_iterator si;
basic_block bb = bbs[i];
if (bb->loop_father == loop->inner)
factor = innerloop_iters;
else
factor = 1;
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
{
gimple *stmt = gsi_stmt (si);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
continue;
/* Skip stmts that are not vectorized inside the loop. */
if (stmt_info
&& !STMT_VINFO_RELEVANT_P (stmt_info)
&& (!STMT_VINFO_LIVE_P (stmt_info)
|| !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
&& !STMT_VINFO_IN_PATTERN_P (stmt_info))
continue;
vect_cost_for_stmt kind;
if (STMT_VINFO_DATA_REF (stmt_info))
{
if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
kind = scalar_load;
else
kind = scalar_store;
}
else
kind = scalar_stmt;
record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
factor, kind, stmt_info, 0, vect_prologue);
}
}
/* Now accumulate cost. */
void *target_cost_data = init_cost (loop);
stmt_info_for_cost *si;
int j;
FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
j, si)
{
struct _stmt_vec_info *stmt_info
= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
(void) add_stmt_cost (target_cost_data, si->count,
si->kind, stmt_info, si->misalign,
vect_body);
}
unsigned dummy, body_cost = 0;
finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
destroy_cost_data (target_cost_data);
LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
}
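/* Rough sketch (hypothetical loop body): for a single-bb loop whose
relevant statements are roughly
t_1 = a[i]; t_2 = t_1 + 1; b[i] = t_2;
one scalar_load, one scalar_stmt and one scalar_store are recorded with
factor 1, and the target cost hooks then turn those counts into
LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST. Statements sitting in an
inner loop would instead be recorded with the FIXME factor of 50. */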
/* Function vect_analyze_loop_form_1.
Verify that certain CFG restrictions hold, including:
- the loop has a pre-header
- the loop has a single entry and exit
- the loop exit condition is simple enough
- the number of iterations can be analyzed, i.e., a countable loop. The
niter could be analyzed under some assumptions. */
bool
vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
tree *assumptions, tree *number_of_iterationsm1,
tree *number_of_iterations, gcond **inner_loop_cond)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_loop_form ===\n");
/* Different restrictions apply when we are considering an inner-most loop,
vs. an outer (nested) loop.
(FORNOW. May want to relax some of these restrictions in the future). */
if (!loop->inner)
{
/* Inner-most loop. We currently require that the number of BBs is
exactly 2 (the header and latch). Vectorizable inner-most loops
look like this:
(pre-header)
|
header <--------+
| | |
| +--> latch --+
|
(exit-bb) */
if (loop->num_nodes != 2)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: control flow in loop.\n");
return false;
}
if (empty_block_p (loop->header))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: empty loop.\n");
return false;
}
}
else
{
struct loop *innerloop = loop->inner;
edge entryedge;
/* Nested loop. We currently require that the loop is doubly-nested,
contains a single inner loop, and the number of BBs is exactly 5.
Vectorizable outer-loops look like this:
(pre-header)
|
header <---+
| |
inner-loop |
| |
tail ------+
|
(exit-bb)
The inner-loop has the properties expected of inner-most loops
as described above. */
if ((loop->inner)->inner || (loop->inner)->next)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: multiple nested loops.\n");
return false;
}
if (loop->num_nodes != 5)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: control flow in loop.\n");
return false;
}
entryedge = loop_preheader_edge (innerloop);
if (entryedge->src != loop->header
|| !single_exit (innerloop)
|| single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported outerloop form.\n");
return false;
}
/* Analyze the inner-loop. */
tree inner_niterm1, inner_niter, inner_assumptions;
if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
&inner_assumptions, &inner_niterm1,
&inner_niter, NULL)
/* Don't support analyzing niter under assumptions for inner
loop. */
|| !integer_onep (inner_assumptions))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: Bad inner loop.\n");
return false;
}
if (!expr_invariant_in_loop_p (loop, inner_niter))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: inner-loop count not"
" invariant.\n");
return false;
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Considering outer-loop vectorization.\n");
}
if (!single_exit (loop)
|| EDGE_COUNT (loop->header->preds) != 2)
{
if (dump_enabled_p ())
{
if (!single_exit (loop))
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: multiple exits.\n");
else if (EDGE_COUNT (loop->header->preds) != 2)
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: too many incoming edges.\n");
}
return false;
}
/* We assume that the loop exit condition is at the end of the loop. i.e,
that the loop is represented as a do-while (with a proper if-guard
before the loop if needed), where the loop header contains all the
executable statements, and the latch is empty. */
if (!empty_block_p (loop->latch)
|| !gimple_seq_empty_p (phi_nodes (loop->latch)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: latch block not empty.\n");
return false;
}
/* Make sure the exit is not abnormal. */
edge e = single_exit (loop);
if (e->flags & EDGE_ABNORMAL)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: abnormal loop exit edge.\n");
return false;
}
*loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
number_of_iterationsm1);
if (!*loop_cond)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: complicated exit condition.\n");
return false;
}
if (integer_zerop (*assumptions)
|| !*number_of_iterations
|| chrec_contains_undetermined (*number_of_iterations))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: number of iterations cannot be "
"computed.\n");
return false;
}
if (integer_zerop (*number_of_iterations))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: number of iterations = 0.\n");
return false;
}
return true;
}
/* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
tree assumptions, number_of_iterations, number_of_iterationsm1;
gcond *loop_cond, *inner_loop_cond = NULL;
if (! vect_analyze_loop_form_1 (loop, &loop_cond,
&assumptions, &number_of_iterationsm1,
&number_of_iterations, &inner_loop_cond))
return NULL;
loop_vec_info loop_vinfo = new _loop_vec_info (loop);
LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
if (!integer_onep (assumptions))
{
/* We consider vectorizing this loop by versioning it under
some assumptions. In order to do this, we need to clear
existing information computed by scev and niter analyzer. */
scev_reset_htab ();
free_numbers_of_iterations_estimates (loop);
/* Also set a flag for this loop so that the following scev and niter
analyses are done under the assumptions. */
loop_constraint_set (loop, LOOP_C_FINITE);
/* Also record the assumptions for versioning. */
LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
}
if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Symbolic number of iterations is ");
dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
dump_printf (MSG_NOTE, "\n");
}
}
STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
if (inner_loop_cond)
STMT_VINFO_TYPE (vinfo_for_stmt (inner_loop_cond))
= loop_exit_ctrl_vec_info_type;
gcc_assert (!loop->aux);
loop->aux = loop_vinfo;
return loop_vinfo;
}
/* Scan the loop stmts and, depending on whether there are any non-SLP
statements, update the vectorization factor. */
static void
vect_update_vf_for_slp (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
poly_uint64 vectorization_factor;
int i;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_update_vf_for_slp ===\n");
vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
gcc_assert (known_ne (vectorization_factor, 0U));
/* If all the stmts in the loop can be SLPed, we perform only SLP, and the
vectorization factor of the loop is the unrolling factor required by
the SLP instances. If that unrolling factor is 1, we say that we
perform pure SLP on the loop; cross-iteration parallelism is not
exploited. */
bool only_slp_in_loop = true;
for (i = 0; i < nbbs; i++)
{
basic_block bb = bbs[i];
for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
gsi_next (&si))
{
gimple *stmt = gsi_stmt (si);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
if (STMT_VINFO_IN_PATTERN_P (stmt_info)
&& STMT_VINFO_RELATED_STMT (stmt_info))
{
stmt = STMT_VINFO_RELATED_STMT (stmt_info);
stmt_info = vinfo_for_stmt (stmt);
}
if ((STMT_VINFO_RELEVANT_P (stmt_info)
|| VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
&& !PURE_SLP_STMT (stmt_info))
/* STMT needs both SLP and loop-based vectorization. */
only_slp_in_loop = false;
}
}
if (only_slp_in_loop)
{
dump_printf_loc (MSG_NOTE, vect_location,
"Loop contains only SLP stmts\n");
vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
}
else
{
dump_printf_loc (MSG_NOTE, vect_location,
"Loop contains SLP and non-SLP stmts\n");
/* Both the vectorization factor and unroll factor have the form
current_vector_size * X for some rational X, so they must have
a common multiple. */
vectorization_factor
= force_common_multiple (vectorization_factor,
LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
}
LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Updating vectorization factor to ");
dump_dec (MSG_NOTE, vectorization_factor);
dump_printf (MSG_NOTE, ".\n");
}
}
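/* For instance (hypothetical numbers): with a loop vectorization factor
of 2 and SLP instances requiring an unrolling factor of 4 in a loop
that mixes SLP and non-SLP statements, the updated vectorization
factor is their common multiple, 4. If every relevant statement is
pure SLP, the factor instead becomes the SLP unrolling factor. */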
/* Return true if STMT_INFO describes a double reduction phi and if
the other phi in the reduction is also relevant for vectorization.
This rejects cases such as:
outer1:
x_1 = PHI <x_3(outer2), ...>;
...
inner:
x_2 = ...;
...
outer2:
x_3 = PHI <x_2(inner)>;
if nothing in x_2 or elsewhere makes x_1 relevant. */
static bool
vect_active_double_reduction_p (stmt_vec_info stmt_info)
{
if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
return false;
gimple *other_phi = STMT_VINFO_REDUC_DEF (stmt_info);
return STMT_VINFO_RELEVANT_P (vinfo_for_stmt (other_phi));
}
/* Function vect_analyze_loop_operations.
Scan the loop stmts and make sure they are all vectorizable. */
static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
int i;
stmt_vec_info stmt_info;
bool need_to_vectorize = false;
bool ok;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_loop_operations ===\n");
for (i = 0; i < nbbs; i++)
{
basic_block bb = bbs[i];
for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
gsi_next (&si))
{
gphi *phi = si.phi ();
ok = true;
stmt_info = vinfo_for_stmt (phi);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
}
if (virtual_operand_p (gimple_phi_result (phi)))
continue;
/* Inner-loop loop-closed exit phi in outer-loop vectorization
(i.e., a phi in the tail of the outer-loop). */
if (! is_loop_header_bb_p (bb))
{
/* FORNOW: we currently don't support the case that these phis
are not used in the outer loop (unless it is a double reduction,
i.e., this phi is vect_reduction_def), because this case
requires actually doing something here. */
if (STMT_VINFO_LIVE_P (stmt_info)
&& !vect_active_double_reduction_p (stmt_info))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unsupported loop-closed phi in "
"outer-loop.\n");
return false;
}
/* If PHI is used in the outer loop, we check that its operand
is defined in the inner loop. */
if (STMT_VINFO_RELEVANT_P (stmt_info))
{
tree phi_op;
gimple *op_def_stmt;
if (gimple_phi_num_args (phi) != 1)
return false;
phi_op = PHI_ARG_DEF (phi, 0);
if (TREE_CODE (phi_op) != SSA_NAME)
return false;
op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
if (gimple_nop_p (op_def_stmt)
|| !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
|| !vinfo_for_stmt (op_def_stmt))
return false;
if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
!= vect_used_in_outer
&& STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
!= vect_used_in_outer_by_reduction)
return false;
}
continue;
}
gcc_assert (stmt_info);
if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
|| STMT_VINFO_LIVE_P (stmt_info))
&& STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
{
/* A scalar-dependence cycle that we don't support. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: scalar dependence cycle.\n");
return false;
}
if (STMT_VINFO_RELEVANT_P (stmt_info))
{
need_to_vectorize = true;
if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
&& ! PURE_SLP_STMT (stmt_info))
ok = vectorizable_induction (phi, NULL, NULL, NULL);
else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
|| STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
&& ! PURE_SLP_STMT (stmt_info))
ok = vectorizable_reduction (phi, NULL, NULL, NULL, NULL);
}
/* SLP PHIs are tested by vect_slp_analyze_node_operations. */
if (ok
&& STMT_VINFO_LIVE_P (stmt_info)
&& !PURE_SLP_STMT (stmt_info))
ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL);
if (!ok)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: relevant phi not "
"supported: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
}
return false;
}
}
for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
gsi_next (&si))
{
gimple *stmt = gsi_stmt (si);
if (!gimple_clobber_p (stmt)
&& !vect_analyze_stmt (stmt, &need_to_vectorize, NULL, NULL))
return false;
}
} /* bbs */
/* All operations in the loop are either irrelevant (deal with loop
control, or dead), or only used outside the loop and can be moved
out of the loop (e.g. invariants, inductions). The loop can be
optimized away by scalar optimizations. We're better off not
touching this loop. */
if (!need_to_vectorize)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"All the computation can be taken out of the loop.\n");
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: redundant loop. no profit to "
"vectorize.\n");
return false;
}
return true;
}
/* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
is worthwhile to vectorize. Return 1 if definitely yes, 0 if
definitely no, or -1 if it's worth retrying. */
static int
vect_analyze_loop_costing (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
/* Only fully-masked loops can have iteration counts less than the
vectorization factor. */
if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
{
HOST_WIDE_INT max_niter;
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
else
max_niter = max_stmt_executions_int (loop);
if (max_niter != -1
&& (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: iteration count smaller than "
"vectorization factor.\n");
return 0;
}
}
int min_profitable_iters, min_profitable_estimate;
vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
&min_profitable_estimate);
if (min_profitable_iters < 0)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vectorization not profitable.\n");
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vector version will never be "
"profitable.\n");
return -1;
}
int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
* assumed_vf);
/* Use the cost model only if it is more conservative than user specified
threshold. */
unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
min_profitable_iters);
LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vectorization not profitable.\n");
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"not vectorized: iteration count smaller than user "
"specified loop bound parameter or minimum profitable "
"iterations (whichever is more conservative).\n");
return 0;
}
HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop);
if (estimated_niter == -1)
estimated_niter = likely_max_stmt_executions_int (loop);
if (estimated_niter != -1
&& ((unsigned HOST_WIDE_INT) estimated_niter
< MAX (th, (unsigned) min_profitable_estimate)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: estimated iteration count too "
"small.\n");
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"not vectorized: estimated iteration count smaller "
"than specified loop bound parameter or minimum "
"profitable iterations (whichever is more "
"conservative).\n");
return -1;
}
return 1;
}
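/* Illustrative numbers (hypothetical): with an assumed vectorization
factor of 4, --param min-vect-loop-bound=0 and a computed
min_profitable_iters of 10, the threshold TH is 10. A loop whose
iteration count is known to be 8 is then rejected (return 0), while a
loop with an unknown count but a likely estimate of 8 returns -1 so
that the caller may retry the analysis. */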
/* Function vect_analyze_loop_2.
Apply a set of analyses on LOOP, and create a loop_vec_info struct
for it. The different analyses will record information in the
loop_vec_info struct. */
static bool
vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
{
bool ok;
int res;
unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
poly_uint64 min_vf = 2;
unsigned int n_stmts = 0;
/* The first group of checks is independent of the vector size. */
fatal = true;
/* Find all data references in the loop (which correspond to vdefs/vuses)
and analyze their evolution in the loop. */
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: loop nest containing two "
"or more consecutive inner loops cannot be "
"vectorized\n");
return false;
}
for (unsigned i = 0; i < loop->num_nodes; i++)
for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
!gsi_end_p (gsi); gsi_next (&gsi))
{
gimple *stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt))
continue;
++n_stmts;
if (!find_data_references_in_stmt (loop, stmt,
&LOOP_VINFO_DATAREFS (loop_vinfo)))
{
if (is_gimple_call (stmt) && loop->safelen)
{
tree fndecl = gimple_call_fndecl (stmt), op;
if (fndecl != NULL_TREE)
{
cgraph_node *node = cgraph_node::get (fndecl);
if (node != NULL && node->simd_clones != NULL)
{
unsigned int j, n = gimple_call_num_args (stmt);
for (j = 0; j < n; j++)
{
op = gimple_call_arg (stmt, j);
if (DECL_P (op)
|| (REFERENCE_CLASS_P (op)
&& get_base_address (op)))
break;
}
op = gimple_call_lhs (stmt);
/* Ignore #pragma omp declare simd functions
if they don't have data references in the
call stmt itself. */
if (j == n
&& !(op
&& (DECL_P (op)
|| (REFERENCE_CLASS_P (op)
&& get_base_address (op)))))
continue;
}
}
}
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: loop contains function "
"calls or data references that cannot "
"be analyzed\n");
return false;
}
}
/* Analyze the data references and also adjust the minimal
vectorization factor according to the loads and stores. */
ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data references.\n");
return false;
}
/* Classify all cross-iteration scalar data-flow cycles.
Cross-iteration cycles caused by virtual phis are analyzed separately. */
vect_analyze_scalar_cycles (loop_vinfo);
vect_pattern_recog (loop_vinfo);
vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
/* Analyze the access patterns of the data-refs in the loop (consecutive,
complex, etc.). FORNOW: Only handle consecutive access pattern. */
ok = vect_analyze_data_ref_accesses (loop_vinfo);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data access.\n");
return false;
}
/* Data-flow analysis to detect stmts that do not need to be vectorized. */
ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unexpected pattern.\n");
return false;
}
  /* The rest of the analysis below depends on the vector size in some
     way, so from this point on a failure is no longer necessarily fatal.  */
fatal = false;
/* Analyze data dependences between the data-refs in the loop
and adjust the maximum vectorization factor according to
the dependences.
FORNOW: fail at the first data dependence that we encounter. */
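  /* As an illustrative (hypothetical) example, a loop containing
       a[i + 3] = a[i] + 1;
     has a dependence distance of 3, which caps the maximum vectorization
     factor at 3; if the minimal VF required by the data references is
     larger than that, vectorization has to fail.  */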
ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
if (!ok
|| (max_vf != MAX_VECTORIZATION_FACTOR
&& maybe_lt (max_vf, min_vf)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data dependence.\n");
return false;
}
LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
ok = vect_determine_vectorization_factor (loop_vinfo);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't determine vectorization factor.\n");
return false;
}
if (max_vf != MAX_VECTORIZATION_FACTOR
&& maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data dependence.\n");
return false;
}
/* Compute the scalar iteration cost. */
vect_compute_single_scalar_iteration_cost (loop_vinfo);
poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
unsigned th;
/* Check the SLP opportunities in the loop, analyze and build SLP trees. */
ok = vect_analyze_slp (loop_vinfo, n_stmts);
if (!ok)
return false;
/* If there are any SLP instances mark them as pure_slp. */
bool slp = vect_make_slp_decision (loop_vinfo);
if (slp)
{
/* Find stmts that need to be both vectorized and SLPed. */
vect_detect_hybrid_slp (loop_vinfo);
/* Update the vectorization factor based on the SLP decision. */
vect_update_vf_for_slp (loop_vinfo);
}
bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);
/* We don't expect to have to roll back to anything other than an empty
set of rgroups. */
gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());
/* This is the point where we can re-start analysis with SLP forced off. */
start_over:
/* Now the vectorization factor is final. */
poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
gcc_assert (known_ne (vectorization_factor, 0U));
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"vectorization_factor = ");
dump_dec (MSG_NOTE, vectorization_factor);
dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n",
LOOP_VINFO_INT_NITERS (loop_vinfo));
}
HOST_WIDE_INT max_niter
= likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
/* Analyze the alignment of the data-refs in the loop.
Fail if a data reference is found that cannot be vectorized. */
ok = vect_analyze_data_refs_alignment (loop_vinfo);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data alignment.\n");
return false;
}
/* Prune the list of ddrs to be tested at run-time by versioning for alias.
It is important to call pruning after vect_analyze_data_ref_accesses,
since we use grouping information gathered by interleaving analysis. */
ok = vect_prune_runtime_alias_test_list (loop_vinfo);
if (!ok)
return false;
  /* Do not invoke vect_enhance_data_refs_alignment for epilogue
     vectorization.  */
if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
{
/* This pass will decide on using loop versioning and/or loop peeling in
order to enhance the alignment of data references in the loop. */
ok = vect_enhance_data_refs_alignment (loop_vinfo);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data alignment.\n");
return false;
}
}
if (slp)
{
      /* Analyze operations in the SLP instances.  Note this may
	 remove unsupported SLP instances, which makes the above
	 SLP kind detection invalid.  */
unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
vect_slp_analyze_operations (loop_vinfo);
if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
goto again;
}
/* Scan all the remaining operations in the loop that are not subject
to SLP and make sure they are vectorizable. */
ok = vect_analyze_loop_operations (loop_vinfo);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad operation or unsupported loop bound.\n");
return false;
}
/* Decide whether to use a fully-masked loop for this vectorization
factor. */
LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
= (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
&& vect_verify_full_masking (loop_vinfo));
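  /* In a fully-masked loop every iteration, including a final partial
     one, executes under a loop mask (for example on targets with
     predicated vector operations), so no scalar epilogue is needed for
     leftover iterations.  */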
if (dump_enabled_p ())
{
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
dump_printf_loc (MSG_NOTE, vect_location,
"using a fully-masked loop.\n");
else
dump_printf_loc (MSG_NOTE, vect_location,
"not using a fully-masked loop.\n");
}
  /* If an epilogue loop is required because of data accesses with gaps,
     one additional iteration needs to be peeled.  Check whether there are
     enough iterations for vectorization.  */
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
&& LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
{
poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
if (known_lt (wi::to_widest (scalar_niters), vf))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"loop has no enough iterations to support"
" peeling for gaps.\n");
return false;
}
}
  /* Check that the costings of the loop make vectorizing worthwhile.  */
res = vect_analyze_loop_costing (loop_vinfo);
if (res < 0)
goto again;
if (!res)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Loop costings not worthwhile.\n");
return false;
}
/* Decide whether we need to create an epilogue loop to handle
remaining scalar iterations. */
th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
unsigned HOST_WIDE_INT const_vf;
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
/* The main loop handles all iterations. */
LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
{
if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo)
- LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo),
LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
}
else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
|| !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
|| ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
< (unsigned) exact_log2 (const_vf))
/* In case of versioning, check if the maximum number of
iterations is greater than th. If they are identical,
the epilogue is unnecessary. */
&& (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
|| ((unsigned HOST_WIDE_INT) max_niter
> (th / const_vf) * const_vf))))
LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
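  /* Illustrative example with hypothetical numbers: a loop with 10 known
     iterations and a constant VF of 4 leaves 10 % 4 == 2 scalar
     iterations, so an epilogue loop is required to handle them.  */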
/* If an epilogue loop is required make sure we can create one. */
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
|| LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
if (!vect_can_advance_ivs_p (loop_vinfo)
|| !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
single_exit (LOOP_VINFO_LOOP
(loop_vinfo))))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't create required "
"epilog loop\n");
goto again;
}
}
  /* During peeling, we need to check whether the number of loop iterations
     is enough for both the peeled prolog loop and the vector loop.  This
     check can be merged with the threshold check done for loop versioning,
     so increase the threshold for this case if necessary.  */
if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
{
poly_uint64 niters_th = 0;
if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
{
/* Niters for peeled prolog loop. */
if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
{
struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
tree vectype
= STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
}
else
niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
}
/* Niters for at least one iteration of vectorized loop. */
if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
      /* One additional iteration because of peeling for gaps.  */
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
niters_th += 1;
LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
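      /* Illustrative example with hypothetical numbers: with 3 prologue
	 iterations peeled for alignment, a VF of 8 and peeling for gaps,
	 the runtime versioning check requires at least 3 + 8 + 1 = 12
	 iterations before the vectorized loop is entered.  */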
}
gcc_assert (known_eq (vectorization_factor,
LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
/* Ok to vectorize! */
return true;
again:
  /* Try again with SLP forced off, but if we didn't do any SLP there is
     no point in re-trying.  */
if (!slp)
return false;
/* If there are reduction chains re-trying will fail anyway. */
if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
return false;
/* Likewise if the grouped loads or stores in the SLP cannot be handled
via interleaving or lane instructions. */
slp_instance instance;
slp_tree node;
unsigned i, j;
FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
{
stmt_vec_info vinfo;
vinfo = vinfo_for_stmt
(SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]);
if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
continue;
vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
unsigned int size = STMT_VINFO_GROUP_SIZE (vinfo);
tree vectype = STMT_VINFO_VECTYPE (vinfo);
if (! vect_store_lanes_supported (vectype, size, false)
&& ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
&& ! vect_grouped_store_supported (vectype, size))
return false;
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
{
vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
bool single_element_p = !STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo);
size = STMT_VINFO_GROUP_SIZE (vinfo);
vectype = STMT_VINFO_VECTYPE (vinfo);
if (! vect_load_lanes_supported (vectype, size, false)
&& ! vect_grouped_load_supported (vectype, single_element_p,
size))
return false;
}
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"re-trying with SLP disabled\n");
/* Roll back state appropriately. No SLP this time. */
slp = false;
  /* Restore the vectorization factor as it was without SLP.  */
LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
/* Free the SLP instances. */
FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
vect_free_slp_instance (instance);
LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
/* Reset SLP type to loop_vect on all stmts. */
for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
{
basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
for (gimple_stmt_iterator si = gsi_start_phis (bb);
!gsi_end_p (si); gsi_next (&si))
{
stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si));
STMT_SLP_TYPE (stmt_info) = loop_vect;
}
for (gimple_stmt_iterator si = gsi_start_bb (bb);
!gsi_end_p (si); gsi_next (&si))
{
stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si));
STMT_SLP_TYPE (stmt_info) = loop_vect;
if (STMT_VINFO_IN_PATTERN_P (stmt_info))
{
stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
STMT_SLP_TYPE (stmt_info) = loop_vect;
for (gimple_stmt_iterator pi
= gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
!gsi_end_p (pi); gsi_next (&pi))
{
gimple *pstmt = gsi_stmt (pi);
STMT_SLP_TYPE (vinfo_for_stmt (pstmt)) = loop_vect;
}
}
}
}
/* Free optimized alias test DDRS. */
LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
/* Reset target cost data. */
destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
= init_cost (LOOP_VINFO_LOOP (loop_vinfo));
/* Reset accumulated rgroup information. */
release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
/* Reset assorted flags. */
LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;
goto start_over;
}
/* Function vect_analyze_loop.
Apply a set of analyses on LOOP, and create a loop_vec_info struct
for it. The different analyses will record information in the
   loop_vec_info struct.  If ORIG_LOOP_VINFO is not NULL, LOOP is the
   epilogue of the already-vectorized loop described by ORIG_LOOP_VINFO
   and must itself be vectorized.  */
loop_vec_info
vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
{
loop_vec_info loop_vinfo;
auto_vector_sizes vector_sizes;
/* Autodetect first vector size we try. */
current_vector_size = 0;
targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
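  /* The target fills in the vector sizes it supports; the loop below
     tries the autodetected size first and then each remaining size in
     turn, until analysis succeeds, a fatal failure is seen, or the list
     is exhausted.  */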
unsigned int next_size = 0;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"===== analyze_loop_nest =====\n");
if (loop_outer (loop)
&& loop_vec_info_for_loop (loop_outer (loop))
&& LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"outer-loop already vectorized.\n");
return NULL;
}
poly_uint64 autodetected_vector_size = 0;
while (1)
{
/* Check the CFG characteristics of the loop (nesting, entry/exit). */
loop_vinfo = vect_analyze_loop_form (loop);
if (!loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad loop form.\n");
return NULL;
}
bool fatal = false;
if (orig_loop_vinfo)
LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;
if (vect_analyze_loop_2 (loop_vinfo, fatal))
{
LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
return loop_vinfo;
}
delete loop_vinfo;
if (next_size == 0)
autodetected_vector_size = current_vector_size;
if (next_size < vector_sizes.length ()
&& known_eq (vector_sizes[next_size], autodetected_vector_size))
next_size += 1;
if (fatal
|| next_size == vector_sizes.length ()
|| known_eq (current_vector_size, 0U))
return NULL;
/* Try the next biggest vector size. */
current_vector_size = vector_sizes[next_size++];
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"***** Re-trying analysis with "
"vector size ");
dump_dec (MSG_NOTE, current_vector_size);
dump_printf (MSG_NOTE, "\n");
}
}
}
/* Return true if there is an in-order reduction function for CODE, storing
it in *REDUC_FN if so. */
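/* For example, an in-order floating-point summation is carried out by
   IFN_FOLD_LEFT_PLUS, which accumulates the vector elements strictly from
   left to right; no such internal function is currently provided for the
   other reduction codes, so they fall through to the default case.  */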
static bool
fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
{
switch (code)
{
case PLUS_EXPR:
*reduc_fn = IFN_FOLD_LEFT_PLUS;
return true;
default:
return false;
}
}
/* Function reduction_fn_for_scalar_code
Input:
   CODE - tree_code of a reduction operation.
Output:
REDUC_FN - the corresponding internal function to be used to reduce the
vector of partial results into a single scalar result, or IFN_LAST
if the operation is a supported reduction operation, but does not have
such an internal function.
Return FALSE if CODE currently cannot be vectorized as reduction. */
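/* For example, an integer summation vectorized with a VF of 4 produces a
   vector of four partial sums; IFN_REDUC_PLUS then adds the lanes into the
   final scalar result in the loop epilogue.  MULT_EXPR and MINUS_EXPR are
   supported as reductions but are assumed here to have no such single
   reduction instruction, so IFN_LAST is returned for them.  */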
static bool
reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
{
switch (code)
{
case MAX_EXPR:
*reduc_fn = IFN_REDUC_MAX;
return true;
case MIN_EXPR:
*reduc_fn = IFN_REDUC_MIN;
return true;
case PLUS_EXPR:
*reduc_fn = IFN_REDUC_PLUS;
return true;
case BIT_AND_EXPR:
*reduc_fn = IFN_REDUC_AND;
return true;
case BIT_IOR_EXPR:
*reduc_fn = IFN_REDUC_IOR;
return true;
case BIT_XOR_EXPR:
*reduc_fn = IFN_REDUC_XOR;
return true;
case MULT_EXPR:
case MINUS_EXPR:
*reduc_fn = IFN_LAST;
return true;
default:
return false;
}
}
/* If there is a neutral value X such that SLP reduction NODE would not
be affected by the introduction of additional X elements, return that X,
otherwise return null. CODE is the code of the reduction. REDUC_CHAIN
is true if the SLP statements perform a single reduction, false if each
statement performs an independent reduction. */
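/* For example, appending zero elements to a PLUS_EXPR reduction, elements
   equal to one to a MULT_EXPR reduction, or all-ones elements to a
   BIT_AND_EXPR reduction leaves the reduced value unchanged, so those are
   the neutral values returned below.  */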
static tree
neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
bool reduc_chain)
{
vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
gimple *stmt = stmts[0];
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
tree scalar_type = TREE_TYPE (vector_type);
struct loop *loop = gimple_bb (stmt)->loop_father;
gcc_assert (loop);
switch (code)
{
case WIDEN_SUM_EXPR:
case DOT_PROD_EXPR:
case SAD_EXPR:
case PLUS_EXPR:
case MINUS_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
return build_zero_cst (scalar_type);
case MULT_EXPR:
return build_one_cst (scalar_type);
case BIT_AND_EXPR:
return build_all_ones_cst (scalar_type);
case MAX_EXPR:
case MIN_EXPR:
/* For MIN/MAX the initial values are neutral. A reduction chain
has only a single initial value, so that value is neutral for
all statements. */
if (reduc_chain)
return PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop));
return NULL_TREE;
default:
return NULL_TREE;
}
}
/* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
STMT is printed with a message MSG. */
static void
report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
{
dump_printf_loc (msg_type, vect_location, "%s", msg);
dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
}
/* Detect SLP reduction of the form:
#a1 = phi <a5, a0>
a2 = operation (a1)
a3 = operation (a2)
a4 = operation (a3)
a5 = operation (a4)
#a = phi <a5>
PHI is the reduction phi node (#a1 = phi <a5, a0> above)
FIRST_STMT is the first reduction stmt in the chain
(a2 = operation (a1)).
Return TRUE if a reduction chain was detected. */
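/* As an illustrative (hypothetical) source example, a partially unrolled
   summation such as
     for (i = 0; i < n; i += 2)
       s = s + a[i] + a[i + 1];
   produces a chain of two PLUS_EXPR statements between the reduction phi
   and its latch argument, which this function records as a reduction
   chain of size 2.  */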
static bool
vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
gimple *first_stmt)
{
struct loop *loop = (gimple_bb (phi))->loop_father;
struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
enum tree_code code;
gimple *current_stmt = NULL, *loop_use_stmt = NULL, *first, *next_stmt;
stmt_vec_info use_stmt_info, current_stmt_info;
tree lhs;
imm_use_iterator imm_iter;
use_operand_p use_p;
int nloop_uses, size = 0, n_out_of_loop_uses;
bool found = false;
if (loop != vect_loop)
return false;
lhs = PHI_RESULT (phi);
code = gimple_assign_rhs_code (first_stmt);
while (1)
{
nloop_uses = 0;
n_out_of_loop_uses = 0;
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
{
gimple *use_stmt = USE_STMT (use_p);
if (is_gimple_debug (use_stmt))
continue;
/* Check if we got back to the reduction phi. */
if (use_stmt == phi)
{
loop_use_stmt = use_stmt;
found = true;
break;
}
if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
{
loop_use_stmt = use_stmt;
nloop_uses++;
}
else
n_out_of_loop_uses++;
	  /* There can be either a single use in the loop or two uses in
	     phi nodes.  */
if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
return false;
}
if (found)
break;
/* We reached a statement with no loop uses. */
if (nloop_uses == 0)
return false;
/* This is a loop exit phi, and we haven't reached the reduction phi. */
if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
return false;
if (!is_gimple_assign (loop_use_stmt)
|| code != gimple_assign_rhs_code (loop_use_stmt)
|| !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
return false;
/* Insert USE_STMT into reduction chain. */
use_stmt_info = vinfo_for_stmt (loop_use_stmt);
if (current_stmt)
{
current_stmt_info = vinfo_for_stmt (current_stmt);
GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
GROUP_FIRST_ELEMENT (use_stmt_info)
= GROUP_FIRST_ELEMENT (current_stmt_info);
}
else
GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
lhs = gimple_assign_lhs (loop_use_stmt);
current_stmt = loop_use_stmt;
size++;
}
if (!found || loop_use_stmt != phi || size < 2)
return false;
/* Swap the operands, if needed, to make the reduction operand be the second
operand. */
lhs = PHI_RESULT (phi);
next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
while (next_stmt)
{
if (gimple_assign_rhs2 (next_stmt) == lhs)
{
tree op = gimple_assign_rhs1 (next_stmt);
gimple *def_stmt = NULL;
if (TREE_CODE (op) == SSA_NAME)
def_stmt = SSA_NAME_DEF_STMT (op);
/* Check that the other def is either defined in the loop
("vect_internal_def"), or it's an induction (defined by a
loop-header phi-node). */
if (def_stmt
&& gimple_bb (def_stmt)
&& flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
&& (is_gimple_assign (def_stmt)
|| is_gimple_call (def_stmt)
|| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_induction_def
|| (gimple_code (def_stmt) == GIMPLE_PHI
&& STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def_stmt)))))
{
lhs = gimple_assign_lhs (next_stmt);
next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
continue;
}
return false;
}
else
{
tree op = gimple_assign_rhs2 (next_stmt);
gimple *def_stmt = NULL;
if (TREE_CODE (op) == SSA_NAME)
def_stmt = SSA_NAME_DEF_STMT (op);
/* Check that the other def is either defined in the loop
("vect_internal_def"), or it's an induction (defined by a
loop-header phi-node). */
if (def_stmt
&& gimple_bb (def_stmt)
&& flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
&& (is_gimple_assign (def_stmt)
|| is_gimple_call (def_stmt)
|| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_induction_def
|| (gimple_code (def_stmt) == GIMPLE_PHI
&& STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def_stmt)))))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
}
swap_ssa_operands (next_stmt,
gimple_assign_rhs1_ptr (next_stmt),
gimple_assign_rhs2_ptr (next_stmt));
update_stmt (next_stmt);
if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
}
else
return false;
}
lhs = gimple_assign_lhs (next_stmt);
next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
}
/* Save the chain for further analysis in SLP detection. */
first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
GROUP_SIZE (vinfo_for_stmt (first)) = size;
return true;
}
/* Return true if we need an in-order reduction for operation CODE
on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
overflow must wrap. */
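/* Illustrative example: for
     float sum = 0.0f;
     for (i = 0; i < n; i++)
       sum += a[i];
   reassociating the additions can change the rounded result, so unless
   -fassociative-math is in effect the reduction must be done in order.  */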
static bool
needs_fold_left_reduction_p (tree type, tree_code code,
bool need_wrapping_integral_overflow)
{
/* CHECKME: check for !flag_finite_math_only too? */
if (SCALAR_FLOAT_TYPE_P (type))
switch (code)
{
case MIN_EXPR:
case MAX_EXPR:
return false;
default:
return !flag_associative_math;
}
if (INTEGRAL_TYPE_P (type))
{
if (!operation_no_trapping_overflow (type, code))
return true;
if (need_wrapping_integral_overflow
&& !TYPE_OVERFLOW_WRAPS (type)
&& operation_can_overflow (code))
return true;
return false;
}
if (SAT_FIXED_POINT_TYPE_P (type))
return true;
return false;
}
/* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
reduction operation CODE has a handled computation expression. */
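/* As an illustrative (hypothetical) example, for
     sum_1 = PHI <sum_0, sum_3>
     ...
     t_2 = x_4 + sum_1;
     sum_3 = t_2 + y_5;
   the path walked from the latch definition sum_3 back to sum_1 consists
   of two PLUS_EXPR statements whose intermediate results each have a
   single use, so the path is accepted for CODE == PLUS_EXPR.  */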
bool
check_reduction_path (location_t loc, loop_p loop, gphi *phi, tree loop_arg,
enum tree_code code)
{
auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
auto_bitmap visited;
tree lookfor = PHI_RESULT (phi);
ssa_op_iter curri;
use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
while (USE_FROM_PTR (curr) != loop_arg)
curr = op_iter_next_use (&curri);
curri.i = curri.numops;
do
{
path.safe_push (std::make_pair (curri, curr));
tree use = USE_FROM_PTR (curr);
if (use == lookfor)
break;
gimple *def = SSA_NAME_DEF_STMT (use);
if (gimple_nop_p (def)
|| ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
{
pop:
do
{
std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
curri = x.first;
curr = x.second;
do
curr = op_iter_next_use (&curri);
/* Skip already visited or non-SSA operands (from iterating
over PHI args). */
while (curr != NULL_USE_OPERAND_P
&& (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
|| ! bitmap_set_bit (visited,
SSA_NAME_VERSION
(USE_FROM_PTR (curr)))));
}
while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
if (curr == NULL_USE_OPERAND_P)
break;
}
else
{
if (gimple_code (def) == GIMPLE_PHI)
curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
else
curr = op_iter_init_use (&curri, def, SSA_OP_USE);
while (curr != NULL_USE_OPERAND_P
&& (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
|| ! bitmap_set_bit (visited,
SSA_NAME_VERSION
(USE_FROM_PTR (curr)))))
curr = op_iter_next_use (&curri);
if (curr == NULL_USE_OPERAND_P)
goto pop;
}
}
while (1);
if (dump_file && (dump_flags & TDF_DETAILS))
{
dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
unsigned i;
std::pair<ssa_op_iter, use_operand_p> *x;
FOR_EACH_VEC_ELT (path, i, x)
{
dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
dump_printf (MSG_NOTE, " ");
}
dump_printf (MSG_NOTE, "\n");
}
/* Check whether the reduction path detected is valid. */
bool fail = path.length () == 0;
bool neg = false;
for (unsigned i = 1; i < path.length (); ++i)
{
gimple *use_stmt = USE_STMT (path[i].second);
tree op = USE_FROM_PTR (path[i].second);
if (! has_single_use (op)
|| ! is_gimple_assign (use_stmt))
{
fail = true;
break;
}
if (gimple_assign_rhs_code (use_stmt) != code)
{
if (code == PLUS_EXPR
&& gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
{
/* Track whether we negate the reduction value each iteration. */
if (gimple_assign_rhs2 (use_stmt) == op)
neg = ! neg;
}
else
{
fail = true;
break;
}
}
}
return ! fail && ! neg;
}
/* Function vect_is_simple_reduction
(1) Detect a cross-iteration def-use cycle that represents a simple
reduction computation. We look for the following pattern:
loop_header:
a1 = phi < a0, a2 >
a3 = ...
a2 = operation (a3, a1)
or
a3 = ...
loop_header:
a1 = phi < a0, a2 >
a2 = operation (a3, a1)
such that:
1. operation is commutative and associative and it is safe to
change the order of the computation
2. no uses for a2 in the loop (a2 is used out of the loop)
3. no uses of a1 in the loop besides the reduction operation
4. no uses of a1 outside the loop.
Conditions 1,4 are tested here.
Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
(2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
nested cycles.
(3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
reductions:
a1 = phi < a0, a2 >
inner loop (def of a3)
a2 = phi < a3 >
   (4) Detect condition expressions, i.e.:
for (int i = 0; i < N; i++)
if (a[i] < val)
ret_val = a[i];
*/
static gimple *
vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
bool *double_reduc,
bool need_wrapping_integral_overflow,
enum vect_reduction_type *v_reduc_type)
{
struct loop *loop = (gimple_bb (phi))->loop_father;
struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
gimple *def_stmt, *def1 = NULL, *def2 = NULL, *phi_use_stmt = NULL;
enum tree_code orig_code, code;
tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
tree type;
int nloop_uses;
tree name;
imm_use_iterator imm_iter;
use_operand_p use_p;
bool phi_def;
*double_reduc = false;
*v_reduc_type = TREE_CODE_REDUCTION;
tree phi_name = PHI_RESULT (phi);
/* ??? If there are no uses of the PHI result the inner loop reduction
won't be detected as possibly double-reduction by vectorizable_reduction
because that tries to walk the PHI arg from the preheader edge which
can be constant. See PR60382. */
if (has_zero_uses (phi_name))
return NULL;
nloop_uses = 0;
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
{
gimple *use_stmt = USE_STMT (use_p);
if (is_gimple_debug (use_stmt))
continue;
if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"intermediate value used outside loop.\n");
return NULL;
}
nloop_uses++;
if (nloop_uses > 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction value used in loop.\n");
return NULL;
}
phi_use_stmt = use_stmt;
}
edge latch_e = loop_latch_edge (loop);
tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
if (TREE_CODE (loop_arg) != SSA_NAME)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction: not ssa_name: ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return NULL;
}
def_stmt = SSA_NAME_DEF_STMT (loop_arg);
if (is_gimple_assign (def_stmt))
{
name = gimple_assign_lhs (def_stmt);
phi_def = false;
}
else if (gimple_code (def_stmt) == GIMPLE_PHI)
{
name = PHI_RESULT (def_stmt);
phi_def = true;
}
else
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction: unhandled reduction operation: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def_stmt, 0);
}
return NULL;
}
if (! flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
return NULL;
nloop_uses = 0;
auto_vec<gphi *, 3> lcphis;
F