| /* Data References Analysis and Manipulation Utilities for Vectorization. |
| Copyright (C) 2003-2013 Free Software Foundation, Inc. |
| Contributed by Dorit Naishlos <dorit@il.ibm.com> |
| and Ira Rosen <irar@il.ibm.com> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "dumpfile.h" |
| #include "tm.h" |
| #include "ggc.h" |
| #include "tree.h" |
| #include "tm_p.h" |
| #include "target.h" |
| #include "basic-block.h" |
| #include "gimple-pretty-print.h" |
| #include "tree-flow.h" |
| #include "cfgloop.h" |
| #include "tree-chrec.h" |
| #include "tree-scalar-evolution.h" |
| #include "tree-vectorizer.h" |
| #include "diagnostic-core.h" |
| |
| /* Need to include rtl.h, expr.h, etc. for optabs. */ |
| #include "expr.h" |
| #include "optabs.h" |
| |
| /* Return true if load- or store-lanes optab OPTAB is implemented for |
| COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */ |
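| /* For example (an illustrative, hypothetical loop), a grouped access |
| such as |
| |
| for (i = 0; i < N; i++) |
| { |
| a[i] = p[3*i]; |
| b[i] = p[3*i + 1]; |
| c[i] = p[3*i + 2]; |
| } |
| |
| can be implemented with a single load-lanes instruction per vector |
| iteration when the check below succeeds for COUNT == 3 and the vector |
| type chosen for the elements of 'p'. */ |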
| |
| static bool |
| vect_lanes_optab_supported_p (const char *name, convert_optab optab, |
| tree vectype, unsigned HOST_WIDE_INT count) |
| { |
| enum machine_mode mode, array_mode; |
| bool limit_p; |
| |
| mode = TYPE_MODE (vectype); |
| limit_p = !targetm.array_mode_supported_p (mode, count); |
| array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode), |
| MODE_INT, limit_p); |
| |
| if (array_mode == BLKmode) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]", |
| GET_MODE_NAME (mode), count); |
| return false; |
| } |
| |
| if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "cannot use %s<%s><%s>", name, |
| GET_MODE_NAME (array_mode), GET_MODE_NAME (mode)); |
| return false; |
| } |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "can use %s<%s><%s>", name, GET_MODE_NAME (array_mode), |
| GET_MODE_NAME (mode)); |
| |
| return true; |
| } |
| |
| |
| /* Return the smallest scalar part of STMT. |
| This is used to determine the vectype of the stmt. We generally set the |
| vectype according to the type of the result (lhs). For stmts whose |
| result-type is different than the type of the arguments (e.g., demotion, |
| promotion), vectype will be reset appropriately (later). Note that we have |
| to visit the smallest datatype in this function, because that determines the |
| VF. If the smallest datatype in the loop is present only as the rhs of a |
| promotion operation, we would miss it. |
| Such a case, where a variable of this datatype does not appear in the lhs |
| anywhere in the loop, can only occur if it's an invariant: e.g.: |
| 'int_x = (int) short_inv', which we'd expect to have been optimized away by |
| invariant motion. However, we cannot rely on invariant motion to always |
| take invariants out of the loop, and so in the case of promotion we also |
| have to check the rhs. |
| LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding |
| types. */ |
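| /* For example (illustrative, hypothetical statement), for the widening |
| multiplication |
| |
| int_prod = (int) short_a * (int) short_b; |
| |
| the result type is int (4 bytes) while the smallest scalar type |
| involved is short (2 bytes), so the function below returns the short |
| type and sets *LHS_SIZE_UNIT to 4 and *RHS_SIZE_UNIT to 2. */ |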
| |
| tree |
| vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit, |
| HOST_WIDE_INT *rhs_size_unit) |
| { |
| tree scalar_type = gimple_expr_type (stmt); |
| HOST_WIDE_INT lhs, rhs; |
| |
| lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); |
| |
| if (is_gimple_assign (stmt) |
| && (gimple_assign_cast_p (stmt) |
| || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR |
| || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR |
| || gimple_assign_rhs_code (stmt) == FLOAT_EXPR)) |
| { |
| tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); |
| |
| rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type)); |
| if (rhs < lhs) |
| scalar_type = rhs_type; |
| } |
| |
| *lhs_size_unit = lhs; |
| *rhs_size_unit = rhs; |
| return scalar_type; |
| } |
| |
| |
| /* Find the place of the data-ref in STMT in the interleaving chain that starts |
| from FIRST_STMT. Return -1 if the data-ref is not a part of the chain. */ |
| |
| int |
| vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt) |
| { |
| gimple next_stmt = first_stmt; |
| int result = 0; |
| |
| if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) |
| return -1; |
| |
| while (next_stmt && next_stmt != stmt) |
| { |
| result++; |
| next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); |
| } |
| |
| if (next_stmt) |
| return result; |
| else |
| return -1; |
| } |
| |
| |
| /* Function vect_insert_into_interleaving_chain. |
| |
| Insert DRA into the interleaving chain of DRB according to DRA's INIT. */ |
| |
| static void |
| vect_insert_into_interleaving_chain (struct data_reference *dra, |
| struct data_reference *drb) |
| { |
| gimple prev, next; |
| tree next_init; |
| stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra)); |
| stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb)); |
| |
| prev = GROUP_FIRST_ELEMENT (stmtinfo_b); |
| next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)); |
| while (next) |
| { |
| next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next))); |
| if (tree_int_cst_compare (next_init, DR_INIT (dra)) > 0) |
| { |
| /* Insert here. */ |
| GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = DR_STMT (dra); |
| GROUP_NEXT_ELEMENT (stmtinfo_a) = next; |
| return; |
| } |
| prev = next; |
| next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)); |
| } |
| |
| /* We got to the end of the list. Insert here. */ |
| GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = DR_STMT (dra); |
| GROUP_NEXT_ELEMENT (stmtinfo_a) = NULL; |
| } |
| |
| |
| /* Function vect_update_interleaving_chain. |
| |
| For two data-refs DRA and DRB that are part of a chain of interleaved data |
| accesses, update the interleaving chain. DRB's INIT is smaller than DRA's. |
| |
| There are four possible cases: |
| 1. New stmts - both DRA and DRB are not a part of any chain: |
| FIRST_DR = DRB |
| NEXT_DR (DRB) = DRA |
| 2. DRB is a part of a chain and DRA is not: |
| no need to update FIRST_DR |
| no need to insert DRB |
| insert DRA according to init |
| 3. DRA is a part of a chain and DRB is not: |
| if (init of FIRST_DR > init of DRB) |
| FIRST_DR = DRB |
| NEXT(FIRST_DR) = previous FIRST_DR |
| else |
| insert DRB according to its init |
| 4. both DRA and DRB are in some interleaving chains: |
| choose the chain with the smallest init of FIRST_DR |
| insert the nodes of the second chain into the first one. */ |
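| /* For example (illustrative), if the chain of DRB already contains stmts |
| with DR_INITs 0 and 8 and DRA's DR_INIT is 4, case 2 above applies and |
| DRA is inserted between them, so the chain stays sorted by increasing |
| init: GROUP_FIRST_ELEMENT points to the stmt with init 0 and the |
| GROUP_NEXT_ELEMENT links visit inits 0 -> 4 -> 8. */ |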
| |
| static void |
| vect_update_interleaving_chain (struct data_reference *drb, |
| struct data_reference *dra) |
| { |
| stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra)); |
| stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb)); |
| tree next_init, init_dra_chain, init_drb_chain; |
| gimple first_a, first_b; |
| tree node_init; |
| gimple node, prev, next, first_stmt; |
| |
| /* 1. New stmts - both DRA and DRB are not a part of any chain. */ |
| if (!GROUP_FIRST_ELEMENT (stmtinfo_a) && !GROUP_FIRST_ELEMENT (stmtinfo_b)) |
| { |
| GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (drb); |
| GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (drb); |
| GROUP_NEXT_ELEMENT (stmtinfo_b) = DR_STMT (dra); |
| return; |
| } |
| |
| /* 2. DRB is a part of a chain and DRA is not. */ |
| if (!GROUP_FIRST_ELEMENT (stmtinfo_a) && GROUP_FIRST_ELEMENT (stmtinfo_b)) |
| { |
| GROUP_FIRST_ELEMENT (stmtinfo_a) = GROUP_FIRST_ELEMENT (stmtinfo_b); |
| /* Insert DRA into the chain of DRB. */ |
| vect_insert_into_interleaving_chain (dra, drb); |
| return; |
| } |
| |
| /* 3. DRA is a part of a chain and DRB is not. */ |
| if (GROUP_FIRST_ELEMENT (stmtinfo_a) && !GROUP_FIRST_ELEMENT (stmtinfo_b)) |
| { |
| gimple old_first_stmt = GROUP_FIRST_ELEMENT (stmtinfo_a); |
| tree init_old = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt ( |
| old_first_stmt))); |
| gimple tmp; |
| |
| if (tree_int_cst_compare (init_old, DR_INIT (drb)) > 0) |
| { |
| /* DRB's init is smaller than the init of the stmt previously marked |
| as the first stmt of the interleaving chain of DRA. Therefore, we |
| update FIRST_STMT and put DRB in the head of the list. */ |
| GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (drb); |
| GROUP_NEXT_ELEMENT (stmtinfo_b) = old_first_stmt; |
| |
| /* Update all the stmts in the list to point to the new FIRST_STMT. */ |
| tmp = old_first_stmt; |
| while (tmp) |
| { |
| GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) = DR_STMT (drb); |
| tmp = GROUP_NEXT_ELEMENT (vinfo_for_stmt (tmp)); |
| } |
| } |
| else |
| { |
| /* Insert DRB in the list of DRA. */ |
| vect_insert_into_interleaving_chain (drb, dra); |
| GROUP_FIRST_ELEMENT (stmtinfo_b) = GROUP_FIRST_ELEMENT (stmtinfo_a); |
| } |
| return; |
| } |
| |
| /* 4. both DRA and DRB are in some interleaving chains. */ |
| first_a = GROUP_FIRST_ELEMENT (stmtinfo_a); |
| first_b = GROUP_FIRST_ELEMENT (stmtinfo_b); |
| if (first_a == first_b) |
| return; |
| init_dra_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_a))); |
| init_drb_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_b))); |
| |
| if (tree_int_cst_compare (init_dra_chain, init_drb_chain) > 0) |
| { |
| /* Insert the nodes of DRA chain into the DRB chain. |
| After inserting a node, continue from this node of the DRB chain (don't |
| start from the beginning). */ |
| node = GROUP_FIRST_ELEMENT (stmtinfo_a); |
| prev = GROUP_FIRST_ELEMENT (stmtinfo_b); |
| first_stmt = first_b; |
| } |
| else |
| { |
| /* Insert the nodes of DRB chain into the DRA chain. |
| After inserting a node, continue from this node of the DRA chain (don't |
| start from the beginning). */ |
| node = GROUP_FIRST_ELEMENT (stmtinfo_b); |
| prev = GROUP_FIRST_ELEMENT (stmtinfo_a); |
| first_stmt = first_a; |
| } |
| |
| while (node) |
| { |
| node_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (node))); |
| next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)); |
| while (next) |
| { |
| next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next))); |
| if (tree_int_cst_compare (next_init, node_init) > 0) |
| { |
| /* Insert here. */ |
| GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = node; |
| GROUP_NEXT_ELEMENT (vinfo_for_stmt (node)) = next; |
| prev = node; |
| break; |
| } |
| prev = next; |
| next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)); |
| } |
| if (!next) |
| { |
| /* We got to the end of the list. Insert here. */ |
| GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = node; |
| GROUP_NEXT_ELEMENT (vinfo_for_stmt (node)) = NULL; |
| prev = node; |
| } |
| GROUP_FIRST_ELEMENT (vinfo_for_stmt (node)) = first_stmt; |
| node = GROUP_NEXT_ELEMENT (vinfo_for_stmt (node)); |
| } |
| } |
| |
| /* Check dependence between DRA and DRB for basic block vectorization. |
| If the accesses share the same bases and offsets, we can compare their initial |
| constant offsets to decide whether they differ or not. In case of a read- |
| write dependence we check that the load is before the store to ensure that |
| vectorization will not change the order of the accesses. */ |
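| /* For example (illustrative, hypothetical basic block): given |
| |
| x = a[4]; # load |
| a[4] = y; # store |
| |
| both refs have the same base, offset and init, so they do overlap; but |
| because the load comes first, the vector load stays before the vector |
| store and the function below returns false (no blocking dependence). */ |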
| |
| static bool |
| vect_drs_dependent_in_basic_block (struct data_reference *dra, |
| struct data_reference *drb) |
| { |
| HOST_WIDE_INT type_size_a, type_size_b, init_a, init_b; |
| gimple earlier_stmt; |
| |
| /* We only call this function for pairs of loads and stores, but we verify |
| it here. */ |
| if (DR_IS_READ (dra) == DR_IS_READ (drb)) |
| { |
| if (DR_IS_READ (dra)) |
| return false; |
| else |
| return true; |
| } |
| |
| /* Check that the data-refs have the same bases and offsets. If not, we can't |
| determine if they are dependent. */ |
| if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0) |
| || !dr_equal_offsets_p (dra, drb)) |
| return true; |
| |
| /* Check the types. */ |
| type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)))); |
| type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)))); |
| |
| if (type_size_a != type_size_b |
| || !types_compatible_p (TREE_TYPE (DR_REF (dra)), |
| TREE_TYPE (DR_REF (drb)))) |
| return true; |
| |
| init_a = TREE_INT_CST_LOW (DR_INIT (dra)); |
| init_b = TREE_INT_CST_LOW (DR_INIT (drb)); |
| |
| /* Two different locations - no dependence. */ |
| if (init_a != init_b) |
| return false; |
| |
| /* We have a read-write dependence. Check that the load is before the store. |
| When we vectorize basic blocks, vector load can be only before |
| corresponding scalar load, and vector store can be only after its |
| corresponding scalar store. So the order of the accesses is preserved in |
| case the load is before the store. */ |
| earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb)); |
| if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt)))) |
| return false; |
| |
| return true; |
| } |
| |
| |
| /* Function vect_check_interleaving. |
| |
| Check if DRA and DRB are a part of interleaving. In case they are, insert |
| DRA and DRB in an interleaving chain. */ |
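| /* For example (illustrative, hypothetical loop with 4-byte elements), |
| for the two stores in |
| |
| for (i = 0; i < N; i++) |
| { |
| p[2*i] = x; # DR_INIT = 0, DR_STEP = 8 |
| p[2*i + 1] = y; # DR_INIT = 4, DR_STEP = 8 |
| } |
| |
| the bases and steps match, and the difference of the inits (4) is a |
| multiple of the element size and not larger than the step, so the two |
| stores are recorded as a single interleaving chain. */ |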
| |
| static bool |
| vect_check_interleaving (struct data_reference *dra, |
| struct data_reference *drb) |
| { |
| HOST_WIDE_INT type_size_a, type_size_b, diff_mod_size, step, init_a, init_b; |
| |
| /* Check that the data-refs have the same first location (except init) and |
| that they are both either stores or loads (not a load and a store). */ |
| if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0) |
| || !dr_equal_offsets_p (dra, drb) |
| || !tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) |
| || DR_IS_READ (dra) != DR_IS_READ (drb)) |
| return false; |
| |
| /* Check: |
| 1. data-refs are of the same type |
| 2. their steps are equal |
| 3. the step (if greater than zero) is greater than the difference between |
| data-refs' inits. */ |
| type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)))); |
| type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)))); |
| |
| if (type_size_a != type_size_b |
| || tree_int_cst_compare (DR_STEP (dra), DR_STEP (drb)) |
| || !types_compatible_p (TREE_TYPE (DR_REF (dra)), |
| TREE_TYPE (DR_REF (drb)))) |
| return false; |
| |
| init_a = TREE_INT_CST_LOW (DR_INIT (dra)); |
| init_b = TREE_INT_CST_LOW (DR_INIT (drb)); |
| step = TREE_INT_CST_LOW (DR_STEP (dra)); |
| |
| if (init_a > init_b) |
| { |
| /* If init_a == init_b + the size of the type * k, we have an interleaving, |
| and DRB is accessed before DRA. */ |
| diff_mod_size = (init_a - init_b) % type_size_a; |
| |
| if (step && (init_a - init_b) > step) |
| return false; |
| |
| if (diff_mod_size == 0) |
| { |
| vect_update_interleaving_chain (drb, dra); |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Detected interleaving "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); |
| } |
| return true; |
| } |
| } |
| else |
| { |
| /* If init_b == init_a + the size of the type * k, we have an |
| interleaving, and DRA is accessed before DRB. */ |
| diff_mod_size = (init_b - init_a) % type_size_a; |
| |
| if (step && (init_b - init_a) > step) |
| return false; |
| |
| if (diff_mod_size == 0) |
| { |
| vect_update_interleaving_chain (dra, drb); |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Detected interleaving "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); |
| } |
| return true; |
| } |
| } |
| |
| return false; |
| } |
| |
| /* Check if the data references pointed to by DR_I and DR_J are the same or |
| belong to the same interleaving group. Return FALSE if the drs are |
| different, otherwise return TRUE. */ |
| |
| static bool |
| vect_same_range_drs (data_reference_p dr_i, data_reference_p dr_j) |
| { |
| gimple stmt_i = DR_STMT (dr_i); |
| gimple stmt_j = DR_STMT (dr_j); |
| |
| if (operand_equal_p (DR_REF (dr_i), DR_REF (dr_j), 0) |
| || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i)) |
| && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j)) |
| && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i)) |
| == GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j))))) |
| return true; |
| else |
| return false; |
| } |
| |
| /* If address ranges represented by DDR_I and DDR_J are equal, |
| return TRUE, otherwise return FALSE. */ |
| |
| static bool |
| vect_vfa_range_equal (ddr_p ddr_i, ddr_p ddr_j) |
| { |
| if ((vect_same_range_drs (DDR_A (ddr_i), DDR_A (ddr_j)) |
| && vect_same_range_drs (DDR_B (ddr_i), DDR_B (ddr_j))) |
| || (vect_same_range_drs (DDR_A (ddr_i), DDR_B (ddr_j)) |
| && vect_same_range_drs (DDR_B (ddr_i), DDR_A (ddr_j)))) |
| return true; |
| else |
| return false; |
| } |
| |
| /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be |
| tested at run-time. Return TRUE if DDR was successfully inserted. |
| Return false if versioning is not supported. */ |
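| /* For example (illustrative, hypothetical loop): for the possibly |
| aliasing accesses in |
| |
| for (i = 0; i < n; i++) |
| dst[i] = src[i] + 1; |
| |
| the vectorized loop gets guarded by a run-time check that the address |
| ranges covered by 'dst' and 'src' during the loop do not overlap; if |
| the check fails, the scalar version of the loop runs instead. */ |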
| |
| static bool |
| vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo) |
| { |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0) |
| return false; |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "mark for run-time aliasing test between "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr))); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr))); |
| } |
| |
| if (optimize_loop_nest_for_size_p (loop)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "versioning not supported when optimizing for size."); |
| return false; |
| } |
| |
| /* FORNOW: We don't support versioning with outer-loop vectorization. */ |
| if (loop->inner) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "versioning not yet supported for outer-loops."); |
| return false; |
| } |
| |
| /* FORNOW: We don't support creating runtime alias tests for non-constant |
| step. */ |
| if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST |
| || TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "versioning not yet supported for non-constant " |
| "step"); |
| return false; |
| } |
| |
| LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr); |
| return true; |
| } |
| |
| |
| /* Function vect_analyze_data_ref_dependence. |
| |
| Return TRUE if there (might) exist a dependence between a memory-reference |
| DRA and a memory-reference DRB. When versioning for alias can check the |
| dependence at run-time instead, return FALSE. Adjust *MAX_VF according to |
| the data dependence. */ |
| |
| static bool |
| vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr, |
| loop_vec_info loop_vinfo, int *max_vf) |
| { |
| unsigned int i; |
| struct loop *loop = NULL; |
| struct data_reference *dra = DDR_A (ddr); |
| struct data_reference *drb = DDR_B (ddr); |
| stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra)); |
| stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb)); |
| lambda_vector dist_v; |
| unsigned int loop_depth; |
| |
| /* Don't bother to analyze statements marked as unvectorizable. */ |
| if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a) |
| || !STMT_VINFO_VECTORIZABLE (stmtinfo_b)) |
| return false; |
| |
| if (DDR_ARE_DEPENDENT (ddr) == chrec_known) |
| { |
| /* Independent data accesses. */ |
| vect_check_interleaving (dra, drb); |
| return false; |
| } |
| |
| if (loop_vinfo) |
| loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| if ((DR_IS_READ (dra) && DR_IS_READ (drb) && loop_vinfo) || dra == drb) |
| return false; |
| |
| if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) |
| { |
| gimple earlier_stmt; |
| |
| if (loop_vinfo) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "versioning for alias required: " |
| "can't determine dependence between "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| DR_REF (dra)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, " and "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| DR_REF (drb)); |
| } |
| |
| /* Add to list of ddrs that need to be tested at run-time. */ |
| return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo); |
| } |
| |
| /* When vectorizing a basic block, an unknown dependence can still mean |
| a grouped access. */ |
| if (vect_check_interleaving (dra, drb)) |
| return false; |
| |
| /* Read-read is OK (we need this check here, after checking for |
| interleaving). */ |
| if (DR_IS_READ (dra) && DR_IS_READ (drb)) |
| return false; |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "can't determine dependence between "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, " and "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb)); |
| } |
| |
| /* We do not vectorize basic blocks with write-write dependencies. */ |
| if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb)) |
| return true; |
| |
| /* Check that it's not a load-after-store dependence. */ |
| earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb)); |
| if (DR_IS_WRITE (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt)))) |
| return true; |
| |
| return false; |
| } |
| |
| /* Versioning for alias is not yet supported for basic block SLP, and |
| dependence distance is not applicable; hence, in case of a known data |
| dependence, basic block vectorization is impossible for now. */ |
| if (!loop_vinfo) |
| { |
| if (dra != drb && vect_check_interleaving (dra, drb)) |
| return false; |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "determined dependence between "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); |
| } |
| |
| /* Do not vectorize basic blocks with write-write dependences. */ |
| if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb)) |
| return true; |
| |
| /* Check if this dependence is allowed in basic block vectorization. */ |
| return vect_drs_dependent_in_basic_block (dra, drb); |
| } |
| |
| /* Loop-based vectorization and known data dependence. */ |
| if (DDR_NUM_DIST_VECTS (ddr) == 0) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "versioning for alias required: " |
| "bad dist vector for "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_MISSED_OPTIMIZATION, " and "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb)); |
| } |
| /* Add to list of ddrs that need to be tested at run-time. */ |
| return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo); |
| } |
| |
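| /* For example (illustrative, hypothetical loop): in |
| |
| for (i = 0; i < N; i++) |
| a[i + 4] = a[i] + 1; |
| |
| the dependence distance is 4, so vectorization is safe as long as the |
| vectorization factor does not exceed 4; the loop below caps *MAX_VF |
| accordingly. */ |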
| loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr)); |
| FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v) |
| { |
| int dist = dist_v[loop_depth]; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "dependence distance = %d.", dist); |
| |
| if (dist == 0) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "dependence distance == 0 between "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); |
| } |
| |
| /* For interleaving, mark that there is a read-write dependency if |
| necessary. We have already checked that one of the data-refs is a |
| store. */ |
| if (DR_IS_READ (dra)) |
| GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a) = true; |
| else |
| { |
| if (DR_IS_READ (drb)) |
| GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b) = true; |
| } |
| |
| continue; |
| } |
| |
| if (dist > 0 && DDR_REVERSED_P (ddr)) |
| { |
| /* If DDR_REVERSED_P the order of the data-refs in DDR was |
| reversed (to make distance vector positive), and the actual |
| distance is negative. */ |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "dependence distance negative."); |
| /* Record a negative dependence distance to later limit the |
| amount of stmt copying / unrolling we can perform. |
| Only need to handle read-after-write dependence. */ |
| if (DR_IS_READ (drb) |
| && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0 |
| || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist)) |
| STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist; |
| continue; |
| } |
| |
| if (abs (dist) >= 2 |
| && abs (dist) < *max_vf) |
| { |
| /* The dependence distance requires reduction of the maximal |
| vectorization factor. */ |
| *max_vf = abs (dist); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "adjusting maximal vectorization factor to %i", |
| *max_vf); |
| } |
| |
| if (abs (dist) >= *max_vf) |
| { |
| /* Dependence distance does not create dependence, as far as |
| vectorization is concerned, in this case. */ |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "dependence distance >= VF."); |
| continue; |
| } |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized, possible dependence " |
| "between data-refs "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); |
| } |
| |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Function vect_analyze_data_ref_dependences. |
| |
| Examine all the data references in the loop, and make sure there do not |
| exist any data dependences between them. Set *MAX_VF according to |
| the maximum vectorization factor the data dependences allow. */ |
| |
| bool |
| vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, |
| bb_vec_info bb_vinfo, int *max_vf) |
| { |
| unsigned int i; |
| vec<ddr_p> ddrs = vNULL; |
| struct data_dependence_relation *ddr; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_analyze_dependences ==="); |
| if (loop_vinfo) |
| ddrs = LOOP_VINFO_DDRS (loop_vinfo); |
| else |
| ddrs = BB_VINFO_DDRS (bb_vinfo); |
| |
| FOR_EACH_VEC_ELT (ddrs, i, ddr) |
| if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf)) |
| return false; |
| |
| return true; |
| } |
| |
| |
| /* Function vect_compute_data_ref_alignment |
| |
| Compute the misalignment of the data reference DR. |
| |
| Output: |
| 1. If during the misalignment computation it is found that the data reference |
| cannot be vectorized then false is returned. |
| 2. DR_MISALIGNMENT (DR) is defined. |
| |
| FOR NOW: No analysis is actually performed. Misalignment is calculated |
| only for trivial cases. TODO. */ |
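| /* For example (illustrative): for an access a[i + 1] in a loop, where |
| 'a' is a 16-byte aligned array of 4-byte integers (hypothetical) and |
| the vector type is 16 bytes wide, DR_BASE_ADDRESS is &a, DR_INIT is 4 |
| and the computation below sets DR_MISALIGNMENT to 4 bytes. */ |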
| |
| static bool |
| vect_compute_data_ref_alignment (struct data_reference *dr) |
| { |
| gimple stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| struct loop *loop = NULL; |
| tree ref = DR_REF (dr); |
| tree vectype; |
| tree base, base_addr; |
| bool base_aligned; |
| tree misalign; |
| tree aligned_to, alignment; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "vect_compute_data_ref_alignment:"); |
| |
| if (loop_vinfo) |
| loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| /* Initialize misalignment to unknown. */ |
| SET_DR_MISALIGNMENT (dr, -1); |
| |
| /* Strided loads perform only component accesses; misalignment information |
| is irrelevant for them. */ |
| if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)) |
| return true; |
| |
| misalign = DR_INIT (dr); |
| aligned_to = DR_ALIGNED_TO (dr); |
| base_addr = DR_BASE_ADDRESS (dr); |
| vectype = STMT_VINFO_VECTYPE (stmt_info); |
| |
| /* In case the dataref is in an inner-loop of the loop that is being |
| vectorized (LOOP), we use the base and misalignment information |
| relative to the outer-loop (LOOP). This is ok only if the misalignment |
| stays the same throughout the execution of the inner-loop, which is why |
| we have to check that the stride of the dataref in the inner-loop is a |
| multiple of the vector size. */ |
| if (loop && nested_in_vect_loop_p (loop, stmt)) |
| { |
| tree step = DR_STEP (dr); |
| HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); |
| |
| if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "inner step divides the vector-size."); |
| misalign = STMT_VINFO_DR_INIT (stmt_info); |
| aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info); |
| base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info); |
| } |
| else |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "inner step doesn't divide the vector-size."); |
| misalign = NULL_TREE; |
| } |
| } |
| |
| /* Similarly, if we're doing basic-block vectorization, we can only use |
| base and misalignment information relative to an innermost loop if the |
| misalignment stays the same throughout the execution of the loop. |
| As above, this is the case if the stride of the dataref is a multiple of |
| the vector size. */ |
| if (!loop) |
| { |
| tree step = DR_STEP (dr); |
| HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); |
| |
| if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "SLP: step doesn't divide the vector-size."); |
| misalign = NULL_TREE; |
| } |
| } |
| |
| base = build_fold_indirect_ref (base_addr); |
| alignment = ssize_int (TYPE_ALIGN (vectype) / BITS_PER_UNIT); |
| |
| if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0) |
| || !misalign) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Unknown alignment for access: "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, base); |
| } |
| return true; |
| } |
| |
| if ((DECL_P (base) |
| && tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base)), |
| alignment) >= 0) |
| || (TREE_CODE (base_addr) == SSA_NAME |
| && tree_int_cst_compare (ssize_int (TYPE_ALIGN_UNIT (TREE_TYPE ( |
| TREE_TYPE (base_addr)))), |
| alignment) >= 0) |
| || (get_pointer_alignment (base_addr) >= TYPE_ALIGN (vectype))) |
| base_aligned = true; |
| else |
| base_aligned = false; |
| |
| if (!base_aligned) |
| { |
| /* Do not change the alignment of global variables here if |
| flag_section_anchors is enabled as we already generated |
| RTL for other functions. Most global variables should |
| have been aligned during the IPA increase_alignment pass. */ |
| if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype)) |
| || (TREE_STATIC (base) && flag_section_anchors)) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "can't force alignment of ref: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); |
| } |
| return true; |
| } |
| |
| /* Force the alignment of the decl. |
| NOTE: This is the only change to the code we make during |
| the analysis phase, before deciding to vectorize the loop. */ |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, "force alignment of "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); |
| } |
| |
| DECL_ALIGN (base) = TYPE_ALIGN (vectype); |
| DECL_USER_ALIGN (base) = 1; |
| } |
| |
| /* At this point we assume that the base is aligned. */ |
| gcc_assert (base_aligned |
| || (TREE_CODE (base) == VAR_DECL |
| && DECL_ALIGN (base) >= TYPE_ALIGN (vectype))); |
| |
| /* If this is a backward running DR then the first access in the larger |
| vectype is actually N-1 elements before the address in the DR. |
| Adjust misalign accordingly. */ |
| if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0) |
| { |
| tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1); |
| /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type, |
| otherwise we wouldn't be here. */ |
| offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr)); |
| /* PLUS because DR_STEP was negative. */ |
| misalign = size_binop (PLUS_EXPR, misalign, offset); |
| } |
| |
| /* Modulo alignment. */ |
| misalign = size_binop (FLOOR_MOD_EXPR, misalign, alignment); |
| |
| if (!host_integerp (misalign, 1)) |
| { |
| /* Negative or overflowed misalignment value. */ |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "unexpected misalign value"); |
| return false; |
| } |
| |
| SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign)); |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr)); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref); |
| } |
| |
| return true; |
| } |
| |
| |
| /* Function vect_compute_data_refs_alignment |
| |
| Compute the misalignment of data references in the loop. |
| Return FALSE if a data reference is found that cannot be vectorized. */ |
| |
| static bool |
| vect_compute_data_refs_alignment (loop_vec_info loop_vinfo, |
| bb_vec_info bb_vinfo) |
| { |
| vec<data_reference_p> datarefs; |
| struct data_reference *dr; |
| unsigned int i; |
| |
| if (loop_vinfo) |
| datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
| else |
| datarefs = BB_VINFO_DATAREFS (bb_vinfo); |
| |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) |
| && !vect_compute_data_ref_alignment (dr)) |
| { |
| if (bb_vinfo) |
| { |
| /* Mark unsupported statement as unvectorizable. */ |
| STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false; |
| continue; |
| } |
| else |
| return false; |
| } |
| |
| return true; |
| } |
| |
| |
| /* Function vect_update_misalignment_for_peel |
| |
| DR - the data reference whose misalignment is to be adjusted. |
| DR_PEEL - the data reference whose misalignment is being made |
| zero in the vector loop by the peel. |
| NPEEL - the number of iterations in the peel loop if the misalignment |
| of DR_PEEL is known at compile time. */ |
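| /* For example (illustrative), with a 16-byte vector type and 4-byte |
| elements: if DR_PEEL is misaligned by 8 bytes, NPEEL is 2, and a |
| second data-ref DR that was misaligned by 4 bytes becomes misaligned |
| by (4 + 2 * 4) & 15 == 12 bytes after the peel. */ |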
| |
| static void |
| vect_update_misalignment_for_peel (struct data_reference *dr, |
| struct data_reference *dr_peel, int npeel) |
| { |
| unsigned int i; |
| vec<dr_p> same_align_drs; |
| struct data_reference *current_dr; |
| int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr)))); |
| int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel)))); |
| stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr)); |
| stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel)); |
| |
| /* For interleaved data accesses the step in the loop must be multiplied by |
| the size of the interleaving group. */ |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info))); |
| if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info)) |
| dr_peel_size *= GROUP_SIZE (peel_stmt_info); |
| |
| /* It can be assumed that the data refs with the same alignment as dr_peel |
| are aligned in the vector loop. */ |
| same_align_drs |
| = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel))); |
| FOR_EACH_VEC_ELT (same_align_drs, i, current_dr) |
| { |
| if (current_dr != dr) |
| continue; |
| gcc_assert (DR_MISALIGNMENT (dr) / dr_size == |
| DR_MISALIGNMENT (dr_peel) / dr_peel_size); |
| SET_DR_MISALIGNMENT (dr, 0); |
| return; |
| } |
| |
| if (known_alignment_for_access_p (dr) |
| && known_alignment_for_access_p (dr_peel)) |
| { |
| bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0; |
| int misal = DR_MISALIGNMENT (dr); |
| tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
| misal += negative ? -npeel * dr_size : npeel * dr_size; |
| misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1; |
| SET_DR_MISALIGNMENT (dr, misal); |
| return; |
| } |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1."); |
| SET_DR_MISALIGNMENT (dr, -1); |
| } |
| |
| |
| /* Function vect_verify_datarefs_alignment |
| |
| Return TRUE if all data references in the loop can be |
| handled with respect to alignment. */ |
| |
| bool |
| vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo) |
| { |
| vec<data_reference_p> datarefs; |
| struct data_reference *dr; |
| enum dr_alignment_support supportable_dr_alignment; |
| unsigned int i; |
| |
| if (loop_vinfo) |
| datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
| else |
| datarefs = BB_VINFO_DATAREFS (bb_vinfo); |
| |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| { |
| gimple stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| |
| if (!STMT_VINFO_RELEVANT_P (stmt_info)) |
| continue; |
| |
| /* For interleaving, only the alignment of the first access matters. |
| Skip statements marked as not vectorizable. */ |
| if ((STMT_VINFO_GROUPED_ACCESS (stmt_info) |
| && GROUP_FIRST_ELEMENT (stmt_info) != stmt) |
| || !STMT_VINFO_VECTORIZABLE (stmt_info)) |
| continue; |
| |
| /* Strided loads perform only component accesses; alignment is |
| irrelevant for them. */ |
| if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)) |
| continue; |
| |
| supportable_dr_alignment = vect_supportable_dr_alignment (dr, false); |
| if (!supportable_dr_alignment) |
| { |
| if (dump_enabled_p ()) |
| { |
| if (DR_IS_READ (dr)) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: unsupported unaligned load."); |
| else |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: unsupported unaligned " |
| "store."); |
| |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| DR_REF (dr)); |
| } |
| return false; |
| } |
| if (supportable_dr_alignment != dr_aligned && dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Vectorizing an unaligned access."); |
| } |
| return true; |
| } |
| |
| /* Given a memory reference EXP, return whether its alignment is less |
| than its size. */ |
| |
| static bool |
| not_size_aligned (tree exp) |
| { |
| if (!host_integerp (TYPE_SIZE (TREE_TYPE (exp)), 1)) |
| return true; |
| |
| return (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (exp))) |
| > get_object_alignment (exp)); |
| } |
| |
| /* Function vector_alignment_reachable_p |
| |
| Return true if vector alignment for DR is reachable by peeling |
| a few loop iterations. Return false otherwise. */ |
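| /* For example (illustrative), with a 16-byte vector type: a 4-byte |
| access misaligned by 8 bytes can be brought to full alignment by |
| peeling 2 scalar iterations, but one misaligned by 6 bytes cannot, |
| because 6 is not a multiple of the 4-byte element size; the checks |
| below reject the latter case. */ |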
| |
| static bool |
| vector_alignment_reachable_p (struct data_reference *dr) |
| { |
| gimple stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
| |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| { |
| /* For interleaved access we peel only if the number of iterations in |
| the prologue loop (VF - misalignment) is a multiple of the |
| number of interleaved accesses. */ |
| int elem_size, mis_in_elements; |
| int nelements = TYPE_VECTOR_SUBPARTS (vectype); |
| |
| /* FORNOW: handle only known alignment. */ |
| if (!known_alignment_for_access_p (dr)) |
| return false; |
| |
| elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements; |
| mis_in_elements = DR_MISALIGNMENT (dr) / elem_size; |
| |
| if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info)) |
| return false; |
| } |
| |
| /* If the misalignment is known at compile time then allow peeling |
| only if natural alignment is reachable through peeling. */ |
| if (known_alignment_for_access_p (dr) && !aligned_access_p (dr)) |
| { |
| HOST_WIDE_INT elmsize = |
| int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize); |
| dump_printf (MSG_NOTE, |
| ". misalignment = %d. ", DR_MISALIGNMENT (dr)); |
| } |
| if (DR_MISALIGNMENT (dr) % elmsize) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "data size does not divide the misalignment.\n"); |
| return false; |
| } |
| } |
| |
| if (!known_alignment_for_access_p (dr)) |
| { |
| tree type = TREE_TYPE (DR_REF (dr)); |
| bool is_packed = not_size_aligned (DR_REF (dr)); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Unknown misalignment, is_packed = %d",is_packed); |
| if (targetm.vectorize.vector_alignment_reachable (type, is_packed)) |
| return true; |
| else |
| return false; |
| } |
| |
| return true; |
| } |
| |
| |
| /* Calculate the cost of the memory access represented by DR. */ |
| |
| static void |
| vect_get_data_access_cost (struct data_reference *dr, |
| unsigned int *inside_cost, |
| unsigned int *outside_cost, |
| stmt_vector_for_cost *body_cost_vec) |
| { |
| gimple stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)); |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| int ncopies = vf / nunits; |
| |
| if (DR_IS_READ (dr)) |
| vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost, |
| NULL, body_cost_vec, false); |
| else |
| vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "vect_get_data_access_cost: inside_cost = %d, " |
| "outside_cost = %d.", *inside_cost, *outside_cost); |
| } |
| |
| |
| static hashval_t |
| vect_peeling_hash (const void *elem) |
| { |
| const struct _vect_peel_info *peel_info; |
| |
| peel_info = (const struct _vect_peel_info *) elem; |
| return (hashval_t) peel_info->npeel; |
| } |
| |
| |
| static int |
| vect_peeling_hash_eq (const void *elem1, const void *elem2) |
| { |
| const struct _vect_peel_info *a, *b; |
| |
| a = (const struct _vect_peel_info *) elem1; |
| b = (const struct _vect_peel_info *) elem2; |
| return (a->npeel == b->npeel); |
| } |
| |
| |
| /* Insert DR into peeling hash table with NPEEL as key. */ |
| |
| static void |
| vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr, |
| int npeel) |
| { |
| struct _vect_peel_info elem, *slot; |
| void **new_slot; |
| bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true); |
| |
| elem.npeel = npeel; |
| slot = (vect_peel_info) htab_find (LOOP_VINFO_PEELING_HTAB (loop_vinfo), |
| &elem); |
| if (slot) |
| slot->count++; |
| else |
| { |
| slot = XNEW (struct _vect_peel_info); |
| slot->npeel = npeel; |
| slot->dr = dr; |
| slot->count = 1; |
| new_slot = htab_find_slot (LOOP_VINFO_PEELING_HTAB (loop_vinfo), slot, |
| INSERT); |
| *new_slot = slot; |
| } |
| |
| if (!supportable_dr_alignment && !flag_vect_cost_model) |
| slot->count += VECT_MAX_COST; |
| } |
| |
| |
| /* Traverse the peeling hash table to find the peeling option that aligns |
| the maximum number of data accesses. */ |
| |
| static int |
| vect_peeling_hash_get_most_frequent (void **slot, void *data) |
| { |
| vect_peel_info elem = (vect_peel_info) *slot; |
| vect_peel_extended_info max = (vect_peel_extended_info) data; |
| |
| if (elem->count > max->peel_info.count |
| || (elem->count == max->peel_info.count |
| && max->peel_info.npeel > elem->npeel)) |
| { |
| max->peel_info.npeel = elem->npeel; |
| max->peel_info.count = elem->count; |
| max->peel_info.dr = elem->dr; |
| } |
| |
| return 1; |
| } |
| |
| |
| /* Traverse the peeling hash table and calculate the cost for each peeling |
| option. Find the one with the lowest cost. */ |
| |
| static int |
| vect_peeling_hash_get_lowest_cost (void **slot, void *data) |
| { |
| vect_peel_info elem = (vect_peel_info) *slot; |
| vect_peel_extended_info min = (vect_peel_extended_info) data; |
| int save_misalignment, dummy; |
| unsigned int inside_cost = 0, outside_cost = 0, i; |
| gimple stmt = DR_STMT (elem->dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
| struct data_reference *dr; |
| stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec; |
| int single_iter_cost; |
| |
| prologue_cost_vec.create (2); |
| body_cost_vec.create (2); |
| epilogue_cost_vec.create (2); |
| |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| { |
| stmt = DR_STMT (dr); |
| stmt_info = vinfo_for_stmt (stmt); |
| /* For interleaving, only the alignment of the first access |
| matters. */ |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
| && GROUP_FIRST_ELEMENT (stmt_info) != stmt) |
| continue; |
| |
| save_misalignment = DR_MISALIGNMENT (dr); |
| vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel); |
| vect_get_data_access_cost (dr, &inside_cost, &outside_cost, |
| &body_cost_vec); |
| SET_DR_MISALIGNMENT (dr, save_misalignment); |
| } |
| |
| single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo); |
| outside_cost += vect_get_known_peeling_cost (loop_vinfo, elem->npeel, |
| &dummy, single_iter_cost, |
| &prologue_cost_vec, |
| &epilogue_cost_vec); |
| |
| /* Prologue and epilogue costs are added to the target model later. |
| These costs depend only on the scalar iteration cost, the |
| number of peeling iterations finally chosen, and the number of |
| misaligned statements. So discard the information found here. */ |
| prologue_cost_vec.release (); |
| epilogue_cost_vec.release (); |
| |
| if (inside_cost < min->inside_cost |
| || (inside_cost == min->inside_cost && outside_cost < min->outside_cost)) |
| { |
| min->inside_cost = inside_cost; |
| min->outside_cost = outside_cost; |
| min->body_cost_vec.release (); |
| min->body_cost_vec = body_cost_vec; |
| min->peel_info.dr = elem->dr; |
| min->peel_info.npeel = elem->npeel; |
| } |
| else |
| body_cost_vec.release (); |
| |
| return 1; |
| } |
| |
| |
| /* Choose best peeling option by traversing peeling hash table and either |
| choosing an option with the lowest cost (if cost model is enabled) or the |
| option that aligns as many accesses as possible. */ |
| |
| static struct data_reference * |
| vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo, |
| unsigned int *npeel, |
| stmt_vector_for_cost *body_cost_vec) |
| { |
| struct _vect_peel_extended_info res; |
| |
| res.peel_info.dr = NULL; |
| res.body_cost_vec = stmt_vector_for_cost(); |
| |
| if (flag_vect_cost_model) |
| { |
| res.inside_cost = INT_MAX; |
| res.outside_cost = INT_MAX; |
| htab_traverse (LOOP_VINFO_PEELING_HTAB (loop_vinfo), |
| vect_peeling_hash_get_lowest_cost, &res); |
| } |
| else |
| { |
| res.peel_info.count = 0; |
| htab_traverse (LOOP_VINFO_PEELING_HTAB (loop_vinfo), |
| vect_peeling_hash_get_most_frequent, &res); |
| } |
| |
| *npeel = res.peel_info.npeel; |
| *body_cost_vec = res.body_cost_vec; |
| return res.peel_info.dr; |
| } |
| |
| |
| /* Function vect_enhance_data_refs_alignment |
| |
| This pass will use loop versioning and loop peeling in order to enhance |
| the alignment of data references in the loop. |
| |
| FOR NOW: we assume that whatever versioning/peeling takes place, only the |
| original loop is to be vectorized. Any other loops that are created by |
| the transformations performed in this pass are not supposed to be |
| vectorized. This restriction will be relaxed. |
| |
| This pass will require a cost model to guide it whether to apply peeling |
| or versioning or a combination of the two. For example, the scheme that |
| Intel uses when given a loop with several memory accesses is as follows: |
| choose one memory access ('p') whose alignment you want to force by doing |
| peeling. Then, either (1) generate a loop in which 'p' is aligned and all |
| other accesses are not necessarily aligned, or (2) use loop versioning to |
| generate one loop in which all accesses are aligned, and another loop in |
| which only 'p' is necessarily aligned. |
| |
| ("Automatic Intra-Register Vectorization for the Intel Architecture", |
| Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International |
| Journal of Parallel Programming, Vol. 30, No. 2, April 2002.) |
| |
| Devising a cost model is the most critical aspect of this work. It will |
| guide us on which access to peel for, whether to use loop versioning, how |
| many versions to create, etc. The cost model will probably consist of |
| generic considerations as well as target specific considerations (on |
| powerpc for example, misaligned stores are more painful than misaligned |
| loads). |
| |
| Here are the general steps involved in alignment enhancements: |
| |
| -- original loop, before alignment analysis: |
| for (i=0; i<N; i++){ |
| x = q[i]; # DR_MISALIGNMENT(q) = unknown |
| p[i] = y; # DR_MISALIGNMENT(p) = unknown |
| } |
| |
| -- After vect_compute_data_refs_alignment: |
| for (i=0; i<N; i++){ |
| x = q[i]; # DR_MISALIGNMENT(q) = 3 |
| p[i] = y; # DR_MISALIGNMENT(p) = unknown |
| } |
| |
| -- Possibility 1: we do loop versioning: |
| if (p is aligned) { |
| for (i=0; i<N; i++){ # loop 1A |
| x = q[i]; # DR_MISALIGNMENT(q) = 3 |
| p[i] = y; # DR_MISALIGNMENT(p) = 0 |
| } |
| } |
| else { |
| for (i=0; i<N; i++){ # loop 1B |
| x = q[i]; # DR_MISALIGNMENT(q) = 3 |
| p[i] = y; # DR_MISALIGNMENT(p) = unaligned |
| } |
| } |
| |
| -- Possibility 2: we do loop peeling: |
| for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized). |
| x = q[i]; |
| p[i] = y; |
| } |
| for (i = 3; i < N; i++){ # loop 2A |
| x = q[i]; # DR_MISALIGNMENT(q) = 0 |
| p[i] = y; # DR_MISALIGNMENT(p) = unknown |
| } |
| |
| -- Possibility 3: combination of loop peeling and versioning: |
| for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized). |
| x = q[i]; |
| p[i] = y; |
| } |
| if (p is aligned) { |
| for (i = 3; i<N; i++){ # loop 3A |
| x = q[i]; # DR_MISALIGNMENT(q) = 0 |
| p[i] = y; # DR_MISALIGNMENT(p) = 0 |
| } |
| } |
| else { |
| for (i = 3; i<N; i++){ # loop 3B |
| x = q[i]; # DR_MISALIGNMENT(q) = 0 |
| p[i] = y; # DR_MISALIGNMENT(p) = unaligned |
| } |
| } |
| |
| These loops are later passed to loop_transform to be vectorized. The |
| vectorizer will use the alignment information to guide the transformation |
| (whether to generate regular loads/stores, or with special handling for |
| misalignment). */ |
| |
| bool |
| vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo) |
| { |
| vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| enum dr_alignment_support supportable_dr_alignment; |
| struct data_reference *dr0 = NULL, *first_store = NULL; |
| struct data_reference *dr; |
| unsigned int i, j; |
| bool do_peeling = false; |
| bool do_versioning = false; |
| bool stat; |
| gimple stmt; |
| stmt_vec_info stmt_info; |
| int vect_versioning_for_alias_required; |
| unsigned int npeel = 0; |
| bool all_misalignments_unknown = true; |
| unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| unsigned possible_npeel_number = 1; |
| tree vectype; |
| unsigned int nelements, mis, same_align_drs_max = 0; |
| stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost(); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_enhance_data_refs_alignment ==="); |
| |
| /* While cost model enhancements are expected in the future, the high level |
| view of the code at this time is as follows: |
| |
| A) If there is a misaligned access then see if peeling to align |
| this access can make all data references satisfy |
| vect_supportable_dr_alignment. If so, update data structures |
| as needed and return true. |
| |
| B) If peeling wasn't possible and there is a data reference with an |
| unknown misalignment that does not satisfy vect_supportable_dr_alignment |
| then see if loop versioning checks can be used to make all data |
| references satisfy vect_supportable_dr_alignment. If so, update |
| data structures as needed and return true. |
| |
| C) If neither peeling nor versioning were successful then return false if |
| any data reference does not satisfy vect_supportable_dr_alignment. |
| |
| D) Return true (all data references satisfy vect_supportable_dr_alignment). |
| |
| Note, Possibility 3 above (which is peeling and versioning together) is not |
| being done at this time. */ |
| |
| /* (1) Peeling to force alignment. */ |
| |
| /* (1.1) Decide whether to perform peeling, and how many iterations to peel: |
| Considerations: |
| + How many accesses will become aligned due to the peeling |
| - How many accesses will become unaligned due to the peeling, |
| and the cost of misaligned accesses. |
| - The cost of peeling (the extra runtime checks, the increase |
| in code size). */ |
| |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| { |
| stmt = DR_STMT (dr); |
| stmt_info = vinfo_for_stmt (stmt); |
| |
| if (!STMT_VINFO_RELEVANT_P (stmt_info)) |
| continue; |
| |
| /* For interleaving, only the alignment of the first access |
| matters. */ |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
| && GROUP_FIRST_ELEMENT (stmt_info) != stmt) |
| continue; |
| |
| /* For invariant accesses there is nothing to enhance. */ |
| if (integer_zerop (DR_STEP (dr))) |
| continue; |
| |
| /* Strided loads perform only component accesses; alignment is |
| irrelevant for them. */ |
| if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)) |
| continue; |
| |
| supportable_dr_alignment = vect_supportable_dr_alignment (dr, true); |
| do_peeling = vector_alignment_reachable_p (dr); |
| if (do_peeling) |
| { |
| if (known_alignment_for_access_p (dr)) |
| { |
| unsigned int npeel_tmp; |
| bool negative = tree_int_cst_compare (DR_STEP (dr), |
| size_zero_node) < 0; |
| |
| /* Save info about DR in the hash table. */ |
| if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo)) |
| LOOP_VINFO_PEELING_HTAB (loop_vinfo) = |
| htab_create (1, vect_peeling_hash, |
| vect_peeling_hash_eq, free); |
| |
| vectype = STMT_VINFO_VECTYPE (stmt_info); |
| nelements = TYPE_VECTOR_SUBPARTS (vectype); |
| mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE ( |
| TREE_TYPE (DR_REF (dr)))); |
| npeel_tmp = (negative |
| ? (mis - nelements) : (nelements - mis)) |
| & (nelements - 1); |
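| /* For example (illustrative): for a forward access with 4-byte |
| elements, a 16-byte vector (nelements == 4) and a misalignment of |
| 8 bytes, mis is 2 elements and npeel_tmp is (4 - 2) & 3 == 2 scalar |
| iterations. */ |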
| |
| /* For multiple types, it is possible that the bigger type access |
| will have more than one peeling option. E.g., a loop with two |
| types: one of size (vector size / 4), and the other one of |
| size (vector size / 8). The vectorization factor will be 8. |
| If both accesses are misaligned by 3, the first one needs one |
| scalar iteration to be aligned, and the second one needs 5. |
| But the first one will also be aligned by peeling 5 scalar |
| iterations, and in that case both accesses will be aligned. |
| Hence, except for the immediate peeling amount, we also want |
| to try to add the full vector size, as long as we don't exceed |
| the vectorization factor. |
| We do this automatically for the cost model, since we calculate |
| the cost for every peeling option. */ |
| if (!flag_vect_cost_model) |
| possible_npeel_number = vf / nelements; |
| |
| /* Handle the aligned case. We may decide to align some other |
| access, making DR unaligned. */ |
| if (DR_MISALIGNMENT (dr) == 0) |
| { |
| npeel_tmp = 0; |
| if (!flag_vect_cost_model) |
| possible_npeel_number++; |
| } |
| |
| for (j = 0; j < possible_npeel_number; j++) |
| { |
| gcc_assert (npeel_tmp <= vf); |
| vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp); |
| npeel_tmp += nelements; |
| } |
| |
| all_misalignments_unknown = false; |
| /* Data-ref that was chosen for the case that all the |
| misalignments are unknown is not relevant anymore, since we |
| have a data-ref with known alignment. */ |
| dr0 = NULL; |
| } |
| else |
| { |
| /* If we don't know all the misalignment values, we prefer |
| peeling for the data-ref that has the maximum number of data-refs |
| with the same alignment, unless the target prefers to align |
| stores over loads. */ |
| if (all_misalignments_unknown) |
| { |
| if (same_align_drs_max |
| < STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length () |
| || !dr0) |
| { |
| same_align_drs_max |
| = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length (); |
| dr0 = dr; |
| } |
| |
| if (!first_store && DR_IS_WRITE (dr)) |
| first_store = dr; |
| } |
| |
| /* If there are both known and unknown misaligned accesses in the |
| loop, we choose peeling amount according to the known |
| accesses. */ |
| |
| |
| if (!supportable_dr_alignment) |
| { |
| dr0 = dr; |
| if (!first_store && DR_IS_WRITE (dr)) |
| first_store = dr; |
| } |
| } |
| } |
| else |
| { |
| if (!aligned_access_p (dr)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "vector alignment may not be reachable"); |
| break; |
| } |
| } |
| } |
| |
| vect_versioning_for_alias_required |
| = LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo); |
| |
| /* Temporarily, if versioning for alias is required, we disable peeling |
| until we support peeling and versioning. Often peeling for alignment |
| will require peeling for loop-bound, which in turn requires that we |
| know how to adjust the loop ivs after the loop. */ |
| if (vect_versioning_for_alias_required |
| || !vect_can_advance_ivs_p (loop_vinfo) |
| || !slpeel_can_duplicate_loop_p (loop, single_exit (loop))) |
| do_peeling = false; |
| |
| if (do_peeling && all_misalignments_unknown |
| && vect_supportable_dr_alignment (dr0, false)) |
| { |
| |
      /* Check whether the target prefers to align stores over loads, i.e.,
         whether misaligned stores are more expensive than misaligned loads
         (taking drs with the same alignment into account).  */
| if (first_store && DR_IS_READ (dr0)) |
| { |
| unsigned int load_inside_cost = 0, load_outside_cost = 0; |
| unsigned int store_inside_cost = 0, store_outside_cost = 0; |
| unsigned int load_inside_penalty = 0, load_outside_penalty = 0; |
| unsigned int store_inside_penalty = 0, store_outside_penalty = 0; |
| stmt_vector_for_cost dummy; |
| dummy.create (2); |
| |
| vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost, |
| &dummy); |
| vect_get_data_access_cost (first_store, &store_inside_cost, |
| &store_outside_cost, &dummy); |
| |
| dummy.release (); |
| |
| /* Calculate the penalty for leaving FIRST_STORE unaligned (by |
| aligning the load DR0). */ |
| load_inside_penalty = store_inside_cost; |
| load_outside_penalty = store_outside_cost; |
| for (i = 0; |
| STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt ( |
| DR_STMT (first_store))).iterate (i, &dr); |
| i++) |
| if (DR_IS_READ (dr)) |
| { |
| load_inside_penalty += load_inside_cost; |
| load_outside_penalty += load_outside_cost; |
| } |
| else |
| { |
| load_inside_penalty += store_inside_cost; |
| load_outside_penalty += store_outside_cost; |
| } |
| |
| /* Calculate the penalty for leaving DR0 unaligned (by |
| aligning the FIRST_STORE). */ |
| store_inside_penalty = load_inside_cost; |
| store_outside_penalty = load_outside_cost; |
| for (i = 0; |
| STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt ( |
| DR_STMT (dr0))).iterate (i, &dr); |
| i++) |
| if (DR_IS_READ (dr)) |
| { |
| store_inside_penalty += load_inside_cost; |
| store_outside_penalty += load_outside_cost; |
| } |
| else |
| { |
| store_inside_penalty += store_inside_cost; |
| store_outside_penalty += store_outside_cost; |
| } |
| |
| if (load_inside_penalty > store_inside_penalty |
| || (load_inside_penalty == store_inside_penalty |
| && load_outside_penalty > store_outside_penalty)) |
| dr0 = first_store; |
| } |
| |
| /* In case there are only loads with different unknown misalignments, use |
| peeling only if it may help to align other accesses in the loop. */ |
| if (!first_store |
| && !STMT_VINFO_SAME_ALIGN_REFS ( |
| vinfo_for_stmt (DR_STMT (dr0))).length () |
| && vect_supportable_dr_alignment (dr0, false) |
| != dr_unaligned_supported) |
| do_peeling = false; |
| } |
| |
| if (do_peeling && !dr0) |
| { |
      /* Peeling is possible, but there is no data access that is unsupported
         when unaligned.  So we try to choose the best possible peeling.  */
| |
| /* We should get here only if there are drs with known misalignment. */ |
| gcc_assert (!all_misalignments_unknown); |
| |
| /* Choose the best peeling from the hash table. */ |
| dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel, |
| &body_cost_vec); |
| if (!dr0 || !npeel) |
| do_peeling = false; |
| } |
| |
| if (do_peeling) |
| { |
| stmt = DR_STMT (dr0); |
| stmt_info = vinfo_for_stmt (stmt); |
| vectype = STMT_VINFO_VECTYPE (stmt_info); |
| nelements = TYPE_VECTOR_SUBPARTS (vectype); |
| |
| if (known_alignment_for_access_p (dr0)) |
| { |
| bool negative = tree_int_cst_compare (DR_STEP (dr0), |
| size_zero_node) < 0; |
| if (!npeel) |
| { |
              /* Since it's known at compile time, compute the number of
                 iterations in the peeled loop (the peeling factor) for use in
                 updating DR_MISALIGNMENT values.  The peeling factor is the
                 number of vector elements minus the misalignment as an
                 element count (modulo the number of vector elements), with
                 the sign of the step taken into account.  */
| mis = DR_MISALIGNMENT (dr0); |
| mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0)))); |
| npeel = ((negative ? mis - nelements : nelements - mis) |
| & (nelements - 1)); |
| } |
| |
          /* For interleaved data access every iteration accesses all the
             members of the group; therefore we divide the number of
             iterations by the group size.  */
| stmt_info = vinfo_for_stmt (DR_STMT (dr0)); |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
| npeel /= GROUP_SIZE (stmt_info); |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Try peeling by %d", npeel); |
| } |
| |
| /* Ensure that all data refs can be vectorized after the peel. */ |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| { |
| int save_misalignment; |
| |
| if (dr == dr0) |
| continue; |
| |
| stmt = DR_STMT (dr); |
| stmt_info = vinfo_for_stmt (stmt); |
| /* For interleaving, only the alignment of the first access |
| matters. */ |
| if (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
| && GROUP_FIRST_ELEMENT (stmt_info) != stmt) |
| continue; |
| |
          /* Strided loads perform only component accesses; alignment is
             irrelevant for them.  */
| if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)) |
| continue; |
| |
| save_misalignment = DR_MISALIGNMENT (dr); |
| vect_update_misalignment_for_peel (dr, dr0, npeel); |
| supportable_dr_alignment = vect_supportable_dr_alignment (dr, false); |
| SET_DR_MISALIGNMENT (dr, save_misalignment); |
| |
| if (!supportable_dr_alignment) |
| { |
| do_peeling = false; |
| break; |
| } |
| } |
| |
| if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0) |
| { |
| stat = vect_verify_datarefs_alignment (loop_vinfo, NULL); |
| if (!stat) |
| do_peeling = false; |
| else |
| { |
| body_cost_vec.release (); |
| return stat; |
| } |
| } |
| |
| if (do_peeling) |
| { |
| stmt_info_for_cost *si; |
| void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); |
| |
| /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i. |
| If the misalignment of DR_i is identical to that of dr0 then set |
| DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and |
| dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i) |
| by the peeling factor times the element size of DR_i (MOD the |
| vectorization factor times the size). Otherwise, the |
| misalignment of DR_i must be set to unknown. */ |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| if (dr != dr0) |
| vect_update_misalignment_for_peel (dr, dr0, npeel); |
| |
| LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0; |
| if (npeel) |
| LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel; |
| else |
| LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = DR_MISALIGNMENT (dr0); |
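          /* When NPEEL is zero here the misalignment of DR0 is unknown (the
             known-alignment, zero-peel case was handled above), so the value
             just stored is negative, which signals that the number of
             prologue iterations has to be computed at runtime.  */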
| SET_DR_MISALIGNMENT (dr0, 0); |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Alignment of access forced using peeling."); |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Peeling for alignment will be applied."); |
| } |
| /* We've delayed passing the inside-loop peeling costs to the |
| target cost model until we were sure peeling would happen. |
| Do so now. */ |
| if (body_cost_vec.exists ()) |
| { |
| FOR_EACH_VEC_ELT (body_cost_vec, i, si) |
| { |
| struct _stmt_vec_info *stmt_info |
| = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; |
| (void) add_stmt_cost (data, si->count, si->kind, stmt_info, |
| si->misalign, vect_body); |
| } |
| body_cost_vec.release (); |
| } |
| |
| stat = vect_verify_datarefs_alignment (loop_vinfo, NULL); |
| gcc_assert (stat); |
| return stat; |
| } |
| } |
| |
| body_cost_vec.release (); |
| |
| /* (2) Versioning to force alignment. */ |
| |
| /* Try versioning if: |
| 1) flag_tree_vect_loop_version is TRUE |
| 2) optimize loop for speed |
| 3) there is at least one unsupported misaligned data ref with an unknown |
| misalignment, and |
| 4) all misaligned data refs with a known misalignment are supported, and |
| 5) the number of runtime alignment checks is within reason. */ |
| |
| do_versioning = |
| flag_tree_vect_loop_version |
| && optimize_loop_nest_for_speed_p (loop) |
| && (!loop->inner); /* FORNOW */ |
| |
| if (do_versioning) |
| { |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| { |
| stmt = DR_STMT (dr); |
| stmt_info = vinfo_for_stmt (stmt); |
| |
| /* For interleaving, only the alignment of the first access |
| matters. */ |
| if (aligned_access_p (dr) |
| || (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
| && GROUP_FIRST_ELEMENT (stmt_info) != stmt)) |
| continue; |
| |
          /* Strided loads perform only component accesses; alignment is
             irrelevant for them.  */
| if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)) |
| continue; |
| |
| supportable_dr_alignment = vect_supportable_dr_alignment (dr, false); |
| |
| if (!supportable_dr_alignment) |
| { |
| gimple stmt; |
| int mask; |
| tree vectype; |
| |
| if (known_alignment_for_access_p (dr) |
| || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length () |
| >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS)) |
| { |
| do_versioning = false; |
| break; |
| } |
| |
| stmt = DR_STMT (dr); |
| vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)); |
| gcc_assert (vectype); |
| |
| /* The rightmost bits of an aligned address must be zeros. |
| Construct the mask needed for this test. For example, |
| GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the |
| mask must be 15 = 0xf. */ |
| mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1; |
| |
| /* FORNOW: use the same mask to test all potentially unaligned |
| references in the loop. The vectorizer currently supports |
| a single vector size, see the reference to |
| GET_MODE_NUNITS (TYPE_MODE (vectype)) where the |
| vectorization factor is computed. */ |
| gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo) |
| || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask); |
| LOOP_VINFO_PTR_MASK (loop_vinfo) = mask; |
| LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push ( |
| DR_STMT (dr)); |
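              /* Illustration: with a 16-byte vector mode MASK is 0xf, and an
                 address A passes the runtime test iff (A & 0xf) == 0.  The
                 versioning code later combines the collected references into
                 a single guard, essentially OR-ing their addresses together
                 and testing the result against MASK.  */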
| } |
| } |
| |
| /* Versioning requires at least one misaligned data reference. */ |
| if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)) |
| do_versioning = false; |
| else if (!do_versioning) |
| LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0); |
| } |
| |
| if (do_versioning) |
| { |
| vec<gimple> may_misalign_stmts |
| = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo); |
| gimple stmt; |
| |
| /* It can now be assumed that the data references in the statements |
| in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version |
| of the loop being vectorized. */ |
| FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt) |
| { |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| dr = STMT_VINFO_DATA_REF (stmt_info); |
| SET_DR_MISALIGNMENT (dr, 0); |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Alignment of access forced using versioning."); |
| } |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Versioning for alignment will be applied."); |
| |
| /* Peeling and versioning can't be done together at this time. */ |
| gcc_assert (! (do_peeling && do_versioning)); |
| |
| stat = vect_verify_datarefs_alignment (loop_vinfo, NULL); |
| gcc_assert (stat); |
| return stat; |
| } |
| |
| /* This point is reached if neither peeling nor versioning is being done. */ |
| gcc_assert (! (do_peeling || do_versioning)); |
| |
| stat = vect_verify_datarefs_alignment (loop_vinfo, NULL); |
| return stat; |
| } |
| |
| |
| /* Function vect_find_same_alignment_drs. |
| |
| Update group and alignment relations according to the chosen |
| vectorization factor. */ |
| |
| static void |
| vect_find_same_alignment_drs (struct data_dependence_relation *ddr, |
| loop_vec_info loop_vinfo) |
| { |
| unsigned int i; |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
| struct data_reference *dra = DDR_A (ddr); |
| struct data_reference *drb = DDR_B (ddr); |
| stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra)); |
| stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb)); |
| int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra)))); |
| int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb)))); |
| lambda_vector dist_v; |
| unsigned int loop_depth; |
| |
| if (DDR_ARE_DEPENDENT (ddr) == chrec_known) |
| return; |
| |
| if (dra == drb) |
| return; |
| |
| if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) |
| return; |
| |
| /* Loop-based vectorization and known data dependence. */ |
| if (DDR_NUM_DIST_VECTS (ddr) == 0) |
| return; |
| |
| /* Data-dependence analysis reports a distance vector of zero |
| for data-references that overlap only in the first iteration |
     but have steps of different sign (see PR45764).
| So as a sanity check require equal DR_STEP. */ |
| if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0)) |
| return; |
| |
| loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr)); |
| FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v) |
| { |
| int dist = dist_v[loop_depth]; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "dependence distance = %d.", dist); |
| |
| /* Same loop iteration. */ |
| if (dist == 0 |
| || (dist % vectorization_factor == 0 && dra_size == drb_size)) |
| { |
          /* Two references with distance zero, or with a distance that is a
             multiple of the vectorization factor and equal element sizes,
             have the same alignment.  */
| STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb); |
| STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra); |
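          /* For instance (illustrative): with a vectorization factor of 4,
             two accesses to the same array four elements apart have
             dependence distance 4, which is 0 modulo VF, so whatever
             alignment peeling or versioning establishes for one of them
             also holds for the other.  */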
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "accesses have the same alignment."); |
| dump_printf (MSG_NOTE, |
| "dependence distance modulo vf == 0 between "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); |
| } |
| } |
| } |
| } |
| |
| |
| /* Function vect_analyze_data_refs_alignment |
| |
| Analyze the alignment of the data-references in the loop. |
| Return FALSE if a data reference is found that cannot be vectorized. */ |
| |
| bool |
| vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo, |
| bb_vec_info bb_vinfo) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_analyze_data_refs_alignment ==="); |
| |
| /* Mark groups of data references with same alignment using |
| data dependence information. */ |
| if (loop_vinfo) |
| { |
| vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo); |
| struct data_dependence_relation *ddr; |
| unsigned int i; |
| |
| FOR_EACH_VEC_ELT (ddrs, i, ddr) |
| vect_find_same_alignment_drs (ddr, loop_vinfo); |
| } |
| |
| if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: can't calculate alignment " |
| "for data ref."); |
| return false; |
| } |
| |
| return true; |
| } |
| |
| |
| /* Analyze groups of accesses: check that DR belongs to a group of |
| accesses of legal size, step, etc. Detect gaps, single element |
| interleaving, and other special cases. Set grouped access info. |
| Collect groups of strided stores for further use in SLP analysis. */ |
| |
| static bool |
| vect_analyze_group_access (struct data_reference *dr) |
| { |
| tree step = DR_STEP (dr); |
| tree scalar_type = TREE_TYPE (DR_REF (dr)); |
| HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); |
| gimple stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
| HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); |
| HOST_WIDE_INT groupsize, last_accessed_element = 1; |
| bool slp_impossible = false; |
| struct loop *loop = NULL; |
| |
| if (loop_vinfo) |
| loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the |
| size of the interleaving group (including gaps). */ |
| groupsize = dr_step / type_size; |
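  /* For example (illustrative): with 4-byte elements and DR_STEP == 16
     bytes, GROUPSIZE == 4, i.e. each scalar iteration steps over a group
     of four elements, whether or not all of them are accessed.  */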
| |
  /* A non-consecutive access is possible only if it is part of an
     interleaving group.  */
| if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) |
| { |
      /* Check if this DR is part of an interleaving group and is the only
         element of the group that is accessed in the loop.  */
| |
| /* Gaps are supported only for loads. STEP must be a multiple of the type |
| size. The size of the group must be a power of 2. */ |
| if (DR_IS_READ (dr) |
| && (dr_step % type_size) == 0 |
| && groupsize > 0 |
| && exact_log2 (groupsize) != -1) |
| { |
| GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt; |
| GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize; |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Detected single element interleaving "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr)); |
| dump_printf (MSG_NOTE, " step "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, step); |
| } |
| |
| if (loop_vinfo) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Data access with gaps requires scalar " |
| "epilogue loop"); |
| if (loop->inner) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Peeling for outer loop is not" |
| " supported"); |
| return false; |
| } |
| |
| LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true; |
| } |
| |
| return true; |
| } |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not consecutive access "); |
| dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); |
| } |
| |
| if (bb_vinfo) |
| { |
| /* Mark the statement as unvectorizable. */ |
| STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false; |
| return true; |
| } |
| |
| return false; |
| } |
| |
| if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt) |
| { |
| /* First stmt in the interleaving chain. Check the chain. */ |
| gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)); |
| struct data_reference *data_ref = dr; |
| unsigned int count = 1; |
| tree next_step; |
| tree prev_init = DR_INIT (data_ref); |
| gimple prev = stmt; |
| HOST_WIDE_INT diff, count_in_bytes, gaps = 0; |
| |
| while (next) |
| { |
          /* Skip same data-refs.  If two or more stmts share a data-ref
             (supported only for loads), we vectorize only the first stmt,
             and the rest get their vectorized loads from the first one.  */
| if (!tree_int_cst_compare (DR_INIT (data_ref), |
| DR_INIT (STMT_VINFO_DATA_REF ( |
| vinfo_for_stmt (next))))) |
| { |
| if (DR_IS_WRITE (data_ref)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Two store stmts share the same dr."); |
| return false; |
| } |
| |
              /* Check that there are no load-store dependences for these
                 loads, to prevent a load-store-load sequence to the same
                 location.  */
| if (GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next)) |
| || GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev))) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "READ_WRITE dependence in interleaving."); |
| return false; |
| } |
| |
| /* For load use the same data-ref load. */ |
| GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev; |
| |
| prev = next; |
| next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next)); |
| continue; |
| } |
| |
| prev = next; |
| |
| /* Check that all the accesses have the same STEP. */ |
| next_step = DR_STEP (STMT_VINFO_DATA_REF (vinfo_for_stmt (next))); |
| if (tree_int_cst_compare (step, next_step)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not consecutive access in interleaving"); |
| return false; |
| } |
| |
| data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next)); |
| /* Check that the distance between two accesses is equal to the type |
| size. Otherwise, we have gaps. */ |
| diff = (TREE_INT_CST_LOW (DR_INIT (data_ref)) |
| - TREE_INT_CST_LOW (prev_init)) / type_size; |
| if (diff != 1) |
| { |
| /* FORNOW: SLP of accesses with gaps is not supported. */ |
| slp_impossible = true; |
| if (DR_IS_WRITE (data_ref)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "interleaved store with gaps"); |
| return false; |
| } |
| |
| gaps += diff - 1; |
| } |
| |
| last_accessed_element += diff; |
| |
| /* Store the gap from the previous member of the group. If there is no |
| gap in the access, GROUP_GAP is always 1. */ |
| GROUP_GAP (vinfo_for_stmt (next)) = diff; |
| |
| prev_init = DR_INIT (data_ref); |
| next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next)); |
| /* Count the number of data-refs in the chain. */ |
| count++; |
| } |
| |
      /* COUNT is the number of accesses found; we multiply it by the size of
         the type to get COUNT_IN_BYTES.  */
| count_in_bytes = type_size * count; |
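      /* A worked example (illustrative layout): three 4-byte accesses with
         DR_INITs 0, 4 and 12 give diffs of 1 and 2, so COUNT == 3, GAPS == 1
         and COUNT_IN_BYTES == 12.  The check below then requires
         DR_STEP >= 12 + 1 * 4 == 16, i.e. the whole group, gaps included,
         must fit within a single step.  */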
| |
| /* Check that the size of the interleaving (including gaps) is not |
| greater than STEP. */ |
| if (dr_step && dr_step < count_in_bytes + gaps * type_size) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "interleaving size is greater than step for "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dr)); |
| } |
| return false; |
| } |
| |
| /* Check that the size of the interleaving is equal to STEP for stores, |
| i.e., that there are no gaps. */ |
| if (dr_step && dr_step != count_in_bytes) |
| { |
| if (DR_IS_READ (dr)) |
| { |
| slp_impossible = true; |
              /* There is a gap after the last load in the group.  This gap
                 is the difference between the group size and the number of
                 accessed elements; when there is no gap, the difference is
                 0.  */
| GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - count; |
| } |
| else |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "interleaved store with gaps"); |
| return false; |
| } |
| } |
| |
| /* Check that STEP is a multiple of type size. */ |
| if (dr_step && (dr_step % type_size) != 0) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "step is not a multiple of type size: step "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, step); |
| dump_printf (MSG_MISSED_OPTIMIZATION, " size "); |
| dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
| TYPE_SIZE_UNIT (scalar_type)); |
| } |
| return false; |
| } |
| |
| if (groupsize == 0) |
| groupsize = count; |
| |
| GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize; |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "Detected interleaving of size %d", (int)groupsize); |
| |
| /* SLP: create an SLP data structure for every interleaving group of |
         stores for further analysis in vect_analyze_slp.  */
| if (DR_IS_WRITE (dr) && !slp_impossible) |
| { |
| if (loop_vinfo) |
| LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt); |
| if (bb_vinfo) |
| BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt); |
| } |
| |
      /* There is a gap at the end of the group.  */
| if (groupsize - last_accessed_element > 0 && loop_vinfo) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Data access with gaps requires scalar " |
| "epilogue loop"); |
| if (loop->inner) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "Peeling for outer loop is not supported"); |
| return false; |
| } |
| |
| LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true; |
| } |
| } |
| |
| return true; |
| } |
| |
| |
| /* Analyze the access pattern of the data-reference DR. |
| In case of non-consecutive accesses call vect_analyze_group_access() to |
| analyze groups of accesses. */ |
| |
| static bool |
| vect_analyze_data_ref_access (struct data_reference *dr) |
| { |
| tree step = DR_STEP (dr); |
| tree scalar_type = TREE_TYPE (DR_REF (dr)); |
| gimple stmt = DR_STMT (dr); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
| struct loop *loop = NULL; |
| |
| if (loop_vinfo) |
| loop = LOOP_VINFO_LOOP (loop_vinfo); |
| |
| if (loop_vinfo && !step) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "bad data-ref access in loop"); |
| return false; |
| } |
| |
  /* Allow invariant loads in loops that are not nested.  */
| if (loop_vinfo && integer_zerop (step)) |
| { |
| GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL; |
| if (nested_in_vect_loop_p (loop, stmt)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "zero step in inner loop of nest"); |
| return false; |
| } |
| return DR_IS_READ (dr); |
| } |
| |
| if (loop && nested_in_vect_loop_p (loop, stmt)) |
| { |
| /* Interleaved accesses are not yet supported within outer-loop |
| vectorization for references in the inner-loop. */ |
| GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL; |
| |
| /* For the rest of the analysis we use the outer-loop step. */ |
| step = STMT_VINFO_DR_STEP (stmt_info); |
| if (integer_zerop (step)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "zero step in outer loop."); |
| if (DR_IS_READ (dr)) |
| return true; |
| else |
| return false; |
| } |
| } |
| |
| /* Consecutive? */ |
| if (TREE_CODE (step) == INTEGER_CST) |
| { |
| HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); |
| if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type)) |
| || (dr_step < 0 |
| && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step))) |
| { |
| /* Mark that it is not interleaving. */ |
| GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL; |
| return true; |
| } |
| } |
| |
| if (loop && nested_in_vect_loop_p (loop, stmt)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "grouped access in outer loop."); |
| return false; |
| } |
| |
  /* Assume this is a DR handled by the non-constant strided-load case.  */
| if (TREE_CODE (step) != INTEGER_CST) |
| return STMT_VINFO_STRIDE_LOAD_P (stmt_info); |
| |
| /* Not consecutive access - check if it's a part of interleaving group. */ |
| return vect_analyze_group_access (dr); |
| } |
| |
| |
| /* Function vect_analyze_data_ref_accesses. |
| |
| Analyze the access pattern of all the data references in the loop. |
| |
| FORNOW: the only access pattern that is considered vectorizable is a |
| simple step 1 (consecutive) access. |
| |
| FORNOW: handle only arrays and pointer accesses. */ |
| |
| bool |
| vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo) |
| { |
| unsigned int i; |
| vec<data_reference_p> datarefs; |
| struct data_reference *dr; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_analyze_data_ref_accesses ==="); |
| |
| if (loop_vinfo) |
| datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
| else |
| datarefs = BB_VINFO_DATAREFS (bb_vinfo); |
| |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) |
| && !vect_analyze_data_ref_access (dr)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: complicated access pattern."); |
| |
| if (bb_vinfo) |
| { |
| /* Mark the statement as not vectorizable. */ |
| STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false; |
| continue; |
| } |
| else |
| return false; |
| } |
| |
| return true; |
| } |
| |
| /* Function vect_prune_runtime_alias_test_list. |
| |
| Prune a list of ddrs to be tested at run-time by versioning for alias. |
   Return FALSE if the resulting list of ddrs is longer than allowed by
| PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */ |
| |
| bool |
| vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo) |
| { |
| vec<ddr_p> ddrs = |
| LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo); |
| unsigned i, j; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_prune_runtime_alias_test_list ==="); |
| |
| for (i = 0; i < ddrs.length (); ) |
| { |
| bool found; |
| ddr_p ddr_i; |
| |
| ddr_i = ddrs[i]; |
| found = false; |
| |
| for (j = 0; j < i; j++) |
| { |
| ddr_p ddr_j = ddrs[j]; |
| |
| if (vect_vfa_range_equal (ddr_i, ddr_j)) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "found equal ranges "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr_i))); |
| dump_printf (MSG_NOTE, ", "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr_i))); |
| dump_printf (MSG_NOTE, " and "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr_j))); |
| dump_printf (MSG_NOTE, ", "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr_j))); |
| } |
| found = true; |
| break; |
| } |
| } |
| |
| if (found) |
| { |
| ddrs.ordered_remove (i); |
| continue; |
| } |
| i++; |
| } |
| |
| if (ddrs.length () > |
| (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS)) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "disable versioning for alias - max number of " |
| "generated checks exceeded."); |
| } |
| |
| LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).truncate (0); |
| |
| return false; |
| } |
| |
| return true; |
| } |
| |
/* Check whether a non-affine read in STMT is suitable for a gather load
   and if so, return a builtin decl for that operation.  */
| |
| tree |
| vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep, |
| tree *offp, int *scalep) |
| { |
| HOST_WIDE_INT scale = 1, pbitpos, pbitsize; |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); |
| tree offtype = NULL_TREE; |
| tree decl, base, off; |
| enum machine_mode pmode; |
| int punsignedp, pvolatilep; |
| |
| /* The gather builtins need address of the form |
| loop_invariant + vector * {1, 2, 4, 8} |
| or |
| loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }. |
| Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture |
| of loop invariants/SSA_NAMEs defined in the loop, with casts, |
| multiplications and additions in it. To get a vector, we need |
| a single SSA_NAME that will be defined in the loop and will |
| contain everything that is not loop invariant and that can be |
     vectorized.  The following code attempts to find such a preexisting
| SSA_NAME OFF and put the loop invariants into a tree BASE |
| that can be gimplified before the loop. */ |
| base = get_inner_reference (DR_REF (dr), &pbitsize, &pbitpos, &off, |
| &pmode, &punsignedp, &pvolatilep, false); |
| gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0); |
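  /* As an illustration (assumed source, not a specific testcase): for a read
     "a[b[i]]" with 4-byte elements, get_inner_reference gives base == a and
     off == (sizetype) b_i * 4.  The loop below extracts SCALE == 4 from the
     multiplication and strips the conversion, leaving OFF as the SSA_NAME
     holding b[i] and BASE as &a plus the constant byte offset.  */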
| |
| if (TREE_CODE (base) == MEM_REF) |
| { |
| if (!integer_zerop (TREE_OPERAND (base, 1))) |
| { |
| if (off == NULL_TREE) |
| { |
| double_int moff = mem_ref_offset (base); |
| off = double_int_to_tree (sizetype, moff); |
| } |
| else |
| off = size_binop (PLUS_EXPR, off, |
| fold_convert (sizetype, TREE_OPERAND (base, 1))); |
| } |
| base = TREE_OPERAND (base, 0); |
| } |
| else |
| base = build_fold_addr_expr (base); |
| |
| if (off == NULL_TREE) |
| off = size_zero_node; |
| |
  /* If base is not loop invariant, then if off is 0 we start with just
     the constant offset in the loop invariant BASE and continue with base
     as OFF; otherwise give up.
| We could handle that case by gimplifying the addition of base + off |
| into some SSA_NAME and use that as off, but for now punt. */ |
| if (!expr_invariant_in_loop_p (loop, base)) |
| { |
| if (!integer_zerop (off)) |
| return NULL_TREE; |
| off = base; |
| base = size_int (pbitpos / BITS_PER_UNIT); |
| } |
| /* Otherwise put base + constant offset into the loop invariant BASE |
| and continue with OFF. */ |
| else |
| { |
| base = fold_convert (sizetype, base); |
| base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT)); |
| } |
| |
| /* OFF at this point may be either a SSA_NAME or some tree expression |
| from get_inner_reference. Try to peel off loop invariants from it |
| into BASE as long as possible. */ |
| STRIP_NOPS (off); |
| while (offtype == NULL_TREE) |
| { |
| enum tree_code code; |
| tree op0, op1, add = NULL_TREE; |
| |
| if (TREE_CODE (off) == SSA_NAME) |
| { |
| gimple def_stmt = SSA_NAME_DEF_STMT (off); |
| |
| if (expr_invariant_in_loop_p (loop, off)) |
| return NULL_TREE; |
| |
| if (gimple_code (def_stmt) != GIMPLE_ASSIGN) |
| break; |
| |
| op0 = gimple_assign_rhs1 (def_stmt); |
| code = gimple_assign_rhs_code (def_stmt); |
| op1 = gimple_assign_rhs2 (def_stmt); |
| } |
| else |
| { |
| if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS) |
| return NULL_TREE; |
| code = TREE_CODE (off); |
| extract_ops_from_tree (off, &code, &op0, &op1); |
| } |
| switch (code) |
| { |
| case POINTER_PLUS_EXPR: |
| case PLUS_EXPR: |
| if (expr_invariant_in_loop_p (loop, op0)) |
| { |
| add = op0; |
| off = op1; |
| do_add: |
| add = fold_convert (sizetype, add); |
| if (scale != 1) |
| add = size_binop (MULT_EXPR, add, size_int (scale)); |
| base = size_binop (PLUS_EXPR, base, add); |
| continue; |
| } |
| if (expr_invariant_in_loop_p (loop, op1)) |
| { |
| add = op1; |
| off = op0; |
| goto do_add; |
| } |
| break; |
| case MINUS_EXPR: |
| if (expr_invariant_in_loop_p (loop, op1)) |
| { |
| add = fold_convert (sizetype, op1); |
| add = size_binop (MINUS_EXPR, size_zero_node, add); |
| off = op0; |
| goto do_add; |
| } |
| break; |
| case MULT_EXPR: |
| if (scale == 1 && host_integerp (op1, 0)) |
| { |
| scale = tree_low_cst (op1, 0); |
| off = op0; |
| continue; |
| } |
| break; |
| case SSA_NAME: |
| off = op0; |
| continue; |
| CASE_CONVERT: |
| if (!POINTER_TYPE_P (TREE_TYPE (op0)) |
| && !INTEGRAL_TYPE_P (TREE_TYPE (op0))) |
| break; |
| if (TYPE_PRECISION (TREE_TYPE (op0)) |
| == TYPE_PRECISION (TREE_TYPE (off))) |
| { |
| off = op0; |
| continue; |
| } |
| if (TYPE_PRECISION (TREE_TYPE (op0)) |
| < TYPE_PRECISION (TREE_TYPE (off))) |
| { |
| off = op0; |
| offtype = TREE_TYPE (off); |
| STRIP_NOPS (off); |
| continue; |
| } |
| break; |
| default: |
| break; |
| } |
| break; |
| } |
| |
| /* If at the end OFF still isn't a SSA_NAME or isn't |
| defined in the loop, punt. */ |
| if (TREE_CODE (off) != SSA_NAME |
| || expr_invariant_in_loop_p (loop, off)) |
| return NULL_TREE; |
| |
| if (offtype == NULL_TREE) |
| offtype = TREE_TYPE (off); |
| |
| decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info), |
| offtype, scale); |
| if (decl == NULL_TREE) |
| return NULL_TREE; |
| |
| if (basep) |
| *basep = base; |
| if (offp) |
| *offp = off; |
| if (scalep) |
| *scalep = scale; |
| return decl; |
| } |
| |
/* Check whether a non-affine load in STMT (being in the loop referred to
   in LOOP_VINFO) is suitable for handling as a strided load.  That is the
   case if its address is a simple induction variable with a loop-invariant
   step.
| |
| This handles ARRAY_REFs (with variant index) and MEM_REFs (with variant |
| base pointer) only. */ |
| |
| static bool |
| vect_check_strided_load (gimple stmt, loop_vec_info loop_vinfo) |
| { |
| struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
| stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
| struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); |
| tree base, off; |
| affine_iv iv; |
| |
| if (!DR_IS_READ (dr)) |
| return false; |
| |
| base = DR_REF (dr); |
| |
| if (TREE_CODE (base) == ARRAY_REF) |
| { |
| off = TREE_OPERAND (base, 1); |
| base = TREE_OPERAND (base, 0); |
| } |
| else if (TREE_CODE (base) == MEM_REF) |
| { |
| off = TREE_OPERAND (base, 0); |
| base = TREE_OPERAND (base, 1); |
| } |
| else |
| return false; |
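  /* Illustrative example (assumed source): for a loop of the form
       for (...) { ... = a[k]; k += stride; }
     with loop-invariant STRIDE, the ARRAY_REF gives base == a and off == k;
     K is an SSA_NAME and a simple induction variable with step STRIDE, so
     the checks below succeed.  */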
| |
| if (TREE_CODE (off) != SSA_NAME) |
| return false; |
| |
| if (!expr_invariant_in_loop_p (loop, base) |
| || !simple_iv (loop, loop_containing_stmt (stmt), off, &iv, true)) |
| return false; |
| |
| return true; |
| } |
| |
| /* Function vect_analyze_data_refs. |
| |
| Find all the data references in the loop or basic block. |
| |
| The general structure of the analysis of data refs in the vectorizer is as |
| follows: |
| 1- vect_analyze_data_refs(loop/bb): call |
| compute_data_dependences_for_loop/bb to find and analyze all data-refs |
| in the loop/bb and their dependences. |
| 2- vect_analyze_dependences(): apply dependence testing using ddrs. |
| 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok. |
| 4- vect_analyze_drs_access(): check that ref_stmt.step is ok. |
| |
| */ |
| |
| bool |
| vect_analyze_data_refs (loop_vec_info loop_vinfo, |
| bb_vec_info bb_vinfo, |
| int *min_vf) |
| { |
| struct loop *loop = NULL; |
| basic_block bb = NULL; |
| unsigned int i; |
| vec<data_reference_p> datarefs; |
| struct data_reference *dr; |
| tree scalar_type; |
| bool res, stop_bb_analysis = false; |
| |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "=== vect_analyze_data_refs ===\n"); |
| |
| if (loop_vinfo) |
| { |
| loop = LOOP_VINFO_LOOP (loop_vinfo); |
| res = compute_data_dependences_for_loop |
| (loop, true, |
| &LOOP_VINFO_LOOP_NEST (loop_vinfo), |
| &LOOP_VINFO_DATAREFS (loop_vinfo), |
| &LOOP_VINFO_DDRS (loop_vinfo)); |
| |
| if (!res) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: loop contains function calls" |
| " or data references that cannot be analyzed"); |
| return false; |
| } |
| |
| datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
| } |
| else |
| { |
| gimple_stmt_iterator gsi; |
| |
| bb = BB_VINFO_BB (bb_vinfo); |
| for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple stmt = gsi_stmt (gsi); |
| if (!find_data_references_in_stmt (NULL, stmt, |
| &BB_VINFO_DATAREFS (bb_vinfo))) |
| { |
| /* Mark the rest of the basic-block as unvectorizable. */ |
| for (; !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| stmt = gsi_stmt (gsi); |
| STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)) = false; |
| } |
| break; |
| } |
| } |
| if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo), |
| &BB_VINFO_DDRS (bb_vinfo), |
| vNULL, true)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: basic block contains function" |
| " calls or data references that cannot be" |
| " analyzed"); |
| return false; |
| } |
| |
| datarefs = BB_VINFO_DATAREFS (bb_vinfo); |
| } |
| |
| /* Go through the data-refs, check that the analysis succeeded. Update |
| pointer from stmt_vec_info struct to DR and vectype. */ |
| |
| FOR_EACH_VEC_ELT (datarefs, i, dr) |
| { |
| gimple stmt; |
| stmt_vec_info stmt_info; |
| tree base, offset, init; |
| bool gather = false; |
| int vf; |
| |
| if (!dr || !DR_REF (dr)) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: unhandled data-ref "); |
| return false; |
| } |
| |
| stmt = DR_STMT (dr); |
| stmt_info = vinfo_for_stmt (stmt); |
| |
| if (stop_bb_analysis) |
| { |
| STMT_VINFO_VECTORIZABLE (stmt_info) = false; |
| continue; |
| } |
| |
| /* Check that analysis of the data-ref succeeded. */ |
| if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr) |
| || !DR_STEP (dr)) |
| { |
          /* If the target supports vector gather loads, see if they can
             be used.  */
| if (loop_vinfo |
| && DR_IS_READ (dr) |
| && !TREE_THIS_VOLATILE (DR_REF (dr)) |
| && targetm.vectorize.builtin_gather != NULL |
| && !nested_in_vect_loop_p (loop, stmt)) |
| { |
| struct data_reference *newdr |
| = create_data_ref (NULL, loop_containing_stmt (stmt), |
| DR_REF (dr), stmt, true); |
| gcc_assert (newdr != NULL && DR_REF (newdr)); |
| if (DR_BASE_ADDRESS (newdr) |
| && DR_OFFSET (newdr) |
| && DR_INIT (newdr) |
| && DR_STEP (newdr) |
| && integer_zerop (DR_STEP (newdr))) |
| { |
| dr = newdr; |
| gather = true; |
| } |
| else |
| free_data_ref (newdr); |
| } |
| |
| if (!gather) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: data ref analysis " |
| "failed "); |
| dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); |
| } |
| |
| if (bb_vinfo) |
| { |
| STMT_VINFO_VECTORIZABLE (stmt_info) = false; |
| stop_bb_analysis = true; |
| continue; |
| } |
| |
| return false; |
| } |
| } |
| |
| if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST) |
| { |
| if (dump_enabled_p ()) |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: base addr of dr is a " |
| "constant"); |
| |
| if (bb_vinfo) |
| { |
| STMT_VINFO_VECTORIZABLE (stmt_info) = false; |
| stop_bb_analysis = true; |
| continue; |
| } |
| |
| if (gather) |
| free_data_ref (dr); |
| return false; |
| } |
| |
| if (TREE_THIS_VOLATILE (DR_REF (dr))) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: volatile type "); |
| dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); |
| } |
| |
| if (bb_vinfo) |
| { |
| STMT_VINFO_VECTORIZABLE (stmt_info) = false; |
| stop_bb_analysis = true; |
| continue; |
| } |
| |
| return false; |
| } |
| |
| if (stmt_can_throw_internal (stmt)) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: statement can throw an " |
| "exception "); |
| dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); |
| } |
| |
| if (bb_vinfo) |
| { |
| STMT_VINFO_VECTORIZABLE (stmt_info) = false; |
| stop_bb_analysis = true; |
| continue; |
| } |
| |
| if (gather) |
| free_data_ref (dr); |
| return false; |
| } |
| |
| if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF |
| && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1))) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: statement is bitfield " |
| "access "); |
| dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); |
| } |
| |
| if (bb_vinfo) |
| { |
| STMT_VINFO_VECTORIZABLE (stmt_info) = false; |
| stop_bb_analysis = true; |
| continue; |
| } |
| |
| if (gather) |
| free_data_ref (dr); |
| return false; |
| } |
| |
| base = unshare_expr (DR_BASE_ADDRESS (dr)); |
| offset = unshare_expr (DR_OFFSET (dr)); |
| init = unshare_expr (DR_INIT (dr)); |
| |
| if (is_gimple_call (stmt)) |
| { |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
| "not vectorized: dr in a call "); |
| dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); |
| } |
| |
| if (bb_vinfo) |
| { |
| STMT_VINFO_VECTORIZABLE (stmt_info) = false; |
| stop_bb_analysis = true; |
| continue; |
| } |
| |
| if (gather) |
| free_data_ref (dr); |
| return false; |
| } |
| |
| /* Update DR field in stmt_vec_info struct. */ |
| |
      /* If the dataref is in an inner-loop of the loop that is considered
         for vectorization, we also want to analyze the access relative to
| the outer-loop (DR contains information only relative to the |
| inner-most enclosing loop). We do that by building a reference to the |
| first location accessed by the inner-loop, and analyze it relative to |
| the outer-loop. */ |
| if (loop && nested_in_vect_loop_p (loop, stmt)) |
| { |
| tree outer_step, outer_base, outer_init; |
| HOST_WIDE_INT pbitsize, pbitpos; |
| tree poffset; |
| enum machine_mode pmode; |
| int punsignedp, pvolatilep; |
| affine_iv base_iv, offset_iv; |
| tree dinit; |
| |
| /* Build a reference to the first location accessed by the |
| inner-loop: *(BASE+INIT). (The first location is actually |
| BASE+INIT+OFFSET, but we add OFFSET separately later). */ |
| tree inner_base = build_fold_indirect_ref |
| (fold_build_pointer_plus (base, init)); |
| |
| if (dump_enabled_p ()) |
| { |
| dump_printf_loc (MSG_NOTE, vect_location, |
| "analyze in outer-loop: "); |
| dump_generic_expr (MSG_NOTE, TDF_SLIM, inner_base); |
| } |
| |
| outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos, |
| &poffset, &pmode, &punsignedp, &pvolatilep, false); |
| gcc_assert (outer_base != NULL_TREE); |
| |
| if (pbitpos % BITS_PER_UNIT != 0) |
|