| /* Expansion pass for OMP directives. Outlines regions of certain OMP |
| directives to separate functions, converts others into explicit calls to the |
| runtime library (libgomp), and so forth. |
| |
| Copyright (C) 2005-2017 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "memmodel.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "cfghooks.h" |
| #include "tree-pass.h" |
| #include "ssa.h" |
| #include "optabs.h" |
| #include "cgraph.h" |
| #include "pretty-print.h" |
| #include "diagnostic-core.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "cfganal.h" |
| #include "internal-fn.h" |
| #include "gimplify.h" |
| #include "gimple-iterator.h" |
| #include "gimplify-me.h" |
| #include "gimple-walk.h" |
| #include "tree-cfg.h" |
| #include "tree-into-ssa.h" |
| #include "tree-ssa.h" |
| #include "splay-tree.h" |
| #include "cfgloop.h" |
| #include "omp-general.h" |
| #include "omp-offload.h" |
| #include "tree-cfgcleanup.h" |
| #include "symbol-summary.h" |
| #include "cilk.h" |
| #include "gomp-constants.h" |
| #include "gimple-pretty-print.h" |
| #include "hsa-common.h" |
| #include "debug.h" |
| #include "stringpool.h" |
| #include "attribs.h" |
| |
| /* OMP region information. Every parallel and workshare |
| directive is enclosed between two markers, the OMP_* directive |
| and a corresponding GIMPLE_OMP_RETURN statement. */ |
| |
| struct omp_region |
| { |
| /* The enclosing region. */ |
| struct omp_region *outer; |
| |
| /* First child region. */ |
| struct omp_region *inner; |
| |
| /* Next peer region. */ |
| struct omp_region *next; |
| |
| /* Block containing the omp directive as its last stmt. */ |
| basic_block entry; |
| |
| /* Block containing the GIMPLE_OMP_RETURN as its last stmt. */ |
| basic_block exit; |
| |
| /* Block containing the GIMPLE_OMP_CONTINUE as its last stmt. */ |
| basic_block cont; |
| |
| /* If this is a combined parallel+workshare region, this is a list |
| of additional arguments needed by the combined parallel+workshare |
| library call. */ |
| vec<tree, va_gc> *ws_args; |
| |
| /* The code for the omp directive of this region. */ |
| enum gimple_code type; |
| |
| /* Schedule kind, only used for GIMPLE_OMP_FOR type regions. */ |
| enum omp_clause_schedule_kind sched_kind; |
| |
| /* Schedule modifiers. */ |
| unsigned char sched_modifiers; |
| |
| /* True if this is a combined parallel+workshare region. */ |
| bool is_combined_parallel; |
| |
| /* The ordered stmt if type is GIMPLE_OMP_ORDERED and it has |
| a depend clause. */ |
| gomp_ordered *ord_stmt; |
| }; |
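| |
| /* As an illustration (not taken from an actual dump), a nest such as |
| |
| #pragma omp parallel |
| { |
| #pragma omp for |
| for (i ...) ... |
| } |
| |
| is represented by a GIMPLE_OMP_PARALLEL region whose INNER field |
| points to a GIMPLE_OMP_FOR region; sibling constructs at the same |
| nesting level are chained through NEXT, and toplevel regions hang |
| off ROOT_OMP_REGION below. */ |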
| |
| static struct omp_region *root_omp_region; |
| static bool omp_any_child_fn_dumped; |
| |
| static void expand_omp_build_assign (gimple_stmt_iterator *, tree, tree, |
| bool = false); |
| static gphi *find_phi_with_arg_on_edge (tree, edge); |
| static void expand_omp (struct omp_region *region); |
| |
| /* Return true if REGION is a combined parallel+workshare region. */ |
| |
| static inline bool |
| is_combined_parallel (struct omp_region *region) |
| { |
| return region->is_combined_parallel; |
| } |
| |
| /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB |
| is the immediate dominator of PAR_ENTRY_BB, return true if there |
| are no data dependencies that would prevent expanding the parallel |
| directive at PAR_ENTRY_BB as a combined parallel+workshare region. |
| |
| When expanding a combined parallel+workshare region, the call to |
| the child function may need additional arguments in the case of |
| GIMPLE_OMP_FOR regions. In some cases, these arguments are |
| computed out of variables passed in from the parent to the child |
| via 'struct .omp_data_s'. For instance: |
| |
| #pragma omp parallel for schedule (guided, i * 4) |
| for (j ...) |
| |
| Is lowered into: |
| |
| # BLOCK 2 (PAR_ENTRY_BB) |
| .omp_data_o.i = i; |
| #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)] |
| |
| # BLOCK 3 (WS_ENTRY_BB) |
| .omp_data_i = &.omp_data_o; |
| D.1667 = .omp_data_i->i; |
| D.1598 = D.1667 * 4; |
| #pragma omp for schedule (guided, D.1598) |
| |
| When we outline the parallel region, the call to the child function |
| 'bar.omp_fn.0' will need the value D.1598 in its argument list, but |
| that value is computed *after* the call site. So, in principle we |
| cannot do the transformation. |
| |
| To see whether the code in WS_ENTRY_BB blocks the combined |
| parallel+workshare call, we collect all the variables used in the |
| GIMPLE_OMP_FOR header and check whether they appear on the LHS of any |
| statement in WS_ENTRY_BB. If so, then we cannot emit the combined |
| call. |
| |
| FIXME. If we had the SSA form built at this point, we could merely |
| hoist the code in block 3 into block 2 and be done with it. But at |
| this point we don't have dataflow information and though we could |
| hack something up here, it is really not worth the aggravation. */ |
| |
| static bool |
| workshare_safe_to_combine_p (basic_block ws_entry_bb) |
| { |
| struct omp_for_data fd; |
| gimple *ws_stmt = last_stmt (ws_entry_bb); |
| |
| if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS) |
| return true; |
| |
| gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR); |
| |
| omp_extract_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL); |
| |
| if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST) |
| return false; |
| if (fd.iter_type != long_integer_type_node) |
| return false; |
| |
| /* FIXME. We give up too easily here. If any of these arguments |
| are not constants, they will likely involve variables that have |
| been mapped into fields of .omp_data_s for sharing with the child |
| function. With appropriate data flow, it would be possible to |
| see through this. */ |
| if (!is_gimple_min_invariant (fd.loop.n1) |
| || !is_gimple_min_invariant (fd.loop.n2) |
| || !is_gimple_min_invariant (fd.loop.step) |
| || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size))) |
| return false; |
| |
| return true; |
| } |
| |
| /* Adjust CHUNK_SIZE from the SCHEDULE clause, depending on the presence |
| of the simd modifier (SIMD_SCHEDULE). */ |
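| |
| /* For example (purely illustrative numbers): if omp_max_vf () returns 4 |
| and CHUNK_SIZE is 10, the function below computes (10 + 3) & -4 == 12, |
| i.e. the chunk size rounded up to the next multiple of the |
| vectorization factor. */ |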
| |
| static tree |
| omp_adjust_chunk_size (tree chunk_size, bool simd_schedule) |
| { |
| if (!simd_schedule) |
| return chunk_size; |
| |
| int vf = omp_max_vf (); |
| if (vf == 1) |
| return chunk_size; |
| |
| tree type = TREE_TYPE (chunk_size); |
| chunk_size = fold_build2 (PLUS_EXPR, type, chunk_size, |
| build_int_cst (type, vf - 1)); |
| return fold_build2 (BIT_AND_EXPR, type, chunk_size, |
| build_int_cst (type, -vf)); |
| } |
| |
| /* Collect additional arguments needed to emit a combined |
| parallel+workshare call. PAR_STMT is the enclosing parallel directive |
| and WS_STMT is the workshare directive being expanded. */ |
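| |
| /* As an illustration (not an actual dump): for |
| #pragma omp parallel for schedule (dynamic, 4) |
| over the range 0 .. N with step 1, the collected arguments are roughly |
| { 0, N, 1, 4 }, each converted to long, matching the start/end/step/chunk |
| parameters expected by the combined GOMP_parallel_loop_* entry points. */ |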
| |
| static vec<tree, va_gc> * |
| get_ws_args_for (gimple *par_stmt, gimple *ws_stmt) |
| { |
| tree t; |
| location_t loc = gimple_location (ws_stmt); |
| vec<tree, va_gc> *ws_args; |
| |
| if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt)) |
| { |
| struct omp_for_data fd; |
| tree n1, n2; |
| |
| omp_extract_for_data (for_stmt, &fd, NULL); |
| n1 = fd.loop.n1; |
| n2 = fd.loop.n2; |
| |
| if (gimple_omp_for_combined_into_p (for_stmt)) |
| { |
| tree innerc |
| = omp_find_clause (gimple_omp_parallel_clauses (par_stmt), |
| OMP_CLAUSE__LOOPTEMP_); |
| gcc_assert (innerc); |
| n1 = OMP_CLAUSE_DECL (innerc); |
| innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), |
| OMP_CLAUSE__LOOPTEMP_); |
| gcc_assert (innerc); |
| n2 = OMP_CLAUSE_DECL (innerc); |
| } |
| |
| vec_alloc (ws_args, 3 + (fd.chunk_size != 0)); |
| |
| t = fold_convert_loc (loc, long_integer_type_node, n1); |
| ws_args->quick_push (t); |
| |
| t = fold_convert_loc (loc, long_integer_type_node, n2); |
| ws_args->quick_push (t); |
| |
| t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step); |
| ws_args->quick_push (t); |
| |
| if (fd.chunk_size) |
| { |
| t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size); |
| t = omp_adjust_chunk_size (t, fd.simd_schedule); |
| ws_args->quick_push (t); |
| } |
| |
| return ws_args; |
| } |
| else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS) |
| { |
| /* Number of sections is equal to the number of edges from the |
| GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to |
| the exit of the sections region. */ |
| basic_block bb = single_succ (gimple_bb (ws_stmt)); |
| t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1); |
| vec_alloc (ws_args, 1); |
| ws_args->quick_push (t); |
| return ws_args; |
| } |
| |
| gcc_unreachable (); |
| } |
| |
| /* Discover whether REGION is a combined parallel+workshare region. */ |
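| |
| /* When a region is marked combinable here, expand_parallel_call below |
| emits a single combined entry point such as GOMP_parallel_loop_dynamic |
| or GOMP_parallel_sections (selected from the schedule kind) instead of |
| a plain GOMP_parallel call, appending the extra arguments collected in |
| WS_ARGS to that call. */ |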
| |
| static void |
| determine_parallel_type (struct omp_region *region) |
| { |
| basic_block par_entry_bb, par_exit_bb; |
| basic_block ws_entry_bb, ws_exit_bb; |
| |
| if (region == NULL || region->inner == NULL |
| || region->exit == NULL || region->inner->exit == NULL |
| || region->inner->cont == NULL) |
| return; |
| |
| /* We only support parallel+for and parallel+sections. */ |
| if (region->type != GIMPLE_OMP_PARALLEL |
| || (region->inner->type != GIMPLE_OMP_FOR |
| && region->inner->type != GIMPLE_OMP_SECTIONS)) |
| return; |
| |
| /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and |
| WS_EXIT_BB -> PAR_EXIT_BB. */ |
| par_entry_bb = region->entry; |
| par_exit_bb = region->exit; |
| ws_entry_bb = region->inner->entry; |
| ws_exit_bb = region->inner->exit; |
| |
| if (single_succ (par_entry_bb) == ws_entry_bb |
| && single_succ (ws_exit_bb) == par_exit_bb |
| && workshare_safe_to_combine_p (ws_entry_bb) |
| && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb)) |
| || (last_and_only_stmt (ws_entry_bb) |
| && last_and_only_stmt (par_exit_bb)))) |
| { |
| gimple *par_stmt = last_stmt (par_entry_bb); |
| gimple *ws_stmt = last_stmt (ws_entry_bb); |
| |
| if (region->inner->type == GIMPLE_OMP_FOR) |
| { |
| /* If this is a combined parallel loop, we need to determine |
| whether or not to use the combined library calls. There |
| are two cases where we do not apply the transformation: |
| static loops and any kind of ordered loop. In the first |
| case, we already open code the loop so there is no need |
| to do anything else. In the latter case, the combined |
| parallel loop call would still need extra synchronization |
| to implement ordered semantics, so there would not be any |
| gain in using the combined call. */ |
| tree clauses = gimple_omp_for_clauses (ws_stmt); |
| tree c = omp_find_clause (clauses, OMP_CLAUSE_SCHEDULE); |
| if (c == NULL |
| || ((OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK) |
| == OMP_CLAUSE_SCHEDULE_STATIC) |
| || omp_find_clause (clauses, OMP_CLAUSE_ORDERED)) |
| { |
| region->is_combined_parallel = false; |
| region->inner->is_combined_parallel = false; |
| return; |
| } |
| } |
| |
| region->is_combined_parallel = true; |
| region->inner->is_combined_parallel = true; |
| region->ws_args = get_ws_args_for (par_stmt, ws_stmt); |
| } |
| } |
| |
| /* Debugging dumps for parallel regions. */ |
| void dump_omp_region (FILE *, struct omp_region *, int); |
| void debug_omp_region (struct omp_region *); |
| void debug_all_omp_regions (void); |
| |
| /* Dump the parallel region tree rooted at REGION. */ |
| |
| void |
| dump_omp_region (FILE *file, struct omp_region *region, int indent) |
| { |
| fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index, |
| gimple_code_name[region->type]); |
| |
| if (region->inner) |
| dump_omp_region (file, region->inner, indent + 4); |
| |
| if (region->cont) |
| { |
| fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "", |
| region->cont->index); |
| } |
| |
| if (region->exit) |
| fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "", |
| region->exit->index); |
| else |
| fprintf (file, "%*s[no exit marker]\n", indent, ""); |
| |
| if (region->next) |
| dump_omp_region (file, region->next, indent); |
| } |
| |
| DEBUG_FUNCTION void |
| debug_omp_region (struct omp_region *region) |
| { |
| dump_omp_region (stderr, region, 0); |
| } |
| |
| DEBUG_FUNCTION void |
| debug_all_omp_regions (void) |
| { |
| dump_omp_region (stderr, root_omp_region, 0); |
| } |
| |
| /* Create a new omp region with entry block BB and directive code TYPE |
| inside region PARENT. */ |
| |
| static struct omp_region * |
| new_omp_region (basic_block bb, enum gimple_code type, |
| struct omp_region *parent) |
| { |
| struct omp_region *region = XCNEW (struct omp_region); |
| |
| region->outer = parent; |
| region->entry = bb; |
| region->type = type; |
| |
| if (parent) |
| { |
| /* This is a nested region. Add it to the list of inner |
| regions in PARENT. */ |
| region->next = parent->inner; |
| parent->inner = region; |
| } |
| else |
| { |
| /* This is a toplevel region. Add it to the list of toplevel |
| regions in ROOT_OMP_REGION. */ |
| region->next = root_omp_region; |
| root_omp_region = region; |
| } |
| |
| return region; |
| } |
| |
| /* Release the memory associated with the region tree rooted at REGION. */ |
| |
| static void |
| free_omp_region_1 (struct omp_region *region) |
| { |
| struct omp_region *i, *n; |
| |
| for (i = region->inner; i ; i = n) |
| { |
| n = i->next; |
| free_omp_region_1 (i); |
| } |
| |
| free (region); |
| } |
| |
| /* Release the memory for the entire omp region tree. */ |
| |
| void |
| omp_free_regions (void) |
| { |
| struct omp_region *r, *n; |
| for (r = root_omp_region; r ; r = n) |
| { |
| n = r->next; |
| free_omp_region_1 (r); |
| } |
| root_omp_region = NULL; |
| } |
| |
| /* A convenience function to build an empty GIMPLE_COND with just the |
| condition. */ |
| |
| static gcond * |
| gimple_build_cond_empty (tree cond) |
| { |
| enum tree_code pred_code; |
| tree lhs, rhs; |
| |
| gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs); |
| return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE); |
| } |
| |
| /* Return true if a parallel REGION is within a declare target function or |
| within a target region and is not a part of a gridified target. */ |
| |
| static bool |
| parallel_needs_hsa_kernel_p (struct omp_region *region) |
| { |
| bool indirect = false; |
| for (region = region->outer; region; region = region->outer) |
| { |
| if (region->type == GIMPLE_OMP_PARALLEL) |
| indirect = true; |
| else if (region->type == GIMPLE_OMP_TARGET) |
| { |
| gomp_target *tgt_stmt |
| = as_a <gomp_target *> (last_stmt (region->entry)); |
| |
| if (omp_find_clause (gimple_omp_target_clauses (tgt_stmt), |
| OMP_CLAUSE__GRIDDIM_)) |
| return indirect; |
| else |
| return true; |
| } |
| } |
| |
| if (lookup_attribute ("omp declare target", |
| DECL_ATTRIBUTES (current_function_decl))) |
| return true; |
| |
| return false; |
| } |
| |
| /* Change DECL_CONTEXT of CHILD_FNDECL to that of the parent function. |
| Add CHILD_FNDECL to decl chain of the supercontext of the block |
| ENTRY_BLOCK - this is the block which originally contained the |
| code from which CHILD_FNDECL was created. |
| |
| Together, these actions ensure that the debug info for the outlined |
| function will be emitted with the correct lexical scope. */ |
| |
| static void |
| adjust_context_and_scope (tree entry_block, tree child_fndecl) |
| { |
| if (entry_block != NULL_TREE && TREE_CODE (entry_block) == BLOCK) |
| { |
| tree b = BLOCK_SUPERCONTEXT (entry_block); |
| |
| if (TREE_CODE (b) == BLOCK) |
| { |
| tree parent_fndecl; |
| |
| /* Follow supercontext chain until the parent fndecl |
| is found. */ |
| for (parent_fndecl = BLOCK_SUPERCONTEXT (b); |
| TREE_CODE (parent_fndecl) == BLOCK; |
| parent_fndecl = BLOCK_SUPERCONTEXT (parent_fndecl)) |
| ; |
| |
| gcc_assert (TREE_CODE (parent_fndecl) == FUNCTION_DECL); |
| |
| DECL_CONTEXT (child_fndecl) = parent_fndecl; |
| |
| DECL_CHAIN (child_fndecl) = BLOCK_VARS (b); |
| BLOCK_VARS (b) = child_fndecl; |
| } |
| } |
| } |
| |
| /* Build the function call to GOMP_parallel (or one of its combined |
| parallel+workshare variants) to actually generate the parallel |
| operation. REGION is the parallel region being expanded. BB is the |
| block where the call should be inserted. WS_ARGS is set only if this |
| is a call to a combined parallel+workshare construct; it contains the |
| list of additional arguments needed by the workshare construct. */ |
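| |
| /* Illustrative sketch of the call built below for a simple, |
| non-combined region (names are made up): |
| |
| GOMP_parallel (bar.omp_fn.0, &.omp_data_o, 0, 0); |
| |
| i.e. the child function, the address of the data-sharing record (or a |
| null pointer), the number of threads (0 selects it at run time) and |
| the flags derived from the proc_bind clause, in that order. */ |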
| |
| static void |
| expand_parallel_call (struct omp_region *region, basic_block bb, |
| gomp_parallel *entry_stmt, |
| vec<tree, va_gc> *ws_args) |
| { |
| tree t, t1, t2, val, cond, c, clauses, flags; |
| gimple_stmt_iterator gsi; |
| gimple *stmt; |
| enum built_in_function start_ix; |
| int start_ix2; |
| location_t clause_loc; |
| vec<tree, va_gc> *args; |
| |
| clauses = gimple_omp_parallel_clauses (entry_stmt); |
| |
| /* Determine what flavor of GOMP_parallel we will be |
| emitting. */ |
| start_ix = BUILT_IN_GOMP_PARALLEL; |
| if (is_combined_parallel (region)) |
| { |
| switch (region->inner->type) |
| { |
| case GIMPLE_OMP_FOR: |
| gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO); |
| switch (region->inner->sched_kind) |
| { |
| case OMP_CLAUSE_SCHEDULE_RUNTIME: |
| start_ix2 = 3; |
| break; |
| case OMP_CLAUSE_SCHEDULE_DYNAMIC: |
| case OMP_CLAUSE_SCHEDULE_GUIDED: |
| if (region->inner->sched_modifiers |
| & OMP_CLAUSE_SCHEDULE_NONMONOTONIC) |
| { |
| start_ix2 = 3 + region->inner->sched_kind; |
| break; |
| } |
| /* FALLTHRU */ |
| default: |
| start_ix2 = region->inner->sched_kind; |
| break; |
| } |
| start_ix2 += (int) BUILT_IN_GOMP_PARALLEL_LOOP_STATIC; |
| start_ix = (enum built_in_function) start_ix2; |
| break; |
| case GIMPLE_OMP_SECTIONS: |
| start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS; |
| break; |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| /* By default, the value of NUM_THREADS is zero (selected at run time) |
| and there is no conditional. */ |
| cond = NULL_TREE; |
| val = build_int_cst (unsigned_type_node, 0); |
| flags = build_int_cst (unsigned_type_node, 0); |
| |
| c = omp_find_clause (clauses, OMP_CLAUSE_IF); |
| if (c) |
| cond = OMP_CLAUSE_IF_EXPR (c); |
| |
| c = omp_find_clause (clauses, OMP_CLAUSE_NUM_THREADS); |
| if (c) |
| { |
| val = OMP_CLAUSE_NUM_THREADS_EXPR (c); |
| clause_loc = OMP_CLAUSE_LOCATION (c); |
| } |
| else |
| clause_loc = gimple_location (entry_stmt); |
| |
| c = omp_find_clause (clauses, OMP_CLAUSE_PROC_BIND); |
| if (c) |
| flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c)); |
| |
| /* Ensure 'val' is of the correct type. */ |
| val = fold_convert_loc (clause_loc, unsigned_type_node, val); |
| |
| /* If we found the clause 'if (cond)', build either |
| (cond != 0) or (cond ? val : 1u). */ |
| if (cond) |
| { |
| cond = gimple_boolify (cond); |
| |
| if (integer_zerop (val)) |
| val = fold_build2_loc (clause_loc, |
| EQ_EXPR, unsigned_type_node, cond, |
| build_int_cst (TREE_TYPE (cond), 0)); |
| else |
| { |
| basic_block cond_bb, then_bb, else_bb; |
| edge e, e_then, e_else; |
| tree tmp_then, tmp_else, tmp_join, tmp_var; |
| |
| tmp_var = create_tmp_var (TREE_TYPE (val)); |
| if (gimple_in_ssa_p (cfun)) |
| { |
| tmp_then = make_ssa_name (tmp_var); |
| tmp_else = make_ssa_name (tmp_var); |
| tmp_join = make_ssa_name (tmp_var); |
| } |
| else |
| { |
| tmp_then = tmp_var; |
| tmp_else = tmp_var; |
| tmp_join = tmp_var; |
| } |
| |
| e = split_block_after_labels (bb); |
| cond_bb = e->src; |
| bb = e->dest; |
| remove_edge (e); |
| |
| then_bb = create_empty_bb (cond_bb); |
| else_bb = create_empty_bb (then_bb); |
| set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb); |
| set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb); |
| |
| stmt = gimple_build_cond_empty (cond); |
| gsi = gsi_start_bb (cond_bb); |
| gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); |
| |
| gsi = gsi_start_bb (then_bb); |
| expand_omp_build_assign (&gsi, tmp_then, val, true); |
| |
| gsi = gsi_start_bb (else_bb); |
| expand_omp_build_assign (&gsi, tmp_else, |
| build_int_cst (unsigned_type_node, 1), |
| true); |
| |
| make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE); |
| make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE); |
| add_bb_to_loop (then_bb, cond_bb->loop_father); |
| add_bb_to_loop (else_bb, cond_bb->loop_father); |
| e_then = make_edge (then_bb, bb, EDGE_FALLTHRU); |
| e_else = make_edge (else_bb, bb, EDGE_FALLTHRU); |
| |
| if (gimple_in_ssa_p (cfun)) |
| { |
| gphi *phi = create_phi_node (tmp_join, bb); |
| add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION); |
| add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION); |
| } |
| |
| val = tmp_join; |
| } |
| |
| gsi = gsi_start_bb (bb); |
| val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE, |
| false, GSI_CONTINUE_LINKING); |
| } |
| |
| gsi = gsi_last_bb (bb); |
| t = gimple_omp_parallel_data_arg (entry_stmt); |
| if (t == NULL) |
| t1 = null_pointer_node; |
| else |
| t1 = build_fold_addr_expr (t); |
| tree child_fndecl = gimple_omp_parallel_child_fn (entry_stmt); |
| t2 = build_fold_addr_expr (child_fndecl); |
| |
| adjust_context_and_scope (gimple_block (entry_stmt), child_fndecl); |
| |
| vec_alloc (args, 4 + vec_safe_length (ws_args)); |
| args->quick_push (t2); |
| args->quick_push (t1); |
| args->quick_push (val); |
| if (ws_args) |
| args->splice (*ws_args); |
| args->quick_push (flags); |
| |
| t = build_call_expr_loc_vec (UNKNOWN_LOCATION, |
| builtin_decl_explicit (start_ix), args); |
| |
| force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, |
| false, GSI_CONTINUE_LINKING); |
| |
| if (hsa_gen_requested_p () |
| && parallel_needs_hsa_kernel_p (region)) |
| { |
| cgraph_node *child_cnode = cgraph_node::get (child_fndecl); |
| hsa_register_kernel (child_cnode); |
| } |
| } |
| |
| /* Insert a call to the function given by the first element of WS_ARGS |
| (with the grain from the second element) and the information from |
| ENTRY_STMT into the basic block BB. */ |
| |
| static void |
| expand_cilk_for_call (basic_block bb, gomp_parallel *entry_stmt, |
| vec <tree, va_gc> *ws_args) |
| { |
| tree t, t1, t2; |
| gimple_stmt_iterator gsi; |
| vec <tree, va_gc> *args; |
| |
| gcc_assert (vec_safe_length (ws_args) == 2); |
| tree func_name = (*ws_args)[0]; |
| tree grain = (*ws_args)[1]; |
| |
| tree clauses = gimple_omp_parallel_clauses (entry_stmt); |
| tree count = omp_find_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_); |
| gcc_assert (count != NULL_TREE); |
| count = OMP_CLAUSE_OPERAND (count, 0); |
| |
| gsi = gsi_last_bb (bb); |
| t = gimple_omp_parallel_data_arg (entry_stmt); |
| if (t == NULL) |
| t1 = null_pointer_node; |
| else |
| t1 = build_fold_addr_expr (t); |
| t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt)); |
| |
| vec_alloc (args, 4); |
| args->quick_push (t2); |
| args->quick_push (t1); |
| args->quick_push (count); |
| args->quick_push (grain); |
| t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args); |
| |
| force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, |
| GSI_CONTINUE_LINKING); |
| } |
| |
| /* Build the function call to GOMP_task (or GOMP_taskloop{,_ull} for |
| taskloops) to actually generate the task operation. BB is the block |
| where the call should be inserted. */ |
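| |
| /* Sketch of the argument lists as assembled below (illustrative, not |
| library prototypes): the GOMP_task call receives the child function, |
| the data record, the copy function, the argument size and alignment, |
| the if clause condition, the flags word, the depend array and the |
| priority; the GOMP_taskloop{,_ull} call instead receives flags, |
| num_tasks, priority and the start/end/step of the loop after the |
| alignment argument. */ |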
| |
| static void |
| expand_task_call (struct omp_region *region, basic_block bb, |
| gomp_task *entry_stmt) |
| { |
| tree t1, t2, t3; |
| gimple_stmt_iterator gsi; |
| location_t loc = gimple_location (entry_stmt); |
| |
| tree clauses = gimple_omp_task_clauses (entry_stmt); |
| |
| tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF); |
| tree untied = omp_find_clause (clauses, OMP_CLAUSE_UNTIED); |
| tree mergeable = omp_find_clause (clauses, OMP_CLAUSE_MERGEABLE); |
| tree depend = omp_find_clause (clauses, OMP_CLAUSE_DEPEND); |
| tree finalc = omp_find_clause (clauses, OMP_CLAUSE_FINAL); |
| tree priority = omp_find_clause (clauses, OMP_CLAUSE_PRIORITY); |
| |
| unsigned int iflags |
| = (untied ? GOMP_TASK_FLAG_UNTIED : 0) |
| | (mergeable ? GOMP_TASK_FLAG_MERGEABLE : 0) |
| | (depend ? GOMP_TASK_FLAG_DEPEND : 0); |
| |
| bool taskloop_p = gimple_omp_task_taskloop_p (entry_stmt); |
| tree startvar = NULL_TREE, endvar = NULL_TREE, step = NULL_TREE; |
| tree num_tasks = NULL_TREE; |
| bool ull = false; |
| if (taskloop_p) |
| { |
| gimple *g = last_stmt (region->outer->entry); |
| gcc_assert (gimple_code (g) == GIMPLE_OMP_FOR |
| && gimple_omp_for_kind (g) == GF_OMP_FOR_KIND_TASKLOOP); |
| struct omp_for_data fd; |
| omp_extract_for_data (as_a <gomp_for *> (g), &fd, NULL); |
| startvar = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); |
| endvar = omp_find_clause (OMP_CLAUSE_CHAIN (startvar), |
| OMP_CLAUSE__LOOPTEMP_); |
| startvar = OMP_CLAUSE_DECL (startvar); |
| endvar = OMP_CLAUSE_DECL (endvar); |
| step = fold_convert_loc (loc, fd.iter_type, fd.loop.step); |
| if (fd.loop.cond_code == LT_EXPR) |
| iflags |= GOMP_TASK_FLAG_UP; |
| tree tclauses = gimple_omp_for_clauses (g); |
| num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_NUM_TASKS); |
| if (num_tasks) |
| num_tasks = OMP_CLAUSE_NUM_TASKS_EXPR (num_tasks); |
| else |
| { |
| num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_GRAINSIZE); |
| if (num_tasks) |
| { |
| iflags |= GOMP_TASK_FLAG_GRAINSIZE; |
| num_tasks = OMP_CLAUSE_GRAINSIZE_EXPR (num_tasks); |
| } |
| else |
| num_tasks = integer_zero_node; |
| } |
| num_tasks = fold_convert_loc (loc, long_integer_type_node, num_tasks); |
| if (ifc == NULL_TREE) |
| iflags |= GOMP_TASK_FLAG_IF; |
| if (omp_find_clause (tclauses, OMP_CLAUSE_NOGROUP)) |
| iflags |= GOMP_TASK_FLAG_NOGROUP; |
| ull = fd.iter_type == long_long_unsigned_type_node; |
| } |
| else if (priority) |
| iflags |= GOMP_TASK_FLAG_PRIORITY; |
| |
| tree flags = build_int_cst (unsigned_type_node, iflags); |
| |
| tree cond = boolean_true_node; |
| if (ifc) |
| { |
| if (taskloop_p) |
| { |
| tree t = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc)); |
| t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t, |
| build_int_cst (unsigned_type_node, |
| GOMP_TASK_FLAG_IF), |
| build_int_cst (unsigned_type_node, 0)); |
| flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, |
| flags, t); |
| } |
| else |
| cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc)); |
| } |
| |
| if (finalc) |
| { |
| tree t = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (finalc)); |
| t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t, |
| build_int_cst (unsigned_type_node, |
| GOMP_TASK_FLAG_FINAL), |
| build_int_cst (unsigned_type_node, 0)); |
| flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t); |
| } |
| if (depend) |
| depend = OMP_CLAUSE_DECL (depend); |
| else |
| depend = build_int_cst (ptr_type_node, 0); |
| if (priority) |
| priority = fold_convert (integer_type_node, |
| OMP_CLAUSE_PRIORITY_EXPR (priority)); |
| else |
| priority = integer_zero_node; |
| |
| gsi = gsi_last_bb (bb); |
| tree t = gimple_omp_task_data_arg (entry_stmt); |
| if (t == NULL) |
| t2 = null_pointer_node; |
| else |
| t2 = build_fold_addr_expr_loc (loc, t); |
| t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt)); |
| t = gimple_omp_task_copy_fn (entry_stmt); |
| if (t == NULL) |
| t3 = null_pointer_node; |
| else |
| t3 = build_fold_addr_expr_loc (loc, t); |
| |
| if (taskloop_p) |
| t = build_call_expr (ull |
| ? builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP_ULL) |
| : builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP), |
| 11, t1, t2, t3, |
| gimple_omp_task_arg_size (entry_stmt), |
| gimple_omp_task_arg_align (entry_stmt), flags, |
| num_tasks, priority, startvar, endvar, step); |
| else |
| t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK), |
| 9, t1, t2, t3, |
| gimple_omp_task_arg_size (entry_stmt), |
| gimple_omp_task_arg_align (entry_stmt), cond, flags, |
| depend, priority); |
| |
| force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, |
| false, GSI_CONTINUE_LINKING); |
| } |
| |
| /* Chain all the DECLs in V by their DECL_CHAIN fields and return the |
| head of the chain. */ |
| |
| static tree |
| vec2chain (vec<tree, va_gc> *v) |
| { |
| tree chain = NULL_TREE, t; |
| unsigned ix; |
| |
| FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t) |
| { |
| DECL_CHAIN (t) = chain; |
| chain = t; |
| } |
| |
| return chain; |
| } |
| |
| /* Remove barriers in REGION->EXIT's block. Note that this is only |
| valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel |
| region is an implicit barrier, any barrier that a workshare inside the |
| GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region |
| can now be removed. */ |
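| |
| /* For example (illustration only): in |
| |
| #pragma omp parallel |
| { |
| #pragma omp for |
| for (i ...) ... |
| } |
| |
| the implicit barrier at the end of the for construct is immediately |
| followed by the implicit barrier at the end of the parallel region, so |
| the former is redundant and its GIMPLE_OMP_RETURN can be marked |
| nowait. */ |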
| |
| static void |
| remove_exit_barrier (struct omp_region *region) |
| { |
| gimple_stmt_iterator gsi; |
| basic_block exit_bb; |
| edge_iterator ei; |
| edge e; |
| gimple *stmt; |
| int any_addressable_vars = -1; |
| |
| exit_bb = region->exit; |
| |
| /* If the parallel region doesn't return, we don't have REGION->EXIT |
| block at all. */ |
| if (! exit_bb) |
| return; |
| |
| /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The |
| workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of |
| statements that can appear in between are extremely limited -- no |
| memory operations at all. Here, we allow nothing at all, so the |
| only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */ |
| gsi = gsi_last_bb (exit_bb); |
| gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); |
| gsi_prev (&gsi); |
| if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL) |
| return; |
| |
| FOR_EACH_EDGE (e, ei, exit_bb->preds) |
| { |
| gsi = gsi_last_bb (e->src); |
| if (gsi_end_p (gsi)) |
| continue; |
| stmt = gsi_stmt (gsi); |
| if (gimple_code (stmt) == GIMPLE_OMP_RETURN |
| && !gimple_omp_return_nowait_p (stmt)) |
| { |
| /* OpenMP 3.0 tasks unfortunately prevent this optimization |
| in many cases. If there could be tasks queued, the barrier |
| might be needed to let the tasks run before some local |
| variable of the parallel that the task uses as shared |
| runs out of scope. The task can be spawned either |
| from within the current function (this would be easy to check) |
| or from some function it calls that gets passed the address |
| of such a variable. */ |
| if (any_addressable_vars < 0) |
| { |
| gomp_parallel *parallel_stmt |
| = as_a <gomp_parallel *> (last_stmt (region->entry)); |
| tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt); |
| tree local_decls, block, decl; |
| unsigned ix; |
| |
| any_addressable_vars = 0; |
| FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl) |
| if (TREE_ADDRESSABLE (decl)) |
| { |
| any_addressable_vars = 1; |
| break; |
| } |
| for (block = gimple_block (stmt); |
| !any_addressable_vars |
| && block |
| && TREE_CODE (block) == BLOCK; |
| block = BLOCK_SUPERCONTEXT (block)) |
| { |
| for (local_decls = BLOCK_VARS (block); |
| local_decls; |
| local_decls = DECL_CHAIN (local_decls)) |
| if (TREE_ADDRESSABLE (local_decls)) |
| { |
| any_addressable_vars = 1; |
| break; |
| } |
| if (block == gimple_block (parallel_stmt)) |
| break; |
| } |
| } |
| if (!any_addressable_vars) |
| gimple_omp_return_set_nowait (stmt); |
| } |
| } |
| } |
| |
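| /* Apply remove_exit_barrier to REGION if it is a parallel region, and |
| recurse into all regions nested inside it and their siblings. */ |
| |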
| static void |
| remove_exit_barriers (struct omp_region *region) |
| { |
| if (region->type == GIMPLE_OMP_PARALLEL) |
| remove_exit_barrier (region); |
| |
| if (region->inner) |
| { |
| region = region->inner; |
| remove_exit_barriers (region); |
| while (region->next) |
| { |
| region = region->next; |
| remove_exit_barriers (region); |
| } |
| } |
| } |
| |
| /* Optimize omp_get_thread_num () and omp_get_num_threads () |
| calls. These can't be declared as const functions, but |
| within one parallel body they are constant, so they can be |
| transformed there into __builtin_omp_get_{thread_num,num_threads} () |
| which are declared const. Similarly for a task body, except |
| that in an untied task omp_get_thread_num () can change at any task |
| scheduling point. */ |
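| |
| /* Illustrative example (made-up code): within one parallel body |
| |
| a = omp_get_num_threads (); |
| ... |
| b = omp_get_num_threads (); |
| |
| both calls can be redirected to the const |
| __builtin_omp_get_num_threads (), which lets later passes CSE them |
| into a single call. */ |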
| |
| static void |
| optimize_omp_library_calls (gimple *entry_stmt) |
| { |
| basic_block bb; |
| gimple_stmt_iterator gsi; |
| tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); |
| tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree); |
| tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); |
| tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree); |
| bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK |
| && omp_find_clause (gimple_omp_task_clauses (entry_stmt), |
| OMP_CLAUSE_UNTIED) != NULL); |
| |
| FOR_EACH_BB_FN (bb, cfun) |
| for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple *call = gsi_stmt (gsi); |
| tree decl; |
| |
| if (is_gimple_call (call) |
| && (decl = gimple_call_fndecl (call)) |
| && DECL_EXTERNAL (decl) |
| && TREE_PUBLIC (decl) |
| && DECL_INITIAL (decl) == NULL) |
| { |
| tree built_in; |
| |
| if (DECL_NAME (decl) == thr_num_id) |
| { |
| /* In #pragma omp task untied omp_get_thread_num () can change |
| during the execution of the task region. */ |
| if (untied_task) |
| continue; |
| built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); |
| } |
| else if (DECL_NAME (decl) == num_thr_id) |
| built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); |
| else |
| continue; |
| |
| if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in) |
| || gimple_call_num_args (call) != 0) |
| continue; |
| |
| if (flag_exceptions && !TREE_NOTHROW (decl)) |
| continue; |
| |
| if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE |
| || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)), |
| TREE_TYPE (TREE_TYPE (built_in)))) |
| continue; |
| |
| gimple_call_set_fndecl (call, built_in); |
| } |
| } |
| } |
| |
| /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be |
| regimplified. */ |
| |
| static tree |
| expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *) |
| { |
| tree t = *tp; |
| |
| /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */ |
| if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t)) |
| return t; |
| |
| if (TREE_CODE (t) == ADDR_EXPR) |
| recompute_tree_invariant_for_addr_expr (t); |
| |
| *walk_subtrees = !TYPE_P (t) && !DECL_P (t); |
| return NULL_TREE; |
| } |
| |
| /* Insert the assignment TO = FROM before *GSI_P, or after it if AFTER |
| is true. */ |
| |
| static void |
| expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from, |
| bool after) |
| { |
| bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to); |
| from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE, |
| !after, after ? GSI_CONTINUE_LINKING |
| : GSI_SAME_STMT); |
| gimple *stmt = gimple_build_assign (to, from); |
| if (after) |
| gsi_insert_after (gsi_p, stmt, GSI_CONTINUE_LINKING); |
| else |
| gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT); |
| if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL) |
| || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL)) |
| { |
| gimple_stmt_iterator gsi = gsi_for_stmt (stmt); |
| gimple_regimplify_operands (stmt, &gsi); |
| } |
| } |
| |
| /* Expand the OpenMP parallel or task directive starting at REGION. */ |
| |
| static void |
| expand_omp_taskreg (struct omp_region *region) |
| { |
| basic_block entry_bb, exit_bb, new_bb; |
| struct function *child_cfun; |
| tree child_fn, block, t; |
| gimple_stmt_iterator gsi; |
| gimple *entry_stmt, *stmt; |
| edge e; |
| vec<tree, va_gc> *ws_args; |
| |
| entry_stmt = last_stmt (region->entry); |
| child_fn = gimple_omp_taskreg_child_fn (entry_stmt); |
| child_cfun = DECL_STRUCT_FUNCTION (child_fn); |
| |
| entry_bb = region->entry; |
| if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK) |
| exit_bb = region->cont; |
| else |
| exit_bb = region->exit; |
| |
| bool is_cilk_for |
| = (flag_cilkplus |
| && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL |
| && omp_find_clause (gimple_omp_parallel_clauses (entry_stmt), |
| OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE); |
| |
| if (is_cilk_for) |
| /* If it is a _Cilk_for statement, it is modelled *like* a parallel for, |
| and the inner statement contains the name of the built-in function |
| and grain. */ |
| ws_args = region->inner->ws_args; |
| else if (is_combined_parallel (region)) |
| ws_args = region->ws_args; |
| else |
| ws_args = NULL; |
| |
| if (child_cfun->cfg) |
| { |
| /* Due to inlining, it may happen that we have already outlined |
| the region, in which case all we need to do is make the |
| sub-graph unreachable and emit the parallel call. */ |
| edge entry_succ_e, exit_succ_e; |
| |
| entry_succ_e = single_succ_edge (entry_bb); |
| |
| gsi = gsi_last_bb (entry_bb); |
| gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL |
| || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK); |
| gsi_remove (&gsi, true); |
| |
| new_bb = entry_bb; |
| if (exit_bb) |
| { |
| exit_succ_e = single_succ_edge (exit_bb); |
| make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU); |
| } |
| remove_edge_and_dominated_blocks (entry_succ_e); |
| } |
| else |
| { |
| unsigned srcidx, dstidx, num; |
| |
| /* If the parallel region needs data sent from the parent |
| function, then the very first statement (except possible |
| tree profile counter updates) of the parallel body |
| is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since |
| &.OMP_DATA_O is passed as an argument to the child function, |
| we need to replace it with the argument as seen by the child |
| function. |
| |
| In most cases, this will end up being the identity assignment |
| .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had |
| a function call that has been inlined, the original PARM_DECL |
| .OMP_DATA_I may have been converted into a different local |
| variable, in which case we need to keep the assignment. */ |
| if (gimple_omp_taskreg_data_arg (entry_stmt)) |
| { |
| basic_block entry_succ_bb |
| = single_succ_p (entry_bb) ? single_succ (entry_bb) |
| : FALLTHRU_EDGE (entry_bb)->dest; |
| tree arg; |
| gimple *parcopy_stmt = NULL; |
| |
| for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi)) |
| { |
| gimple *stmt; |
| |
| gcc_assert (!gsi_end_p (gsi)); |
| stmt = gsi_stmt (gsi); |
| if (gimple_code (stmt) != GIMPLE_ASSIGN) |
| continue; |
| |
| if (gimple_num_ops (stmt) == 2) |
| { |
| tree arg = gimple_assign_rhs1 (stmt); |
| |
| /* We're ignoring the subcode because we're |
| effectively doing a STRIP_NOPS. */ |
| |
| if (TREE_CODE (arg) == ADDR_EXPR |
| && TREE_OPERAND (arg, 0) |
| == gimple_omp_taskreg_data_arg (entry_stmt)) |
| { |
| parcopy_stmt = stmt; |
| break; |
| } |
| } |
| } |
| |
| gcc_assert (parcopy_stmt != NULL); |
| arg = DECL_ARGUMENTS (child_fn); |
| |
| if (!gimple_in_ssa_p (cfun)) |
| { |
| if (gimple_assign_lhs (parcopy_stmt) == arg) |
| gsi_remove (&gsi, true); |
| else |
| { |
| /* ?? Is setting the subcode really necessary ?? */ |
| gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg)); |
| gimple_assign_set_rhs1 (parcopy_stmt, arg); |
| } |
| } |
| else |
| { |
| tree lhs = gimple_assign_lhs (parcopy_stmt); |
| gcc_assert (SSA_NAME_VAR (lhs) == arg); |
| /* We'd like to set the rhs to the default def in the child_fn, |
| but it's too early to create ssa names in the child_fn. |
| Instead, we set the rhs to the parm. In |
| move_sese_region_to_fn, we introduce a default def for the |
| parm, map the parm to its default def, and once we encounter |
| this stmt, replace the parm with the default def. */ |
| gimple_assign_set_rhs1 (parcopy_stmt, arg); |
| update_stmt (parcopy_stmt); |
| } |
| } |
| |
| /* Declare local variables needed in CHILD_CFUN. */ |
| block = DECL_INITIAL (child_fn); |
| BLOCK_VARS (block) = vec2chain (child_cfun->local_decls); |
| /* The gimplifier could record temporaries in the parallel/task block |
| rather than in the containing function's local_decls chain, |
| which would mean cgraph would miss finalizing them. Do it now. */ |
| for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t)) |
| if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t)) |
| varpool_node::finalize_decl (t); |
| DECL_SAVED_TREE (child_fn) = NULL; |
| /* We'll create a CFG for child_fn, so no gimple body is needed. */ |
| gimple_set_body (child_fn, NULL); |
| TREE_USED (block) = 1; |
| |
| /* Reset DECL_CONTEXT on function arguments. */ |
| for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t)) |
| DECL_CONTEXT (t) = child_fn; |
| |
| /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK, |
| so that it can be moved to the child function. */ |
| gsi = gsi_last_bb (entry_bb); |
| stmt = gsi_stmt (gsi); |
| gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL |
| || gimple_code (stmt) == GIMPLE_OMP_TASK)); |
| e = split_block (entry_bb, stmt); |
| gsi_remove (&gsi, true); |
| entry_bb = e->dest; |
| edge e2 = NULL; |
| if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL) |
| single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; |
| else |
| { |
| e2 = make_edge (e->src, BRANCH_EDGE (entry_bb)->dest, EDGE_ABNORMAL); |
| gcc_assert (e2->dest == region->exit); |
| remove_edge (BRANCH_EDGE (entry_bb)); |
| set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src); |
| gsi = gsi_last_bb (region->exit); |
| gcc_assert (!gsi_end_p (gsi) |
| && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); |
| gsi_remove (&gsi, true); |
| } |
| |
| /* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR. */ |
| if (exit_bb) |
| { |
| gsi = gsi_last_bb (exit_bb); |
| gcc_assert (!gsi_end_p (gsi) |
| && (gimple_code (gsi_stmt (gsi)) |
| == (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN))); |
| stmt = gimple_build_return (NULL); |
| gsi_insert_after (&gsi, stmt, GSI_SAME_STMT); |
| gsi_remove (&gsi, true); |
| } |
| |
| /* Move the parallel region into CHILD_CFUN. */ |
| |
| if (gimple_in_ssa_p (cfun)) |
| { |
| init_tree_ssa (child_cfun); |
| init_ssa_operands (child_cfun); |
| child_cfun->gimple_df->in_ssa_p = true; |
| block = NULL_TREE; |
| } |
| else |
| block = gimple_block (entry_stmt); |
| |
| /* Make sure to generate early debug for the function before |
| outlining anything. */ |
| if (! gimple_in_ssa_p (cfun)) |
| (*debug_hooks->early_global_decl) (cfun->decl); |
| |
| new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block); |
| if (exit_bb) |
| single_succ_edge (new_bb)->flags = EDGE_FALLTHRU; |
| if (e2) |
| { |
| basic_block dest_bb = e2->dest; |
| if (!exit_bb) |
| make_edge (new_bb, dest_bb, EDGE_FALLTHRU); |
| remove_edge (e2); |
| set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb); |
| } |
| /* When the OMP expansion process cannot guarantee an up-to-date |
| loop tree, arrange for the child function to fix up loops. */ |
| if (loops_state_satisfies_p (LOOPS_NEED_FIXUP)) |
| child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP; |
| |
| /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */ |
| num = vec_safe_length (child_cfun->local_decls); |
| for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++) |
| { |
| t = (*child_cfun->local_decls)[srcidx]; |
| if (DECL_CONTEXT (t) == cfun->decl) |
| continue; |
| if (srcidx != dstidx) |
| (*child_cfun->local_decls)[dstidx] = t; |
| dstidx++; |
| } |
| if (dstidx != num) |
| vec_safe_truncate (child_cfun->local_decls, dstidx); |
| |
| /* Inform the callgraph about the new function. */ |
| child_cfun->curr_properties = cfun->curr_properties; |
| child_cfun->has_simduid_loops |= cfun->has_simduid_loops; |
| child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops; |
| cgraph_node *node = cgraph_node::get_create (child_fn); |
| node->parallelized_function = 1; |
| cgraph_node::add_new_function (child_fn, true); |
| |
| bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl) |
| && !DECL_ASSEMBLER_NAME_SET_P (child_fn); |
| |
| /* Fix the callgraph edges for child_cfun. Those for cfun will be |
| fixed in a following pass. */ |
| push_cfun (child_cfun); |
| if (need_asm) |
| assign_assembler_name_if_needed (child_fn); |
| |
| if (optimize) |
| optimize_omp_library_calls (entry_stmt); |
| update_max_bb_count (); |
| cgraph_edge::rebuild_edges (); |
| |
| /* Some EH regions might become dead, see PR34608. If |
| pass_cleanup_cfg isn't the first pass to happen with the |
| new child, these dead EH edges might cause problems. |
| Clean them up now. */ |
| if (flag_exceptions) |
| { |
| basic_block bb; |
| bool changed = false; |
| |
| FOR_EACH_BB_FN (bb, cfun) |
| changed |= gimple_purge_dead_eh_edges (bb); |
| if (changed) |
| cleanup_tree_cfg (); |
| } |
| if (gimple_in_ssa_p (cfun)) |
| update_ssa (TODO_update_ssa); |
| if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP)) |
| verify_loop_structure (); |
| pop_cfun (); |
| |
| if (dump_file && !gimple_in_ssa_p (cfun)) |
| { |
| omp_any_child_fn_dumped = true; |
| dump_function_header (dump_file, child_fn, dump_flags); |
| dump_function_to_file (child_fn, dump_file, dump_flags); |
| } |
| } |
| |
| /* Emit a library call to launch the children threads. */ |
| if (is_cilk_for) |
| expand_cilk_for_call (new_bb, |
| as_a <gomp_parallel *> (entry_stmt), ws_args); |
| else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL) |
| expand_parallel_call (region, new_bb, |
| as_a <gomp_parallel *> (entry_stmt), ws_args); |
| else |
| expand_task_call (region, new_bb, as_a <gomp_task *> (entry_stmt)); |
| if (gimple_in_ssa_p (cfun)) |
| update_ssa (TODO_update_ssa_only_virtuals); |
| } |
| |
| /* Information about members of an OpenACC collapsed loop nest. */ |
| |
| struct oacc_collapse |
| { |
| tree base; /* Base value. */ |
| tree iters; /* Number of steps. */ |
| tree step; /* Step size. */ |
| tree tile; /* Tile increment (if tiled). */ |
| tree outer; /* Tile iterator var. */ |
| }; |
| |
| /* Helper for expand_oacc_for. Determine collapsed loop information. |
| Fill in COUNTS array. Emit any initialization code before GSI. |
| Return the calculated outer loop bound of BOUND_TYPE. */ |
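| |
| /* As a worked example (illustrative numbers): for a member loop with |
| n1 == 0, n2 == 10, step == 3 and cond_code LT_EXPR, the code below |
| computes range == 10 and iters == (10 - 1 + 3) / 3 == 4, i.e. the |
| iterations 0, 3, 6 and 9; the returned outer bound is the product of |
| the per-loop iteration counts. */ |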
| |
| static tree |
| expand_oacc_collapse_init (const struct omp_for_data *fd, |
| gimple_stmt_iterator *gsi, |
| oacc_collapse *counts, tree bound_type, |
| location_t loc) |
| { |
| tree tiling = fd->tiling; |
| tree total = build_int_cst (bound_type, 1); |
| int ix; |
| |
| gcc_assert (integer_onep (fd->loop.step)); |
| gcc_assert (integer_zerop (fd->loop.n1)); |
| |
| /* When tiling, the first operand of the tile clause applies to the |
| innermost loop, and we work outwards from there. Seems |
| backwards, but whatever. */ |
| for (ix = fd->collapse; ix--;) |
| { |
| const omp_for_data_loop *loop = &fd->loops[ix]; |
| |
| tree iter_type = TREE_TYPE (loop->v); |
| tree diff_type = iter_type; |
| tree plus_type = iter_type; |
| |
| gcc_assert (loop->cond_code == fd->loop.cond_code); |
| |
| if (POINTER_TYPE_P (iter_type)) |
| plus_type = sizetype; |
| if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type)) |
| diff_type = signed_type_for (diff_type); |
| |
| if (tiling) |
| { |
| tree num = build_int_cst (integer_type_node, fd->collapse); |
| tree loop_no = build_int_cst (integer_type_node, ix); |
| tree tile = TREE_VALUE (tiling); |
| gcall *call |
| = gimple_build_call_internal (IFN_GOACC_TILE, 5, num, loop_no, tile, |
| /* gwv-outer=*/integer_zero_node, |
| /* gwv-inner=*/integer_zero_node); |
| |
| counts[ix].outer = create_tmp_var (iter_type, ".outer"); |
| counts[ix].tile = create_tmp_var (diff_type, ".tile"); |
| gimple_call_set_lhs (call, counts[ix].tile); |
| gimple_set_location (call, loc); |
| gsi_insert_before (gsi, call, GSI_SAME_STMT); |
| |
| tiling = TREE_CHAIN (tiling); |
| } |
| else |
| { |
| counts[ix].tile = NULL; |
| counts[ix].outer = loop->v; |
| } |
| |
| tree b = loop->n1; |
| tree e = loop->n2; |
| tree s = loop->step; |
| bool up = loop->cond_code == LT_EXPR; |
| tree dir = build_int_cst (diff_type, up ? +1 : -1); |
| bool negating; |
| tree expr; |
| |
| b = force_gimple_operand_gsi (gsi, b, true, NULL_TREE, |
| true, GSI_SAME_STMT); |
| e = force_gimple_operand_gsi (gsi, e, true, NULL_TREE, |
| true, GSI_SAME_STMT); |
| |
| /* Convert the step, avoiding possible unsigned->signed overflow. */ |
| negating = !up && TYPE_UNSIGNED (TREE_TYPE (s)); |
| if (negating) |
| s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s); |
| s = fold_convert (diff_type, s); |
| if (negating) |
| s = fold_build1 (NEGATE_EXPR, diff_type, s); |
| s = force_gimple_operand_gsi (gsi, s, true, NULL_TREE, |
| true, GSI_SAME_STMT); |
| |
| /* Determine the range, avoiding possible unsigned->signed overflow. */ |
| negating = !up && TYPE_UNSIGNED (iter_type); |
| expr = fold_build2 (MINUS_EXPR, plus_type, |
| fold_convert (plus_type, negating ? b : e), |
| fold_convert (plus_type, negating ? e : b)); |
| expr = fold_convert (diff_type, expr); |
| if (negating) |
| expr = fold_build1 (NEGATE_EXPR, diff_type, expr); |
| tree range = force_gimple_operand_gsi |
| (gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); |
| |
| /* Determine number of iterations. */ |
| expr = fold_build2 (MINUS_EXPR, diff_type, range, dir); |
| expr = fold_build2 (PLUS_EXPR, diff_type, expr, s); |
| expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s); |
| |
| tree iters = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE, |
| true, GSI_SAME_STMT); |
| |
| counts[ix].base = b; |
| counts[ix].iters = iters; |
| counts[ix].step = s; |
| |
| total = fold_build2 (MULT_EXPR, bound_type, total, |
| fold_convert (bound_type, iters)); |
| } |
| |
| return total; |
| } |
| |
| /* Emit initializers for collapsed loop members. INNER is true if |
| this is for the element loop of a TILE. IVAR is the outer |
| loop iteration variable, from which collapsed loop iteration values |
| are calculated. COUNTS array has been initialized by |
| expand_oacc_collapse_init. */ |
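| |
| /* Illustrative decomposition (not an actual dump): for a collapse of |
| two loops with iteration counts iters1 (outer) and iters2 (inner), |
| the code below recovers the member values from the linear iterator |
| roughly as |
| |
| v2 = base2 + (IVAR % iters2) * step2; |
| v1 = base1 + (IVAR / iters2) * step1; |
| |
| working from the innermost loop outwards. */ |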
| |
| static void |
| expand_oacc_collapse_vars (const struct omp_for_data *fd, bool inner, |
| gimple_stmt_iterator *gsi, |
| const oacc_collapse *counts, tree ivar) |
| { |
| tree ivar_type = TREE_TYPE (ivar); |
| |
| /* The most rapidly changing iteration variable is the innermost |
| one. */ |
| for (int ix = fd->collapse; ix--;) |
| { |
| const omp_for_data_loop *loop = &fd->loops[ix]; |
| const oacc_collapse *collapse = &counts[ix]; |
| tree v = inner ? loop->v : collapse->outer; |
| tree iter_type = TREE_TYPE (v); |
| tree diff_type = TREE_TYPE (collapse->step); |
| tree plus_type = iter_type; |
| enum tree_code plus_code = PLUS_EXPR; |
| tree expr; |
| |
| if (POINTER_TYPE_P (iter_type)) |
| { |
| plus_code = POINTER_PLUS_EXPR; |
| plus_type = sizetype; |
| } |
| |
| expr = ivar; |
| if (ix) |
| { |
| tree mod = fold_convert (ivar_type, collapse->iters); |
| ivar = fold_build2 (TRUNC_DIV_EXPR, ivar_type, expr, mod); |
| expr = fold_build2 (TRUNC_MOD_EXPR, ivar_type, expr, mod); |
| ivar = force_gimple_operand_gsi (gsi, ivar, true, NULL_TREE, |
| true, GSI_SAME_STMT); |
| } |
| |
| expr = fold_build2 (MULT_EXPR, diff_type, fold_convert (diff_type, expr), |
| collapse->step); |
| expr = fold_build2 (plus_code, iter_type, |
| inner ? collapse->outer : collapse->base, |
| fold_convert (plus_type, expr)); |
| expr = force_gimple_operand_gsi (gsi, expr, false, NULL_TREE, |
| true, GSI_SAME_STMT); |
| gassign *ass = gimple_build_assign (v, expr); |
| gsi_insert_before (gsi, ass, GSI_SAME_STMT); |
| } |
| } |
| |
| /* Helper function for expand_omp_{for_*,simd}. If this is the outermost |
| of the combined collapse > 1 loop constructs, generate code like: |
| if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB; |
| if (cond3 is <) |
| adj = STEP3 - 1; |
| else |
| adj = STEP3 + 1; |
| count3 = (adj + N32 - N31) / STEP3; |
| if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB; |
| if (cond2 is <) |
| adj = STEP2 - 1; |
| else |
| adj = STEP2 + 1; |
| count2 = (adj + N22 - N21) / STEP2; |
| if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB; |
| if (cond1 is <) |
| adj = STEP1 - 1; |
| else |
| adj = STEP1 + 1; |
| count1 = (adj + N12 - N11) / STEP1; |
| count = count1 * count2 * count3; |
| Furthermore, if ZERO_ITER_BB is NULL, create a BB which does: |
| count = 0; |
| and set ZERO_ITER_BB to that bb. If this isn't the outermost |
| of the combined loop constructs, just initialize COUNTS array |
| from the _looptemp_ clauses. */ |
| |
| /* NOTE: It *could* be better to moosh all of the BBs together, |
| creating one larger BB with all the computation and the unexpected |
| jump at the end. I.e. |
| |
| bool zero3, zero2, zero1, zero; |
| |
| zero3 = N32 c3 N31; |
| count3 = (N32 - N31) /[cl] STEP3; |
| zero2 = N22 c2 N21; |
| count2 = (N22 - N21) /[cl] STEP2; |
| zero1 = N12 c1 N11; |
| count1 = (N12 - N11) /[cl] STEP1; |
| zero = zero3 || zero2 || zero1; |
| count = count1 * count2 * count3; |
| if (__builtin_expect(zero, false)) goto zero_iter_bb; |
| |
| After all, we expect zero to be false, and thus we expect to have to |
| evaluate all of the comparison expressions, so short-circuiting |
| oughtn't be a win. Since the condition isn't protecting a |
| denominator, we're not concerned about divide-by-zero, so we can |
| fully evaluate count even if a numerator turned out to be wrong. |
| |
| It seems like putting this all together would create much better |
| scheduling opportunities, and less pressure on the chip's branch |
| predictor. */ |
| |
| static void |
| expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi, |
| basic_block &entry_bb, tree *counts, |
| basic_block &zero_iter1_bb, int &first_zero_iter1, |
| basic_block &zero_iter2_bb, int &first_zero_iter2, |
| basic_block &l2_dom_bb) |
| { |
| tree t, type = TREE_TYPE (fd->loop.v); |
| edge e, ne; |
| int i; |
| |
| /* Collapsed loops need work for expansion into SSA form. */ |
| gcc_assert (!gimple_in_ssa_p (cfun)); |
| |
| if (gimple_omp_for_combined_into_p (fd->for_stmt) |
| && TREE_CODE (fd->loop.n2) != INTEGER_CST) |
| { |
| gcc_assert (fd->ordered == 0); |
| /* The first two _looptemp_ clauses are for istart/iend; counts[0] |
| isn't supposed to be handled, as the inner loop doesn't |
| use it. */ |
| tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), |
| OMP_CLAUSE__LOOPTEMP_); |
| gcc_assert (innerc); |
| for (i = 0; i < fd->collapse; i++) |
| { |
| innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), |
| OMP_CLAUSE__LOOPTEMP_); |
| gcc_assert (innerc); |
| if (i) |
| counts[i] = OMP_CLAUSE_DECL (innerc); |
| else |
| counts[0] = NULL_TREE; |
| } |
| return; |
| } |
| |
| for (i = fd->collapse; i < fd->ordered; i++) |
| { |
| tree itype = TREE_TYPE (fd->loops[i].v); |
| counts[i] = NULL_TREE; |
| t = fold_binary (fd->loops[i].cond_code, boolean_type_node, |
| fold_convert (itype, fd->loops[i].n1), |
| fold_convert (itype, fd->loops[i].n2)); |
| if (t && integer_zerop (t)) |
| { |
| for (i = fd->collapse; i < fd->ordered; i++) |
| counts[i] = build_int_cst (type, 0); |
| break; |
| } |
| } |
| for (i = 0; i < (fd->ordered ? fd->ordered : fd->collapse); i++) |
| { |
| tree itype = TREE_TYPE (fd->loops[i].v); |
| |
| if (i >= fd->collapse && counts[i]) |
| continue; |
| if ((SSA_VAR_P (fd->loop.n2) || i >= fd->collapse) |
| && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node, |
| fold_convert (itype, fd->loops[i].n1), |
| fold_convert (itype, fd->loops[i].n2))) |
| == NULL_TREE || !integer_onep (t))) |
| { |
| gcond *cond_stmt; |
| tree n1, n2; |
| n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1)); |
| n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE, |
| true, GSI_SAME_STMT); |
| n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2)); |
| n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE, |
| true, GSI_SAME_STMT); |
| cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2, |
| NULL_TREE, NULL_TREE); |
| gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT); |
| if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), |
| expand_omp_regimplify_p, NULL, NULL) |
| || walk_tree (gimple_cond_rhs_ptr (cond_stmt), |
| expand_omp_regimplify_p, NULL, NULL)) |
| { |
| *gsi = gsi_for_stmt (cond_stmt); |
| gimple_regimplify_operands (cond_stmt, gsi); |
| } |
| e = split_block (entry_bb, cond_stmt); |
| basic_block &zero_iter_bb |
| = i < fd->collapse ? zero_iter1_bb : zero_iter2_bb; |
| int &first_zero_iter |
| = i < fd->collapse ? first_zero_iter1 : first_zero_iter2; |
| if (zero_iter_bb == NULL) |
| { |
| gassign *assign_stmt; |
| first_zero_iter = i; |
| zero_iter_bb = create_empty_bb (entry_bb); |
| add_bb_to_loop (zero_iter_bb, entry_bb->loop_father); |
| *gsi = gsi_after_labels (zero_iter_bb); |
| if (i < fd->collapse) |
| assign_stmt = gimple_build_assign (fd->loop.n2, |
| build_zero_cst (type)); |
| else |
| { |
| counts[i] = create_tmp_reg (type, ".count"); |
| assign_stmt |
| = gimple_build_assign (counts[i], build_zero_cst (type)); |
| } |
| gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT); |
| set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb, |
| entry_bb); |
| } |
| ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE); |
| ne->probability = profile_probability::very_unlikely (); |
| e->flags = EDGE_TRUE_VALUE; |
| e->probability = ne->probability.invert (); |
| if (l2_dom_bb == NULL) |
| l2_dom_bb = entry_bb; |
| entry_bb = e->dest; |
| *gsi = gsi_last_bb (entry_bb); |
| } |
| |
| if (POINTER_TYPE_P (itype)) |
| itype = signed_type_for (itype); |
| t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR |
| ? -1 : 1)); |
| t = fold_build2 (PLUS_EXPR, itype, |
| fold_convert (itype, fd->loops[i].step), t); |
| t = fold_build2 (PLUS_EXPR, itype, t, |
| fold_convert (itype, fd->loops[i].n2)); |
| t = fold_build2 (MINUS_EXPR, itype, t, |
| fold_convert (itype, fd->loops[i].n1)); |
|           /* ?? We could probably use CEIL_DIV_EXPR here instead of |
|              TRUNC_DIV_EXPR plus the manual adjustment, unless that cannot |
|              generate the same code in the end because generically we |
|              don't know that the values involved must be negative for |
|              GT.  ?? */ |
| if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR) |
| t = fold_build2 (TRUNC_DIV_EXPR, itype, |
| fold_build1 (NEGATE_EXPR, itype, t), |
| fold_build1 (NEGATE_EXPR, itype, |
| fold_convert (itype, |
| fd->loops[i].step))); |
| else |
| t = fold_build2 (TRUNC_DIV_EXPR, itype, t, |
| fold_convert (itype, fd->loops[i].step)); |
| t = fold_convert (type, t); |
| if (TREE_CODE (t) == INTEGER_CST) |
| counts[i] = t; |
| else |
| { |
| if (i < fd->collapse || i != first_zero_iter2) |
| counts[i] = create_tmp_reg (type, ".count"); |
| expand_omp_build_assign (gsi, counts[i], t); |
| } |
| if (SSA_VAR_P (fd->loop.n2) && i < fd->collapse) |
| { |
| if (i == 0) |
| t = counts[0]; |
| else |
| t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]); |
| expand_omp_build_assign (gsi, fd->loop.n2, t); |
| } |
| } |
| } |
| |
| /* Helper function for expand_omp_{for_*,simd}. Generate code like: |
| T = V; |
| V3 = N31 + (T % count3) * STEP3; |
| T = T / count3; |
| V2 = N21 + (T % count2) * STEP2; |
| T = T / count2; |
| V1 = N11 + T * STEP1; |
| if this loop doesn't have an inner loop construct combined with it. |
| If it does have an inner loop construct combined with it and the |
| iteration count isn't known constant, store values from counts array |
| into its _looptemp_ temporaries instead. */ |
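| |
| /* Purely illustrative (hypothetical values, not taken from the IL): with |
|    collapse(3), count3 = 4, count2 = 3 and logical iteration number T = 17, |
|    the sequence above evaluates as |
|      V3 = N31 + (17 % 4) * STEP3;   T = 17 / 4 = 4; |
|      V2 = N21 + (4 % 3) * STEP2;    T = 4 / 3 = 1; |
|      V1 = N11 + 1 * STEP1; |
|    i.e. V3 varies fastest and V1 slowest, matching the original nest.  */ |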
| |
| static void |
| expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi, |
| tree *counts, gimple *inner_stmt, tree startvar) |
| { |
| int i; |
| if (gimple_omp_for_combined_p (fd->for_stmt)) |
| { |
| /* If fd->loop.n2 is constant, then no propagation of the counts |
| is needed, they are constant. */ |
| if (TREE_CODE (fd->loop.n2) == INTEGER_CST) |
| return; |
| |
| tree clauses = gimple_code (inner_stmt) != GIMPLE_OMP_FOR |
| ? gimple_omp_taskreg_clauses (inner_stmt) |
| : gimple_omp_for_clauses (inner_stmt); |
| /* First two _looptemp_ clauses are for istart/iend, counts[0] |
| isn't supposed to be handled, as the inner loop doesn't |
| use it. */ |
| tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); |
| gcc_assert (innerc); |
| for (i = 0; i < fd->collapse; i++) |
| { |
| innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), |
| OMP_CLAUSE__LOOPTEMP_); |
| gcc_assert (innerc); |
| if (i) |
| { |
| tree tem = OMP_CLAUSE_DECL (innerc); |
| tree t = fold_convert (TREE_TYPE (tem), counts[i]); |
| t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE, |
| false, GSI_CONTINUE_LINKING); |
| gassign *stmt = gimple_build_assign (tem, t); |
| gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); |
| } |
| } |
| return; |
| } |
| |
| tree type = TREE_TYPE (fd->loop.v); |
| tree tem = create_tmp_reg (type, ".tem"); |
| gassign *stmt = gimple_build_assign (tem, startvar); |
| gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); |
| |
| for (i = fd->collapse - 1; i >= 0; i--) |
| { |
| tree vtype = TREE_TYPE (fd->loops[i].v), itype, t; |
| itype = vtype; |
| if (POINTER_TYPE_P (vtype)) |
| itype = signed_type_for (vtype); |
| if (i != 0) |
| t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]); |
| else |
| t = tem; |
| t = fold_convert (itype, t); |
| t = fold_build2 (MULT_EXPR, itype, t, |
| fold_convert (itype, fd->loops[i].step)); |
| if (POINTER_TYPE_P (vtype)) |
| t = fold_build_pointer_plus (fd->loops[i].n1, t); |
| else |
| t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t); |
| t = force_gimple_operand_gsi (gsi, t, |
| DECL_P (fd->loops[i].v) |
| && TREE_ADDRESSABLE (fd->loops[i].v), |
| NULL_TREE, false, |
| GSI_CONTINUE_LINKING); |
| stmt = gimple_build_assign (fd->loops[i].v, t); |
| gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); |
| if (i != 0) |
| { |
| t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]); |
| t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE, |
| false, GSI_CONTINUE_LINKING); |
| stmt = gimple_build_assign (tem, t); |
| gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); |
| } |
| } |
| } |
| |
| /* Helper function for expand_omp_for_*. Generate code like: |
| L10: |
| V3 += STEP3; |
| if (V3 cond3 N32) goto BODY_BB; else goto L11; |
| L11: |
| V3 = N31; |
| V2 += STEP2; |
| if (V2 cond2 N22) goto BODY_BB; else goto L12; |
| L12: |
| V2 = N21; |
| V1 += STEP1; |
| goto BODY_BB; */ |
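| |
| /* In source terms (illustrative only) this is the usual odometer step for |
|    a collapsed nest: the innermost variable is bumped first, and any |
|    variable that runs off its bound is reset to its start value while the |
|    next outer one is bumped, e.g. for collapse(2) |
|      V2 += STEP2; |
|      if (!(V2 cond2 N22)) { V2 = N21; V1 += STEP1; } |
|    before control returns to BODY_BB.  */ |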
| |
| static basic_block |
| extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb, |
| basic_block body_bb) |
| { |
| basic_block last_bb, bb, collapse_bb = NULL; |
| int i; |
| gimple_stmt_iterator gsi; |
| edge e; |
| tree t; |
| gimple *stmt; |
| |
| last_bb = cont_bb; |
| for (i = fd->collapse - 1; i >= 0; i--) |
| { |
| tree vtype = TREE_TYPE (fd->loops[i].v); |
| |
| bb = create_empty_bb (last_bb); |
| add_bb_to_loop (bb, last_bb->loop_father); |
| gsi = gsi_start_bb (bb); |
| |
| if (i < fd->collapse - 1) |
| { |
| e = make_edge (last_bb, bb, EDGE_FALSE_VALUE); |
|           e->probability |
|             = profile_probability::guessed_always ().apply_scale (1, 8); |
| |
| t = fd->loops[i + 1].n1; |
| t = force_gimple_operand_gsi (&gsi, t, |
| DECL_P (fd->loops[i + 1].v) |
| && TREE_ADDRESSABLE (fd->loops[i |
| + 1].v), |
| NULL_TREE, false, |
| GSI_CONTINUE_LINKING); |
| stmt = gimple_build_assign (fd->loops[i + 1].v, t); |
| gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); |
| } |
| else |
| collapse_bb = bb; |
| |
| set_immediate_dominator (CDI_DOMINATORS, bb, last_bb); |
| |
| if (POINTER_TYPE_P (vtype)) |
| t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step); |
| else |
| t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step); |
| t = force_gimple_operand_gsi (&gsi, t, |
| DECL_P (fd->loops[i].v) |
| && TREE_ADDRESSABLE (fd->loops[i].v), |
| NULL_TREE, false, GSI_CONTINUE_LINKING); |
| stmt = gimple_build_assign (fd->loops[i].v, t); |
| gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); |
| |
| if (i > 0) |
| { |
| t = fd->loops[i].n2; |
| t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, |
| false, GSI_CONTINUE_LINKING); |
| tree v = fd->loops[i].v; |
| if (DECL_P (v) && TREE_ADDRESSABLE (v)) |
| v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE, |
| false, GSI_CONTINUE_LINKING); |
| t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t); |
| stmt = gimple_build_cond_empty (t); |
| gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); |
| e = make_edge (bb, body_bb, EDGE_TRUE_VALUE); |
|           e->probability |
|             = profile_probability::guessed_always ().apply_scale (7, 8); |
| } |
| else |
| make_edge (bb, body_bb, EDGE_FALLTHRU); |
| last_bb = bb; |
| } |
| |
| return collapse_bb; |
| } |
| |
| /* Expand #pragma omp ordered depend(source). */ |
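| |
| /* This amounts to emitting, at the point of the directive, a single call |
|    along the lines of |
|      GOMP_doacross_post (&.orditera); |
|    (GOMP_doacross_ull_post for the unsigned long long iteration type), |
|    where .orditera is the temporary array holding the current iteration |
|    vector, created in expand_omp_ordered_source_sink below.  */ |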
| |
| static void |
| expand_omp_ordered_source (gimple_stmt_iterator *gsi, struct omp_for_data *fd, |
| tree *counts, location_t loc) |
| { |
| enum built_in_function source_ix |
| = fd->iter_type == long_integer_type_node |
| ? BUILT_IN_GOMP_DOACROSS_POST : BUILT_IN_GOMP_DOACROSS_ULL_POST; |
| gimple *g |
| = gimple_build_call (builtin_decl_explicit (source_ix), 1, |
| build_fold_addr_expr (counts[fd->ordered])); |
| gimple_set_location (g, loc); |
| gsi_insert_before (gsi, g, GSI_SAME_STMT); |
| } |
| |
| /* Expand a single depend from #pragma omp ordered depend(sink:...). */ |
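| |
| /* As a concrete (illustrative) source-level example, in |
| |
|      #pragma omp for ordered(2) |
|      for (i = 0; i < N; i++) |
|        for (j = 0; j < M; j++) |
|          { |
|            #pragma omp ordered depend (sink: i - 1, j) depend (sink: i, j - 1) |
|            ... |
|            #pragma omp ordered depend (source) |
|          } |
| |
|    each sink vector becomes a GOMP_doacross_wait (or GOMP_doacross_ull_wait) |
|    call whose arguments are the iteration numbers being waited on, guarded |
|    by a runtime check so the call is skipped when the requested iteration |
|    lies outside the iteration space.  */ |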
| |
| static void |
| expand_omp_ordered_sink (gimple_stmt_iterator *gsi, struct omp_for_data *fd, |
| tree *counts, tree c, location_t loc) |
| { |
| auto_vec<tree, 10> args; |
| enum built_in_function sink_ix |
| = fd->iter_type == long_integer_type_node |
| ? BUILT_IN_GOMP_DOACROSS_WAIT : BUILT_IN_GOMP_DOACROSS_ULL_WAIT; |
| tree t, off, coff = NULL_TREE, deps = OMP_CLAUSE_DECL (c), cond = NULL_TREE; |
| int i; |
| gimple_stmt_iterator gsi2 = *gsi; |
| bool warned_step = false; |
| |
| for (i = 0; i < fd->ordered; i++) |
| { |
| tree step = NULL_TREE; |
| off = TREE_PURPOSE (deps); |
| if (TREE_CODE (off) == TRUNC_DIV_EXPR) |
| { |
| step = TREE_OPERAND (off, 1); |
| off = TREE_OPERAND (off, 0); |
| } |
| if (!integer_zerop (off)) |
| { |
| gcc_assert (fd->loops[i].cond_code == LT_EXPR |
| || fd->loops[i].cond_code == GT_EXPR); |
| bool forward = fd->loops[i].cond_code == LT_EXPR; |
| if (step) |
| { |
|               /* Non-simple Fortran DO loops.  If step is variable, |
|                  we don't know even the direction at compile time, so |
|                  we can't warn.  */ |
| if (TREE_CODE (step) != INTEGER_CST) |
| break; |
| forward = tree_int_cst_sgn (step) != -1; |
| } |
| if (forward ^ OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) |
| warning_at (loc, 0, "%<depend(sink)%> clause waiting for " |
| "lexically later iteration"); |
| break; |
| } |
| deps = TREE_CHAIN (deps); |
| } |
| /* If all offsets corresponding to the collapsed loops are zero, |
| this depend clause can be ignored. FIXME: but there is still a |
| flush needed. We need to emit one __sync_synchronize () for it |
| though (perhaps conditionally)? Solve this together with the |
| conservative dependence folding optimization. |
| if (i >= fd->collapse) |
| return; */ |
| |
| deps = OMP_CLAUSE_DECL (c); |
| gsi_prev (&gsi2); |
| edge e1 = split_block (gsi_bb (gsi2), gsi_stmt (gsi2)); |
| edge e2 = split_block_after_labels (e1->dest); |
| |
| gsi2 = gsi_after_labels (e1->dest); |
| *gsi = gsi_last_bb (e1->src); |
| for (i = 0; i < fd->ordered; i++) |
| { |
| tree itype = TREE_TYPE (fd->loops[i].v); |
| tree step = NULL_TREE; |
| tree orig_off = NULL_TREE; |
| if (POINTER_TYPE_P (itype)) |
| itype = sizetype; |
| if (i) |
| deps = TREE_CHAIN (deps); |
| off = TREE_PURPOSE (deps); |
| if (TREE_CODE (off) == TRUNC_DIV_EXPR) |
| { |
| step = TREE_OPERAND (off, 1); |
| off = TREE_OPERAND (off, 0); |
| gcc_assert (fd->loops[i].cond_code == LT_EXPR |
| && integer_onep (fd->loops[i].step) |
| && !POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))); |
| } |
| tree s = fold_convert_loc (loc, itype, step ? step : fd->loops[i].step); |
| if (step) |
| { |
| off = fold_convert_loc (loc, itype, off); |
| orig_off = off; |
| off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s); |
| } |
| |
| if (integer_zerop (off)) |
| t = boolean_true_node; |
| else |
| { |
| tree a; |
| tree co = fold_convert_loc (loc, itype, off); |
| if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))) |
| { |
| if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) |
| co = fold_build1_loc (loc, NEGATE_EXPR, itype, co); |
| a = fold_build2_loc (loc, POINTER_PLUS_EXPR, |
| TREE_TYPE (fd->loops[i].v), fd->loops[i].v, |
| co); |
| } |
| else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) |
| a = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v), |
| fd->loops[i].v, co); |
| else |
| a = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (fd->loops[i].v), |
| fd->loops[i].v, co); |
| if (step) |
| { |
| tree t1, t2; |
| if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) |
| t1 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a, |
| fd->loops[i].n1); |
| else |
| t1 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a, |
| fd->loops[i].n2); |
| if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) |
| t2 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a, |
| fd->loops[i].n2); |
| else |
| t2 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a, |
| fd->loops[i].n1); |
| t = fold_build2_loc (loc, LT_EXPR, boolean_type_node, |
| step, build_int_cst (TREE_TYPE (step), 0)); |
| if (TREE_CODE (step) != INTEGER_CST) |
| { |
| t1 = unshare_expr (t1); |
| t1 = force_gimple_operand_gsi (gsi, t1, true, NULL_TREE, |
| false, GSI_CONTINUE_LINKING); |
| t2 = unshare_expr (t2); |
| t2 = force_gimple_operand_gsi (gsi, t2, true, NULL_TREE, |
| false, GSI_CONTINUE_LINKING); |
| } |
| t = fold_build3_loc (loc, COND_EXPR, boolean_type_node, |
| t, t2, t1); |
| } |
| else if (fd->loops[i].cond_code == LT_EXPR) |
| { |
| if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) |
| t = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a, |
| fd->loops[i].n1); |
| else |
| t = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a, |
| fd->loops[i].n2); |
| } |
| else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) |
| t = fold_build2_loc (loc, GT_EXPR, boolean_type_node, a, |
| fd->loops[i].n2); |
| else |
| t = fold_build2_loc (loc, LE_EXPR, boolean_type_node, a, |
| fd->loops[i].n1); |
| } |
| if (cond) |
| cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node, cond, t); |
| else |
| cond = t; |
| |
| off = fold_convert_loc (loc, itype, off); |
| |
| if (step |
| || (fd->loops[i].cond_code == LT_EXPR |
| ? !integer_onep (fd->loops[i].step) |
| : !integer_minus_onep (fd->loops[i].step))) |
| { |
| if (step == NULL_TREE |
| && TYPE_UNSIGNED (itype) |
| && fd->loops[i].cond_code == GT_EXPR) |
| t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, off, |
| fold_build1_loc (loc, NEGATE_EXPR, itype, |
| s)); |
| else |
| t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, |
| orig_off ? orig_off : off, s); |
| t = fold_build2_loc (loc, EQ_EXPR, boolean_type_node, t, |
| build_int_cst (itype, 0)); |
| if (integer_zerop (t) && !warned_step) |
| { |
| warning_at (loc, 0, "%<depend(sink)%> refers to iteration never " |
| "in the iteration space"); |
| warned_step = true; |
| } |
| cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node, |
| cond, t); |
| } |
| |
| if (i <= fd->collapse - 1 && fd->collapse > 1) |
| t = fd->loop.v; |
| else if (counts[i]) |
| t = counts[i]; |
| else |
| { |
| t = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v), |
| fd->loops[i].v, fd->loops[i].n1); |
| t = fold_convert_loc (loc, fd->iter_type, t); |
| } |
| if (step) |
| /* We have divided off by step already earlier. */; |
| else if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR) |
| off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, |
| fold_build1_loc (loc, NEGATE_EXPR, itype, |
| s)); |
| else |
| off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s); |
| if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) |
| off = fold_build1_loc (loc, NEGATE_EXPR, itype, off); |
| off = fold_convert_loc (loc, fd->iter_type, off); |
| if (i <= fd->collapse - 1 && fd->collapse > 1) |
| { |
| if (i) |
| off = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, coff, |
| off); |
| if (i < fd->collapse - 1) |
| { |
| coff = fold_build2_loc (loc, MULT_EXPR, fd->iter_type, off, |
| counts[i]); |
| continue; |
| } |
| } |
| off = unshare_expr (off); |
| t = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, t, off); |
| t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE, |
| true, GSI_SAME_STMT); |
| args.safe_push (t); |
| } |
| gimple *g = gimple_build_call_vec (builtin_decl_explicit (sink_ix), args); |
| gimple_set_location (g, loc); |
| gsi_insert_before (&gsi2, g, GSI_SAME_STMT); |
| |
| cond = unshare_expr (cond); |
| cond = force_gimple_operand_gsi (gsi, cond, true, NULL_TREE, false, |
| GSI_CONTINUE_LINKING); |
| gsi_insert_after (gsi, gimple_build_cond_empty (cond), GSI_NEW_STMT); |
| edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE); |
|   e3->probability |
|     = profile_probability::guessed_always ().apply_scale (1, 8); |
| e1->probability = e3->probability.invert (); |
| e1->flags = EDGE_TRUE_VALUE; |
| set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src); |
| |
| *gsi = gsi_after_labels (e2->dest); |
| } |
| |
| /* Expand all #pragma omp ordered depend(source) and |
| #pragma omp ordered depend(sink:...) constructs in the current |
| #pragma omp for ordered(n) region. */ |
| |
| static void |
| expand_omp_ordered_source_sink (struct omp_region *region, |
| struct omp_for_data *fd, tree *counts, |
| basic_block cont_bb) |
| { |
| struct omp_region *inner; |
| int i; |
| for (i = fd->collapse - 1; i < fd->ordered; i++) |
| if (i == fd->collapse - 1 && fd->collapse > 1) |
| counts[i] = NULL_TREE; |
| else if (i >= fd->collapse && !cont_bb) |
| counts[i] = build_zero_cst (fd->iter_type); |
| else if (!POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)) |
| && integer_onep (fd->loops[i].step)) |
| counts[i] = NULL_TREE; |
| else |
| counts[i] = create_tmp_var (fd->iter_type, ".orditer"); |
| tree atype |
| = build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1); |
| counts[fd->ordered] = create_tmp_var (atype, ".orditera"); |
| TREE_ADDRESSABLE (counts[fd->ordered]) = 1; |
| |
| for (inner = region->inner; inner; inner = inner->next) |
| if (inner->type == GIMPLE_OMP_ORDERED) |
| { |
| gomp_ordered *ord_stmt = inner->ord_stmt; |
| gimple_stmt_iterator gsi = gsi_for_stmt (ord_stmt); |
| location_t loc = gimple_location (ord_stmt); |
| tree c; |
| for (c = gimple_omp_ordered_clauses (ord_stmt); |
| c; c = OMP_CLAUSE_CHAIN (c)) |
| if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE) |
| break; |
| if (c) |
| expand_omp_ordered_source (&gsi, fd, counts, loc); |
| for (c = gimple_omp_ordered_clauses (ord_stmt); |
| c; c = OMP_CLAUSE_CHAIN (c)) |
| if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK) |
| expand_omp_ordered_sink (&gsi, fd, counts, c, loc); |
| gsi_remove (&gsi, true); |
| } |
| } |
| |
| /* Wrap the body into fd->ordered - fd->collapse loops that aren't |
| collapsed. */ |
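| |
| /* Illustration (not emitted verbatim): with ordered(2) and no collapse |
|    clause only the outer loop is work-shared, so the body becomes roughly |
| |
|      for (V2 = N21; V2 cond2 N22; V2 += STEP2) |
|        { |
|          .orditera[1] = <iteration number of V2>; |
|          BODY; |
|        } |
| |
|    keeping the doacross iteration vector up to date for the source/sink |
|    expansion above.  */ |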
| |
| static basic_block |
| expand_omp_for_ordered_loops (struct omp_for_data *fd, tree *counts, |
| basic_block cont_bb, basic_block body_bb, |
| bool ordered_lastprivate) |
| { |
| if (fd->ordered == fd->collapse) |
| return cont_bb; |
| |
| if (!cont_bb) |
| { |
| gimple_stmt_iterator gsi = gsi_after_labels (body_bb); |
| for (int i = fd->collapse; i < fd->ordered; i++) |
| { |
| tree type = TREE_TYPE (fd->loops[i].v); |
| tree n1 = fold_convert (type, fd->loops[i].n1); |
| expand_omp_build_assign (&gsi, fd->loops[i].v, n1); |
| tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], |
| size_int (i - fd->collapse + 1), |
| NULL_TREE, NULL_TREE); |
| expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type)); |
| } |
| return NULL; |
| } |
| |
| for (int i = fd->ordered - 1; i >= fd->collapse; i--) |
| { |
| tree t, type = TREE_TYPE (fd->loops[i].v); |
| gimple_stmt_iterator gsi = gsi_after_labels (body_bb); |
| expand_omp_build_assign (&gsi, fd->loops[i].v, |
| fold_convert (type, fd->loops[i].n1)); |
| if (counts[i]) |
| expand_omp_build_assign (&gsi, counts[i], |
| build_zero_cst (fd->iter_type)); |
| tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], |
| size_int (i - fd->collapse + 1), |
| NULL_TREE, NULL_TREE); |
| expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type)); |
| if (!gsi_end_p (gsi)) |
| gsi_prev (&gsi); |
| else |
| gsi = gsi_last_bb (body_bb); |
| edge e1 = split_block (body_bb, gsi_stmt (gsi)); |
| basic_block new_body = e1->dest; |
| if (body_bb == cont_bb) |
| cont_bb = new_body; |
| edge e2 = NULL; |
| basic_block new_header; |
| if (EDGE_COUNT (cont_bb->preds) > 0) |
| { |
| gsi = gsi_last_bb (cont_bb); |
| if (POINTER_TYPE_P (type)) |
| t = fold_build_pointer_plus (fd->loops[i].v, |
| fold_convert (sizetype, |
| fd->loops[i].step)); |
| else |
| t = fold_build2 (PLUS_EXPR, type, fd->loops[i].v, |
| fold_convert (type, fd->loops[i].step)); |
| expand_omp_build_assign (&gsi, fd->loops[i].v, t); |
| if (counts[i]) |
| { |
| t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[i], |
| build_int_cst (fd->iter_type, 1)); |
| expand_omp_build_assign (&gsi, counts[i], t); |
| t = counts[i]; |
| } |
| else |
| { |
| t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[i].v), |
| fd->loops[i].v, fd->loops[i].n1); |
| t = fold_convert (fd->iter_type, t); |
| t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, |
| true, GSI_SAME_STMT); |
| } |
| aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], |
| size_int (i - fd->collapse + 1), |
| NULL_TREE, NULL_TREE); |
| expand_omp_build_assign (&gsi, aref, t); |
| gsi_prev (&gsi); |
| e2 = split_block (cont_bb, gsi_stmt (gsi)); |
| new_header = e2->dest; |
| } |
| else |
| new_header = cont_bb; |
| gsi = gsi_after_labels (new_header); |
| tree v = force_gimple_operand_gsi (&gsi, fd->loops[i].v, true, NULL_TREE, |
| true, GSI_SAME_STMT); |
| tree n2 |
| = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loops[i].n2), |
| true, NULL_TREE, true, GSI_SAME_STMT); |
| t = build2 (fd->loops[i].cond_code, boolean_type_node, v, n2); |
| gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_NEW_STMT); |
| edge e3 = split_block (new_header, gsi_stmt (gsi)); |
| cont_bb = e3->dest; |
| remove_edge (e1); |
| make_edge (body_bb, new_header, EDGE_FALLTHRU); |
| e3->flags = EDGE_FALSE_VALUE; |
|       e3->probability |
|         = profile_probability::guessed_always ().apply_scale (1, 8); |
| e1 = make_edge (new_header, new_body, EDGE_TRUE_VALUE); |
| e1->probability = e3->probability.invert (); |
| |
| set_immediate_dominator (CDI_DOMINATORS, new_header, body_bb); |
| set_immediate_dominator (CDI_DOMINATORS, new_body, new_header); |
| |
| if (e2) |
| { |
| struct loop *loop = alloc_loop (); |
| loop->header = new_header; |
| loop->latch = e2->src; |
| add_loop (loop, body_bb->loop_father); |
| } |
| } |
| |
| /* If there are any lastprivate clauses and it is possible some loops |
| might have zero iterations, ensure all the decls are initialized, |
| otherwise we could crash evaluating C++ class iterators with lastprivate |
| clauses. */ |
| bool need_inits = false; |
| for (int i = fd->collapse; ordered_lastprivate && i < fd->ordered; i++) |
| if (need_inits) |
| { |
| tree type = TREE_TYPE (fd->loops[i].v); |
| gimple_stmt_iterator gsi = gsi_after_labels (body_bb); |
| expand_omp_build_assign (&gsi, fd->loops[i].v, |
| fold_convert (type, fd->loops[i].n1)); |
| } |
| else |
| { |
| tree type = TREE_TYPE (fd->loops[i].v); |
| tree this_cond = fold_build2 (fd->loops[i].cond_code, |
| boolean_type_node, |
| fold_convert (type, fd->loops[i].n1), |
| fold_convert (type, fd->loops[i].n2)); |
| if (!integer_onep (this_cond)) |
| need_inits = true; |
| } |
| |
| return cont_bb; |
| } |
| |
| /* A subroutine of expand_omp_for. Generate code for a parallel |
| loop with any schedule. Given parameters: |
| |
| for (V = N1; V cond N2; V += STEP) BODY; |
| |
| where COND is "<" or ">", we generate pseudocode |
| |
| more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0); |
| if (more) goto L0; else goto L3; |
| L0: |
| V = istart0; |
| iend = iend0; |
| L1: |
| BODY; |
| V += STEP; |
| if (V cond iend) goto L1; else goto L2; |
| L2: |
| if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3; |
| L3: |
| |
| If this is a combined omp parallel loop, instead of the call to |
| GOMP_loop_foo_start, we call GOMP_loop_foo_next. |
|    If this is a gimple_omp_for_combined_p loop, then instead of assigning |
| V and iend in L0 we assign the first two _looptemp_ clause decls of the |
| inner GIMPLE_OMP_FOR and V += STEP; and |
| if (V cond iend) goto L1; else goto L2; are removed. |
| |
| For collapsed loops, given parameters: |
| collapse(3) |
| for (V1 = N11; V1 cond1 N12; V1 += STEP1) |
| for (V2 = N21; V2 cond2 N22; V2 += STEP2) |
| for (V3 = N31; V3 cond3 N32; V3 += STEP3) |
| BODY; |
| |
| we generate pseudocode |
| |
| if (__builtin_expect (N32 cond3 N31, 0)) goto Z0; |
| if (cond3 is <) |
| adj = STEP3 - 1; |
| else |
| adj = STEP3 + 1; |
| count3 = (adj + N32 - N31) / STEP3; |
| if (__builtin_expect (N22 cond2 N21, 0)) goto Z0; |
| if (cond2 is <) |
| adj = STEP2 - 1; |
| else |
| adj = STEP2 + 1; |
| count2 = (adj + N22 - N21) / STEP2; |
| if (__builtin_expect (N12 cond1 N11, 0)) goto Z0; |
| if (cond1 is <) |
| adj = STEP1 - 1; |
| else |
| adj = STEP1 + 1; |
| count1 = (adj + N12 - N11) / STEP1; |
| count = count1 * count2 * count3; |
| goto Z1; |
| Z0: |
| count = 0; |
| Z1: |
| more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0); |
| if (more) goto L0; else goto L3; |
| L0: |
| V = istart0; |
| T = V; |
| V3 = N31 + (T % count3) * STEP3; |
| T = T / count3; |
| V2 = N21 + (T % count2) * STEP2; |
| T = T / count2; |
| V1 = N11 + T * STEP1; |
| iend = iend0; |
| L1: |
| BODY; |
| V += 1; |
| if (V < iend) goto L10; else goto L2; |
| L10: |
| V3 += STEP3; |
| if (V3 cond3 N32) goto L1; else goto L11; |
| L11: |
| V3 = N31; |
| V2 += STEP2; |
| if (V2 cond2 N22) goto L1; else goto L12; |
| L12: |
| V2 = N21; |
| V1 += STEP1; |
| goto L1; |
| L2: |
| if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3; |
| L3: |
| |
| */ |
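| |
| /* As one concrete (illustrative) instantiation: for |
| |
|      #pragma omp for schedule(dynamic, 4) |
|      for (i = 0; i < n; i++) |
|        body (i); |
| |
|    START_FN/NEXT_FN are the GOMP_loop_dynamic_start/GOMP_loop_dynamic_next |
|    builtins and the pseudocode above corresponds to |
| |
|      if (GOMP_loop_dynamic_start (0, n, 1, 4, &istart0, &iend0)) |
|        do |
|          { |
|            for (i = istart0; i < iend0; i++) |
|              body (i); |
|          } |
|        while (GOMP_loop_dynamic_next (&istart0, &iend0)); |
| |
|    with the GOMP_loop_end* cleanup call emitted separately when the |
|    region's GIMPLE_OMP_RETURN is expanded.  */ |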
| |
| static void |
| expand_omp_for_generic (struct omp_region *region, |
| struct omp_for_data *fd, |
| enum built_in_function start_fn, |
| enum built_in_function next_fn, |
| gimple *inner_stmt) |
| { |
| tree type, istart0, iend0, iend; |
| tree t, vmain, vback, bias = NULL_TREE; |
| basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb; |
| basic_block l2_bb = NULL, l3_bb = NULL; |
| gimple_stmt_iterator gsi; |
| gassign *assign_stmt; |
| bool in_combined_parallel = is_combined_parallel (region); |
| bool broken_loop = region->cont == NULL; |
| edge e, ne; |
| tree *counts = NULL; |
| int i; |
| bool ordered_lastprivate = false; |
| |
| gcc_assert (!broken_loop || !in_combined_parallel); |
| gcc_assert (fd->iter_type == long_integer_type_node |
| || !in_combined_parallel); |
| |
| entry_bb = region->entry; |
| cont_bb = region->cont; |
| collapse_bb = NULL; |
| gcc_assert (EDGE_COUNT (entry_bb->succs) == 2); |
| gcc_assert (broken_loop |
| || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest); |
| l0_bb = split_edge (FALLTHRU_EDGE (entry_bb)); |
| l1_bb = single_succ (l0_bb); |
| if (!broken_loop) |
| { |
| l2_bb = create_empty_bb (cont_bb); |
| gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb |
| || (single_succ_edge (BRANCH_EDGE (cont_bb)->dest)->dest |
| == l1_bb)); |
| gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); |
| } |
| else |
| l2_bb = NULL; |
| l3_bb = BRANCH_EDGE (entry_bb)->dest; |
| exit_bb = region->exit; |
| |
| gsi = gsi_last_bb (entry_bb); |
| |
| gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); |
| if (fd->ordered |
| && omp_find_clause (gimple_omp_for_clauses (gsi_stmt (gsi)), |
| OMP_CLAUSE_LASTPRIVATE)) |
|     ordered_lastprivate = true; |
| if (fd->collapse > 1 || fd->ordered) |
| { |
| int first_zero_iter1 = -1, first_zero_iter2 = -1; |
| basic_block zero_iter1_bb = NULL, zero_iter2_bb = NULL, l2_dom_bb = NULL; |
| |
| counts = XALLOCAVEC (tree, fd->ordered ? fd->ordered + 1 : fd->collapse); |
| expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, |
| zero_iter1_bb, first_zero_iter1, |
| zero_iter2_bb, first_zero_iter2, l2_dom_bb); |
| |
| if (zero_iter1_bb) |
| { |
| /* Some counts[i] vars might be uninitialized if |
| some loop has zero iterations. But the body shouldn't |
| be executed in that case, so just avoid uninit warnings. */ |
| for (i = first_zero_iter1; |
| i < (fd->ordered ? fd->ordered : fd->collapse); i++) |
| if (SSA_VAR_P (counts[i])) |
| TREE_NO_WARNING (counts[i]) = 1; |
| gsi_prev (&gsi); |
| e = split_block (entry_bb, gsi_stmt (gsi)); |
| entry_bb = e->dest; |
| make_edge (zero_iter1_bb, entry_bb, EDGE_FALLTHRU); |
| gsi = gsi_last_bb (entry_bb); |
| set_immediate_dominator (CDI_DOMINATORS, entry_bb, |
| get_immediate_dominator (CDI_DOMINATORS, |
| zero_iter1_bb)); |
| } |
| if (zero_iter2_bb) |
| { |
| /* Some counts[i] vars might be uninitialized if |
| some loop has zero iterations. But the body shouldn't |
| be executed in that case, so just avoid uninit warnings. */ |
| for (i = first_zero_iter2; i < fd->ordered; i++) |
| if (SSA_VAR_P (counts[i])) |
| TREE_NO_WARNING (counts[i]) = 1; |
| if (zero_iter1_bb) |
| make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU); |
| else |
| { |
| gsi_prev (&gsi); |
| e = split_block (entry_bb, gsi_stmt (gsi)); |
| entry_bb = e->dest; |
| make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU); |
| gsi = gsi_last_bb (entry_bb); |
| set_immediate_dominator (CDI_DOMINATORS, entry_bb, |
| get_immediate_dominator |
| (CDI_DOMINATORS, zero_iter2_bb)); |
| } |
| } |
| if (fd->collapse == 1) |
| { |
| counts[0] = fd->loop.n2; |
| fd->loop = fd->loops[0]; |
| } |
| } |
| |
| type = TREE_TYPE (fd->loop.v); |
| istart0 = create_tmp_var (fd->iter_type, ".istart0"); |
| iend0 = create_tmp_var (fd->iter_type, ".iend0"); |
| TREE_ADDRESSABLE (istart0) = 1; |
| TREE_ADDRESSABLE (iend0) = 1; |
| |
| /* See if we need to bias by LLONG_MIN. */ |
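|   /* (Adding the most negative value of TYPE maps the signed iteration |
|      bounds order-preservingly onto the unsigned long long range used by |
|      the runtime; the bias is subtracted back out when istart0/iend0 are |
|      read in L0_BB below.)  */ |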
| if (fd->iter_type == long_long_unsigned_type_node |
| && TREE_CODE (type) == INTEGER_TYPE |
| && !TYPE_UNSIGNED (type) |
| && fd->ordered == 0) |
| { |
| tree n1, n2; |
| |
| if (fd->loop.cond_code == LT_EXPR) |
| { |
| n1 = fd->loop.n1; |
| n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step); |
| } |
| else |
| { |
| n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step); |
| n2 = fd->loop.n1; |
| } |
| if (TREE_CODE (n1) != INTEGER_CST |
| || TREE_CODE (n2) != INTEGER_CST |
| || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0))) |
| bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type)); |
| } |
| |
| gimple_stmt_iterator gsif = gsi; |
| gsi_prev (&gsif); |
| |
| tree arr = NULL_TREE; |
| if (in_combined_parallel) |
| { |
| gcc_assert (fd->ordered == 0); |
| /* In a combined parallel loop, emit a call to |
| GOMP_loop_foo_next. */ |
| t = build_call_expr (builtin_decl_explicit (next_fn), 2, |
| build_fold_addr_expr (istart0), |
| build_fold_addr_expr (iend0)); |
| } |
| else |
| { |
| tree t0, t1, t2, t3, t4; |
| /* If this is not a combined parallel loop, emit a call to |
| GOMP_loop_foo_start in ENTRY_BB. */ |
| t4 = build_fold_addr_expr (iend0); |
| t3 = build_fold_addr_expr (istart0); |
| if (fd->ordered) |
| { |
| t0 = build_int_cst (unsigned_type_node, |
| fd->ordered - fd->collapse + 1); |
| arr = create_tmp_var (build_array_type_nelts (fd->iter_type, |
| fd->ordered |
| - fd->collapse + 1), |
| ".omp_counts"); |
| DECL_NAMELESS (arr) = 1; |
| TREE_ADDRESSABLE (arr) = 1; |
| TREE_STATIC (arr) = 1; |
| vec<constructor_elt, va_gc> *v; |
| vec_alloc (v, fd->ordered - fd->collapse + 1); |
| int idx; |
| |
| for (idx = 0; idx < fd->ordered - fd->collapse + 1; idx++) |
| { |
| tree c; |
| if (idx == 0 && fd->collapse > 1) |
| c = fd->loop.n2; |
| else |
| c = counts[idx + fd->collapse - 1]; |
| tree purpose = size_int (idx); |
| CONSTRUCTOR_APPEND_ELT (v, purpose, c); |
| if (TREE_CODE (c) != INTEGER_CST) |
| TREE_STATIC (arr) = 0; |
| } |
| |
| DECL_INITIAL (arr) = build_constructor (TREE_TYPE (arr), v); |
| if (!TREE_STATIC (arr)) |
| force_gimple_operand_gsi (&gsi, build1 (DECL_EXPR, |
| void_type_node, arr), |
| true, NULL_TREE, true, GSI_SAME_STMT); |
| t1 = build_fold_addr_expr (arr); |
| t2 = NULL_TREE; |
| } |
| else |
| { |
| t2 = fold_convert (fd->iter_type, fd->loop.step); |
| t1 = fd->loop.n2; |
| t0 = fd->loop.n1; |
| if (gimple_omp_for_combined_into_p (fd->for_stmt)) |
| { |
| tree innerc |
| = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), |
| OMP_CLAUSE__LOOPTEMP_); |
| gcc_assert (innerc); |
| t0 = OMP_CLAUSE_DECL (innerc); |
| innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), |
| OMP_CLAUSE__LOOPTEMP_); |
| gcc_assert (innerc); |
| t1 = OMP_CLAUSE_DECL (innerc); |
| } |
| if (POINTER_TYPE_P (TREE_TYPE (t0)) |
| && TYPE_PRECISION (TREE_TYPE (t0)) |
| != TYPE_PRECISION (fd->iter_type)) |
| { |
| /* Avoid casting pointers to integer of a different size. */ |
| tree itype = signed_type_for (type); |
| t1 = fold_convert (fd->iter_type, fold_convert (itype, t1)); |
| t0 = fold_convert (fd->iter_type, fold_convert (itype, t0)); |
| } |
| else |
| { |
| t1 = fold_convert (fd->iter_type, t1); |
| t0 = fold_convert (fd->iter_type, t0); |
| } |
| if (bias) |
| { |
| t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias); |
| t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias); |
| } |
| } |
| if (fd->iter_type == long_integer_type_node || fd->ordered) |
| { |
| if (fd->chunk_size) |
| { |
| t = fold_convert (fd->iter_type, fd->chunk_size); |
| t = omp_adjust_chunk_size (t, fd->simd_schedule); |
| if (fd->ordered) |
| t = build_call_expr (builtin_decl_explicit (start_fn), |
| 5, t0, t1, t, t3, t4); |
| else |
| t = build_call_expr (builtin_decl_explicit (start_fn), |
| 6, t0, t1, t2, t, t3, t4); |
| } |
| else if (fd->ordered) |
| t = build_call_expr (builtin_decl_explicit (start_fn), |
| 4, t0, t1, t3, t4); |
| else |
| t = build_call_expr (builtin_decl_explicit (start_fn), |
| 5, t0, t1, t2, t3, t4); |
| } |
| else |
| { |
| tree t5; |
| tree c_bool_type; |
| tree bfn_decl; |
| |
|           /* The GOMP_loop_ull_*start functions have an additional boolean |
|              argument, true for < loops and false for > loops. |
| In Fortran, the C bool type can be different from |
| boolean_type_node. */ |
| bfn_decl = builtin_decl_explicit (start_fn); |
| c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl)); |
| t5 = build_int_cst (c_bool_type, |
| fd->loop.cond_code == LT_EXPR ? 1 : 0); |
| if (fd->chunk_size) |
| { |
| tree bfn_decl = builtin_decl_explicit (start_fn); |
| t = fold_convert (fd->iter_type, fd->chunk_size); |
| t = omp_adjust_chunk_size (t, fd->simd_schedule); |
| t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4); |
| } |
| else |
| t = build_call_expr (builtin_decl_explicit (start_fn), |
| 6, t5, t0, t1, t2, t3, t4); |
| } |
| } |
| if (TREE_TYPE (t) != boolean_type_node) |
| t = fold_build2 (NE_EXPR, boolean_type_node, |
| t, build_int_cst (TREE_TYPE (t), 0)); |
| t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, |
| true, GSI_SAME_STMT); |
| if (arr && !TREE_STATIC (arr)) |
| { |
| tree clobber = build_constructor (TREE_TYPE (arr), NULL); |
| TREE_THIS_VOLATILE (clobber) = 1; |
| gsi_insert_before (&gsi, gimple_build_assign (arr, clobber), |
| GSI_SAME_STMT); |
| } |
| gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); |
| |
| /* Remove the GIMPLE_OMP_FOR statement. */ |
| gsi_remove (&gsi, true); |
| |
| if (gsi_end_p (gsif)) |
| gsif = gsi_after_labels (gsi_bb (gsif)); |
| gsi_next (&gsif); |
| |
| /* Iteration setup for sequential loop goes in L0_BB. */ |
| tree startvar = fd->loop.v; |
| tree endvar = NULL_TREE; |
| |
| if (gimple_omp_for_combined_p (fd->for_stmt)) |
| { |
| gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR |
| && gimple_omp_for_kind (inner_stmt) |
| == GF_OMP_FOR_KIND_SIMD); |
| tree innerc = omp_find_clause (gimple_omp_for_clauses (inner_stmt), |
| OMP_CLAUSE__LOOPTEMP_); |
| gcc_assert (innerc); |
| startvar = OMP_CLAUSE_DECL (innerc); |
| innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), |
| OMP_CLAUSE__LOOPTEMP_); |
| gcc_assert (innerc); |
| endvar = OMP_CLAUSE_DECL (innerc); |
| } |
| |
| gsi = gsi_start_bb (l0_bb); |
| t = istart0; |
| if (fd->ordered && fd->collapse == 1) |
| t = fold_build2 (MULT_EXPR, fd->iter_type, t, |
| fold_convert (fd->iter_type, fd->loop.step)); |
| else if (bias) |
| t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias); |
| if (fd->ordered && fd->collapse == 1) |
| { |
| if (POINTER_TYPE_P (TREE_TYPE (startvar))) |
| t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar), |
| fd->loop.n1, fold_convert (sizetype, t)); |
| else |
| { |
| t = fold_convert (TREE_TYPE (startvar), t); |
| t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar), |
| fd->loop.n1, t); |
| } |
| } |
| else |
| { |
| if (POINTER_TYPE_P (TREE_TYPE (startvar))) |
| t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t); |
| t = fold_convert (TREE_TYPE (startvar), t); |
| } |
| t = force_gimple_operand_gsi (&gsi, t, |
| DECL_P (startvar) |
| && TREE_ADDRESSABLE (startvar), |
| NULL_TREE, false, GSI_CONTINUE_LINKING); |
| assign_stmt = gimple_build_assign (startvar, t); |
| gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); |
| |
| t = iend0; |
| if (fd->ordered && fd->collapse == 1) |
| t = fold_build2 (MULT_EXPR, fd->iter_type, t, |
| fold_convert (fd->iter_type, fd->loop.step)); |
| else if (bias) |
| t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias); |
| if (fd->ordered && fd->collapse == 1) |
| { |
| if (POINTER_TYPE_P (TREE_TYPE (startvar))) |
| t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar), |
| fd->loop.n1, fold_convert (sizetype, t)); |
| else |
| { |
| t = fold_convert (TREE_TYPE (startvar), t); |
| t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar), |
| fd->loop.n1, t); |
| } |
| } |
| else |
| { |
| if (POINTER_TYPE_P (TREE_TYPE (startvar))) |
| t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t); |
| t = fold_convert (TREE_TYPE (startvar), t); |
| } |
| iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, |
| false, GSI_CONTINUE_LINKING); |
| if (endvar) |
| { |
| assign_stmt = gimple_build_assign (endvar, iend); |
| gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); |
| if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend))) |
| assign_stmt = gimple_build_assign (fd->loop.v, iend); |
| else |
| assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend); |
| gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); |
| } |
| /* Handle linear clause adjustments. */ |
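|   /* For a linear(d:s) clause the variable must enter this chunk holding |
|      its value from before the loop plus S times the number of logical |
|      iterations preceding STARTVAR; ITERCNT below is that iteration count, |
|      (STARTVAR - N1) / STEP, shared by all linear clauses on the stmt.  */ |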
| tree itercnt = NULL_TREE; |
| if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR) |
| for (tree c = gimple_omp_for_clauses (fd->for_stmt); |
| c; c = OMP_CLAUSE_CHAIN (c)) |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR |
| && !OMP_CLAUSE_LINEAR_NO_COPYIN (c)) |
| { |
| tree d = OMP_CLAUSE_DECL (c); |
| bool is_ref = omp_is_reference (d); |
| tree t = d, a, dest; |
| if (is_ref) |
| t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t); |
| tree type = TREE_TYPE (t); |
| if (POINTER_TYPE_P (type)) |
| type = sizetype; |
| dest = unshare_expr (t); |
| tree v = create_tmp_var (TREE_TYPE (t), NULL); |
| expand_omp_build_assign (&gsif, v, t); |
| if (itercnt == NULL_TREE) |
| { |
| itercnt = startvar; |
| tree n1 = fd->loop.n1; |
| if (POINTER_TYPE_P (TREE_TYPE (itercnt))) |
| { |
| itercnt |
| = fold_convert (signed_type_for (TREE_TYPE (itercnt)), |
| itercnt); |
| n1 = fold_convert (TREE_TYPE (itercnt), n1); |
| } |
| itercnt = fold_build2 (MINUS_EXPR, TREE_TYPE (itercnt), |
| itercnt, n1); |
| itercnt = fold_build2 (EXACT_DIV_EXPR, TREE_TYPE (itercnt), |
| itercnt, fd->loop.step); |
| itercnt = force_gimple_operand_gsi (&gsi, itercnt, true, |
| NULL_TREE, false, |
| GSI_CONTINUE_LINKING); |
| } |
| a = fold_build2 (MULT_EXPR, type, |
| fold_convert (type, itercnt), |
| fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c))); |
| t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR |
| : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a); |
| t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, |
| false, GSI_CONTINUE_LINKING); |
| assign_stmt = gimple_build_assign (dest, t); |
| gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); |
|