| /* Lowering pass for OMP directives. Converts OMP directives into explicit |
| calls to the runtime library (libgomp), performs the data marshalling |
| needed to implement data sharing and copying clauses, arranges |
| offloading to accelerators, and more. |
| |
| Contributed by Diego Novillo <dnovillo@redhat.com> |
| |
| Copyright (C) 2005-2022 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "tree-pass.h" |
| #include "ssa.h" |
| #include "cgraph.h" |
| #include "pretty-print.h" |
| #include "diagnostic-core.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "internal-fn.h" |
| #include "gimple-fold.h" |
| #include "gimplify.h" |
| #include "gimple-iterator.h" |
| #include "gimplify-me.h" |
| #include "gimple-walk.h" |
| #include "tree-iterator.h" |
| #include "tree-inline.h" |
| #include "langhooks.h" |
| #include "tree-dfa.h" |
| #include "tree-ssa.h" |
| #include "splay-tree.h" |
| #include "omp-general.h" |
| #include "omp-low.h" |
| #include "gimple-low.h" |
| #include "alloc-pool.h" |
| #include "symbol-summary.h" |
| #include "tree-nested.h" |
| #include "context.h" |
| #include "gomp-constants.h" |
| #include "gimple-pretty-print.h" |
| #include "stringpool.h" |
| #include "attribs.h" |
| #include "omp-offload.h" |
| |
| /* Lowering of OMP parallel and workshare constructs proceeds in two |
| phases. The first phase scans the function looking for OMP statements |
| and then for variables that must be replaced to satisfy data sharing |
| clauses. The second phase expands code for the constructs, as well as |
| re-gimplifying things when variables have been replaced with complex |
| expressions. |
| |
| Final code generation is done by pass_expand_omp. The flowgraph is |
| scanned for regions, each of which is then moved to a new |
| function, to be invoked by the thread library or offloaded. */ |
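| |
| /* As a rough sketch (exact names and details vary), a parallel region |
| such as |
| |
| #pragma omp parallel shared (x) |
| x++; |
| |
| is lowered into an outlined child function plus a libgomp call, with |
| the shared variable marshalled through a record: |
| |
| struct .omp_data_s { int *x; } .omp_data_o; |
| .omp_data_o.x = &x; |
| GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0); |
| |
| void foo._omp_fn.0 (struct .omp_data_s *.omp_data_i) |
| { (*.omp_data_i->x)++; } |
| |
| The record/sender/receiver decls below implement this marshalling. */ |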
| |
| /* Context structure. Used to store information about each parallel |
| directive in the code. */ |
| |
| struct omp_context |
| { |
| /* This field must be at the beginning, as we do "inheritance": Some |
| callback functions for tree-inline.cc (e.g., omp_copy_decl) |
| receive a copy_body_data pointer that is cast back to an |
| omp_context pointer. */ |
| copy_body_data cb; |
| |
| /* The tree of contexts corresponding to the encountered constructs. */ |
| struct omp_context *outer; |
| gimple *stmt; |
| |
| /* Map variables to fields in a structure that allows communication |
| between sending and receiving threads. */ |
| splay_tree field_map; |
| tree record_type; |
| tree sender_decl; |
| tree receiver_decl; |
| |
| /* These are used just by task contexts, if the task firstprivate fn is |
| needed. srecord_type is used to communicate from the thread that |
| encountered the task construct to the task firstprivate fn; record_type |
| is allocated by GOMP_task, initialized by the task firstprivate fn and |
| passed to the task body fn. */ |
| splay_tree sfield_map; |
| tree srecord_type; |
| |
| /* A chain of variables to add to the top-level block surrounding the |
| construct. In the case of a parallel, this is in the child function. */ |
| tree block_vars; |
| |
| /* Label to which GOMP_cancel{,lation_point} and explicit and implicit |
| barriers should jump during the omplower pass. */ |
| tree cancel_label; |
| |
| /* The sibling GIMPLE_OMP_FOR simd statement with a _simt_ clause, or |
| NULL otherwise. */ |
| gimple *simt_stmt; |
| |
| /* For task reductions registered in this context, a vector containing |
| the length of the private copies block (if constant, otherwise NULL) |
| and then offsets (if constant, otherwise NULL) for each entry. */ |
| vec<tree> task_reductions; |
| |
| /* A hash map from the reduction clauses to the registered array |
| elts. */ |
| hash_map<tree, unsigned> *task_reduction_map; |
| |
| /* And a hash map from the lastprivate(conditional:) variables to their |
| corresponding tracking loop iteration variables. */ |
| hash_map<tree, tree> *lastprivate_conditional_map; |
| |
| /* And a hash map from the allocate variables to their corresponding |
| allocators. */ |
| hash_map<tree, tree> *allocate_map; |
| |
| /* A tree_list of the reduction clauses in this context. This is |
| only used for checking the consistency of OpenACC reduction |
| clauses in scan_omp_for and is not guaranteed to contain a valid |
| value outside of this function. */ |
| tree local_reduction_clauses; |
| |
| /* A tree_list of the reduction clauses in outer contexts. This is |
| only used for checking the consistency of OpenACC reduction |
| clauses in scan_omp_for and is not guaranteed to contain a valid |
| value outside of this function. */ |
| tree outer_reduction_clauses; |
| |
| /* Nesting depth of this context. Used to beautify error messages about |
| invalid gotos. The outermost ctx is depth 1, with depth 0 being |
| reserved for the main body of the function. */ |
| int depth; |
| |
| /* True if this parallel directive is nested within another. */ |
| bool is_nested; |
| |
| /* True if this construct can be cancelled. */ |
| bool cancellable; |
| |
| /* True if lower_omp_1 should look up lastprivate conditional in parent |
| context. */ |
| bool combined_into_simd_safelen1; |
| |
| /* True if there is nested scan context with inclusive clause. */ |
| bool scan_inclusive; |
| |
| /* True if there is nested scan context with exclusive clause. */ |
| bool scan_exclusive; |
| |
| /* True in the second simd loop of for simd with inscan reductions. */ |
| bool for_simd_scan_phase; |
| |
| /* True if there is order(concurrent) clause on the construct. */ |
| bool order_concurrent; |
| |
| /* True if there is bind clause on the construct (i.e. a loop construct). */ |
| bool loop_p; |
| |
| /* Only used for omp target contexts. True if a teams construct is |
| strictly nested in it. */ |
| bool teams_nested_p; |
| |
| /* Only used for omp target contexts. True if an OpenMP construct other |
| than teams is strictly nested in it. */ |
| bool nonteams_nested_p; |
| |
| /* Candidates for adjusting OpenACC privatization level. */ |
| vec<tree> oacc_privatization_candidates; |
| }; |
| |
| static splay_tree all_contexts; |
| static int taskreg_nesting_level; |
| static int target_nesting_level; |
| static bitmap make_addressable_vars; |
| static bitmap global_nonaddressable_vars; |
| static vec<omp_context *> taskreg_contexts; |
| static vec<gomp_task *> task_cpyfns; |
| |
| static void scan_omp (gimple_seq *, omp_context *); |
| static tree scan_omp_1_op (tree *, int *, void *); |
| |
| #define WALK_SUBSTMTS \ |
| case GIMPLE_BIND: \ |
| case GIMPLE_TRY: \ |
| case GIMPLE_CATCH: \ |
| case GIMPLE_EH_FILTER: \ |
| case GIMPLE_TRANSACTION: \ |
| /* The sub-statements for these should be walked. */ \ |
| *handled_ops_p = false; \ |
| break; |
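| |
| /* A typical use, inside a walk_gimple_stmt callback (a sketch): |
| |
| switch (gimple_code (stmt)) |
| { |
| WALK_SUBSTMTS; |
| case GIMPLE_OMP_PARALLEL: |
| ...; |
| break; |
| default: |
| break; |
| } |
| |
| so container statements are descended into rather than handled. */ |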
| |
| /* Return whether CTX represents an OpenACC 'parallel' or 'serial' construct. |
| (This doesn't include OpenACC 'kernels' decomposed parts.) */ |
| |
| static bool |
| is_oacc_parallel_or_serial (omp_context *ctx) |
| { |
| enum gimple_code outer_type = gimple_code (ctx->stmt); |
| return ((outer_type == GIMPLE_OMP_TARGET) |
| && ((gimple_omp_target_kind (ctx->stmt) |
| == GF_OMP_TARGET_KIND_OACC_PARALLEL) |
| || (gimple_omp_target_kind (ctx->stmt) |
| == GF_OMP_TARGET_KIND_OACC_SERIAL))); |
| } |
| |
| /* Return whether CTX represents an OpenACC 'kernels' construct. |
| (This doesn't include OpenACC 'kernels' decomposed parts.) */ |
| |
| static bool |
| is_oacc_kernels (omp_context *ctx) |
| { |
| enum gimple_code outer_type = gimple_code (ctx->stmt); |
| return ((outer_type == GIMPLE_OMP_TARGET) |
| && (gimple_omp_target_kind (ctx->stmt) |
| == GF_OMP_TARGET_KIND_OACC_KERNELS)); |
| } |
| |
| /* Return whether CTX represents an OpenACC 'kernels' decomposed part. */ |
| |
| static bool |
| is_oacc_kernels_decomposed_part (omp_context *ctx) |
| { |
| enum gimple_code outer_type = gimple_code (ctx->stmt); |
| return ((outer_type == GIMPLE_OMP_TARGET) |
| && ((gimple_omp_target_kind (ctx->stmt) |
| == GF_OMP_TARGET_KIND_OACC_PARALLEL_KERNELS_PARALLELIZED) |
| || (gimple_omp_target_kind (ctx->stmt) |
| == GF_OMP_TARGET_KIND_OACC_PARALLEL_KERNELS_GANG_SINGLE) |
| || (gimple_omp_target_kind (ctx->stmt) |
| == GF_OMP_TARGET_KIND_OACC_DATA_KERNELS))); |
| } |
| |
| /* Return true if STMT is an OpenMP target region or target data |
| construct (target, target data, target enter data, target exit data). */ |
| |
| static bool |
| is_omp_target (gimple *stmt) |
| { |
| if (gimple_code (stmt) == GIMPLE_OMP_TARGET) |
| { |
| int kind = gimple_omp_target_kind (stmt); |
| return (kind == GF_OMP_TARGET_KIND_REGION |
| || kind == GF_OMP_TARGET_KIND_DATA |
| || kind == GF_OMP_TARGET_KIND_ENTER_DATA |
| || kind == GF_OMP_TARGET_KIND_EXIT_DATA); |
| } |
| return false; |
| } |
| |
| /* If DECL is the artificial dummy VAR_DECL created for non-static |
| data member privatization, return the underlying "this" parameter, |
| otherwise return NULL. */ |
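| |
| /* For example (a sketch; the exact trees are front-end specific), in |
| |
| struct S { |
| int m; |
| void f () |
| { |
| #pragma omp parallel private (m) |
| m = 0; |
| } |
| }; |
| |
| the front end privatizes the non-static data member M via an artificial |
| VAR_DECL whose DECL_VALUE_EXPR is this->m; this function recovers the |
| underlying "this" PARM_DECL from such a decl. */ |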
| |
| tree |
| omp_member_access_dummy_var (tree decl) |
| { |
| if (!VAR_P (decl) |
| || !DECL_ARTIFICIAL (decl) |
| || !DECL_IGNORED_P (decl) |
| || !DECL_HAS_VALUE_EXPR_P (decl) |
| || !lang_hooks.decls.omp_disregard_value_expr (decl, false)) |
| return NULL_TREE; |
| |
| tree v = DECL_VALUE_EXPR (decl); |
| if (TREE_CODE (v) != COMPONENT_REF) |
| return NULL_TREE; |
| |
| while (1) |
| switch (TREE_CODE (v)) |
| { |
| case COMPONENT_REF: |
| case MEM_REF: |
| case INDIRECT_REF: |
| CASE_CONVERT: |
| case POINTER_PLUS_EXPR: |
| v = TREE_OPERAND (v, 0); |
| continue; |
| case PARM_DECL: |
| if (DECL_CONTEXT (v) == current_function_decl |
| && DECL_ARTIFICIAL (v) |
| && TREE_CODE (TREE_TYPE (v)) == POINTER_TYPE) |
| return v; |
| return NULL_TREE; |
| default: |
| return NULL_TREE; |
| } |
| } |
| |
| /* Helper for unshare_and_remap, called through walk_tree. */ |
| |
| static tree |
| unshare_and_remap_1 (tree *tp, int *walk_subtrees, void *data) |
| { |
| tree *pair = (tree *) data; |
| if (*tp == pair[0]) |
| { |
| *tp = unshare_expr (pair[1]); |
| *walk_subtrees = 0; |
| } |
| else if (IS_TYPE_OR_DECL_P (*tp)) |
| *walk_subtrees = 0; |
| return NULL_TREE; |
| } |
| |
| /* Return unshare_expr (X) with all occurrences of FROM |
| replaced with TO. */ |
| |
| static tree |
| unshare_and_remap (tree x, tree from, tree to) |
| { |
| tree pair[2] = { from, to }; |
| x = unshare_expr (x); |
| walk_tree (&x, unshare_and_remap_1, pair, NULL); |
| return x; |
| } |
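| |
| /* E.g., build_outer_var_ref below uses unshare_and_remap to rebuild a |
| DECL_VALUE_EXPR with the dummy "this"-based decl T replaced by its |
| outer context counterpart O. */ |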
| |
| /* Convenience function for calling scan_omp_1_op on tree operands. */ |
| |
| static inline tree |
| scan_omp_op (tree *tp, omp_context *ctx) |
| { |
| struct walk_stmt_info wi; |
| |
| memset (&wi, 0, sizeof (wi)); |
| wi.info = ctx; |
| wi.want_locations = true; |
| |
| return walk_tree (tp, scan_omp_1_op, &wi, NULL); |
| } |
| |
| static void lower_omp (gimple_seq *, omp_context *); |
| static tree lookup_decl_in_outer_ctx (tree, omp_context *); |
| static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *); |
| |
| /* Return true if CTX is for an omp parallel. */ |
| |
| static inline bool |
| is_parallel_ctx (omp_context *ctx) |
| { |
| return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL; |
| } |
| |
| |
| /* Return true if CTX is for an omp task. */ |
| |
| static inline bool |
| is_task_ctx (omp_context *ctx) |
| { |
| return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK; |
| } |
| |
| |
| /* Return true if CTX is for an omp taskloop. */ |
| |
| static inline bool |
| is_taskloop_ctx (omp_context *ctx) |
| { |
| return gimple_code (ctx->stmt) == GIMPLE_OMP_FOR |
| && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP; |
| } |
| |
| |
| /* Return true if CTX is for a host omp teams. */ |
| |
| static inline bool |
| is_host_teams_ctx (omp_context *ctx) |
| { |
| return gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS |
| && gimple_omp_teams_host (as_a <gomp_teams *> (ctx->stmt)); |
| } |
| |
| /* Return true if CTX is for an omp parallel or omp task or host omp teams |
| (the last one is strictly not a task region in OpenMP speak, but we |
| need to treat it similarly). */ |
| |
| static inline bool |
| is_taskreg_ctx (omp_context *ctx) |
| { |
| return is_parallel_ctx (ctx) || is_task_ctx (ctx) || is_host_teams_ctx (ctx); |
| } |
| |
| /* Return true if EXPR is variable sized. */ |
| |
| static inline bool |
| is_variable_sized (const_tree expr) |
| { |
| return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr))); |
| } |
| |
| /* Lookup variables. The "maybe" form allows the variable not to have |
| been entered; otherwise we assert that the variable must have been |
| entered. */ |
| |
| static inline tree |
| lookup_decl (tree var, omp_context *ctx) |
| { |
| tree *n = ctx->cb.decl_map->get (var); |
| return *n; |
| } |
| |
| static inline tree |
| maybe_lookup_decl (const_tree var, omp_context *ctx) |
| { |
| tree *n = ctx->cb.decl_map->get (const_cast<tree> (var)); |
| return n ? *n : NULL_TREE; |
| } |
| |
| static inline tree |
| lookup_field (tree var, omp_context *ctx) |
| { |
| splay_tree_node n; |
| n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var); |
| return (tree) n->value; |
| } |
| |
| static inline tree |
| lookup_sfield (splay_tree_key key, omp_context *ctx) |
| { |
| splay_tree_node n; |
| n = splay_tree_lookup (ctx->sfield_map |
| ? ctx->sfield_map : ctx->field_map, key); |
| return (tree) n->value; |
| } |
| |
| static inline tree |
| lookup_sfield (tree var, omp_context *ctx) |
| { |
| return lookup_sfield ((splay_tree_key) var, ctx); |
| } |
| |
| static inline tree |
| maybe_lookup_field (splay_tree_key key, omp_context *ctx) |
| { |
| splay_tree_node n; |
| n = splay_tree_lookup (ctx->field_map, key); |
| return n ? (tree) n->value : NULL_TREE; |
| } |
| |
| static inline tree |
| maybe_lookup_field (tree var, omp_context *ctx) |
| { |
| return maybe_lookup_field ((splay_tree_key) var, ctx); |
| } |
| |
| /* Return true if DECL should be copied by pointer. SHARED_CTX is |
| the parallel context if DECL is to be shared. */ |
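| |
| /* E.g., a shared scalar "int x" that is not accessible from an outer |
| scope can use copy-in/copy-out through a plain "int" field, whereas an |
| aggregate "struct S s" or an addressable variable must be communicated |
| through a pointer field (a sketch of the common cases handled below). */ |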
| |
| static bool |
| use_pointer_for_field (tree decl, omp_context *shared_ctx) |
| { |
| if (AGGREGATE_TYPE_P (TREE_TYPE (decl)) |
| || TYPE_ATOMIC (TREE_TYPE (decl))) |
| return true; |
| |
| /* We can only use copy-in/copy-out semantics for shared variables |
| when we know the value is not accessible from an outer scope. */ |
| if (shared_ctx) |
| { |
| gcc_assert (!is_gimple_omp_oacc (shared_ctx->stmt)); |
| |
| /* ??? Trivially accessible from anywhere. But why would we even |
| be passing an address in this case? Should we simply assert |
| this to be false, or should we have a cleanup pass that removes |
| these from the list of mappings? */ |
| if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, shared_ctx))) |
| return true; |
| |
| /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell |
| without analyzing the expression whether or not its location |
| is accessible to anyone else. In the case of nested parallel |
| regions it certainly may be. */ |
| if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl)) |
| return true; |
| |
| /* Do not use copy-in/copy-out for variables that have their |
| address taken. */ |
| if (is_global_var (decl)) |
| { |
| /* For file scope vars, track whether we've seen them as |
| non-addressable initially and in that case, keep the same |
| answer for the duration of the pass, even when they are made |
| addressable later on e.g. through reduction expansion. Global |
| variables which weren't addressable before the pass will not |
| have their privatized copies address taken. See PR91216. */ |
| if (!TREE_ADDRESSABLE (decl)) |
| { |
| if (!global_nonaddressable_vars) |
| global_nonaddressable_vars = BITMAP_ALLOC (NULL); |
| bitmap_set_bit (global_nonaddressable_vars, DECL_UID (decl)); |
| } |
| else if (!global_nonaddressable_vars |
| || !bitmap_bit_p (global_nonaddressable_vars, |
| DECL_UID (decl))) |
| return true; |
| } |
| else if (TREE_ADDRESSABLE (decl)) |
| return true; |
| |
| /* lower_send_shared_vars only uses copy-in, but not copy-out |
| for these. */ |
| if (TREE_READONLY (decl) |
| || ((TREE_CODE (decl) == RESULT_DECL |
| || TREE_CODE (decl) == PARM_DECL) |
| && DECL_BY_REFERENCE (decl))) |
| return false; |
| |
| /* Disallow copy-in/out in nested parallel if |
| decl is shared in an outer parallel; otherwise |
| each thread could store the shared variable |
| in its own copy-in location, making the |
| variable no longer really shared. */ |
| if (shared_ctx->is_nested) |
| { |
| omp_context *up; |
| |
| for (up = shared_ctx->outer; up; up = up->outer) |
| if ((is_taskreg_ctx (up) |
| || (gimple_code (up->stmt) == GIMPLE_OMP_TARGET |
| && is_gimple_omp_offloaded (up->stmt))) |
| && maybe_lookup_decl (decl, up)) |
| break; |
| |
| if (up) |
| { |
| tree c; |
| |
| if (gimple_code (up->stmt) == GIMPLE_OMP_TARGET) |
| { |
| for (c = gimple_omp_target_clauses (up->stmt); |
| c; c = OMP_CLAUSE_CHAIN (c)) |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP |
| && OMP_CLAUSE_DECL (c) == decl) |
| break; |
| } |
| else |
| for (c = gimple_omp_taskreg_clauses (up->stmt); |
| c; c = OMP_CLAUSE_CHAIN (c)) |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED |
| && OMP_CLAUSE_DECL (c) == decl) |
| break; |
| |
| if (c) |
| goto maybe_mark_addressable_and_ret; |
| } |
| } |
| |
| /* For tasks, avoid using copy-in/out. As tasks can be |
| deferred or executed in a different thread, the task |
| hasn't necessarily terminated when GOMP_task returns. */ |
| if (is_task_ctx (shared_ctx)) |
| { |
| tree outer; |
| maybe_mark_addressable_and_ret: |
| outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx); |
| if (is_gimple_reg (outer) && !omp_member_access_dummy_var (outer)) |
| { |
| /* Taking address of OUTER in lower_send_shared_vars |
| might need regimplification of everything that uses the |
| variable. */ |
| if (!make_addressable_vars) |
| make_addressable_vars = BITMAP_ALLOC (NULL); |
| bitmap_set_bit (make_addressable_vars, DECL_UID (outer)); |
| TREE_ADDRESSABLE (outer) = 1; |
| } |
| return true; |
| } |
| } |
| |
| return false; |
| } |
| |
| /* Construct a new automatic decl similar to VAR. */ |
| |
| static tree |
| omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx) |
| { |
| tree copy = copy_var_decl (var, name, type); |
| |
| DECL_CONTEXT (copy) = current_function_decl; |
| |
| if (ctx) |
| { |
| DECL_CHAIN (copy) = ctx->block_vars; |
| ctx->block_vars = copy; |
| } |
| else |
| record_vars (copy); |
| |
| /* If VAR is listed in make_addressable_vars, it wasn't |
| originally addressable and was only later made so. |
| We don't need to take the address of privatized |
| copies of such a var. */ |
| if (TREE_ADDRESSABLE (var) |
| && ((make_addressable_vars |
| && bitmap_bit_p (make_addressable_vars, DECL_UID (var))) |
| || (global_nonaddressable_vars |
| && bitmap_bit_p (global_nonaddressable_vars, DECL_UID (var))))) |
| TREE_ADDRESSABLE (copy) = 0; |
| |
| return copy; |
| } |
| |
| static tree |
| omp_copy_decl_1 (tree var, omp_context *ctx) |
| { |
| return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx); |
| } |
| |
| /* Build tree nodes to access the field for VAR on the receiver side. */ |
| |
| static tree |
| build_receiver_ref (tree var, bool by_ref, omp_context *ctx) |
| { |
| tree x, field = lookup_field (var, ctx); |
| |
| /* If the receiver record type was remapped in the child function, |
| remap the field into the new record type. */ |
| x = maybe_lookup_field (field, ctx); |
| if (x != NULL) |
| field = x; |
| |
| x = build_simple_mem_ref (ctx->receiver_decl); |
| TREE_THIS_NOTRAP (x) = 1; |
| x = omp_build_component_ref (x, field); |
| if (by_ref) |
| { |
| x = build_simple_mem_ref (x); |
| TREE_THIS_NOTRAP (x) = 1; |
| } |
| |
| return x; |
| } |
| |
| /* Build tree nodes to access VAR in the scope outer to CTX. In the case |
| of a parallel, this is a component reference; for workshare constructs |
| this is some variable. */ |
| |
| static tree |
| build_outer_var_ref (tree var, omp_context *ctx, |
| enum omp_clause_code code = OMP_CLAUSE_ERROR) |
| { |
| tree x; |
| omp_context *outer = ctx->outer; |
| for (; outer; outer = outer->outer) |
| { |
| if (gimple_code (outer->stmt) == GIMPLE_OMP_TASKGROUP) |
| continue; |
| if (gimple_code (outer->stmt) == GIMPLE_OMP_SCOPE |
| && !maybe_lookup_decl (var, outer)) |
| continue; |
| break; |
| } |
| |
| if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx))) |
| x = var; |
| else if (is_variable_sized (var)) |
| { |
| x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0); |
| x = build_outer_var_ref (x, ctx, code); |
| x = build_simple_mem_ref (x); |
| } |
| else if (is_taskreg_ctx (ctx)) |
| { |
| bool by_ref = use_pointer_for_field (var, NULL); |
| x = build_receiver_ref (var, by_ref, ctx); |
| } |
| else if ((gimple_code (ctx->stmt) == GIMPLE_OMP_FOR |
| && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD) |
| || ctx->loop_p |
| || (code == OMP_CLAUSE_PRIVATE |
| && (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR |
| || gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS |
| || gimple_code (ctx->stmt) == GIMPLE_OMP_SINGLE))) |
| { |
| /* #pragma omp simd isn't a worksharing construct, and can reference |
| even private vars in its linear etc. clauses. |
| Similarly for OMP_CLAUSE_PRIVATE with outer ref, that can refer |
| to private vars in all worksharing constructs. */ |
| x = NULL_TREE; |
| if (outer && is_taskreg_ctx (outer)) |
| x = lookup_decl (var, outer); |
| else if (outer) |
| x = maybe_lookup_decl_in_outer_ctx (var, ctx); |
| if (x == NULL_TREE) |
| x = var; |
| } |
| else if (code == OMP_CLAUSE_LASTPRIVATE && is_taskloop_ctx (ctx)) |
| { |
| gcc_assert (outer); |
| splay_tree_node n |
| = splay_tree_lookup (outer->field_map, |
| (splay_tree_key) &DECL_UID (var)); |
| if (n == NULL) |
| { |
| if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, outer))) |
| x = var; |
| else |
| x = lookup_decl (var, outer); |
| } |
| else |
| { |
| tree field = (tree) n->value; |
| /* If the receiver record type was remapped in the child function, |
| remap the field into the new record type. */ |
| x = maybe_lookup_field (field, outer); |
| if (x != NULL) |
| field = x; |
| |
| x = build_simple_mem_ref (outer->receiver_decl); |
| x = omp_build_component_ref (x, field); |
| if (use_pointer_for_field (var, outer)) |
| x = build_simple_mem_ref (x); |
| } |
| } |
| else if (outer) |
| x = lookup_decl (var, outer); |
| else if (omp_privatize_by_reference (var)) |
| /* This can happen with orphaned constructs. If var is a reference, it |
| is possibly shared and as such valid. */ |
| x = var; |
| else if (omp_member_access_dummy_var (var)) |
| x = var; |
| else |
| gcc_unreachable (); |
| |
| if (x == var) |
| { |
| tree t = omp_member_access_dummy_var (var); |
| if (t) |
| { |
| x = DECL_VALUE_EXPR (var); |
| tree o = maybe_lookup_decl_in_outer_ctx (t, ctx); |
| if (o != t) |
| x = unshare_and_remap (x, t, o); |
| else |
| x = unshare_expr (x); |
| } |
| } |
| |
| if (omp_privatize_by_reference (var)) |
| x = build_simple_mem_ref (x); |
| |
| return x; |
| } |
| |
| /* Build tree nodes to access the field for VAR on the sender side. */ |
| |
| static tree |
| build_sender_ref (splay_tree_key key, omp_context *ctx) |
| { |
| tree field = lookup_sfield (key, ctx); |
| return omp_build_component_ref (ctx->sender_decl, field); |
| } |
| |
| static tree |
| build_sender_ref (tree var, omp_context *ctx) |
| { |
| return build_sender_ref ((splay_tree_key) var, ctx); |
| } |
| |
| /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */ |
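| |
| /* MASK is a bitmask, as inferred from the callers in this file: bits 1 |
| and 2 select insertion into FIELD_MAP resp. SFIELD_MAP, bit 4 requests |
| a pointer-to-pointer field for array types, bit 8 keys the maps by |
| &DECL_UID (VAR), bit 16 keys them by &DECL_NAME (VAR) and uses the |
| Fortran array-descriptor data type, and bit 32 keeps the reference type |
| of privatize-by-reference vars (used with the allocate clause). */ |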
| |
| static void |
| install_var_field (tree var, bool by_ref, int mask, omp_context *ctx) |
| { |
| tree field, type, sfield = NULL_TREE; |
| splay_tree_key key = (splay_tree_key) var; |
| |
| if ((mask & 16) != 0) |
| { |
| key = (splay_tree_key) &DECL_NAME (var); |
| gcc_checking_assert (key != (splay_tree_key) var); |
| } |
| if ((mask & 8) != 0) |
| { |
| key = (splay_tree_key) &DECL_UID (var); |
| gcc_checking_assert (key != (splay_tree_key) var); |
| } |
| gcc_assert ((mask & 1) == 0 |
| || !splay_tree_lookup (ctx->field_map, key)); |
| gcc_assert ((mask & 2) == 0 || !ctx->sfield_map |
| || !splay_tree_lookup (ctx->sfield_map, key)); |
| gcc_assert ((mask & 3) == 3 |
| || !is_gimple_omp_oacc (ctx->stmt)); |
| |
| type = TREE_TYPE (var); |
| if ((mask & 16) != 0) |
| type = lang_hooks.decls.omp_array_data (var, true); |
| |
| /* Prevent redeclaring the var in the split-off function with a restrict |
| pointer type. Note that we only clear type itself, restrict qualifiers in |
| the pointed-to type will be ignored by points-to analysis. */ |
| if (POINTER_TYPE_P (type) |
| && TYPE_RESTRICT (type)) |
| type = build_qualified_type (type, TYPE_QUALS (type) & ~TYPE_QUAL_RESTRICT); |
| |
| if (mask & 4) |
| { |
| gcc_assert (TREE_CODE (type) == ARRAY_TYPE); |
| type = build_pointer_type (build_pointer_type (type)); |
| } |
| else if (by_ref) |
| type = build_pointer_type (type); |
| else if ((mask & (32 | 3)) == 1 |
| && omp_privatize_by_reference (var)) |
| type = TREE_TYPE (type); |
| |
| field = build_decl (DECL_SOURCE_LOCATION (var), |
| FIELD_DECL, DECL_NAME (var), type); |
| |
| /* Remember what variable this field was created for. This does have a |
| side effect of making dwarf2out ignore this member, so for helpful |
| debugging we clear it later in delete_omp_context. */ |
| DECL_ABSTRACT_ORIGIN (field) = var; |
| if ((mask & 16) == 0 && type == TREE_TYPE (var)) |
| { |
| SET_DECL_ALIGN (field, DECL_ALIGN (var)); |
| DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var); |
| TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var); |
| } |
| else |
| SET_DECL_ALIGN (field, TYPE_ALIGN (type)); |
| |
| if ((mask & 3) == 3) |
| { |
| insert_field_into_struct (ctx->record_type, field); |
| if (ctx->srecord_type) |
| { |
| sfield = build_decl (DECL_SOURCE_LOCATION (var), |
| FIELD_DECL, DECL_NAME (var), type); |
| DECL_ABSTRACT_ORIGIN (sfield) = var; |
| SET_DECL_ALIGN (sfield, DECL_ALIGN (field)); |
| DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field); |
| TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field); |
| insert_field_into_struct (ctx->srecord_type, sfield); |
| } |
| } |
| else |
| { |
| if (ctx->srecord_type == NULL_TREE) |
| { |
| tree t; |
| |
| ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE); |
| ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0); |
| for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t)) |
| { |
| sfield = build_decl (DECL_SOURCE_LOCATION (t), |
| FIELD_DECL, DECL_NAME (t), TREE_TYPE (t)); |
| DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t); |
| insert_field_into_struct (ctx->srecord_type, sfield); |
| splay_tree_insert (ctx->sfield_map, |
| (splay_tree_key) DECL_ABSTRACT_ORIGIN (t), |
| (splay_tree_value) sfield); |
| } |
| } |
| sfield = field; |
| insert_field_into_struct ((mask & 1) ? ctx->record_type |
| : ctx->srecord_type, field); |
| } |
| |
| if (mask & 1) |
| splay_tree_insert (ctx->field_map, key, (splay_tree_value) field); |
| if ((mask & 2) && ctx->sfield_map) |
| splay_tree_insert (ctx->sfield_map, key, (splay_tree_value) sfield); |
| } |
| |
| static tree |
| install_var_local (tree var, omp_context *ctx) |
| { |
| tree new_var = omp_copy_decl_1 (var, ctx); |
| insert_decl_map (&ctx->cb, var, new_var); |
| return new_var; |
| } |
| |
| /* Adjust the replacement for DECL in CTX for the new context. This means |
| copying the DECL_VALUE_EXPR, and fixing up the type. */ |
| |
| static void |
| fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug) |
| { |
| tree new_decl, size; |
| |
| new_decl = lookup_decl (decl, ctx); |
| |
| TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb); |
| |
| if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug) |
| && DECL_HAS_VALUE_EXPR_P (decl)) |
| { |
| tree ve = DECL_VALUE_EXPR (decl); |
| walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL); |
| SET_DECL_VALUE_EXPR (new_decl, ve); |
| DECL_HAS_VALUE_EXPR_P (new_decl) = 1; |
| } |
| |
| if (!TREE_CONSTANT (DECL_SIZE (new_decl))) |
| { |
| size = remap_decl (DECL_SIZE (decl), &ctx->cb); |
| if (size == error_mark_node) |
| size = TYPE_SIZE (TREE_TYPE (new_decl)); |
| DECL_SIZE (new_decl) = size; |
| |
| size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb); |
| if (size == error_mark_node) |
| size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl)); |
| DECL_SIZE_UNIT (new_decl) = size; |
| } |
| } |
| |
| /* The callback for remap_decl. Search all containing contexts for a |
| mapping of the variable; this avoids having to duplicate the splay |
| tree ahead of time. We know a mapping doesn't already exist in the |
| given context. Create new mappings to implement default semantics. */ |
| |
| static tree |
| omp_copy_decl (tree var, copy_body_data *cb) |
| { |
| omp_context *ctx = (omp_context *) cb; |
| tree new_var; |
| |
| if (TREE_CODE (var) == LABEL_DECL) |
| { |
| if (FORCED_LABEL (var) || DECL_NONLOCAL (var)) |
| return var; |
| new_var = create_artificial_label (DECL_SOURCE_LOCATION (var)); |
| DECL_CONTEXT (new_var) = current_function_decl; |
| insert_decl_map (&ctx->cb, var, new_var); |
| return new_var; |
| } |
| |
| while (!is_taskreg_ctx (ctx)) |
| { |
| ctx = ctx->outer; |
| if (ctx == NULL) |
| return var; |
| new_var = maybe_lookup_decl (var, ctx); |
| if (new_var) |
| return new_var; |
| } |
| |
| if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn) |
| return var; |
| |
| return error_mark_node; |
| } |
| |
| /* Create a new context, with OUTER_CTX being the surrounding context. */ |
| |
| static omp_context * |
| new_omp_context (gimple *stmt, omp_context *outer_ctx) |
| { |
| omp_context *ctx = XCNEW (omp_context); |
| |
| splay_tree_insert (all_contexts, (splay_tree_key) stmt, |
| (splay_tree_value) ctx); |
| ctx->stmt = stmt; |
| |
| if (outer_ctx) |
| { |
| ctx->outer = outer_ctx; |
| ctx->cb = outer_ctx->cb; |
| ctx->cb.block = NULL; |
| ctx->depth = outer_ctx->depth + 1; |
| } |
| else |
| { |
| ctx->cb.src_fn = current_function_decl; |
| ctx->cb.dst_fn = current_function_decl; |
| ctx->cb.src_node = cgraph_node::get (current_function_decl); |
| gcc_checking_assert (ctx->cb.src_node); |
| ctx->cb.dst_node = ctx->cb.src_node; |
| ctx->cb.src_cfun = cfun; |
| ctx->cb.copy_decl = omp_copy_decl; |
| ctx->cb.eh_lp_nr = 0; |
| ctx->cb.transform_call_graph_edges = CB_CGE_MOVE; |
| ctx->cb.adjust_array_error_bounds = true; |
| ctx->cb.dont_remap_vla_if_no_change = true; |
| ctx->depth = 1; |
| } |
| |
| ctx->cb.decl_map = new hash_map<tree, tree>; |
| |
| return ctx; |
| } |
| |
| static gimple_seq maybe_catch_exception (gimple_seq); |
| |
| /* Finalize task copyfn. */ |
| |
| static void |
| finalize_task_copyfn (gomp_task *task_stmt) |
| { |
| struct function *child_cfun; |
| tree child_fn; |
| gimple_seq seq = NULL, new_seq; |
| gbind *bind; |
| |
| child_fn = gimple_omp_task_copy_fn (task_stmt); |
| if (child_fn == NULL_TREE) |
| return; |
| |
| child_cfun = DECL_STRUCT_FUNCTION (child_fn); |
| DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties; |
| |
| push_cfun (child_cfun); |
| bind = gimplify_body (child_fn, false); |
| gimple_seq_add_stmt (&seq, bind); |
| new_seq = maybe_catch_exception (seq); |
| if (new_seq != seq) |
| { |
| bind = gimple_build_bind (NULL, new_seq, NULL); |
| seq = NULL; |
| gimple_seq_add_stmt (&seq, bind); |
| } |
| gimple_set_body (child_fn, seq); |
| pop_cfun (); |
| |
| /* Inform the callgraph about the new function. */ |
| cgraph_node *node = cgraph_node::get_create (child_fn); |
| node->parallelized_function = 1; |
| cgraph_node::add_new_function (child_fn, false); |
| } |
| |
| /* Destroy an omp_context data structure. Called through the splay tree |
| value delete callback. */ |
| |
| static void |
| delete_omp_context (splay_tree_value value) |
| { |
| omp_context *ctx = (omp_context *) value; |
| |
| delete ctx->cb.decl_map; |
| |
| if (ctx->field_map) |
| splay_tree_delete (ctx->field_map); |
| if (ctx->sfield_map) |
| splay_tree_delete (ctx->sfield_map); |
| |
| /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before |
| it can produce corrupt debug information. */ |
| if (ctx->record_type) |
| { |
| tree t; |
| for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t)) |
| DECL_ABSTRACT_ORIGIN (t) = NULL; |
| } |
| if (ctx->srecord_type) |
| { |
| tree t; |
| for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t)) |
| DECL_ABSTRACT_ORIGIN (t) = NULL; |
| } |
| |
| if (ctx->task_reduction_map) |
| { |
| ctx->task_reductions.release (); |
| delete ctx->task_reduction_map; |
| } |
| |
| delete ctx->lastprivate_conditional_map; |
| delete ctx->allocate_map; |
| |
| XDELETE (ctx); |
| } |
| |
| /* Fix up RECEIVER_DECL with a type that has been remapped to the child |
| context. */ |
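| |
| /* E.g., for a variable length array "char a[n]" shared in a parallel, |
| the corresponding field type depends on the length variable, which is |
| itself remapped into the child, so the receiver record must be rebuilt |
| field by field (a sketch of why remap_type alone does not suffice). */ |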
| |
| static void |
| fixup_child_record_type (omp_context *ctx) |
| { |
| tree f, type = ctx->record_type; |
| |
| if (!ctx->receiver_decl) |
| return; |
| /* ??? It isn't sufficient to just call remap_type here, because |
| variably_modified_type_p doesn't work the way we expect for |
| record types. Testing each field for whether it needs remapping |
| and creating a new record by hand works, however. */ |
| for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f)) |
| if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn)) |
| break; |
| if (f) |
| { |
| tree name, new_fields = NULL; |
| |
| type = lang_hooks.types.make_type (RECORD_TYPE); |
| name = DECL_NAME (TYPE_NAME (ctx->record_type)); |
| name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl), |
| TYPE_DECL, name, type); |
| TYPE_NAME (type) = name; |
| |
| for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f)) |
| { |
| tree new_f = copy_node (f); |
| DECL_CONTEXT (new_f) = type; |
| TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb); |
| DECL_CHAIN (new_f) = new_fields; |
| walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL); |
| walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, |
| &ctx->cb, NULL); |
| walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r, |
| &ctx->cb, NULL); |
| new_fields = new_f; |
| |
| /* Arrange to be able to look up the receiver field |
| given the sender field. */ |
| splay_tree_insert (ctx->field_map, (splay_tree_key) f, |
| (splay_tree_value) new_f); |
| } |
| TYPE_FIELDS (type) = nreverse (new_fields); |
| layout_type (type); |
| } |
| |
| /* In a target region we never modify any of the pointers in *.omp_data_i, |
| so attempt to help the optimizers. */ |
| if (is_gimple_omp_offloaded (ctx->stmt)) |
| type = build_qualified_type (type, TYPE_QUAL_CONST); |
| |
| TREE_TYPE (ctx->receiver_decl) |
| = build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT); |
| } |
| |
| /* Instantiate decls as necessary in CTX to satisfy the data sharing |
| specified by CLAUSES. */ |
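| |
| /* E.g., for "#pragma omp parallel firstprivate (a) shared (b)" this |
| installs fields for A and B in CTX->RECORD_TYPE and local replacement |
| decls in the child context (a sketch of the default handling; many of |
| the clauses below are more specialized). */ |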
| |
| static void |
| scan_sharing_clauses (tree clauses, omp_context *ctx) |
| { |
| tree c, decl; |
| bool scan_array_reductions = false; |
| |
| for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ALLOCATE |
| && (OMP_CLAUSE_ALLOCATE_ALLOCATOR (c) == NULL_TREE |
| /* omp_default_mem_alloc is 1 */ |
| || !integer_onep (OMP_CLAUSE_ALLOCATE_ALLOCATOR (c)) |
| || OMP_CLAUSE_ALLOCATE_ALIGN (c) != NULL_TREE)) |
| { |
| if (ctx->allocate_map == NULL) |
| ctx->allocate_map = new hash_map<tree, tree>; |
| tree val = integer_zero_node; |
| if (OMP_CLAUSE_ALLOCATE_ALLOCATOR (c)) |
| val = OMP_CLAUSE_ALLOCATE_ALLOCATOR (c); |
| if (OMP_CLAUSE_ALLOCATE_ALIGN (c)) |
| val = build_tree_list (val, OMP_CLAUSE_ALLOCATE_ALIGN (c)); |
| ctx->allocate_map->put (OMP_CLAUSE_DECL (c), val); |
| } |
| |
| for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) |
| { |
| bool by_ref; |
| |
| switch (OMP_CLAUSE_CODE (c)) |
| { |
| case OMP_CLAUSE_PRIVATE: |
| decl = OMP_CLAUSE_DECL (c); |
| if (OMP_CLAUSE_PRIVATE_OUTER_REF (c)) |
| goto do_private; |
| else if (!is_variable_sized (decl)) |
| install_var_local (decl, ctx); |
| break; |
| |
| case OMP_CLAUSE_SHARED: |
| decl = OMP_CLAUSE_DECL (c); |
| if (ctx->allocate_map && ctx->allocate_map->get (decl)) |
| ctx->allocate_map->remove (decl); |
| /* Ignore shared directives in a teams construct nested inside |
| a target construct. */ |
| if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS |
| && !is_host_teams_ctx (ctx)) |
| { |
| /* Global variables don't need to be copied; |
| the receiver side will use them directly. */ |
| tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx); |
| if (is_global_var (odecl)) |
| break; |
| insert_decl_map (&ctx->cb, decl, odecl); |
| break; |
| } |
| gcc_assert (is_taskreg_ctx (ctx)); |
| gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl)) |
| || !is_variable_sized (decl)); |
| /* Global variables don't need to be copied; |
| the receiver side will use them directly. */ |
| if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))) |
| break; |
| if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c)) |
| { |
| use_pointer_for_field (decl, ctx); |
| break; |
| } |
| by_ref = use_pointer_for_field (decl, NULL); |
| if ((! TREE_READONLY (decl) && !OMP_CLAUSE_SHARED_READONLY (c)) |
| || TREE_ADDRESSABLE (decl) |
| || by_ref |
| || omp_privatize_by_reference (decl)) |
| { |
| by_ref = use_pointer_for_field (decl, ctx); |
| install_var_field (decl, by_ref, 3, ctx); |
| install_var_local (decl, ctx); |
| break; |
| } |
| /* We don't need to copy const scalar vars back. */ |
| OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE); |
| goto do_private; |
| |
| case OMP_CLAUSE_REDUCTION: |
| /* Collect 'reduction' clauses on OpenACC compute construct. */ |
| if (is_gimple_omp_oacc (ctx->stmt) |
| && is_gimple_omp_offloaded (ctx->stmt)) |
| { |
| /* No 'reduction' clauses on OpenACC 'kernels'. */ |
| gcc_checking_assert (!is_oacc_kernels (ctx)); |
| /* Likewise, on OpenACC 'kernels' decomposed parts. */ |
| gcc_checking_assert (!is_oacc_kernels_decomposed_part (ctx)); |
| |
| ctx->local_reduction_clauses |
| = tree_cons (NULL, c, ctx->local_reduction_clauses); |
| } |
| /* FALLTHRU */ |
| |
| case OMP_CLAUSE_IN_REDUCTION: |
| decl = OMP_CLAUSE_DECL (c); |
| if (ctx->allocate_map |
| && ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION |
| && (OMP_CLAUSE_REDUCTION_INSCAN (c) |
| || OMP_CLAUSE_REDUCTION_TASK (c))) |
| || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION |
| || is_task_ctx (ctx))) |
| { |
| /* For now. */ |
| if (ctx->allocate_map->get (decl)) |
| ctx->allocate_map->remove (decl); |
| } |
| if (TREE_CODE (decl) == MEM_REF) |
| { |
| tree t = TREE_OPERAND (decl, 0); |
| if (TREE_CODE (t) == POINTER_PLUS_EXPR) |
| t = TREE_OPERAND (t, 0); |
| if (TREE_CODE (t) == INDIRECT_REF |
| || TREE_CODE (t) == ADDR_EXPR) |
| t = TREE_OPERAND (t, 0); |
| if (is_omp_target (ctx->stmt)) |
| { |
| if (is_variable_sized (t)) |
| { |
| gcc_assert (DECL_HAS_VALUE_EXPR_P (t)); |
| t = DECL_VALUE_EXPR (t); |
| gcc_assert (TREE_CODE (t) == INDIRECT_REF); |
| t = TREE_OPERAND (t, 0); |
| gcc_assert (DECL_P (t)); |
| } |
| tree at = t; |
| if (ctx->outer) |
| scan_omp_op (&at, ctx->outer); |
| tree nt = omp_copy_decl_1 (at, ctx->outer); |
| splay_tree_insert (ctx->field_map, |
| (splay_tree_key) &DECL_CONTEXT (t), |
| (splay_tree_value) nt); |
| if (at != t) |
| splay_tree_insert (ctx->field_map, |
| (splay_tree_key) &DECL_CONTEXT (at), |
| (splay_tree_value) nt); |
| break; |
| } |
| install_var_local (t, ctx); |
| if (is_taskreg_ctx (ctx) |
| && (!is_global_var (maybe_lookup_decl_in_outer_ctx (t, ctx)) |
| || (is_task_ctx (ctx) |
| && (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE |
| || (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE |
| && (TREE_CODE (TREE_TYPE (TREE_TYPE (t))) |
| == POINTER_TYPE))))) |
| && !is_variable_sized (t) |
| && (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION |
| || (!OMP_CLAUSE_REDUCTION_TASK (c) |
| && !is_task_ctx (ctx)))) |
| { |
| by_ref = use_pointer_for_field (t, NULL); |
| if (is_task_ctx (ctx) |
| && TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE |
| && TREE_CODE (TREE_TYPE (TREE_TYPE (t))) == POINTER_TYPE) |
| { |
| install_var_field (t, false, 1, ctx); |
| install_var_field (t, by_ref, 2, ctx); |
| } |
| else |
| install_var_field (t, by_ref, 3, ctx); |
| } |
| break; |
| } |
| if (is_omp_target (ctx->stmt)) |
| { |
| tree at = decl; |
| if (ctx->outer) |
| scan_omp_op (&at, ctx->outer); |
| tree nt = omp_copy_decl_1 (at, ctx->outer); |
| splay_tree_insert (ctx->field_map, |
| (splay_tree_key) &DECL_CONTEXT (decl), |
| (splay_tree_value) nt); |
| if (at != decl) |
| splay_tree_insert (ctx->field_map, |
| (splay_tree_key) &DECL_CONTEXT (at), |
| (splay_tree_value) nt); |
| break; |
| } |
| if (is_task_ctx (ctx) |
| || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION |
| && OMP_CLAUSE_REDUCTION_TASK (c) |
| && is_parallel_ctx (ctx))) |
| { |
| /* Global variables don't need to be copied; |
| the receiver side will use them directly. */ |
| if (!is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))) |
| { |
| by_ref = use_pointer_for_field (decl, ctx); |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION) |
| install_var_field (decl, by_ref, 3, ctx); |
| } |
| install_var_local (decl, ctx); |
| break; |
| } |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION |
| && OMP_CLAUSE_REDUCTION_TASK (c)) |
| { |
| install_var_local (decl, ctx); |
| break; |
| } |
| goto do_private; |
| |
| case OMP_CLAUSE_LASTPRIVATE: |
| /* Let the corresponding firstprivate clause create |
| the variable. */ |
| if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) |
| break; |
| /* FALLTHRU */ |
| |
| case OMP_CLAUSE_FIRSTPRIVATE: |
| case OMP_CLAUSE_LINEAR: |
| decl = OMP_CLAUSE_DECL (c); |
| do_private: |
| if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE |
| || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR |
| || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_HAS_DEVICE_ADDR) |
| && is_gimple_omp_offloaded (ctx->stmt)) |
| { |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE) |
| { |
| by_ref = !omp_privatize_by_reference (decl); |
| install_var_field (decl, by_ref, 3, ctx); |
| } |
| else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_HAS_DEVICE_ADDR) |
| { |
| if (TREE_CODE (decl) == INDIRECT_REF) |
| decl = TREE_OPERAND (decl, 0); |
| install_var_field (decl, true, 3, ctx); |
| } |
| else if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) |
| install_var_field (decl, true, 3, ctx); |
| else |
| install_var_field (decl, false, 3, ctx); |
| } |
| if (is_variable_sized (decl)) |
| { |
| if (is_task_ctx (ctx)) |
| { |
| if (ctx->allocate_map |
| && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE) |
| { |
| /* For now. */ |
| if (ctx->allocate_map->get (decl)) |
| ctx->allocate_map->remove (decl); |
| } |
| install_var_field (decl, false, 1, ctx); |
| } |
| break; |
| } |
| else if (is_taskreg_ctx (ctx)) |
| { |
| bool global |
| = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)); |
| by_ref = use_pointer_for_field (decl, NULL); |
| |
| if (is_task_ctx (ctx) |
| && (global || by_ref || omp_privatize_by_reference (decl))) |
| { |
| if (ctx->allocate_map |
| && ctx->allocate_map->get (decl)) |
| install_var_field (decl, by_ref, 32 | 1, ctx); |
| else |
| install_var_field (decl, false, 1, ctx); |
| if (!global) |
| install_var_field (decl, by_ref, 2, ctx); |
| } |
| else if (!global) |
| install_var_field (decl, by_ref, 3, ctx); |
| } |
| install_var_local (decl, ctx); |
| break; |
| |
| case OMP_CLAUSE_USE_DEVICE_PTR: |
| case OMP_CLAUSE_USE_DEVICE_ADDR: |
| decl = OMP_CLAUSE_DECL (c); |
| |
| /* Fortran array descriptors. */ |
| if (lang_hooks.decls.omp_array_data (decl, true)) |
| install_var_field (decl, false, 19, ctx); |
| else if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_ADDR |
| && !omp_privatize_by_reference (decl) |
| && !omp_is_allocatable_or_ptr (decl)) |
| || TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) |
| install_var_field (decl, true, 11, ctx); |
| else |
| install_var_field (decl, false, 11, ctx); |
| if (DECL_SIZE (decl) |
| && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST) |
| { |
| tree decl2 = DECL_VALUE_EXPR (decl); |
| gcc_assert (TREE_CODE (decl2) == INDIRECT_REF); |
| decl2 = TREE_OPERAND (decl2, 0); |
| gcc_assert (DECL_P (decl2)); |
| install_var_local (decl2, ctx); |
| } |
| install_var_local (decl, ctx); |
| break; |
| |
| case OMP_CLAUSE_HAS_DEVICE_ADDR: |
| decl = OMP_CLAUSE_DECL (c); |
| while (TREE_CODE (decl) == INDIRECT_REF |
| || TREE_CODE (decl) == ARRAY_REF) |
| decl = TREE_OPERAND (decl, 0); |
| goto do_private; |
| |
| case OMP_CLAUSE_IS_DEVICE_PTR: |
| decl = OMP_CLAUSE_DECL (c); |
| goto do_private; |
| |
| case OMP_CLAUSE__LOOPTEMP_: |
| case OMP_CLAUSE__REDUCTEMP_: |
| gcc_assert (is_taskreg_ctx (ctx)); |
| decl = OMP_CLAUSE_DECL (c); |
| install_var_field (decl, false, 3, ctx); |
| install_var_local (decl, ctx); |
| break; |
| |
| case OMP_CLAUSE_COPYPRIVATE: |
| case OMP_CLAUSE_COPYIN: |
| decl = OMP_CLAUSE_DECL (c); |
| by_ref = use_pointer_for_field (decl, NULL); |
| install_var_field (decl, by_ref, 3, ctx); |
| break; |
| |
| case OMP_CLAUSE_FINAL: |
| case OMP_CLAUSE_IF: |
| case OMP_CLAUSE_NUM_THREADS: |
| case OMP_CLAUSE_NUM_TEAMS: |
| case OMP_CLAUSE_THREAD_LIMIT: |
| case OMP_CLAUSE_DEVICE: |
| case OMP_CLAUSE_SCHEDULE: |
| case OMP_CLAUSE_DIST_SCHEDULE: |
| case OMP_CLAUSE_DEPEND: |
| case OMP_CLAUSE_PRIORITY: |
| case OMP_CLAUSE_GRAINSIZE: |
| case OMP_CLAUSE_NUM_TASKS: |
| case OMP_CLAUSE_NUM_GANGS: |
| case OMP_CLAUSE_NUM_WORKERS: |
| case OMP_CLAUSE_VECTOR_LENGTH: |
| case OMP_CLAUSE_DETACH: |
| case OMP_CLAUSE_FILTER: |
| if (ctx->outer) |
| scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer); |
| break; |
| |
| case OMP_CLAUSE_TO: |
| case OMP_CLAUSE_FROM: |
| case OMP_CLAUSE_MAP: |
| if (ctx->outer) |
| scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer); |
| decl = OMP_CLAUSE_DECL (c); |
| /* If requested, make 'decl' addressable. */ |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP |
| && OMP_CLAUSE_MAP_DECL_MAKE_ADDRESSABLE (c)) |
| { |
| gcc_checking_assert (DECL_P (decl)); |
| |
| bool decl_addressable = TREE_ADDRESSABLE (decl); |
| if (!decl_addressable) |
| { |
| if (!make_addressable_vars) |
| make_addressable_vars = BITMAP_ALLOC (NULL); |
| bitmap_set_bit (make_addressable_vars, DECL_UID (decl)); |
| TREE_ADDRESSABLE (decl) = 1; |
| } |
| |
| if (dump_enabled_p ()) |
| { |
| location_t loc = OMP_CLAUSE_LOCATION (c); |
| const dump_user_location_t d_u_loc |
| = dump_user_location_t::from_location_t (loc); |
| /* PR100695 "Format decoder, quoting in 'dump_printf' etc." */ |
| #if __GNUC__ >= 10 |
| # pragma GCC diagnostic push |
| # pragma GCC diagnostic ignored "-Wformat" |
| #endif |
| if (!decl_addressable) |
| dump_printf_loc (MSG_NOTE, d_u_loc, |
| "variable %<%T%>" |
| " made addressable\n", |
| decl); |
| else |
| dump_printf_loc (MSG_NOTE, d_u_loc, |
| "variable %<%T%>" |
| " already made addressable\n", |
| decl); |
| #if __GNUC__ >= 10 |
| # pragma GCC diagnostic pop |
| #endif |
| } |
| |
| /* Done. */ |
| OMP_CLAUSE_MAP_DECL_MAKE_ADDRESSABLE (c) = 0; |
| } |
| /* Global variables with the "omp declare target" attribute don't need |
| to be copied; the receiver side will use them directly. However, |
| global variables with the "omp declare target link" attribute need to |
| be copied, as do maps with the ALWAYS modifier. */ |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP |
| && DECL_P (decl) |
| && ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER |
| && (OMP_CLAUSE_MAP_KIND (c) |
| != GOMP_MAP_FIRSTPRIVATE_REFERENCE) |
| && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ATTACH |
| && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_DETACH) |
| || TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) |
| && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_TO |
| && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_FROM |
| && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_TOFROM |
| && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_TO_PSET |
| && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)) |
| && varpool_node::get_create (decl)->offloadable |
| && !lookup_attribute ("omp declare target link", |
| DECL_ATTRIBUTES (decl))) |
| break; |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP |
| && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER) |
| { |
| /* Ignore GOMP_MAP_POINTER kind for arrays in regions that are |
| not offloaded; there is nothing to map for those. */ |
| if (!is_gimple_omp_offloaded (ctx->stmt) |
| && !POINTER_TYPE_P (TREE_TYPE (decl)) |
| && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)) |
| break; |
| } |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP |
| && DECL_P (decl) |
| && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH |
| || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH) |
| && is_omp_target (ctx->stmt)) |
| { |
| /* If this is an offloaded region, an attach operation should |
| only exist when the pointer variable is mapped in a prior |
| clause. */ |
| if (is_gimple_omp_offloaded (ctx->stmt)) |
| gcc_assert |
| (maybe_lookup_decl (decl, ctx) |
| || (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)) |
| && lookup_attribute ("omp declare target", |
| DECL_ATTRIBUTES (decl)))); |
| |
| /* By itself, attach/detach is generated as part of pointer |
| variable mapping and should not create new variables in the |
| offloaded region; however, sender refs for it must be created |
| so that its address can be passed to the runtime. */ |
| tree field |
| = build_decl (OMP_CLAUSE_LOCATION (c), |
| FIELD_DECL, NULL_TREE, ptr_type_node); |
| SET_DECL_ALIGN (field, TYPE_ALIGN (ptr_type_node)); |
| insert_field_into_struct (ctx->record_type, field); |
| /* To not clash with a map of the pointer variable itself, |
| attach/detach maps have their field looked up by the *clause* |
| tree expression, not the decl. */ |
| gcc_assert (!splay_tree_lookup (ctx->field_map, |
| (splay_tree_key) c)); |
| splay_tree_insert (ctx->field_map, (splay_tree_key) c, |
| (splay_tree_value) field); |
| break; |
| } |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP |
| && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER |
| || (OMP_CLAUSE_MAP_KIND (c) |
| == GOMP_MAP_FIRSTPRIVATE_REFERENCE))) |
| { |
| if (TREE_CODE (decl) == COMPONENT_REF |
| || (TREE_CODE (decl) == INDIRECT_REF |
| && TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF |
| && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0))) |
| == REFERENCE_TYPE))) |
| break; |
| if (DECL_SIZE (decl) |
| && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST) |
| { |
| tree decl2 = DECL_VALUE_EXPR (decl); |
| gcc_assert (TREE_CODE (decl2) == INDIRECT_REF); |
| decl2 = TREE_OPERAND (decl2, 0); |
| gcc_assert (DECL_P (decl2)); |
| install_var_local (decl2, ctx); |
| } |
| install_var_local (decl, ctx); |
| break; |
| } |
| if (DECL_P (decl)) |
| { |
| if (DECL_SIZE (decl) |
| && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST) |
| { |
| tree decl2 = DECL_VALUE_EXPR (decl); |
| gcc_assert (TREE_CODE (decl2) == INDIRECT_REF); |
| decl2 = TREE_OPERAND (decl2, 0); |
| gcc_assert (DECL_P (decl2)); |
| install_var_field (decl2, true, 3, ctx); |
| install_var_local (decl2, ctx); |
| install_var_local (decl, ctx); |
| } |
| else |
| { |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP |
| && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER |
| && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) |
| && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) |
| install_var_field (decl, true, 7, ctx); |
| else |
| install_var_field (decl, true, 3, ctx); |
| if (is_gimple_omp_offloaded (ctx->stmt) |
| && !(is_gimple_omp_oacc (ctx->stmt) |
| && OMP_CLAUSE_MAP_IN_REDUCTION (c))) |
| install_var_local (decl, ctx); |
| } |
| } |
| else |
| { |
| tree base = get_base_address (decl); |
| tree nc = OMP_CLAUSE_CHAIN (c); |
| if (DECL_P (base) |
| && nc != NULL_TREE |
| && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP |
| && OMP_CLAUSE_DECL (nc) == base |
| && OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER |
| && integer_zerop (OMP_CLAUSE_SIZE (nc))) |
| { |
| OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1; |
| OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1; |
| } |
| else |
| { |
| if (ctx->outer) |
| { |
| scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer); |
| decl = OMP_CLAUSE_DECL (c); |
| } |
| gcc_assert (!splay_tree_lookup (ctx->field_map, |
| (splay_tree_key) decl)); |
| tree field |
| = build_decl (OMP_CLAUSE_LOCATION (c), |
| FIELD_DECL, NULL_TREE, ptr_type_node); |
| SET_DECL_ALIGN (field, TYPE_ALIGN (ptr_type_node)); |
| insert_field_into_struct (ctx->record_type, field); |
| splay_tree_insert (ctx->field_map, (splay_tree_key) decl, |
| (splay_tree_value) field); |
| } |
| } |
| break; |
| |
| case OMP_CLAUSE_ORDER: |
| ctx->order_concurrent = true; |
| break; |
| |
| case OMP_CLAUSE_BIND: |
| ctx->loop_p = true; |
| break; |
| |
| case OMP_CLAUSE_NOWAIT: |
| case OMP_CLAUSE_ORDERED: |
| case OMP_CLAUSE_COLLAPSE: |
| case OMP_CLAUSE_UNTIED: |
| case OMP_CLAUSE_MERGEABLE: |
| case OMP_CLAUSE_PROC_BIND: |
| case OMP_CLAUSE_SAFELEN: |
| case OMP_CLAUSE_SIMDLEN: |
| case OMP_CLAUSE_THREADS: |
| case OMP_CLAUSE_SIMD: |
| case OMP_CLAUSE_NOGROUP: |
| case OMP_CLAUSE_DEFAULTMAP: |
| case OMP_CLAUSE_ASYNC: |
| case OMP_CLAUSE_WAIT: |
| case OMP_CLAUSE_GANG: |
| case OMP_CLAUSE_WORKER: |
| case OMP_CLAUSE_VECTOR: |
| case OMP_CLAUSE_INDEPENDENT: |
| case OMP_CLAUSE_AUTO: |
| case OMP_CLAUSE_SEQ: |
| case OMP_CLAUSE_TILE: |
| case OMP_CLAUSE__SIMT_: |
| case OMP_CLAUSE_DEFAULT: |
| case OMP_CLAUSE_NONTEMPORAL: |
| case OMP_CLAUSE_IF_PRESENT: |
| case OMP_CLAUSE_FINALIZE: |
| case OMP_CLAUSE_TASK_REDUCTION: |
| case OMP_CLAUSE_ALLOCATE: |
| break; |
| |
| case OMP_CLAUSE_ALIGNED: |
| decl = OMP_CLAUSE_DECL (c); |
| if (is_global_var (decl) |
| && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) |
| install_var_local (decl, ctx); |
| break; |
| |
| case OMP_CLAUSE__CONDTEMP_: |
| decl = OMP_CLAUSE_DECL (c); |
| if (is_parallel_ctx (ctx)) |
| { |
| install_var_field (decl, false, 3, ctx); |
| install_var_local (decl, ctx); |
| } |
| else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR |
| && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD |
| && !OMP_CLAUSE__CONDTEMP__ITER (c)) |
| install_var_local (decl, ctx); |
| break; |
| |
| case OMP_CLAUSE__CACHE_: |
| case OMP_CLAUSE_NOHOST: |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) |
| { |
| switch (OMP_CLAUSE_CODE (c)) |
| { |
| case OMP_CLAUSE_LASTPRIVATE: |
| /* Let the corresponding firstprivate clause create |
| the variable. */ |
| if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c)) |
| scan_array_reductions = true; |
| if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) |
| break; |
| /* FALLTHRU */ |
| |
| case OMP_CLAUSE_FIRSTPRIVATE: |
| case OMP_CLAUSE_PRIVATE: |
| case OMP_CLAUSE_LINEAR: |
| case OMP_CLAUSE_HAS_DEVICE_ADDR: |
| case OMP_CLAUSE_IS_DEVICE_PTR: |
| decl = OMP_CLAUSE_DECL (c); |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_HAS_DEVICE_ADDR) |
| { |
| while (TREE_CODE (decl) == INDIRECT_REF |
| || TREE_CODE (decl) == ARRAY_REF) |
| decl = TREE_OPERAND (decl, 0); |
| } |
| |
| if (is_variable_sized (decl)) |
| { |
| if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE |
| || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR |
| || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_HAS_DEVICE_ADDR) |
| && is_gimple_omp_offloaded (ctx->stmt)) |
| { |
| tree decl2 = DECL_VALUE_EXPR (decl); |
| gcc_assert (TREE_CODE (decl2) == INDIRECT_REF); |
| decl2 = TREE_OPERAND (decl2, 0); |
| gcc_assert (DECL_P (decl2)); |
| install_var_local (decl2, ctx); |
| fixup_remapped_decl (decl2, ctx, false); |
| } |
| install_var_local (decl, ctx); |
| } |
| fixup_remapped_decl (decl, ctx, |
| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE |
| && OMP_CLAUSE_PRIVATE_DEBUG (c)); |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR |
| && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c)) |
| scan_array_reductions = true; |
| break; |
| |
| case OMP_CLAUSE_REDUCTION: |
| case OMP_CLAUSE_IN_REDUCTION: |
| decl = OMP_CLAUSE_DECL (c); |
| if (TREE_CODE (decl) != MEM_REF && !is_omp_target (ctx->stmt)) |
| { |
| if (is_variable_sized (decl)) |
| install_var_local (decl, ctx); |
| fixup_remapped_decl (decl, ctx, false); |
| } |
| if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) |
| scan_array_reductions = true; |
| break; |
| |
| case OMP_CLAUSE_TASK_REDUCTION: |
| if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) |
| scan_array_reductions = true; |
| break; |
| |
| case OMP_CLAUSE_SHARED: |
| /* Ignore shared clauses in a teams construct nested inside a |
| target construct. */ |
| if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS |
| && !is_host_teams_ctx (ctx)) |
| break; |
| decl = OMP_CLAUSE_DECL (c); |
| if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))) |
| break; |
| if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c)) |
| { |
| if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, |
| ctx->outer))) |
| break; |
| bool by_ref = use_pointer_for_field (decl, ctx); |
| install_var_field (decl, by_ref, 11, ctx); |
| break; |
| } |
| fixup_remapped_decl (decl, ctx, false); |
| break; |
| |
| case OMP_CLAUSE_MAP: |
| if (!is_gimple_omp_offloaded (ctx->stmt)) |
| break; |
| decl = OMP_CLAUSE_DECL (c); |
| if (DECL_P (decl) |
| && ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER |
| && (OMP_CLAUSE_MAP_KIND (c) |
| != GOMP_MAP_FIRSTPRIVATE_REFERENCE)) |
| || TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) |
| && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)) |
| && varpool_node::get_create (decl)->offloadable) |
| break; |
| if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH |
| || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH) |
| && is_omp_target (ctx->stmt) |
| && !is_gimple_omp_offloaded (ctx->stmt)) |
| break; |
| if (DECL_P (decl)) |
| { |
| if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER |
| || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER) |
| && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE |
| && !COMPLETE_TYPE_P (TREE_TYPE (decl))) |
| { |
| tree new_decl = lookup_decl (decl, ctx); |
| TREE_TYPE (new_decl) |
| = remap_type (TREE_TYPE (decl), &ctx->cb); |
| } |
| else if (DECL_SIZE (decl) |
| && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST) |
| { |
| tree decl2 = DECL_VALUE_EXPR (decl); |
| gcc_assert (TREE_CODE (decl2) == INDIRECT_REF); |
| decl2 = TREE_OPERAND (decl2, 0); |
| gcc_assert (DECL_P (decl2)); |
| fixup_remapped_decl (decl2, ctx, false); |
| fixup_remapped_decl (decl, ctx, true); |
| } |
| else |
| fixup_remapped_decl (decl, ctx, false); |
| } |
| break; |
| |
| case OMP_CLAUSE_COPYPRIVATE: |
| case OMP_CLAUSE_COPYIN: |
| case OMP_CLAUSE_DEFAULT: |
| case OMP_CLAUSE_IF: |
| case OMP_CLAUSE_NUM_THREADS: |
| case OMP_CLAUSE_NUM_TEAMS: |
| case OMP_CLAUSE_THREAD_LIMIT: |
| case OMP_CLAUSE_DEVICE: |
| case OMP_CLAUSE_SCHEDULE: |
| case OMP_CLAUSE_DIST_SCHEDULE: |
| case OMP_CLAUSE_NOWAIT: |
| case OMP_CLAUSE_ORDERED: |
| case OMP_CLAUSE_COLLAPSE: |
| case OMP_CLAUSE_UNTIED: |
| case OMP_CLAUSE_FINAL: |
| case OMP_CLAUSE_MERGEABLE: |
| case OMP_CLAUSE_PROC_BIND: |
| case OMP_CLAUSE_SAFELEN: |
| case OMP_CLAUSE_SIMDLEN: |
| case OMP_CLAUSE_ALIGNED: |
| case OMP_CLAUSE_DEPEND: |
| case OMP_CLAUSE_DETACH: |
| case OMP_CLAUSE_ALLOCATE: |
| case OMP_CLAUSE__LOOPTEMP_: |
| case OMP_CLAUSE__REDUCTEMP_: |
| case OMP_CLAUSE_TO: |
| case OMP_CLAUSE_FROM: |
| case OMP_CLAUSE_PRIORITY: |
| case OMP_CLAUSE_GRAINSIZE: |
| case OMP_CLAUSE_NUM_TASKS: |
| case OMP_CLAUSE_THREADS: |
| case OMP_CLAUSE_SIMD: |
| case OMP_CLAUSE_NOGROUP: |
| case OMP_CLAUSE_DEFAULTMAP: |
| case OMP_CLAUSE_ORDER: |
| case OMP_CLAUSE_BIND: |
| case OMP_CLAUSE_USE_DEVICE_PTR: |
| case OMP_CLAUSE_USE_DEVICE_ADDR: |
| case OMP_CLAUSE_NONTEMPORAL: |
| case OMP_CLAUSE_ASYNC: |
| case OMP_CLAUSE_WAIT: |
| case OMP_CLAUSE_NUM_GANGS: |
| case OMP_CLAUSE_NUM_WORKERS: |
| case OMP_CLAUSE_VECTOR_LENGTH: |
| case OMP_CLAUSE_GANG: |
| case OMP_CLAUSE_WORKER: |
| case OMP_CLAUSE_VECTOR: |
| case OMP_CLAUSE_INDEPENDENT: |
| case OMP_CLAUSE_AUTO: |
| case OMP_CLAUSE_SEQ: |
| case OMP_CLAUSE_TILE: |
| case OMP_CLAUSE__SIMT_: |
| case OMP_CLAUSE_IF_PRESENT: |
| case OMP_CLAUSE_FINALIZE: |
| case OMP_CLAUSE_FILTER: |
| case OMP_CLAUSE__CONDTEMP_: |
| break; |
| |
| case OMP_CLAUSE__CACHE_: |
| case OMP_CLAUSE_NOHOST: |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| gcc_checking_assert (!scan_array_reductions |
| || !is_gimple_omp_oacc (ctx->stmt)); |
| if (scan_array_reductions) |
| { |
| for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) |
| if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION |
| || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION |
| || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION) |
| && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) |
| { |
| omp_context *rctx = ctx; |
| if (is_omp_target (ctx->stmt)) |
| rctx = ctx->outer; |
| scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), rctx); |
| scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), rctx); |
| } |
| else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE |
| && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c)) |
| scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx); |
| else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR |
| && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c)) |
| scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx); |
| } |
| } |
| |
| /* Create a new name for the omp child function. Returns an identifier. */ |
| |
| static tree |
| create_omp_child_function_name (bool task_copy) |
| { |
| return clone_function_name_numbered (current_function_decl, |
| task_copy ? "_omp_cpyfn" : "_omp_fn"); |
| } |
| |
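| /* For illustration (not part of the pass logic): for a host function FOO, |
| clone_function_name_numbered yields identifiers such as FOO._omp_fn.0, |
| FOO._omp_fn.1, ... and FOO._omp_cpyfn.N for task copy functions. */ |
| |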
| /* Return true if CTX may belong to offloaded code: either if the current |
| function is offloaded, or if any enclosing context corresponds to a |
| target region. */ |
| |
| static bool |
| omp_maybe_offloaded_ctx (omp_context *ctx) |
| { |
| if (cgraph_node::get (current_function_decl)->offloadable) |
| return true; |
| for (; ctx; ctx = ctx->outer) |
| if (is_gimple_omp_offloaded (ctx->stmt)) |
| return true; |
| return false; |
| } |
| |
| /* Build a decl for the omp child function. It will not contain a body |
| yet; just the bare decl is created. */ |
| |
| static void |
| create_omp_child_function (omp_context *ctx, bool task_copy) |
| { |
| tree decl, type, name, t; |
| |
| name = create_omp_child_function_name (task_copy); |
| if (task_copy) |
| type = build_function_type_list (void_type_node, ptr_type_node, |
| ptr_type_node, NULL_TREE); |
| else |
| type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE); |
| |
| decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type); |
| |
| gcc_checking_assert (!is_gimple_omp_oacc (ctx->stmt) |
| || !task_copy); |
| if (!task_copy) |
| ctx->cb.dst_fn = decl; |
| else |
| gimple_omp_task_set_copy_fn (ctx->stmt, decl); |
| |
| TREE_STATIC (decl) = 1; |
| TREE_USED (decl) = 1; |
| DECL_ARTIFICIAL (decl) = 1; |
| DECL_IGNORED_P (decl) = 0; |
| TREE_PUBLIC (decl) = 0; |
| DECL_UNINLINABLE (decl) = 1; |
| DECL_EXTERNAL (decl) = 0; |
| DECL_CONTEXT (decl) = NULL_TREE; |
| DECL_INITIAL (decl) = make_node (BLOCK); |
| BLOCK_SUPERCONTEXT (DECL_INITIAL (decl)) = decl; |
| DECL_ATTRIBUTES (decl) = DECL_ATTRIBUTES (current_function_decl); |
| /* Remove omp declare simd attribute from the new attributes. */ |
| if (tree a = lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (decl))) |
| { |
| while (tree a2 = lookup_attribute ("omp declare simd", TREE_CHAIN (a))) |
| a = a2; |
| a = TREE_CHAIN (a); |
| for (tree *p = &DECL_ATTRIBUTES (decl); *p != a;) |
| if (is_attribute_p ("omp declare simd", get_attribute_name (*p))) |
| *p = TREE_CHAIN (*p); |
| else |
| { |
| tree chain = TREE_CHAIN (*p); |
| *p = copy_node (*p); |
| p = &TREE_CHAIN (*p); |
| *p = chain; |
| } |
| } |
| DECL_FUNCTION_SPECIFIC_OPTIMIZATION (decl) |
| = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (current_function_decl); |
| DECL_FUNCTION_SPECIFIC_TARGET (decl) |
| = DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl); |
| DECL_FUNCTION_VERSIONED (decl) |
| = DECL_FUNCTION_VERSIONED (current_function_decl); |
| |
| if (omp_maybe_offloaded_ctx (ctx)) |
| { |
| cgraph_node::get_create (decl)->offloadable = 1; |
| if (ENABLE_OFFLOADING) |
| g->have_offload = true; |
| } |
| |
| if (cgraph_node::get_create (decl)->offloadable) |
| { |
| const char *target_attr = (is_gimple_omp_offloaded (ctx->stmt) |
| ? "omp target entrypoint" |
| : "omp declare target"); |
| if (lookup_attribute ("omp declare target", |
| DECL_ATTRIBUTES (current_function_decl))) |
| { |
| if (is_gimple_omp_offloaded (ctx->stmt)) |
| DECL_ATTRIBUTES (decl) |
| = remove_attribute ("omp declare target", |
| copy_list (DECL_ATTRIBUTES (decl))); |
| else |
| target_attr = NULL; |
| } |
| if (target_attr) |
| DECL_ATTRIBUTES (decl) |
| = tree_cons (get_identifier (target_attr), |
| NULL_TREE, DECL_ATTRIBUTES (decl)); |
| } |
| |
| t = build_decl (DECL_SOURCE_LOCATION (decl), |
| RESULT_DECL, NULL_TREE, void_type_node); |
| DECL_ARTIFICIAL (t) = 1; |
| DECL_IGNORED_P (t) = 1; |
| DECL_CONTEXT (t) = decl; |
| DECL_RESULT (decl) = t; |
| |
| tree data_name = get_identifier (".omp_data_i"); |
| t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name, |
| ptr_type_node); |
| DECL_ARTIFICIAL (t) = 1; |
| DECL_NAMELESS (t) = 1; |
| DECL_ARG_TYPE (t) = ptr_type_node; |
| DECL_CONTEXT (t) = current_function_decl; |
| TREE_USED (t) = 1; |
| TREE_READONLY (t) = 1; |
| DECL_ARGUMENTS (decl) = t; |
| if (!task_copy) |
| ctx->receiver_decl = t; |
| else |
| { |
| t = build_decl (DECL_SOURCE_LOCATION (decl), |
| PARM_DECL, get_identifier (".omp_data_o"), |
| ptr_type_node); |
| DECL_ARTIFICIAL (t) = 1; |
| DECL_NAMELESS (t) = 1; |
| DECL_ARG_TYPE (t) = ptr_type_node; |
| DECL_CONTEXT (t) = current_function_decl; |
| TREE_USED (t) = 1; |
| TREE_ADDRESSABLE (t) = 1; |
| DECL_CHAIN (t) = DECL_ARGUMENTS (decl); |
| DECL_ARGUMENTS (decl) = t; |
| } |
| |
| /* Allocate memory for the function structure. The call to |
| allocate_struct_function clobbers CFUN, so we need to restore |
| it afterward. */ |
| push_struct_function (decl); |
| cfun->function_end_locus = gimple_location (ctx->stmt); |
| init_tree_ssa (cfun); |
| pop_cfun (); |
| } |
| |
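| /* Sketch of the result (assuming the naming scheme sketched above): the |
| decl built by create_omp_child_function corresponds to |
| void FOO._omp_fn.N (void *.omp_data_i); |
| or, for a task copy function, |
| void FOO._omp_cpyfn.N (void *.omp_data_o, void *.omp_data_i); |
| with no body attached yet. */ |
| |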
| /* Callback for walk_gimple_seq. Check whether the combined parallel |
| contains a GIMPLE_OMP_FOR with gimple_omp_for_combined_into_p set. */ |
| |
| tree |
| omp_find_combined_for (gimple_stmt_iterator *gsi_p, |
| bool *handled_ops_p, |
| struct walk_stmt_info *wi) |
| { |
| gimple *stmt = gsi_stmt (*gsi_p); |
| |
| *handled_ops_p = true; |
| switch (gimple_code (stmt)) |
| { |
| WALK_SUBSTMTS; |
| |
| case GIMPLE_OMP_FOR: |
| if (gimple_omp_for_combined_into_p (stmt) |
| && gimple_omp_for_kind (stmt) |
| == *(const enum gf_mask *) (wi->info)) |
| { |
| wi->info = stmt; |
| return integer_zero_node; |
| } |
| break; |
| default: |
| break; |
| } |
| return NULL; |
| } |
| |
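| /* Usage note (illustrative): callers seed WI->info with a pointer to the |
| gf_mask kind they are looking for; on a match the callback overwrites |
| WI->info with the statement itself, which is how callers such as |
| add_taskreg_looptemp_clauses below detect success. */ |
| |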
| /* Add _LOOPTEMP_/_REDUCTEMP_ clauses on OpenMP parallel or task. */ |
| |
| static void |
| add_taskreg_looptemp_clauses (enum gf_mask msk, gimple *stmt, |
| omp_context *outer_ctx) |
| { |
| struct walk_stmt_info wi; |
| |
| memset (&wi, 0, sizeof (wi)); |
| wi.val_only = true; |
| wi.info = (void *) &msk; |
| walk_gimple_seq (gimple_omp_body (stmt), omp_find_combined_for, NULL, &wi); |
| if (wi.info != (void *) &msk) |
| { |
| gomp_for *for_stmt = as_a <gomp_for *> ((gimple *) wi.info); |
| struct omp_for_data fd; |
| omp_extract_for_data (for_stmt, &fd, NULL); |
| /* We need two temporaries with fd.loop.v type (istart/iend) |
| and then (fd.collapse - 1) temporaries with the same |
| type for count2 ... countN-1 vars if not constant. */ |
| size_t count = 2, i; |
| tree type = fd.iter_type; |
| if (fd.collapse > 1 |
| && TREE_CODE (fd.loop.n2) != INTEGER_CST) |
| { |
| count += fd.collapse - 1; |
| /* If there are lastprivate clauses on the inner |
| GIMPLE_OMP_FOR, add one more temporary for the total number |
| of iterations (product of count1 ... countN-1). */ |
| if (omp_find_clause (gimple_omp_for_clauses (for_stmt), |
| OMP_CLAUSE_LASTPRIVATE) |
| || (msk == GF_OMP_FOR_KIND_FOR |
| && omp_find_clause (gimple_omp_parallel_clauses (stmt), |
| OMP_CLAUSE_LASTPRIVATE))) |
| { |
| tree temp = create_tmp_var (type); |
| tree c = build_omp_clause (UNKNOWN_LOCATION, |
| OMP_CLAUSE__LOOPTEMP_); |
| insert_decl_map (&outer_ctx->cb, temp, temp); |
| OMP_CLAUSE_DECL (c) = temp; |
| OMP_CLAUSE_CHAIN (c) = gimple_omp_taskreg_clauses (stmt); |
| gimple_omp_taskreg_set_clauses (stmt, c); |
| } |
| if (fd.non_rect |
| && fd.last_nonrect == fd.first_nonrect + 1) |
| if (tree v = gimple_omp_for_index (for_stmt, fd.last_nonrect)) |
| if (!TYPE_UNSIGNED (TREE_TYPE (v))) |
| { |
| v = gimple_omp_for_index (for_stmt, fd.first_nonrect); |
| tree type2 = TREE_TYPE (v); |
| count++; |
| for (i = 0; i < 3; i++) |
| { |
| tree temp = create_tmp_var (type2); |
| tree c = build_omp_clause (UNKNOWN_LOCATION, |
| OMP_CLAUSE__LOOPTEMP_); |
| insert_decl_map (&outer_ctx->cb, temp, temp); |
| OMP_CLAUSE_DECL (c) = temp; |
| OMP_CLAUSE_CHAIN (c) = gimple_omp_taskreg_clauses (stmt); |
| gimple_omp_taskreg_set_clauses (stmt, c); |
| } |
| } |
| } |
| for (i = 0; i < count; i++) |
| { |
| tree temp = create_tmp_var (type); |
| tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_); |
| insert_decl_map (&outer_ctx->cb, temp, temp); |
| OMP_CLAUSE_DECL (c) = temp; |
| OMP_CLAUSE_CHAIN (c) = gimple_omp_taskreg_clauses (stmt); |
| gimple_omp_taskreg_set_clauses (stmt, c); |
| } |
| } |
| if (msk == GF_OMP_FOR_KIND_TASKLOOP |
| && omp_find_clause (gimple_omp_task_clauses (stmt), |
| OMP_CLAUSE_REDUCTION)) |
| { |
| tree type = build_pointer_type (pointer_sized_int_node); |
| tree temp = create_tmp_var (type); |
| tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_); |
| insert_decl_map (&outer_ctx->cb, temp, temp); |
| OMP_CLAUSE_DECL (c) = temp; |
| OMP_CLAUSE_CHAIN (c) = gimple_omp_task_clauses (stmt); |
| gimple_omp_task_set_clauses (stmt, c); |
| } |
| } |
| |
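| /* For example (illustrative only), for a combined construct such as |
| #pragma omp parallel for |
| the loop expansion needs istart/iend temporaries (and, for collapsed |
| non-constant loop nests, count temporaries) shared between the parallel |
| and the loop it was combined with; the _looptemp_ clauses added here |
| make those temporaries visible on the enclosing parallel/task. */ |
| |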
| /* Scan an OpenMP parallel directive. */ |
| |
| static void |
| scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx) |
| { |
| omp_context *ctx; |
| tree name; |
| gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi)); |
| |
| /* Ignore parallel directives with empty bodies, unless there |
| are copyin clauses. */ |
| if (optimize > 0 |
| && empty_body_p (gimple_omp_body (stmt)) |
| && omp_find_clause (gimple_omp_parallel_clauses (stmt), |
| OMP_CLAUSE_COPYIN) == NULL) |
| { |
| gsi_replace (gsi, gimple_build_nop (), false); |
| return; |
| } |
| |
| if (gimple_omp_parallel_combined_p (stmt)) |
| add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_FOR, stmt, outer_ctx); |
| for (tree c = omp_find_clause (gimple_omp_parallel_clauses (stmt), |
| OMP_CLAUSE_REDUCTION); |
| c; c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE_REDUCTION)) |
| if (OMP_CLAUSE_REDUCTION_TASK (c)) |
| { |
| tree type = build_pointer_type (pointer_sized_int_node); |
| tree temp = create_tmp_var (type); |
| tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_); |
| if (outer_ctx) |
| insert_decl_map (&outer_ctx->cb, temp, temp); |
| OMP_CLAUSE_DECL (c) = temp; |
| OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt); |
| gimple_omp_parallel_set_clauses (stmt, c); |
| break; |
| } |
| else if (OMP_CLAUSE_CHAIN (c) == NULL_TREE) |
| break; |
| |
| ctx = new_omp_context (stmt, outer_ctx); |
| taskreg_contexts.safe_push (ctx); |
| if (taskreg_nesting_level > 1) |
| ctx->is_nested = true; |
| ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0); |
| ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE); |
| name = create_tmp_var_name (".omp_data_s"); |
| name = build_decl (gimple_location (stmt), |
| TYPE_DECL, name, ctx->record_type); |
| DECL_ARTIFICIAL (name) = 1; |
| DECL_NAMELESS (name) = 1; |
| TYPE_NAME (ctx->record_type) = name; |
| TYPE_ARTIFICIAL (ctx->record_type) = 1; |
| create_omp_child_function (ctx, false); |
| gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn); |
| |
| scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx); |
| scan_omp (gimple_omp_body_ptr (stmt), ctx); |
| |
| if (TYPE_FIELDS (ctx->record_type) == NULL) |
| ctx->record_type = ctx->receiver_decl = NULL; |
| } |
| |
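| /* Illustrative sketch: for |
| #pragma omp parallel shared(a) |
| { a++; } |
| scanning creates a record type .omp_data_s with a field for A (a pointer |
| field if use_pointer_for_field so decides) and the child function |
| FOO._omp_fn.N, which will receive the record's address as .omp_data_i. */ |
| |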
| /* Scan an OpenMP task directive. */ |
| |
| static void |
| scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx) |
| { |
| omp_context *ctx; |
| tree name, t; |
| gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi)); |
| |
| /* Ignore task directives with empty bodies, unless they have a depend |
| clause. */ |
| if (optimize > 0 |
| && gimple_omp_body (stmt) |
| && empty_body_p (gimple_omp_body (stmt)) |
| && !omp_find_clause (gimple_omp_task_clauses (stmt), OMP_CLAUSE_DEPEND)) |
| { |
| gsi_replace (gsi, gimple_build_nop (), false); |
| return; |
| } |
| |
| if (gimple_omp_task_taskloop_p (stmt)) |
| add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_TASKLOOP, stmt, outer_ctx); |
| |
| ctx = new_omp_context (stmt, outer_ctx); |
| |
| if (gimple_omp_task_taskwait_p (stmt)) |
| { |
| scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx); |
| return; |
| } |
| |
| taskreg_contexts.safe_push (ctx); |
| if (taskreg_nesting_level > 1) |
| ctx->is_nested = true; |
| ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0); |
| ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE); |
| name = create_tmp_var_name (".omp_data_s"); |
| name = build_decl (gimple_location (stmt), |
| TYPE_DECL, name, ctx->record_type); |
| DECL_ARTIFICIAL (name) = 1; |
| DECL_NAMELESS (name) = 1; |
| TYPE_NAME (ctx->record_type) = name; |
| TYPE_ARTIFICIAL (ctx->record_type) = 1; |
| create_omp_child_function (ctx, false); |
| gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn); |
| |
| scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx); |
| |
| if (ctx->srecord_type) |
| { |
| name = create_tmp_var_name (".omp_data_a"); |
| name = build_decl (gimple_location (stmt), |
| TYPE_DECL, name, ctx->srecord_type); |
| DECL_ARTIFICIAL (name) = 1; |
| DECL_NAMELESS (name) = 1; |
| TYPE_NAME (ctx->srecord_type) = name; |
| TYPE_ARTIFICIAL (ctx->srecord_type) = 1; |
| create_omp_child_function (ctx, true); |
| } |
| |
| scan_omp (gimple_omp_body_ptr (stmt), ctx); |
| |
| if (TYPE_FIELDS (ctx->record_type) == NULL) |
| { |
| ctx->record_type = ctx->receiver_decl = NULL; |
| t = build_int_cst (long_integer_type_node, 0); |
| gimple_omp_task_set_arg_size (stmt, t); |
| t = build_int_cst (long_integer_type_node, 1); |
| gimple_omp_task_set_arg_align (stmt, t); |
| } |
| } |
| |
| /* Helper function for finish_taskreg_scan, called through walk_tree. |
| If maybe_lookup_decl_in_outer_ctx returns a different tree for |
| some variable, replace it in the expression. */ |
| |
| static tree |
| finish_taskreg_remap (tree *tp, int *walk_subtrees, void *data) |
| { |
| if (VAR_P (*tp)) |
| { |
| omp_context *ctx = (omp_context *) data; |
| tree t = maybe_lookup_decl_in_outer_ctx (*tp, ctx); |
| if (t != *tp) |
| { |
| if (DECL_HAS_VALUE_EXPR_P (t)) |
| t = unshare_expr (DECL_VALUE_EXPR (t)); |
| *tp = t; |
| } |
| *walk_subtrees = 0; |
| } |
| else if (IS_TYPE_OR_DECL_P (*tp)) |
| *walk_subtrees = 0; |
| return NULL_TREE; |
| } |
| |
| /* If any decls have been made addressable during scan_omp, |
| adjust their fields if needed, and layout record types |
| of parallel/task constructs. */ |
| |
| static void |
| finish_taskreg_scan (omp_context *ctx) |
| { |
| if (ctx->record_type == NULL_TREE) |
| return; |
| |
| /* If any variables were made addressable (make_addressable_vars), |
| check all OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK,TEAMS} |
| statements to see whether use_pointer_for_field has changed because |
| of that; if it has, update the field types now. */ |
| if (make_addressable_vars) |
| { |
| tree c; |
| |
| for (c = gimple_omp_taskreg_clauses (ctx->stmt); |
| c; c = OMP_CLAUSE_CHAIN (c)) |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED |
| && !OMP_CLAUSE_SHARED_FIRSTPRIVATE (c)) |
| { |
| tree decl = OMP_CLAUSE_DECL (c); |
| |
| /* Global variables don't need to be copied; |
| the receiver side will use them directly. */ |
| if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))) |
| continue; |
| if (!bitmap_bit_p (make_addressable_vars, DECL_UID (decl)) |
| || !use_pointer_for_field (decl, ctx)) |
| continue; |
| tree field = lookup_field (decl, ctx); |
| if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE |
| && TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl)) |
| continue; |
| TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl)); |
| TREE_THIS_VOLATILE (field) = 0; |
| DECL_USER_ALIGN (field) = 0; |
| SET_DECL_ALIGN (field, TYPE_ALIGN (TREE_TYPE (field))); |
| if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field)) |
| SET_TYPE_ALIGN (ctx->record_type, DECL_ALIGN (field)); |
| if (ctx->srecord_type) |
| { |
| tree sfield = lookup_sfield (decl, ctx); |
| TREE_TYPE (sfield) = TREE_TYPE (field); |
| TREE_THIS_VOLATILE (sfield) = 0; |
| DECL_USER_ALIGN (sfield) = 0; |
| SET_DECL_ALIGN (sfield, DECL_ALIGN (field)); |
| if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield)) |
| SET_TYPE_ALIGN (ctx->srecord_type, DECL_ALIGN (sfield)); |
| } |
| } |
| } |
| |
| if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL) |
| { |
| tree clauses = gimple_omp_parallel_clauses (ctx->stmt); |
| tree c = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_); |
| if (c) |
| { |
| /* Move the _reductemp_ clause first. GOMP_parallel_reductions |
| expects to find it at the start of the data. */ |
| tree f = lookup_field (OMP_CLAUSE_DECL (c), ctx); |
| tree *p = &TYPE_FIELDS (ctx->record_type); |
| while (*p) |
| if (*p == f) |
| { |
| *p = DECL_CHAIN (*p); |
| break; |
| } |
| else |
| p = &DECL_CHAIN (*p); |
| DECL_CHAIN (f) = TYPE_FIELDS (ctx->record_type); |
| TYPE_FIELDS (ctx->record_type) = f; |
| } |
| layout_type (ctx->record_type); |
| fixup_child_record_type (ctx); |
| } |
| else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS) |
| { |
| layout_type (ctx->record_type); |
| fixup_child_record_type (ctx); |
| } |
| else |
| { |
| location_t loc = gimple_location (ctx->stmt); |
| tree *p, vla_fields = NULL_TREE, *q = &vla_fields; |
| tree detach_clause |
| = omp_find_clause (gimple_omp_task_clauses (ctx->stmt), |
| OMP_CLAUSE_DETACH); |
| /* Move VLA fields to the end. */ |
| p = &TYPE_FIELDS (ctx->record_type); |
| while (*p) |
| if (!TYPE_SIZE_UNIT (TREE_TYPE (*p)) |
| || !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p)))) |
| { |
| *q = *p; |
| *p = TREE_CHAIN (*p); |
| TREE_CHAIN (*q) = NULL_TREE; |
| q = &TREE_CHAIN (*q); |
| } |
| else |
| p = &DECL_CHAIN (*p); |
| *p = vla_fields; |
| if (gimple_omp_task_taskloop_p (ctx->stmt)) |
| { |
| /* Move the fields corresponding to the first and second _looptemp_ |
| clauses first. These are filled by GOMP_taskloop |
| and thus need to be in specific positions. */ |
| tree clauses = gimple_omp_task_clauses (ctx->stmt); |
| tree c1 = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); |
| tree c2 = omp_find_clause (OMP_CLAUSE_CHAIN (c1), |
| OMP_CLAUSE__LOOPTEMP_); |
| tree c3 = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_); |
| tree f1 = lookup_field (OMP_CLAUSE_DECL (c1), ctx); |
| tree f2 = lookup_field (OMP_CLAUSE_DECL (c2), ctx); |
| tree f3 = c3 ? lookup_field (OMP_CLAUSE_DECL (c3), ctx) : NULL_TREE; |
| p = &TYPE_FIELDS (ctx->record_type); |
| while (*p) |
| if (*p == f1 || *p == f2 || *p == f3) |
| *p = DECL_CHAIN (*p); |
| else |
| p = &DECL_CHAIN (*p); |
| DECL_CHAIN (f1) = f2; |
| if (c3) |
| { |
| DECL_CHAIN (f2) = f3; |
| DECL_CHAIN (f3) = TYPE_FIELDS (ctx->record_type); |
| } |
| else |
| DECL_CHAIN (f2) = TYPE_FIELDS (ctx->record_type); |
| TYPE_FIELDS (ctx->record_type) = f1; |
| if (ctx->srecord_type) |
| { |
| f1 = lookup_sfield (OMP_CLAUSE_DECL (c1), ctx); |
| f2 = lookup_sfield (OMP_CLAUSE_DECL (c2), ctx); |
| if (c3) |
| f3 = lookup_sfield (OMP_CLAUSE_DECL (c3), ctx); |
| p = &TYPE_FIELDS (ctx->srecord_type); |
| while (*p) |
| if (*p == f1 || *p == f2 || *p == f3) |
| *p = DECL_CHAIN (*p); |
| else |
| p = &DECL_CHAIN (*p); |
| DECL_CHAIN (f1) = f2; |
| DECL_CHAIN (f2) = TYPE_FIELDS (ctx->srecord_type); |
| if (c3) |
| { |
| DECL_CHAIN (f2) = f3; |
| DECL_CHAIN (f3) = TYPE_FIELDS (ctx->srecord_type); |
| } |
| else |
| DECL_CHAIN (f2) = TYPE_FIELDS (ctx->srecord_type); |
| TYPE_FIELDS (ctx->srecord_type) = f1; |
| } |
| } |
| if (detach_clause) |
| { |
| tree c, field; |
| |
| /* Look for a firstprivate clause with the detach event handle. */ |
| for (c = gimple_omp_taskreg_clauses (ctx->stmt); |
| c; c = OMP_CLAUSE_CHAIN (c)) |
| { |
| if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_FIRSTPRIVATE) |
| continue; |
| if (maybe_lookup_decl_in_outer_ctx (OMP_CLAUSE_DECL (c), ctx) |
| == OMP_CLAUSE_DECL (detach_clause)) |
| break; |
| } |
| |
| gcc_assert (c); |
| field = lookup_field (OMP_CLAUSE_DECL (c), ctx); |
| |
| /* Move the field corresponding to the detach clause first. |
| It is filled by GOMP_task and needs to be in a |
| specific position. */ |
| p = &TYPE_FIELDS (ctx->record_type); |
| while (*p) |
| if (*p == field) |
| *p = DECL_CHAIN (*p); |
| else |
| p = &DECL_CHAIN (*p); |
| DECL_CHAIN (field) = TYPE_FIELDS (ctx->record_type); |
| TYPE_FIELDS (ctx->record_type) = field; |
| if (ctx->srecord_type) |
| { |
| field = lookup_sfield (OMP_CLAUSE_DECL (c), ctx); |
| p = &TYPE_FIELDS (ctx->srecord_type); |
| while (*p) |
| if (*p == field) |
| *p = DECL_CHAIN (*p); |
| else |
| p = &DECL_CHAIN (*p); |
| DECL_CHAIN (field) = TYPE_FIELDS (ctx->srecord_type); |
| TYPE_FIELDS (ctx->srecord_type) = field; |
| } |
| } |
| layout_type (ctx->record_type); |
| fixup_child_record_type (ctx); |
| if (ctx->srecord_type) |
| layout_type (ctx->srecord_type); |
| tree t = fold_convert_loc (loc, long_integer_type_node, |
| TYPE_SIZE_UNIT (ctx->record_type)); |
| if (TREE_CODE (t) != INTEGER_CST) |
| { |
| t = unshare_expr (t); |
| walk_tree (&t, finish_taskreg_remap, ctx, NULL); |
| } |
| gimple_omp_task_set_arg_size (ctx->stmt, t); |
| t = build_int_cst (long_integer_type_node, |
| TYPE_ALIGN_UNIT (ctx->record_type)); |
| gimple_omp_task_set_arg_align (ctx->stmt, t); |
| } |
| } |
| |
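| /* After the reordering above, a taskloop with a detach clause ends up with |
| a record laid out roughly as (illustrative): the detach event handle |
| first, then the two _looptemp_ fields [, the _reductemp_ field], then the |
| remaining fields, with VLA fields last, matching the positions that |
| GOMP_task and GOMP_taskloop expect. */ |
| |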
| /* Find the enclosing offload context. */ |
| |
| static omp_context * |
| enclosing_target_ctx (omp_context *ctx) |
| { |
| for (; ctx; ctx = ctx->outer) |
| if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET) |
| break; |
| |
| return ctx; |
| } |
| |
| /* Return whether CTX's parent compute construct is an OpenACC 'kernels' |
| construct. |
| (This doesn't include OpenACC 'kernels' decomposed parts.) */ |
| |
| static bool |
| ctx_in_oacc_kernels_region (omp_context *ctx) |
| { |
| for (; ctx != NULL; ctx = ctx->outer) |
| { |
| gimple *stmt = ctx->stmt; |
| if (gimple_code (stmt) == GIMPLE_OMP_TARGET |
| && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS) |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Check the parallelism clauses inside an OpenACC 'kernels' region. |
| (This doesn't include OpenACC 'kernels' decomposed parts.) |
| Until kernels handling moves to use the same loop indirection |
| scheme as parallel, we need to do this checking early. */ |
| |
| static unsigned |
| check_oacc_kernel_gwv (gomp_for *stmt, omp_context *ctx) |
| { |
| bool checking = true; |
| unsigned outer_mask = 0; |
| unsigned this_mask = 0; |
| bool has_seq = false, has_auto = false; |
| |
| if (ctx->outer) |
| outer_mask = check_oacc_kernel_gwv (NULL, ctx->outer); |
| if (!stmt) |
| { |
| checking = false; |
| if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR) |
| return outer_mask; |
| stmt = as_a <gomp_for *> (ctx->stmt); |
| } |
| |
| for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c)) |
| { |
| switch (OMP_CLAUSE_CODE (c)) |
| { |
| case OMP_CLAUSE_GANG: |
| this_mask |= GOMP_DIM_MASK (GOMP_DIM_GANG); |
| break; |
| case OMP_CLAUSE_WORKER: |
| this_mask |= GOMP_DIM_MASK (GOMP_DIM_WORKER); |
| break; |
| case OMP_CLAUSE_VECTOR: |
| this_mask |= GOMP_DIM_MASK (GOMP_DIM_VECTOR); |
| break; |
| case OMP_CLAUSE_SEQ: |
| has_seq = true; |
| break; |
| case OMP_CLAUSE_AUTO: |
| has_auto = true; |
| break; |
| default: |
| break; |
| } |
| } |
| |
| if (checking) |
| { |
| if (has_seq && (this_mask || has_auto)) |
| error_at (gimple_location (stmt), "%<seq%> overrides other" |
| " OpenACC loop specifiers"); |
| else if (has_auto && this_mask) |
| error_at (gimple_location (stmt), "%<auto%> conflicts with other" |
| " OpenACC loop specifiers"); |
| |
| if (this_mask & outer_mask) |
| error_at (gimple_location (stmt), "inner loop uses same" |
| " OpenACC parallelism as containing loop"); |
| } |
| |
| return outer_mask | this_mask; |
| } |
| |
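| /* For example (illustrative only), inside a 'kernels' region |
| #pragma acc loop gang |
| for (...) { |
| #pragma acc loop gang |
| for (...) ...; |
| } |
| is rejected: the inner loop would reuse the gang dimension already |
| claimed by the containing loop. */ |
| |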
| /* Scan a GIMPLE_OMP_FOR. */ |
| |
| static omp_context * |
| scan_omp_for (gomp_for *stmt, omp_context *outer_ctx) |
| { |
| omp_context *ctx; |
| size_t i; |
| tree clauses = gimple_omp_for_clauses (stmt); |
| |
| ctx = new_omp_context (stmt, outer_ctx); |
| |
| if (is_gimple_omp_oacc (stmt)) |
| { |
| omp_context *tgt = enclosing_target_ctx (outer_ctx); |
| |
| if (!(tgt && is_oacc_kernels (tgt))) |
| for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) |
| { |
| tree c_op0; |
| switch (OMP_CLAUSE_CODE (c)) |
| { |
| case OMP_CLAUSE_GANG: |
| c_op0 = OMP_CLAUSE_GANG_EXPR (c); |
| break; |
| |
| case OMP_CLAUSE_WORKER: |
| c_op0 = OMP_CLAUSE_WORKER_EXPR (c); |
| break; |
| |
| case OMP_CLAUSE_VECTOR: |
| c_op0 = OMP_CLAUSE_VECTOR_EXPR (c); |
| break; |
| |
| default: |
| continue; |
| } |
| |
| if (c_op0) |
| { |
| /* By construction, this is impossible for OpenACC 'kernels' |
| decomposed parts. */ |
| gcc_assert (!(tgt && is_oacc_kernels_decomposed_part (tgt))); |
| |
| error_at (OMP_CLAUSE_LOCATION (c), |
| "argument not permitted on %qs clause", |
| omp_clause_code_name[OMP_CLAUSE_CODE (c)]); |
| if (tgt) |
| inform (gimple_location (tgt->stmt), |
| "enclosing parent compute construct"); |
| else if (oacc_get_fn_attrib (current_function_decl)) |
| inform (DECL_SOURCE_LOCATION (current_function_decl), |
| "enclosing routine"); |
| else |
| gcc_unreachable (); |
| } |
| } |
| |
| if (tgt && is_oacc_kernels (tgt)) |
| check_oacc_kernel_gwv (stmt, ctx); |
| |
| /* Collect all variables named in reductions on this loop. Ensure |
| that, if this loop has a reduction on some variable v, and there is |
| a reduction on v somewhere in an outer context, then there is a |
| reduction on v on all intervening loops as well. */ |
| tree local_reduction_clauses = NULL; |
| for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c)) |
| { |
| if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION) |
| local_reduction_clauses |
| = tree_cons (NULL, c, local_reduction_clauses); |
| } |
| if (ctx->outer_reduction_clauses == NULL && ctx->outer != NULL) |
| ctx->outer_reduction_clauses |
| = chainon (unshare_expr (ctx->outer->local_reduction_clauses), |
| ctx->outer->outer_reduction_clauses); |
| tree outer_reduction_clauses = ctx->outer_reduction_clauses; |
| tree local_iter = local_reduction_clauses; |
| for (; local_iter; local_iter = TREE_CHAIN (local_iter)) |
| { |
| tree local_clause = TREE_VALUE (local_iter); |
| tree local_var = OMP_CLAUSE_DECL (local_clause); |
| tree_code local_op = OMP_CLAUSE_REDUCTION_CODE (local_clause); |
| bool have_outer_reduction = false; |
| tree ctx_iter = outer_reduction_clauses; |
| for (; ctx_iter; ctx_iter = TREE_CHAIN (ctx_iter)) |
| { |
| tree outer_clause = TREE_VALUE (ctx_iter); |
| tree outer_var = OMP_CLAUSE_DECL (outer_clause); |
| tree_code outer_op = OMP_CLAUSE_REDUCTION_CODE (outer_clause); |
| if (outer_var == local_var && outer_op != local_op) |
| { |
| warning_at (OMP_CLAUSE_LOCATION (local_clause), 0, |
| "conflicting reduction operations for %qE", |
| local_var); |
| inform (OMP_CLAUSE_LOCATION (outer_clause), |
| "location of the previous reduction for %qE", |
| outer_var); |
| } |
| if (outer_var == local_var) |
| { |
| have_outer_reduction = true; |
| break; |
| } |
| } |
| if (have_outer_reduction) |
| { |
| /* There is a reduction on local_var both on this loop and on |
| some enclosing loop. Walk up the context tree until such a |
| loop with a reduction on local_var is found, and complain |
| about all intervening loops that do not have such a |
| reduction. */ |
| struct omp_context *curr_loop = ctx->outer; |
| bool found = false; |
| while (curr_loop != NULL) |
| { |
| tree curr_iter = curr_loop->local_reduction_clauses; |
| for (; curr_iter; curr_iter = TREE_CHAIN (curr_iter)) |
| { |
| tree curr_clause = TREE_VALUE (curr_iter); |
| tree curr_var = OMP_CLAUSE_DECL (curr_clause); |
| if (curr_var == local_var) |
| { |
| found = true; |
| break; |
| } |
| } |
| if (!found) |
| warning_at (gimple_location (curr_loop->stmt), 0, |
| "nested loop in reduction needs " |
| "reduction clause for %qE", |
| local_var); |
| else |
| break; |
| curr_loop = curr_loop->outer; |
| } |
| } |
| } |
| ctx->local_reduction_clauses = local_reduction_clauses; |
| ctx->outer_reduction_clauses |
| = chainon (unshare_expr (ctx->local_reduction_clauses), |
| ctx->outer_reduction_clauses); |
| |
| if (tgt && is_oacc_kernels (tgt)) |
| { |
| /* Strip out reductions, as they are not handled yet. */ |
| tree *prev_ptr = &clauses; |
| |
| while (tree probe = *prev_ptr) |
| { |
| tree *next_ptr = &OMP_CLAUSE_CHAIN (probe); |
| |
| if (OMP_CLAUSE_CODE (probe) == OMP_CLAUSE_REDUCTION) |
| *prev_ptr = *next_ptr; |
| else |
| prev_ptr = next_ptr; |
| } |
| |
| gimple_omp_for_set_clauses (stmt, clauses); |
| } |
| } |
| |
| scan_sharing_clauses (clauses, ctx); |
| |
| scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx); |
| for (i = 0; i < gimple_omp_for_collapse (stmt); i++) |
| { |
| scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx); |
| scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx); |
| scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx); |
| scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx); |
| } |
| scan_omp (gimple_omp_body_ptr (stmt), ctx); |
| return ctx; |
| } |
| |
| /* Duplicate #pragma omp simd, creating one copy for SIMT and one for SIMD. */ |
| |
| static void |
| scan_omp_simd (gimple_stmt_iterator *gsi, gomp_for *stmt, |
| omp_context *outer_ctx) |
| { |
| gbind *bind = gimple_build_bind (NULL, NULL, NULL); |
| gsi_replace (gsi, bind, false); |
| gimple_seq seq = NULL; |
| gimple *g = gimple_build_call_internal (IFN_GOMP_USE_SIMT, 0); |
| tree cond = create_tmp_var_raw (integer_type_node); |
| DECL_CONTEXT (cond) = current_function_decl; |
| DECL_SEEN_IN_BIND_EXPR_P (cond) = 1; |
| gimple_bind_set_vars (bind, cond); |
| gimple_call_set_lhs (g, cond); |
| gimple_seq_add_stmt (&seq, g); |
| tree lab1 = create_artificial_label (UNKNOWN_LOCATION); |
| tree lab2 = create_artificial_label (UNKNOWN_LOCATION); |
| tree lab3 = create_artificial_label (UNKNOWN_LOCATION); |
| g = gimple_build_cond (NE_EXPR, cond, integer_zero_node, lab1, lab2); |
| gimple_seq_add_stmt (&seq, g); |
| g = gimple_build_label (lab1); |
| gimple_seq_add_stmt (&seq, g); |
| gimple_seq new_seq = copy_gimple_seq_and_replace_locals (stmt); |
| gomp_for *new_stmt = as_a <gomp_for *> (new_seq); |
| tree clause = build_omp_clause (gimple_location (stmt), OMP_CLAUSE__SIMT_); |
| OMP_CLAUSE_CHAIN (clause) = gimple_omp_for_clauses (new_stmt); |
| gimple_omp_for_set_clauses (new_stmt, clause); |
| gimple_seq_add_stmt (&seq, new_stmt); |
| g = gimple_build_goto (lab3); |
| gimple_seq_add_stmt (&seq, g); |
| g = gimple_build_label (lab2); |
| gimple_seq_add_stmt (&seq, g); |
| gimple_seq_add_stmt (&seq, stmt); |
| g = gimple_build_label (lab3); |
| gimple_seq_add_stmt (&seq, g); |
| gimple_bind_set_body (bind, seq); |
| update_stmt (bind); |
| scan_omp_for (new_stmt, outer_ctx); |
| scan_omp_for (stmt, outer_ctx)->simt_stmt = new_stmt; |
| } |
| |
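| /* The bind built by scan_omp_simd amounts to (illustrative pseudo-GIMPLE): |
| cond = IFN_GOMP_USE_SIMT (); |
| if (cond != 0) goto lab1; else goto lab2; |
| lab1: <copy of the simd loop with an added _simt_ clause>; goto lab3; |
| lab2: <original simd loop>; |
| lab3: |
| so the right variant survives once IFN_GOMP_USE_SIMT is resolved for the |
| target later on. */ |
| |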
| static tree omp_find_scan (gimple_stmt_iterator *, bool *, |
| struct walk_stmt_info *); |
| static omp_context *maybe_lookup_ctx (gimple *); |
| |
| /* Duplicate #pragma omp simd, one for the scan input phase loop and one |
| for the scan phase loop. */ |
| |
| static void |
| scan_omp_simd_scan (gimple_stmt_iterator *gsi, gomp_for *stmt, |
| omp_context *outer_ctx) |
| { |
| /* The only change between inclusive and exclusive scan will be |
| within the first simd loop, so just use inclusive in the |
| worksharing loop. */ |
| outer_ctx->scan_inclusive = true; |
| tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_INCLUSIVE); |
| OMP_CLAUSE_DECL (c) = integer_zero_node; |
| |
| gomp_scan *input_stmt = gimple_build_omp_scan (NULL, NULL_TREE); |
| gomp_scan *scan_stmt = gimple_build_omp_scan (NULL, c); |
| gsi_replace (gsi, input_stmt, false); |
| gimple_seq input_body = NULL; |
| gimple_seq_add_stmt (&input_body, stmt); |
| gsi_insert_after (gsi, scan_stmt, GSI_NEW_STMT); |
| |
| gimple_stmt_iterator input1_gsi = gsi_none (); |
| struct walk_stmt_info wi; |
| memset (&wi, 0, sizeof (wi)); |
| wi.val_only = true; |
| wi.info = (void *) &input1_gsi; |
| walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), omp_find_scan, NULL, &wi); |
| gcc_assert (!gsi_end_p (input1_gsi)); |
| |
| gimple *input_stmt1 = gsi_stmt (input1_gsi); |
| gsi_next (&input1_gsi); |
| gimple *scan_stmt1 = gsi_stmt (input1_gsi); |
| gcc_assert (scan_stmt1 && gimple_code (scan_stmt1) == GIMPLE_OMP_SCAN); |
| c = gimple_omp_scan_clauses (as_a <gomp_scan *> (scan_stmt1)); |
| if (c && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_EXCLUSIVE) |
| std::swap (input_stmt1, scan_stmt1); |
| |
| gimple_seq input_body1 = gimple_omp_body (input_stmt1); |
| gimple_omp_set_body (input_stmt1, NULL); |
| |
| gimple_seq scan_body = copy_gimple_seq_and_replace_locals (stmt); |
| gomp_for *new_stmt = as_a <gomp_for *> (scan_body); |
| |
| gimple_omp_set_body (input_stmt1, input_body1); |
| gimple_omp_set_body (scan_stmt1, NULL); |
| |
| gimple_stmt_iterator input2_gsi = gsi_none (); |
| memset (&wi, 0, sizeof (wi)); |
| wi.val_only = true; |
| wi.info = (void *) &input2_gsi; |
| walk_gimple_seq_mod (gimple_omp_body_ptr (new_stmt), omp_find_scan, |
| NULL, &wi); |
| gcc_assert (!gsi_end_p (input2_gsi)); |
| |
| gimple *input_stmt2 = gsi_stmt (input2_gsi); |
| gsi_next (&input2_gsi); |
| gimple *scan_stmt2 = gsi_stmt (input2_gsi); |
| gcc_assert (scan_stmt2 && gimple_code (scan_stmt2) == GIMPLE_OMP_SCAN); |
| if (c && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_EXCLUSIVE) |
| std::swap (input_stmt2, scan_stmt2); |
| |
| gimple_omp_set_body (input_stmt2, NULL); |
| |
| gimple_omp_set_body (input_stmt, input_body); |
| gimple_omp_set_body (scan_stmt, scan_body); |
| |
| omp_context *ctx = new_omp_context (input_stmt, outer_ctx); |
| scan_omp (gimple_omp_body_ptr (input_stmt), ctx); |
| |
| ctx = new_omp_context (scan_stmt, outer_ctx); |
| scan_omp (gimple_omp_body_ptr (scan_stmt), ctx); |
| |
| maybe_lookup_ctx (new_stmt)->for_simd_scan_phase = true; |
| } |
| |