/* Lowering pass for OpenMP directives. Converts OpenMP directives
into explicit calls to the runtime library (libgomp) and data
marshalling to implement data sharing and copying clauses.
Contributed by Diego Novillo <dnovillo@redhat.com>
Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
phases. The first phase scans the function looking for OMP statements
and then for variables that must be replaced to satisfy data sharing
clauses. The second phase expands code for the constructs, as well as
re-gimplifying things when variables have been replaced with complex
expressions.
Final code generation is done by pass_expand_omp. The flowgraph is
scanned for parallel regions which are then moved to a new
function, to be invoked by the thread library. */
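/* For illustration only (a hedged sketch, not copied from an actual
   dump): a directive such as

	#pragma omp parallel shared(a)
	  a++;

   inside function foo is lowered so the body works on fields of a
   .omp_data_s record, and pass_expand_omp later outlines it roughly as

	.omp_data_o.a = a;
	__builtin_GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
	foo.omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();
	a = .omp_data_o.a;

   where foo.omp_fn.0 is the outlined child function.  */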
/* Context structure. Used to store information about each parallel
directive in the code. */
typedef struct omp_context
{
/* This field must be at the beginning, as we do "inheritance": Some
callback functions for tree-inline.c (e.g., omp_copy_decl)
receive a copy_body_data pointer that is up-casted to an
omp_context pointer. */
copy_body_data cb;
/* The tree of contexts corresponding to the encountered constructs. */
struct omp_context *outer;
gimple stmt;
/* Map variables to fields in a structure that allows communication
between sending and receiving threads. */
splay_tree field_map;
tree record_type;
tree sender_decl;
tree receiver_decl;
/* These are used just by task contexts, if task firstprivate fn is
needed.  srecord_type is used to communicate from the thread that
encountered the task construct to task firstprivate fn; record_type
is allocated by GOMP_task, initialized by task firstprivate fn and
passed to the task body fn. */
splay_tree sfield_map;
tree srecord_type;
/* A chain of variables to add to the top-level block surrounding the
construct. In the case of a parallel, this is in the child function. */
tree block_vars;
/* What to do with variables with implicitly determined sharing
attributes. */
enum omp_clause_default_kind default_kind;
/* Nesting depth of this context. Used to beautify error messages re
invalid gotos. The outermost ctx is depth 1, with depth 0 being
reserved for the main body of the function. */
int depth;
/* True if this parallel directive is nested within another. */
bool is_nested;
} omp_context;
struct omp_for_data_loop
{
tree v, n1, n2, step;
enum tree_code cond_code;
};
/* A structure describing the main elements of a parallel loop. */
struct omp_for_data
{
struct omp_for_data_loop loop;
tree chunk_size;
gimple for_stmt;
tree pre, iter_type;
int collapse;
bool have_nowait, have_ordered;
enum omp_clause_schedule_kind sched_kind;
struct omp_for_data_loop *loops;
};
static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;
static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);
#define WALK_SUBSTMTS \
case GIMPLE_BIND: \
case GIMPLE_TRY: \
case GIMPLE_CATCH: \
case GIMPLE_EH_FILTER: \
/* The sub-statements for these should be walked. */ \
*handled_ops_p = false; \
break;
/* Convenience function for calling scan_omp_1_op on tree operands. */
static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.info = ctx;
wi.want_locations = true;
return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES. */
tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
if (OMP_CLAUSE_CODE (clauses) == kind)
return clauses;
return NULL_TREE;
}
/* Return true if CTX is for an omp parallel. */
static inline bool
is_parallel_ctx (omp_context *ctx)
{
return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}
/* Return true if CTX is for an omp task. */
static inline bool
is_task_ctx (omp_context *ctx)
{
return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}
/* Return true if CTX is for an omp parallel or omp task. */
static inline bool
is_taskreg_ctx (omp_context *ctx)
{
return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
|| gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}
/* Return true if REGION is a combined parallel+workshare region. */
static inline bool
is_combined_parallel (struct omp_region *region)
{
return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
them into *FD. */
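/* For example (illustrative): a non-collapsed loop

	#pragma omp for
	for (i = 0; i <= N; i++)

   is recorded with loop.n1 = 0 and loop.step = 1, and, because LE_EXPR
   and GE_EXPR conditions are normalized away below, with
   loop.n2 = N + 1 and loop.cond_code = LT_EXPR.  */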
static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
struct omp_for_data_loop *loops)
{
tree t, var, *collapse_iter, *collapse_count;
tree count = NULL_TREE, iter_type = long_integer_type_node;
struct omp_for_data_loop *loop;
int i;
struct omp_for_data_loop dummy_loop;
fd->for_stmt = for_stmt;
fd->pre = NULL;
fd->collapse = gimple_omp_for_collapse (for_stmt);
if (fd->collapse > 1)
fd->loops = loops;
else
fd->loops = &fd->loop;
fd->have_nowait = fd->have_ordered = false;
fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
fd->chunk_size = NULL_TREE;
collapse_iter = NULL;
collapse_count = NULL;
for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
switch (OMP_CLAUSE_CODE (t))
{
case OMP_CLAUSE_NOWAIT:
fd->have_nowait = true;
break;
case OMP_CLAUSE_ORDERED:
fd->have_ordered = true;
break;
case OMP_CLAUSE_SCHEDULE:
fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
break;
case OMP_CLAUSE_COLLAPSE:
if (fd->collapse > 1)
{
collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
}
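/* FALLTHRU */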
default:
break;
}
/* FIXME: for now map schedule(auto) to schedule(static).
There should be analysis to determine whether all iterations
are approximately the same amount of work (then schedule(static)
is best) or if it varies (then schedule(dynamic,N) is better). */
if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
{
fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
gcc_assert (fd->chunk_size == NULL);
}
gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
gcc_assert (fd->chunk_size == NULL);
else if (fd->chunk_size == NULL)
{
/* We only need to compute a default chunk size for ordered
static loops and dynamic loops. */
if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
|| fd->have_ordered
|| fd->collapse > 1)
fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
? integer_zero_node : integer_one_node;
}
for (i = 0; i < fd->collapse; i++)
{
if (fd->collapse == 1)
loop = &fd->loop;
else if (loops != NULL)
loop = loops + i;
else
loop = &dummy_loop;
loop->v = gimple_omp_for_index (for_stmt, i);
gcc_assert (SSA_VAR_P (loop->v));
gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
|| TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
loop->n1 = gimple_omp_for_initial (for_stmt, i);
loop->cond_code = gimple_omp_for_cond (for_stmt, i);
loop->n2 = gimple_omp_for_final (for_stmt, i);
switch (loop->cond_code)
{
case LT_EXPR:
case GT_EXPR:
break;
case LE_EXPR:
if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
loop->n2, size_one_node);
else
loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
build_int_cst (TREE_TYPE (loop->n2), 1));
loop->cond_code = LT_EXPR;
break;
case GE_EXPR:
if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
loop->n2, size_int (-1));
else
loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
build_int_cst (TREE_TYPE (loop->n2), 1));
loop->cond_code = GT_EXPR;
break;
default:
gcc_unreachable ();
}
t = gimple_omp_for_incr (for_stmt, i);
gcc_assert (TREE_OPERAND (t, 0) == var);
switch (TREE_CODE (t))
{
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
loop->step = TREE_OPERAND (t, 1);
break;
case MINUS_EXPR:
loop->step = TREE_OPERAND (t, 1);
loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
loop->step);
break;
default:
gcc_unreachable ();
}
if (iter_type != long_long_unsigned_type_node)
{
if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
iter_type = long_long_unsigned_type_node;
else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
&& TYPE_PRECISION (TREE_TYPE (loop->v))
>= TYPE_PRECISION (iter_type))
{
tree n;
if (loop->cond_code == LT_EXPR)
n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
loop->n2, loop->step);
else
n = loop->n1;
if (TREE_CODE (n) != INTEGER_CST
|| tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
iter_type = long_long_unsigned_type_node;
}
else if (TYPE_PRECISION (TREE_TYPE (loop->v))
> TYPE_PRECISION (iter_type))
{
tree n1, n2;
if (loop->cond_code == LT_EXPR)
{
n1 = loop->n1;
n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
loop->n2, loop->step);
}
else
{
n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
loop->n2, loop->step);
n2 = loop->n1;
}
if (TREE_CODE (n1) != INTEGER_CST
|| TREE_CODE (n2) != INTEGER_CST
|| !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
|| !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
iter_type = long_long_unsigned_type_node;
}
}
if (collapse_count && *collapse_count == NULL)
{
if ((i == 0 || count != NULL_TREE)
&& TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
&& TREE_CONSTANT (loop->n1)
&& TREE_CONSTANT (loop->n2)
&& TREE_CODE (loop->step) == INTEGER_CST)
{
tree itype = TREE_TYPE (loop->v);
if (POINTER_TYPE_P (itype))
itype
= lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
t = fold_build2 (PLUS_EXPR, itype,
fold_convert (itype, loop->step), t);
t = fold_build2 (PLUS_EXPR, itype, t,
fold_convert (itype, loop->n2));
t = fold_build2 (MINUS_EXPR, itype, t,
fold_convert (itype, loop->n1));
if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype,
fold_convert (itype,
loop->step)));
else
t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
fold_convert (itype, loop->step));
t = fold_convert (long_long_unsigned_type_node, t);
if (count != NULL_TREE)
count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
count, t);
else
count = t;
if (TREE_CODE (count) != INTEGER_CST)
count = NULL_TREE;
}
else
count = NULL_TREE;
}
}
if (count)
{
if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
iter_type = long_long_unsigned_type_node;
else
iter_type = long_integer_type_node;
}
else if (collapse_iter && *collapse_iter != NULL)
iter_type = TREE_TYPE (*collapse_iter);
fd->iter_type = iter_type;
if (collapse_iter && *collapse_iter == NULL)
*collapse_iter = create_tmp_var (iter_type, ".iter");
if (collapse_count && *collapse_count == NULL)
{
if (count)
*collapse_count = fold_convert (iter_type, count);
else
*collapse_count = create_tmp_var (iter_type, ".count");
}
if (fd->collapse > 1)
{
fd->loop.v = *collapse_iter;
fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
fd->loop.n2 = *collapse_count;
fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
fd->loop.cond_code = LT_EXPR;
}
}
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
is the immediate dominator of PAR_ENTRY_BB, return true if there
are no data dependencies that would prevent expanding the parallel
directive at PAR_ENTRY_BB as a combined parallel+workshare region.
When expanding a combined parallel+workshare region, the call to
the child function may need additional arguments in the case of
GIMPLE_OMP_FOR regions. In some cases, these arguments are
computed out of variables passed in from the parent to the child
via 'struct .omp_data_s'. For instance:
#pragma omp parallel for schedule (guided, i * 4)
for (j ...)
Is lowered into:
# BLOCK 2 (PAR_ENTRY_BB)
.omp_data_o.i = i;
#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
# BLOCK 3 (WS_ENTRY_BB)
.omp_data_i = &.omp_data_o;
D.1667 = .omp_data_i->i;
D.1598 = D.1667 * 4;
#pragma omp for schedule (guided, D.1598)
When we outline the parallel region, the call to the child function
'bar.omp_fn.0' will need the value D.1598 in its argument list, but
that value is computed *after* the call site. So, in principle we
cannot do the transformation.
To see whether the code in WS_ENTRY_BB blocks the combined
parallel+workshare call, we collect all the variables used in the
GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
statement in WS_ENTRY_BB. If so, then we cannot emit the combined
call.
FIXME. If we had the SSA form built at this point, we could merely
hoist the code in block 3 into block 2 and be done with it. But at
this point we don't have dataflow information and though we could
hack something up here, it is really not worth the aggravation. */
static bool
workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
{
struct omp_for_data fd;
gimple par_stmt, ws_stmt;
par_stmt = last_stmt (par_entry_bb);
ws_stmt = last_stmt (ws_entry_bb);
if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
return true;
gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
extract_omp_for_data (ws_stmt, &fd, NULL);
if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
return false;
if (fd.iter_type != long_integer_type_node)
return false;
/* FIXME. We give up too easily here. If any of these arguments
are not constants, they will likely involve variables that have
been mapped into fields of .omp_data_s for sharing with the child
function. With appropriate data flow, it would be possible to
see through this. */
if (!is_gimple_min_invariant (fd.loop.n1)
|| !is_gimple_min_invariant (fd.loop.n2)
|| !is_gimple_min_invariant (fd.loop.step)
|| (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
return false;
return true;
}
/* Collect additional arguments needed to emit a combined
parallel+workshare call. WS_STMT is the workshare directive being
expanded. */
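/* E.g. (illustrative), for a GIMPLE_OMP_FOR the chunk size is consed
   first, so the resulting TREE_LIST reads n1, n2, step and, if present,
   chunk_size, matching the trailing arguments of the combined
   GOMP_parallel_loop_*_start entry points.  */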
static tree
get_ws_args_for (gimple ws_stmt)
{
tree t;
if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
{
struct omp_for_data fd;
tree ws_args;
extract_omp_for_data (ws_stmt, &fd, NULL);
ws_args = NULL_TREE;
if (fd.chunk_size)
{
t = fold_convert (long_integer_type_node, fd.chunk_size);
ws_args = tree_cons (NULL, t, ws_args);
}
t = fold_convert (long_integer_type_node, fd.loop.step);
ws_args = tree_cons (NULL, t, ws_args);
t = fold_convert (long_integer_type_node, fd.loop.n2);
ws_args = tree_cons (NULL, t, ws_args);
t = fold_convert (long_integer_type_node, fd.loop.n1);
ws_args = tree_cons (NULL, t, ws_args);
return ws_args;
}
else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
{
/* Number of sections is equal to the number of edges from the
GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
the exit of the sections region. */
basic_block bb = single_succ (gimple_bb (ws_stmt));
t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
t = tree_cons (NULL, t, NULL);
return t;
}
gcc_unreachable ();
}
/* Discover whether REGION is a combined parallel+workshare region. */
static void
determine_parallel_type (struct omp_region *region)
{
basic_block par_entry_bb, par_exit_bb;
basic_block ws_entry_bb, ws_exit_bb;
if (region == NULL || region->inner == NULL
|| region->exit == NULL || region->inner->exit == NULL
|| region->inner->cont == NULL)
return;
/* We only support parallel+for and parallel+sections. */
if (region->type != GIMPLE_OMP_PARALLEL
|| (region->inner->type != GIMPLE_OMP_FOR
&& region->inner->type != GIMPLE_OMP_SECTIONS))
return;
/* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
WS_EXIT_BB -> PAR_EXIT_BB. */
par_entry_bb = region->entry;
par_exit_bb = region->exit;
ws_entry_bb = region->inner->entry;
ws_exit_bb = region->inner->exit;
if (single_succ (par_entry_bb) == ws_entry_bb
&& single_succ (ws_exit_bb) == par_exit_bb
&& workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
&& (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
|| (last_and_only_stmt (ws_entry_bb)
&& last_and_only_stmt (par_exit_bb))))
{
gimple ws_stmt = last_stmt (ws_entry_bb);
if (region->inner->type == GIMPLE_OMP_FOR)
{
/* If this is a combined parallel loop, we need to determine
whether or not to use the combined library calls. There
are two cases where we do not apply the transformation:
static loops and any kind of ordered loop. In the first
case, we already open code the loop so there is no need
to do anything else. In the latter case, the combined
parallel loop call would still need extra synchronization
to implement ordered semantics, so there would not be any
gain in using the combined call. */
tree clauses = gimple_omp_for_clauses (ws_stmt);
tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
if (c == NULL
|| OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
|| find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
{
region->is_combined_parallel = false;
region->inner->is_combined_parallel = false;
return;
}
}
region->is_combined_parallel = true;
region->inner->is_combined_parallel = true;
region->ws_args = get_ws_args_for (ws_stmt);
}
}
/* Return true if EXPR is variable sized. */
static inline bool
is_variable_sized (const_tree expr)
{
return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}
/* Return true if DECL is a reference type. */
static inline bool
is_reference (tree decl)
{
return lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Lookup variables in the decl or field splay trees.  The "maybe" form
allows the variable not to have been entered; the plain form asserts
that the variable must have been entered. */
static inline tree
lookup_decl (tree var, omp_context *ctx)
{
tree *n;
n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
return *n;
}
static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
tree *n;
n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
return n ? *n : NULL_TREE;
}
static inline tree
lookup_field (tree var, omp_context *ctx)
{
splay_tree_node n;
n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
return (tree) n->value;
}
static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
splay_tree_node n;
n = splay_tree_lookup (ctx->sfield_map
? ctx->sfield_map : ctx->field_map,
(splay_tree_key) var);
return (tree) n->value;
}
static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
splay_tree_node n;
n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer. SHARED_CTX is
the parallel context if DECL is to be shared. */
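/* For instance (illustrative): aggregates are always passed by
   pointer, as are statics, externs and addressable locals; a plain
   non-addressable scalar that is not shared by an enclosing taskreg
   construct can use copy-in/copy-out instead.  */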
static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
return true;
/* We can only use copy-in/copy-out semantics for shared variables
when we know the value is not accessible from an outer scope. */
if (shared_ctx)
{
/* ??? Trivially accessible from anywhere. But why would we even
be passing an address in this case? Should we simply assert
this to be false, or should we have a cleanup pass that removes
these from the list of mappings? */
if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
return true;
/* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
without analyzing the expression whether or not its location
is accessible to anyone else. In the case of nested parallel
regions it certainly may be. */
if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
return true;
/* Do not use copy-in/copy-out for variables that have their
address taken. */
if (TREE_ADDRESSABLE (decl))
return true;
/* Disallow copy-in/out in nested parallel if
decl is shared in outer parallel, otherwise
each thread could store the shared variable
in its own copy-in location, making the
variable no longer really shared. */
if (!TREE_READONLY (decl) && shared_ctx->is_nested)
{
omp_context *up;
for (up = shared_ctx->outer; up; up = up->outer)
if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
break;
if (up)
{
tree c;
for (c = gimple_omp_taskreg_clauses (up->stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
&& OMP_CLAUSE_DECL (c) == decl)
break;
if (c)
return true;
}
}
/* For tasks avoid using copy-in/out, unless they are readonly
(in which case just copy-in is used). As tasks can be
deferred or executed in a different thread, when GOMP_task
returns, the task hasn't necessarily terminated. */
if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
{
tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
if (is_gimple_reg (outer))
{
/* Taking address of OUTER in lower_send_shared_vars
might need regimplification of everything that uses the
variable. */
if (!task_shared_vars)
task_shared_vars = BITMAP_ALLOC (NULL);
bitmap_set_bit (task_shared_vars, DECL_UID (outer));
TREE_ADDRESSABLE (outer) = 1;
}
return true;
}
}
return false;
}
/* Create a new VAR_DECL and copy information from VAR to it. */
tree
copy_var_decl (tree var, tree name, tree type)
{
tree copy = build_decl (VAR_DECL, name, type);
TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
DECL_CONTEXT (copy) = DECL_CONTEXT (var);
DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
TREE_USED (copy) = 1;
DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
return copy;
}
/* Construct a new automatic decl similar to VAR. */
static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
tree copy = copy_var_decl (var, name, type);
DECL_CONTEXT (copy) = current_function_decl;
TREE_CHAIN (copy) = ctx->block_vars;
ctx->block_vars = copy;
return copy;
}
static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build tree nodes to access the field for VAR on the receiver side. */
static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
tree x, field = lookup_field (var, ctx);
/* If the receiver record type was remapped in the child function,
remap the field into the new record type. */
x = maybe_lookup_field (field, ctx);
if (x != NULL)
field = x;
x = build_fold_indirect_ref (ctx->receiver_decl);
x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
if (by_ref)
x = build_fold_indirect_ref (x);
return x;
}
/* Build tree nodes to access VAR in the scope outer to CTX. In the case
of a parallel, this is a component reference; for workshare constructs
this is some variable. */
static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
tree x;
if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
x = var;
else if (is_variable_sized (var))
{
x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
x = build_outer_var_ref (x, ctx);
x = build_fold_indirect_ref (x);
}
else if (is_taskreg_ctx (ctx))
{
bool by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
}
else if (ctx->outer)
x = lookup_decl (var, ctx->outer);
else if (is_reference (var))
/* This can happen with orphaned constructs.  If VAR is a reference,
it may be shared and as such valid. */
x = var;
else
gcc_unreachable ();
if (is_reference (var))
x = build_fold_indirect_ref (x);
return x;
}
/* Build tree nodes to access the field for VAR on the sender side. */
static tree
build_sender_ref (tree var, omp_context *ctx)
{
tree field = lookup_sfield (var, ctx);
return build3 (COMPONENT_REF, TREE_TYPE (field),
ctx->sender_decl, field, NULL);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  If
BY_REF is true, the field is a pointer to VAR's type.  MASK selects the
record: bit 0 installs the field in CTX->RECORD_TYPE and CTX->FIELD_MAP
(receiver side), bit 1 in CTX->SRECORD_TYPE and CTX->SFIELD_MAP (sender
side, used by task constructs). */
static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
tree field, type, sfield = NULL_TREE;
gcc_assert ((mask & 1) == 0
|| !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
|| !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
type = TREE_TYPE (var);
if (by_ref)
type = build_pointer_type (type);
else if ((mask & 3) == 1 && is_reference (var))
type = TREE_TYPE (type);
field = build_decl (FIELD_DECL, DECL_NAME (var), type);
/* Remember what variable this field was created for. This does have a
side effect of making dwarf2out ignore this member, so for helpful
debugging we clear it later in delete_omp_context. */
DECL_ABSTRACT_ORIGIN (field) = var;
if (type == TREE_TYPE (var))
{
DECL_ALIGN (field) = DECL_ALIGN (var);
DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
}
else
DECL_ALIGN (field) = TYPE_ALIGN (type);
if ((mask & 3) == 3)
{
insert_field_into_struct (ctx->record_type, field);
if (ctx->srecord_type)
{
sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
DECL_ABSTRACT_ORIGIN (sfield) = var;
DECL_ALIGN (sfield) = DECL_ALIGN (field);
DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
insert_field_into_struct (ctx->srecord_type, sfield);
}
}
else
{
if (ctx->srecord_type == NULL_TREE)
{
tree t;
ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
{
sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
insert_field_into_struct (ctx->srecord_type, sfield);
splay_tree_insert (ctx->sfield_map,
(splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
(splay_tree_value) sfield);
}
}
sfield = field;
insert_field_into_struct ((mask & 1) ? ctx->record_type
: ctx->srecord_type, field);
}
if (mask & 1)
splay_tree_insert (ctx->field_map, (splay_tree_key) var,
(splay_tree_value) field);
if ((mask & 2) && ctx->sfield_map)
splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
(splay_tree_value) sfield);
}
static tree
install_var_local (tree var, omp_context *ctx)
{
tree new_var = omp_copy_decl_1 (var, ctx);
insert_decl_map (&ctx->cb, var, new_var);
return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context. This means
copying the DECL_VALUE_EXPR, and fixing up the type. */
static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
tree new_decl, size;
new_decl = lookup_decl (decl, ctx);
TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
&& DECL_HAS_VALUE_EXPR_P (decl))
{
tree ve = DECL_VALUE_EXPR (decl);
walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
SET_DECL_VALUE_EXPR (new_decl, ve);
DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
}
if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
{
size = remap_decl (DECL_SIZE (decl), &ctx->cb);
if (size == error_mark_node)
size = TYPE_SIZE (TREE_TYPE (new_decl));
DECL_SIZE (new_decl) = size;
size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
if (size == error_mark_node)
size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
DECL_SIZE_UNIT (new_decl) = size;
}
}
/* The callback for remap_decl. Search all containing contexts for a
mapping of the variable; this avoids having to duplicate the splay
tree ahead of time. We know a mapping doesn't already exist in the
given context. Create new mappings to implement default semantics. */
static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
omp_context *ctx = (omp_context *) cb;
tree new_var;
if (TREE_CODE (var) == LABEL_DECL)
{
new_var = create_artificial_label ();
DECL_CONTEXT (new_var) = current_function_decl;
insert_decl_map (&ctx->cb, var, new_var);
return new_var;
}
while (!is_taskreg_ctx (ctx))
{
ctx = ctx->outer;
if (ctx == NULL)
return var;
new_var = maybe_lookup_decl (var, ctx);
if (new_var)
return new_var;
}
if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
return var;
return error_mark_node;
}
/* Debugging dumps for parallel regions. */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);
/* Dump the parallel region tree rooted at REGION. */
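/* A sketch of the output (basic block numbers are made up;
   indentation reflects nesting):

	bb 2: GIMPLE_OMP_PARALLEL
	    bb 3: GIMPLE_OMP_FOR
	    bb 5: GIMPLE_OMP_CONTINUE
	    bb 6: GIMPLE_OMP_RETURN
	bb 7: GIMPLE_OMP_RETURN  */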
void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
gimple_code_name[region->type]);
if (region->inner)
dump_omp_region (file, region->inner, indent + 4);
if (region->cont)
{
fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
region->cont->index);
}
if (region->exit)
fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
region->exit->index);
else
fprintf (file, "%*s[no exit marker]\n", indent, "");
if (region->next)
dump_omp_region (file, region->next, indent);
}
void
debug_omp_region (struct omp_region *region)
{
dump_omp_region (stderr, region, 0);
}
void
debug_all_omp_regions (void)
{
dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region of type TYPE starting at basic block BB
inside region PARENT. */
struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
struct omp_region *parent)
{
struct omp_region *region = XCNEW (struct omp_region);
region->outer = parent;
region->entry = bb;
region->type = type;
if (parent)
{
/* This is a nested region. Add it to the list of inner
regions in PARENT. */
region->next = parent->inner;
parent->inner = region;
}
else
{
/* This is a toplevel region. Add it to the list of toplevel
regions in ROOT_OMP_REGION. */
region->next = root_omp_region;
root_omp_region = region;
}
return region;
}
/* Release the memory associated with the region tree rooted at REGION. */
static void
free_omp_region_1 (struct omp_region *region)
{
struct omp_region *i, *n;
for (i = region->inner; i ; i = n)
{
n = i->next;
free_omp_region_1 (i);
}
free (region);
}
/* Release the memory for the entire omp region tree. */
void
free_omp_regions (void)
{
struct omp_region *r, *n;
for (r = root_omp_region; r ; r = n)
{
n = r->next;
free_omp_region_1 (r);
}
root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context. */
static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
omp_context *ctx = XCNEW (omp_context);
splay_tree_insert (all_contexts, (splay_tree_key) stmt,
(splay_tree_value) ctx);
ctx->stmt = stmt;
if (outer_ctx)
{
ctx->outer = outer_ctx;
ctx->cb = outer_ctx->cb;
ctx->cb.block = NULL;
ctx->depth = outer_ctx->depth + 1;
}
else
{
ctx->cb.src_fn = current_function_decl;
ctx->cb.dst_fn = current_function_decl;
ctx->cb.src_node = cgraph_node (current_function_decl);
ctx->cb.dst_node = ctx->cb.src_node;
ctx->cb.src_cfun = cfun;
ctx->cb.copy_decl = omp_copy_decl;
ctx->cb.eh_region = -1;
ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
ctx->depth = 1;
}
ctx->cb.decl_map = pointer_map_create ();
return ctx;
}
static gimple_seq maybe_catch_exception (gimple_seq);
/* Finalize task copyfn. */
static void
finalize_task_copyfn (gimple task_stmt)
{
struct function *child_cfun;
tree child_fn, old_fn;
gimple_seq seq, new_seq;
gimple bind;
child_fn = gimple_omp_task_copy_fn (task_stmt);
if (child_fn == NULL_TREE)
return;
child_cfun = DECL_STRUCT_FUNCTION (child_fn);
/* Inform the callgraph about the new function. */
DECL_STRUCT_FUNCTION (child_fn)->curr_properties
= cfun->curr_properties;
old_fn = current_function_decl;
push_cfun (child_cfun);
current_function_decl = child_fn;
bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
seq = gimple_seq_alloc ();
gimple_seq_add_stmt (&seq, bind);
new_seq = maybe_catch_exception (seq);
if (new_seq != seq)
{
bind = gimple_build_bind (NULL, new_seq, NULL);
seq = gimple_seq_alloc ();
gimple_seq_add_stmt (&seq, bind);
}
gimple_set_body (child_fn, seq);
pop_cfun ();
current_function_decl = old_fn;
cgraph_add_new_function (child_fn, false);
}
/* Destroy an omp_context data structure.  Called through the splay tree
value delete callback. */
static void
delete_omp_context (splay_tree_value value)
{
omp_context *ctx = (omp_context *) value;
pointer_map_destroy (ctx->cb.decl_map);
if (ctx->field_map)
splay_tree_delete (ctx->field_map);
if (ctx->sfield_map)
splay_tree_delete (ctx->sfield_map);
/* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
it produces corrupt debug information. */
if (ctx->record_type)
{
tree t;
for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
DECL_ABSTRACT_ORIGIN (t) = NULL;
}
if (ctx->srecord_type)
{
tree t;
for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
DECL_ABSTRACT_ORIGIN (t) = NULL;
}
if (is_task_ctx (ctx))
finalize_task_copyfn (ctx->stmt);
XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
context. */
static void
fixup_child_record_type (omp_context *ctx)
{
tree f, type = ctx->record_type;
/* ??? It isn't sufficient to just call remap_type here, because
variably_modified_type_p doesn't work the way we expect for
record types. Testing each field for whether it needs remapping
and creating a new record by hand works, however. */
for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
break;
if (f)
{
tree name, new_fields = NULL;
type = lang_hooks.types.make_type (RECORD_TYPE);
name = DECL_NAME (TYPE_NAME (ctx->record_type));
name = build_decl (TYPE_DECL, name, type);
TYPE_NAME (type) = name;
for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
{
tree new_f = copy_node (f);
DECL_CONTEXT (new_f) = type;
TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
TREE_CHAIN (new_f) = new_fields;
walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
&ctx->cb, NULL);
walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
&ctx->cb, NULL);
new_fields = new_f;
/* Arrange to be able to look up the receiver field
given the sender field. */
splay_tree_insert (ctx->field_map, (splay_tree_key) f,
(splay_tree_value) new_f);
}
TYPE_FIELDS (type) = nreverse (new_fields);
layout_type (type);
}
TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
specified by CLAUSES. */
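/* For example (a hedged sketch): given

	#pragma omp parallel shared(a) firstprivate(b) private(c)

   A and B each get a field in .omp_data_s plus a remapped local copy
   (mask 3 in install_var_field), while C only gets a new local
   variable and no field.  */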
static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
tree c, decl;
bool scan_array_reductions = false;
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
bool by_ref;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
decl = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
goto do_private;
else if (!is_variable_sized (decl))
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_SHARED:
gcc_assert (is_taskreg_ctx (ctx));
decl = OMP_CLAUSE_DECL (c);
gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
|| !is_variable_sized (decl));
/* Global variables don't need to be copied,
the receiver side will use them directly. */
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
break;
by_ref = use_pointer_for_field (decl, ctx);
if (! TREE_READONLY (decl)
|| TREE_ADDRESSABLE (decl)
|| by_ref
|| is_reference (decl))
{
install_var_field (decl, by_ref, 3, ctx);
install_var_local (decl, ctx);
break;
}
/* We don't need to copy const scalar vars back. */
OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
goto do_private;
case OMP_CLAUSE_LASTPRIVATE:
/* Let the corresponding firstprivate clause create
the variable. */
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
do_private:
if (is_variable_sized (decl))
{
if (is_task_ctx (ctx))
install_var_field (decl, false, 1, ctx);
break;
}
else if (is_taskreg_ctx (ctx))
{
bool global
= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
by_ref = use_pointer_for_field (decl, NULL);
if (is_task_ctx (ctx)
&& (global || by_ref || is_reference (decl)))
{
install_var_field (decl, false, 1, ctx);
if (!global)
install_var_field (decl, by_ref, 2, ctx);
}
else if (!global)
install_var_field (decl, by_ref, 3, ctx);
}
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_COPYPRIVATE:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
/* FALLTHRU */
case OMP_CLAUSE_COPYIN:
decl = OMP_CLAUSE_DECL (c);
by_ref = use_pointer_for_field (decl, NULL);
install_var_field (decl, by_ref, 3, ctx);
break;
case OMP_CLAUSE_DEFAULT:
ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
break;
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_SCHEDULE:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
break;
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
break;
default:
gcc_unreachable ();
}
}
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_LASTPRIVATE:
/* Let the corresponding firstprivate clause create
the variable. */
if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
scan_array_reductions = true;
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
if (is_variable_sized (decl))
install_var_local (decl, ctx);
fixup_remapped_decl (decl, ctx,
OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
&& OMP_CLAUSE_PRIVATE_DEBUG (c));
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
scan_array_reductions = true;
break;
case OMP_CLAUSE_SHARED:
decl = OMP_CLAUSE_DECL (c);
if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
fixup_remapped_decl (decl, ctx, false);
break;
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
break;
default:
gcc_unreachable ();
}
}
if (scan_array_reductions)
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}
/* Create a new name for the omp child function. Returns an identifier. */
static GTY(()) unsigned int tmp_ompfn_id_num;
static tree
create_omp_child_function_name (bool task_copy)
{
tree name = DECL_ASSEMBLER_NAME (current_function_decl);
size_t len = IDENTIFIER_LENGTH (name);
char *tmp_name, *prefix;
const char *suffix;
suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
memcpy (prefix, IDENTIFIER_POINTER (name), len);
strcpy (prefix + len, suffix);
#ifndef NO_DOT_IN_LABEL
prefix[len] = '.';
#elif !defined NO_DOLLAR_IN_LABEL
prefix[len] = '$';
#endif
ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
return get_identifier (tmp_name);
}
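/* E.g., within a function `bar' the first child function typically
   ends up named `bar.omp_fn.0': the leading `_' of the suffix is
   overwritten with `.' (or `$') where the assembler allows it, as in
   the combined parallel+workshare example earlier in this file.  */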
/* Build a decl for the omp child function.  It won't contain a body
yet, just the bare decl. */
static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
tree decl, type, name, t;
name = create_omp_child_function_name (task_copy);
if (task_copy)
type = build_function_type_list (void_type_node, ptr_type_node,
ptr_type_node, NULL_TREE);
else
type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
decl = build_decl (FUNCTION_DECL, name, type);
decl = lang_hooks.decls.pushdecl (decl);
if (!task_copy)
ctx->cb.dst_fn = decl;
else
gimple_omp_task_set_copy_fn (ctx->stmt, decl);
TREE_STATIC (decl) = 1;
TREE_USED (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 0;
TREE_PUBLIC (decl) = 0;
DECL_UNINLINABLE (decl) = 1;
DECL_EXTERNAL (decl) = 0;
DECL_CONTEXT (decl) = NULL_TREE;
DECL_INITIAL (decl) = make_node (BLOCK);
t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
DECL_ARTIFICIAL (t) = 1;
DECL_IGNORED_P (t) = 1;
DECL_RESULT (decl) = t;
t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
DECL_ARTIFICIAL (t) = 1;
DECL_ARG_TYPE (t) = ptr_type_node;
DECL_CONTEXT (t) = current_function_decl;
TREE_USED (t) = 1;
DECL_ARGUMENTS (decl) = t;
if (!task_copy)
ctx->receiver_decl = t;
else
{
t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
ptr_type_node);
DECL_ARTIFICIAL (t) = 1;
DECL_ARG_TYPE (t) = ptr_type_node;
DECL_CONTEXT (t) = current_function_decl;
TREE_USED (t) = 1;
TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
DECL_ARGUMENTS (decl) = t;
}
/* Allocate memory for the function structure. The call to
allocate_struct_function clobbers CFUN, so we need to restore
it afterward. */
push_struct_function (decl);
DECL_SOURCE_LOCATION (decl) = gimple_location (ctx->stmt);
cfun->function_end_locus = gimple_location (ctx->stmt);
pop_cfun ();
}
/* Scan an OpenMP parallel directive. */
static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
omp_context *ctx;
tree name;
gimple stmt = gsi_stmt (*gsi);
/* Ignore parallel directives with empty bodies, unless there
are copyin clauses. */
if (optimize > 0
&& empty_body_p (gimple_omp_body (stmt))
&& find_omp_clause (gimple_omp_parallel_clauses (stmt),
OMP_CLAUSE_COPYIN) == NULL)
{
gsi_replace (gsi, gimple_build_nop (), false);
return;
}
ctx = new_omp_context (stmt, outer_ctx);
if (taskreg_nesting_level > 1)
ctx->is_nested = true;
ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
name = create_tmp_var_name (".omp_data_s");
name = build_decl (TYPE_DECL, name, ctx->record_type);
TYPE_NAME (ctx->record_type) = name;
create_omp_child_function (ctx, false);
gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
scan_omp (gimple_omp_body (stmt), ctx);
if (TYPE_FIELDS (ctx->record_type) == NULL)
ctx->record_type = ctx->receiver_decl = NULL;
else
{
layout_type (ctx->record_type);
fixup_child_record_type (ctx);
}
}
/* Scan an OpenMP task directive. */
static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
omp_context *ctx;
tree name, t;
gimple stmt = gsi_stmt (*gsi);
/* Ignore task directives with empty bodies. */
if (optimize > 0
&& empty_body_p (gimple_omp_body (stmt)))
{
gsi_replace (gsi, gimple_build_nop (), false);
return;
}
ctx = new_omp_context (stmt, outer_ctx);
if (taskreg_nesting_level > 1)
ctx->is_nested = true;
ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
name = create_tmp_var_name (".omp_data_s");
name = build_decl (TYPE_DECL, name, ctx->record_type);
TYPE_NAME (ctx->record_type) = name;
create_omp_child_function (ctx, false);
gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
if (ctx->srecord_type)
{
name = create_tmp_var_name (".omp_data_a");
name = build_decl (TYPE_DECL, name, ctx->srecord_type);
TYPE_NAME (ctx->srecord_type) = name;
create_omp_child_function (ctx, true);
}
scan_omp (gimple_omp_body (stmt), ctx);
if (TYPE_FIELDS (ctx->record_type) == NULL)
{
ctx->record_type = ctx->receiver_decl = NULL;
t = build_int_cst (long_integer_type_node, 0);
gimple_omp_task_set_arg_size (stmt, t);
t = build_int_cst (long_integer_type_node, 1);
gimple_omp_task_set_arg_align (stmt, t);
}
else
{
tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
/* Move VLA fields to the end. */
p = &TYPE_FIELDS (ctx->record_type);
while (*p)
if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
|| ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
{
*q = *p;
*p = TREE_CHAIN (*p);
TREE_CHAIN (*q) = NULL_TREE;
q = &TREE_CHAIN (*q);
}
else
p = &TREE_CHAIN (*p);
*p = vla_fields;
layout_type (ctx->record_type);
fixup_child_record_type (ctx);
if (ctx->srecord_type)
layout_type (ctx->srecord_type);
t = fold_convert (long_integer_type_node,
TYPE_SIZE_UNIT (ctx->record_type));
gimple_omp_task_set_arg_size (stmt, t);
t = build_int_cst (long_integer_type_node,
TYPE_ALIGN_UNIT (ctx->record_type));
gimple_omp_task_set_arg_align (stmt, t);
}
}
/* Scan an OpenMP loop directive. */
static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
omp_context *ctx;
size_t i;
ctx = new_omp_context (stmt, outer_ctx);
scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
scan_omp (gimple_omp_for_pre_body (stmt), ctx);
for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
{
scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
}
scan_omp (gimple_omp_body (stmt), ctx);
}
/* Scan an OpenMP sections directive. */
static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
omp_context *ctx;
ctx = new_omp_context (stmt, outer_ctx);
scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
scan_omp (gimple_omp_body (stmt), ctx);
}
/* Scan an OpenMP single directive. */
static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
omp_context *ctx;
tree name;
ctx = new_omp_context (stmt, outer_ctx);
ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
name = create_tmp_var_name (".omp_copy_s");
name = build_decl (TYPE_DECL, name, ctx->record_type);
TYPE_NAME (ctx->record_type) = name;
scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
scan_omp (gimple_omp_body (stmt), ctx);
if (TYPE_FIELDS (ctx->record_type) == NULL)
ctx->record_type = NULL;
else
layout_type (ctx->record_type);
}
/* Check OpenMP nesting restrictions. */
static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
switch (gimple_code (stmt))
{
case GIMPLE_OMP_FOR:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_CALL:
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_FOR:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_TASK:
if (is_gimple_call (stmt))
{
warning (0, "barrier region may not be closely nested inside "
"of work-sharing, critical, ordered, master or "
"explicit task region");
return;
}
warning (0, "work-sharing region may not be closely nested inside "
"of work-sharing, critical, ordered, master or explicit "
"task region");
return;
case GIMPLE_OMP_PARALLEL:
return;
default:
break;
}
break;
case GIMPLE_OMP_MASTER:
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_FOR:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_TASK:
warning (0, "master region may not be closely nested inside "
"of work-sharing or explicit task region");
return;
case GIMPLE_OMP_PARALLEL:
return;
default:
break;
}
break;
case GIMPLE_OMP_ORDERED:
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_TASK:
warning (0, "ordered region may not be closely nested inside "
"of critical or explicit task region");
return;
case GIMPLE_OMP_FOR:
if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_ORDERED) == NULL)
warning (0, "ordered region must be closely nested inside "
"a loop region with an ordered clause");
return;
case GIMPLE_OMP_PARALLEL:
return;
default:
break;
}
break;
case GIMPLE_OMP_CRITICAL:
for (; ctx != NULL; ctx = ctx->outer)
if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
&& (gimple_omp_critical_name (stmt)
== gimple_omp_critical_name (ctx->stmt)))
{
warning (0, "critical region may not be nested inside a critical "
"region with the same name");
return;
}
break;
default:
break;
}
}
/* Helper function for scan_omp.
Callback for walk_tree or operators in walk_gimple_stmt used to
scan for OpenMP directives in TP. */
static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
omp_context *ctx = (omp_context *) wi->info;
tree t = *tp;
switch (TREE_CODE (t))
{
case VAR_DECL:
case PARM_DECL:
case LABEL_DECL:
case RESULT_DECL:
if (ctx)
*tp = remap_decl (t, &ctx->cb);
break;
default:
if (ctx && TYPE_P (t))
*tp = remap_type (t, &ctx->cb);
else if (!DECL_P (t))
*walk_subtrees = 1;
break;
}
return NULL_TREE;
}
/* Helper function for scan_omp.
Callback for walk_gimple_stmt used to scan for OpenMP directives in
the current statement in GSI. */
static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
struct walk_stmt_info *wi)
{
gimple stmt = gsi_stmt (*gsi);
omp_context *ctx = (omp_context *) wi->info;
if (gimple_has_location (stmt))
input_location = gimple_location (stmt);
/* Check the OpenMP nesting restrictions. */
if (ctx != NULL)
{
if (is_gimple_omp (stmt))
check_omp_nesting_restrictions (stmt, ctx);
else if (is_gimple_call (stmt))
{
tree fndecl = gimple_call_fndecl (stmt);
if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
&& DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
check_omp_nesting_restrictions (stmt, ctx);
}
}
*handled_ops_p = true;
switch (gimple_code (stmt))
{
case GIMPLE_OMP_PARALLEL:
taskreg_nesting_level++;
scan_omp_parallel (gsi, ctx);
taskreg_nesting_level--;
break;
case GIMPLE_OMP_TASK:
taskreg_nesting_level++;
scan_omp_task (gsi, ctx);
taskreg_nesting_level--;
break;
case GIMPLE_OMP_FOR:
scan_omp_for (stmt, ctx);
break;
case GIMPLE_OMP_SECTIONS:
scan_omp_sections (stmt, ctx);
break;
case GIMPLE_OMP_SINGLE:
scan_omp_single (stmt, ctx);
break;
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
ctx = new_omp_context (stmt, ctx);
scan_omp (gimple_omp_body (stmt), ctx);
break;
case GIMPLE_BIND:
{
tree var;
*handled_ops_p = false;
if (ctx)
for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
insert_decl_map (&ctx->cb, var, var);
}
break;
default:
*handled_ops_p = false;
break;
}
return NULL_TREE;
}
/* Scan all the statements starting at the current statement. CTX
contains context information about the OpenMP directives and
clauses found during the scan. */
static void
scan_omp (gimple_seq body, omp_context *ctx)
{
location_t saved_location;
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.info = ctx;
wi.want_locations = true;
saved_location = input_location;
walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
input_location = saved_location;
}
/* Re-gimplification and code generation routines. */
/* Build a call to GOMP_barrier. */
static tree
build_omp_barrier (void)
{
return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}
/* If a context was created for STMT when it was scanned, return it. */
static omp_context *
maybe_lookup_ctx (gimple stmt)
{
splay_tree_node n;
n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
return n ? (omp_context *) n->value : NULL;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
context that has a mapping for DECL.
If CTX is a nested parallel directive, we may have to use the decl
mappings created in CTX's parent context. Suppose that we have the
following parallel nesting (variable UIDs shown for clarity):
iD.1562 = 0;
#omp parallel shared(iD.1562) -> outer parallel
iD.1562 = iD.1562 + 1;
#omp parallel shared (iD.1562) -> inner parallel
iD.1562 = iD.1562 - 1;
Each parallel structure will create a distinct .omp_data_s structure
for copying iD.1562 in/out of the directive:
outer parallel .omp_data_s.1.i -> iD.1562
inner parallel .omp_data_s.2.i -> iD.1562
A shared variable mapping will produce a copy-out operation before
the parallel directive and a copy-in operation after it. So, in
this case we would have:
iD.1562 = 0;
.omp_data_o.1.i = iD.1562;
#omp parallel shared(iD.1562) -> outer parallel
.omp_data_i.1 = &.omp_data_o.1
.omp_data_i.1->i = .omp_data_i.1->i + 1;
.omp_data_o.2.i = iD.1562; -> **
#omp parallel shared(iD.1562) -> inner parallel
.omp_data_i.2 = &.omp_data_o.2
.omp_data_i.2->i = .omp_data_i.2->i - 1;
** This is a problem. The symbol iD.1562 cannot be referenced
inside the body of the outer parallel region. But since we are
emitting this copy operation while expanding the inner parallel
directive, we need to access the CTX structure of the outer
parallel directive to get the correct mapping:
.omp_data_o.2.i = .omp_data_i.1->i
Since there may be other workshare or parallel directives enclosing
the parallel directive, it may be necessary to walk up the context
parent chain. This is not a problem in general because nested
parallelism happens only rarely. */
static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
tree t;
omp_context *up;
for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
t = maybe_lookup_decl (decl, up);
gcc_assert (!ctx->is_nested || t || is_global_var (decl));
return t ? t : decl;
}
/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
in outer contexts. */
static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
tree t = NULL;
omp_context *up;
for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
t = maybe_lookup_decl (decl, up);
return t ? t : decl;
}
/* Construct the initialization value for reduction CLAUSE. */
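/* E.g., reduction(+:x) initializes the private copy to 0,
   reduction(*:x) to 1, reduction(&:x) to ~0, and reduction(max:x) to
   the minimum value of the type (or -Inf when the mode honors
   infinities).  */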
tree
omp_reduction_init (tree clause, tree type)
{
switch (OMP_CLAUSE_REDUCTION_CODE (clause))
{
case PLUS_EXPR:
case MINUS_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_XOR_EXPR:
case NE_EXPR:
return fold_convert (type, integer_zero_node);
case MULT_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_ANDIF_EXPR:
case EQ_EXPR:
return fold_convert (type, integer_one_node);
case BIT_AND_EXPR:
return fold_convert (type, integer_minus_one_node);
case MAX_EXPR:
if (SCALAR_FLOAT_TYPE_P (type))
{
REAL_VALUE_TYPE max, min;
if (HONOR_INFINITIES (TYPE_MODE (type)))
{
real_inf (&max);
real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
}
else
real_maxval (&min, 1, TYPE_MODE (type));
return build_real (type, min);
}
else
{
gcc_assert (INTEGRAL_TYPE_P (type));
return TYPE_MIN_VALUE (type);
}
case MIN_EXPR:
if (SCALAR_FLOAT_TYPE_P (type))
{
REAL_VALUE_TYPE max;
if (HONOR_INFINITIES (TYPE_MODE (type)))
real_inf (&max);
else
real_maxval (&max, 0, TYPE_MODE (type));
return build_real (type, max);
}
else
{
gcc_assert (INTEGRAL_TYPE_P (type));
return TYPE_MAX_VALUE (type);
}
default:
gcc_unreachable ();
}
}
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
from the receiver (aka child) side and initializers for REFERENCE_TYPE
private variables. Initialization statements go in ILIST, while calls
to destructors go in DLIST. */
static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
omp_context *ctx)
{
gimple_stmt_iterator diter;
tree c, dtor, copyin_seq, x, ptr;
bool copyin_by_ref = false;
bool lastprivate_firstprivate = false;
int pass;
*dlist = gimple_seq_alloc ();
diter = gsi_start (*dlist);
copyin_seq = NULL;
/* Do all the fixed sized types in the first pass, and the variable sized
types in the second pass. This makes sure that the scalar arguments to
the variable sized types are processed before we use them in the
variable sized operations. */
for (pass = 0; pass < 2; ++pass)
{
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
tree var, new_var;
bool by_ref;
switch (c_kind)
{
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_PRIVATE_DEBUG (c))
continue;
break;
case OMP_CLAUSE_SHARED:
if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
{
gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
continue;
}
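/* FALLTHRU */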
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_REDUCTION:
break;
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
{
lastprivate_firstprivate = true;
if (pass != 0)
continue;
}
break;
default:
continue;
}
new_var = var = OMP_CLAUSE_DECL (c);
if (c_kind != OMP_CLAUSE_COPYIN)
new_var = lookup_decl (var, ctx);
if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
{
if (pass != 0)
continue;
}
else if (is_variable_sized (var))
{
/* For variable sized types, we need to allocate the
actual storage here. Call alloca and store the
result in the pointer decl that we created elsewhere. */
if (pass == 0)
continue;
if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
{
gimple stmt;
tree tmp;
ptr = DECL_VALUE_EXPR (new_var);
gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
ptr = TREE_OPERAND (ptr, 0);
gcc_assert (DECL_P (ptr));
x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
/* Emit: void *tmp = __builtin_alloca (size); */
stmt
= gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
tmp = create_tmp_var_raw (ptr_type_node, NULL);
gimple_add_tmp_var (tmp);
gimple_call_set_lhs (stmt, tmp);
gimple_seq_add_stmt (ilist, stmt);
x = fold_convert (TREE_TYPE (ptr), tmp);
gimplify_assign (ptr, x, ilist);
}
}
else if (is_reference (var))
{
/* For references that are being privatized for Fortran,
allocate new backing storage for the new pointer
variable. This allows us to avoid changing all the
code that expects a pointer to something that expects
a direct variable. Note that this doesn't apply to
C++, since reference types are disallowed in data
sharing clauses there, except for NRV optimized
return values. */
if (pass == 0)
continue;
x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
{
x = build_receiver_ref (var, false, ctx);
x = build_fold_addr_expr (x);
}
else if (TREE_CONSTANT (x))
{
const char *name = NULL;
if (DECL_NAME (new_var))
name = IDENTIFIER_POINTER (DECL_NAME (new_var));
x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
name);
gimple_add_tmp_var (x);
x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
}
else
{
x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
x = fold_convert (TREE_TYPE (new_var), x);
}
gimplify_assign (new_var, x, ilist);
new_var = build_fold_indirect_ref (new_var);
}
else if (c_kind == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
if (pass == 0)
continue;
}
else if (pass != 0)
continue;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
/* Shared global vars are just accessed directly. */
if (is_global_var (new_var))
break;
/* Set up the DECL_VALUE_EXPR for shared variables now. This
needs to be delayed until after fixup_child_record_type so
that we get the correct type during the dereference. */
by_ref = use_pointer_for_field (var, ctx);
x = build_receiver_ref (var, by_ref, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
/* ??? If VAR is not passed by reference, and the variable
hasn't been initialized yet, then we'll get a warning for
the store into the omp_data_s structure. Ideally, we'd be
able to notice this and not store anything at all, but
we're generating code too early. Suppress the warning. */
if (!by_ref)
TREE_NO_WARNING (var) = 1;
break;
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
x = build_outer_var_ref (var, ctx);
else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
{
if (is_task_ctx (ctx))
x = build_receiver_ref (var, false, ctx);
else
x = build_outer_var_ref (var, ctx);
}
else
x = NULL;
x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
if (x)
gimplify_and_add (x, ilist);
/* FALLTHRU */
do_dtor:
x = lang_hooks.decls.omp_clause_dtor (c, new_var);
if (x)
{
gimple_seq tseq = NULL;
dtor = x;
gimplify_stmt (&dtor, &tseq);
gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
}
break;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_task_ctx (ctx))
{
if (is_reference (var) || is_variable_sized (var))
goto do_dtor;
else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
ctx))
|| use_pointer_for_field (var, NULL))
{
x = build_receiver_ref (var, false, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
goto do_dtor;
}
}
x = build_outer_var_ref (var, ctx);
x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
gimplify_and_add (x, ilist);
goto do_dtor;
case OMP_CLAUSE_COPYIN:
by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
append_to_statement_list (x, &copyin_seq);
copyin_by_ref |= by_ref;
break;
case OMP_CLAUSE_REDUCTION:
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
x = build_outer_var_ref (var, ctx);
if (is_reference (var))
x = build_fold_addr_expr (x);
SET_DECL_VALUE_EXPR (placeholder, x);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
gimple_seq_add_seq (ilist,
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
}
else
{
x = omp_reduction_init (c, TREE_TYPE (new_var));
gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
gimplify_assign (new_var, x, ilist);
}
break;
default:
gcc_unreachable ();
}
}
}
/* The copyin sequence is not to be executed by the main thread, since
   that would result in self-copies. For scalars a self-copy may be
   harmless, but for C++ types with a user-defined operator= it
   certainly is not. */
if (copyin_seq)
{
x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
x = build2 (NE_EXPR, boolean_type_node, x,
build_int_cst (TREE_TYPE (x), 0));
x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
gimplify_and_add (x, ilist);
}
/* If any copyin variable is passed by reference, we must ensure the
master thread doesn't modify it before it is copied over in all
threads. Similarly for variables in both firstprivate and
lastprivate clauses we need to ensure the lastprivate copying
happens after firstprivate copying in all threads. */
if (copyin_by_ref || lastprivate_firstprivate)
gimplify_and_add (build_omp_barrier (), ilist);
}
/* Generate code to implement the LASTPRIVATE clauses. This is used for
both parallel and workshare constructs. PREDICATE may be NULL if it's
always true. */
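/* Schematically, for '#pragma omp for lastprivate(x)' this emits,
   guarded by PREDICATE (a check that the thread executed the
   sequentially last iteration), roughly
       if (PREDICATE)
	 x = x.1;
   copying the private copy x.1 back into the original x. */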
static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
omp_context *ctx)
{
tree x, c, label = NULL;
bool par_clauses = false;
/* Early exit if there are no lastprivate clauses. */
clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
if (clauses == NULL)
{
/* If these clauses came from a workshare construct, see if it had
   been combined with its enclosing parallel. In that case, look for
   the clauses on the parallel statement itself. */
if (is_parallel_ctx (ctx))
return;
ctx = ctx->outer;
if (ctx == NULL || !is_parallel_ctx (ctx))
return;
clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
OMP_CLAUSE_LASTPRIVATE);
if (clauses == NULL)
return;
par_clauses = true;
}
if (predicate)
{
gimple stmt;
tree label_true, arm1, arm2;
label = create_artificial_label ();
label_true = create_artificial_label ();
arm1 = TREE_OPERAND (predicate, 0);
arm2 = TREE_OPERAND (predicate, 1);
gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
label_true, label);
gimple_seq_add_stmt (stmt_list, stmt);
gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
}
for (c = clauses; c ;)
{
tree var, new_var;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
{
var = OMP_CLAUSE_DECL (c);
new_var = lookup_decl (var, ctx);
if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
{
lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
gimple_seq_add_seq (stmt_list,
OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
}
OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
x = build_outer_var_ref (var, ctx);
if (is_reference (var))
new_var = build_fold_indirect_ref (new_var);
x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
gimplify_and_add (x, stmt_list);
}
c = OMP_CLAUSE_CHAIN (c);
if (c == NULL && !par_clauses)
{
/* If these clauses came from a workshare construct, see if it had
   been combined with its enclosing parallel. In that case, continue
   looking for the clauses also on the parallel statement itself. */
if (is_parallel_ctx (ctx))
break;
ctx = ctx->outer;
if (ctx == NULL || !is_parallel_ctx (ctx))
break;
c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
OMP_CLAUSE_LASTPRIVATE);
par_clauses = true;
}
}
if (label)
gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
}
/* Generate code to implement the REDUCTION clauses. */
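/* Schematically, a lone reduction(+:s) merges the private copy with
       #pragma omp atomic
       s = s + s.1;
   whereas multiple (or array) reductions are merged inside a single
       GOMP_atomic_start (); ... all merges ... GOMP_atomic_end ();
   critical section, as built below. */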
static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
gimple_seq sub_seq = NULL;
gimple stmt;
tree x, c;
int count = 0;
/* First see if there is exactly one reduction clause. Use OMP_ATOMIC
update in that case, otherwise use a lock. */
for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
{
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
/* Never use OMP_ATOMIC for array reductions. */
count = -1;
break;
}
count++;
}
if (count == 0)
return;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
tree var, ref, new_var;
enum tree_code code;
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
continue;
var = OMP_CLAUSE_DECL (c);
new_var = lookup_decl (var, ctx);
if (is_reference (var))
new_var = build_fold_indirect_ref (new_var);
ref = build_outer_var_ref (var, ctx);
code = OMP_CLAUSE_REDUCTION_CODE (c);
/* reduction(-:var) sums up the partial results, so it acts
identically to reduction(+:var). */
if (code == MINUS_EXPR)
code = PLUS_EXPR;
if (count == 1)
{
tree addr = build_fold_addr_expr (ref);
addr = save_expr (addr);
ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
x = build2 (OMP_ATOMIC, void_type_node, addr, x);
gimplify_and_add (x, stmt_seqp);
return;
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
if (is_reference (var))
ref = build_fold_addr_expr (ref);
SET_DECL_VALUE_EXPR (placeholder, ref);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
}
else
{
x = build2 (code, TREE_TYPE (ref), ref, new_var);
ref = build_outer_var_ref (var, ctx);
gimplify_assign (ref, x, &sub_seq);
}
}
stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
gimple_seq_add_stmt (stmt_seqp, stmt);
gimple_seq_add_seq (stmt_seqp, sub_seq);
stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
gimple_seq_add_stmt (stmt_seqp, stmt);
}
/* Generate code to implement the COPYPRIVATE clauses. */
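/* Schematically, for '#pragma omp single copyprivate(x)' the thread
   that executed the single body publishes x (or &x, when passed by
   reference) through the sender record (SLIST); the other threads
   then copy the published value back into their own x (RLIST). */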
static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
omp_context *ctx)
{
tree c;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
tree var, ref, x;
bool by_ref;
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
continue;
var = OMP_CLAUSE_DECL (c);
by_ref = use_pointer_for_field (var, NULL);
ref = build_sender_ref (var, ctx);
x = lookup_decl_in_outer_ctx (var, ctx);
x = by_ref ? build_fold_addr_expr (x) : x;
gimplify_assign (ref, x, slist);
ref = build_receiver_ref (var, by_ref, ctx);
if (is_reference (var))
{
ref = build_fold_indirect_ref (ref);
var = build_fold_indirect_ref (var);
}
x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
gimplify_and_add (x, rlist);
}
}
/* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE and
   REDUCTION clauses (plus PRIVATE when it must reference the outer
   copy) from the sender (aka parent) side. */
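/* Schematically, for firstprivate(a) the parent emits
       .omp_data_o.a = a;		(ILIST, before the call)
   and for a scalar lastprivate(b)
       b = .omp_data_o.b;		(OLIST, after the call) */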
static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
omp_context *ctx)
{
tree c;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
tree val, ref, x, var;
bool by_ref, do_in = false, do_out = false;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
break;
continue;
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_LASTPRIVATE:
case OMP_CLAUSE_REDUCTION:
break;
default:
continue;
}
val = OMP_CLAUSE_DECL (c);
var = lookup_decl_in_outer_ctx (val, ctx);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
&& is_global_var (var))
continue;
if (is_variable_sized (val))
continue;
by_ref = use_pointer_for_field (val, NULL);
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
do_in = true;
break;
case OMP_CLAUSE_LASTPRIVATE:
if (by_ref || is_reference (val))
{
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
continue;
do_in = true;
}
else
{
do_out = true;
if (lang_hooks.decls.omp_private_outer_ref (val))
do_in = true;
}
break;
case OMP_CLAUSE_REDUCTION:
do_in = true;
do_out = !(by_ref || is_reference (val));
break;
default:
gcc_unreachable ();
}
if (do_in)
{
ref = build_sender_ref (val, ctx);
x = by_ref ? build_fold_addr_expr (var) : var;
gimplify_assign (ref, x, ilist);
if (is_task_ctx (ctx))
DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
}
if (do_out)
{
ref = build_sender_ref (val, ctx);
gimplify_assign (var, ref, olist);
}
}
}
/* Generate code to implement SHARED from the sender (aka parent)
side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
list things that got automatically shared. */
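/* Schematically, for a shared int i this emits
       .omp_data_o.i = i;		(ILIST)
       i = .omp_data_o.i;		(OLIST)
   or stores &i into the field instead when the variable must be
   passed by reference. */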
static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
tree var, ovar, nvar, f, x, record_type;
if (ctx->record_type == NULL)
return;
record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
{
ovar = DECL_ABSTRACT_ORIGIN (f);
nvar = maybe_lookup_decl (ovar, ctx);
if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
continue;
/* If CTX is a nested parallel directive, find the immediately
   enclosing parallel or workshare construct that contains a
   mapping for OVAR. */
var = lookup_decl_in_outer_ctx (ovar, ctx);
if (use_pointer_for_field (ovar, ctx))
{
x = build_sender_ref (ovar, ctx);
var = build_fold_addr_expr (var);
gimplify_assign (x, var, ilist);
}
else
{
x = build_sender_ref (ovar, ctx);
gimplify_assign (x, var, ilist);
if (!TREE_READONLY (var)
/* We don't need to receive a new reference to a result
   or parm decl. In fact we may not store to it, as that would
   invalidate any pending return-slot optimization (RSO) and
   generate wrong gimple during inlining. */
&& !((TREE_CODE (var) == RESULT_DECL
|| TREE_CODE (var) == PARM_DECL)
&& DECL_BY_REFERENCE (var)))
{
x = build_sender_ref (ovar, ctx);
gimplify_assign (var, x, olist);
}
}
}
}
/* A convenience function to build a GIMPLE_COND carrying just the
   condition, with no branch labels filled in yet. */
static gimple
gimple_build_cond_empty (tree cond)
{
enum tree_code pred_code;
tree lhs, rhs;
gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}
/* Build the function calls to GOMP_parallel_start etc. to actually
   generate the parallel operation. REGION is the parallel region
   being expanded. BB is the block where the code should be inserted.
   WS_ARGS will be set if this is a call to a combined
   parallel+workshare construct; it contains the list of additional
   arguments needed by the workshare construct. */
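/* Schematically, a plain parallel region expands to
       GOMP_parallel_start (__child_fn, &.omp_data_o, num_threads);
       __child_fn (&.omp_data_o);
       GOMP_parallel_end ();
   (the child function name is illustrative); combined
   parallel+workshare constructs call one of the specialized
   GOMP_parallel_*_start entry points instead. */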
static void
expand_parallel_call (struct omp_region *region, basic_block bb,
gimple entry_stmt, tree ws_args)
{
tree t, t1, t2, val, cond, c, clauses;
gimple_stmt_iterator gsi;
gimple stmt;
int start_ix;
clauses = gimple_omp_parallel_clauses (entry_stmt);
/* Determine what flavor of GOMP_parallel_start we will be
emitting. */
start_ix = BUILT_IN_GOMP_PARALLEL_START;
if (is_combined_parallel (region))
{
switch (region->inner->type)
{
case GIMPLE_OMP_FOR:
gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
+ (region->inner->sched_kind
== OMP_CLAUSE_SCHEDULE_RUNTIME
? 3 : region->inner->sched_kind);
break;
case GIMPLE_OMP_SECTIONS:
start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
break;
default:
gcc_unreachable ();
}
}
/* By default, the value of NUM_THREADS is zero (selected at run time)
and there is no conditional. */
cond = NULL_TREE;
val = build_int_cst (unsigned_type_node, 0);
c = find_omp_clause (clauses, OMP_CLAUSE_IF);
if (c)
cond = OMP_CLAUSE_IF_EXPR (c);
c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
if (c)
val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
/* Ensure 'val' is of the correct type. */
val = fold_convert (unsigned_type_node, val);
/* If we found the clause 'if (cond)', build either
   (cond == 0), which forces a single thread when the condition is
   false, or (cond ? val : 1u). */
if (cond)
{
gimple_stmt_iterator gsi;
cond = gimple_boolify (cond);
if (integer_zerop (val))
val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
build_int_cst (TREE_TYPE (cond), 0));
else
{
basic_block cond_bb, then_bb, else_bb;
edge e, e_then, e_else;
tree tmp_then, tmp_else, tmp_join, tmp_var;
tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
if (gimple_in_ssa_p (cfun))
{
tmp_then = make_ssa_name (tmp_var, NULL);
tmp_else = make_ssa_name (tmp_var, NULL);
tmp_join = make_ssa_name (tmp_var, NULL);
}
else
{
tmp_then = tmp_var;
tmp_else = tmp_var;
tmp_join = tmp_var;
}
e = split_block (bb, NULL);
cond_bb = e->src;
bb = e->dest;
remove_edge (e);
then_bb = create_empty_bb (cond_bb);
else_bb = create_empty_bb (then_bb);
set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
stmt = gimple_build_cond_empty (cond);
gsi = gsi_start_bb (cond_bb);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
gsi = gsi_start_bb (then_bb);
stmt = gimple_build_assign (tmp_then, val);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
gsi = gsi_start_bb (else_bb);
stmt = gimple_build_assign
(tmp_else, build_int_cst (unsigned_type_node, 1));
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
if (gimple_in_ssa_p (cfun))
{
gimple phi = create_phi_node (tmp_join, bb);
SSA_NAME_DEF_STMT (tmp_join) = phi;
add_phi_arg (phi, tmp_then, e_then);
add_phi_arg (phi, tmp_else, e_else);
}
val = tmp_join;
}
gsi = gsi_start_bb (bb);
val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
}
gsi = gsi_last_bb (bb);
t = gimple_omp_parallel_data_arg (entry_stmt);
if (t == NULL)
t1 = null_pointer_node;
else
t1 = build_fold_addr_expr (t);
t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
if (ws_args)
{
tree args = tree_cons (NULL, t2,
tree_cons (NULL, t1,
tree_cons (NULL, val, ws_args)));
t = build_function_call_expr (built_in_decls[start_ix], args);
}
else
t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
t = gimple_omp_parallel_data_arg (entry_stmt);
if (t == NULL)
t = null_pointer_node;
else
t = build_fold_addr_expr (t);
t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
}
/* Build the function call to GOMP_task to actually generate the task
   operation. BB is the block where the code should be inserted. */
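/* Schematically:
       GOMP_task (__child_fn, &.omp_data_o, __copy_fn,
		  arg_size, arg_align, if_cond, flags);
   (function names illustrative), matching the seven arguments
   assembled below. */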
static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
tree t, t1, t2, t3, flags, cond, c, clauses;
gimple_stmt_iterator gsi;
clauses = gimple_omp_task_clauses (entry_stmt);
c = find_omp_clause (clauses, OMP_CLAUSE_IF);
if (c)
cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
else
cond = boolean_true_node;
c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
gsi = gsi_last_bb (bb);
t = gimple_omp_task_data_arg (entry_stmt);
if (t == NULL)
t2 = null_pointer_node;
else
t2 = build_fold_addr_expr (t);
t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
t = gimple_omp_task_copy_fn (entry_stmt);
if (t == NULL)
t3 = null_pointer_node;
else
t3 = build_fold_addr_expr (t);
t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
gimple_omp_task_arg_size (entry_stmt),
gimple_omp_task_arg_align (entry_stmt), cond, flags);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
}
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
catch handler and return it. This prevents programs from violating the
structured block semantics with throws. */
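/* The result is roughly equivalent to
       try { BODY } catch (...) { terminate; }
   i.e. a GIMPLE_TRY whose handler is an EH filter marked
   MUST_NOT_THROW, with the language's cleanup action (or
   __builtin_trap when there is none) as its body. */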
static gimple_seq
maybe_catch_exception (gimple_seq body)
{
gimple f, t;
if (!flag_exceptions)
return body;
if (lang_protect_cleanup_actions)
t = lang_protect_cleanup_actions ();
else
t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
gimple_eh_filter_set_must_not_throw (f, true);
t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
GIMPLE_TRY_CATCH);
return gimple_seq_alloc_with_stmt (t);
}
/* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
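/* E.g., a TREE_LIST (a, b, c) yields the chain a -> b -> c, and the
   function returns a. */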
static tree
list2chain (tree list)
{
tree t;
for (t = list; t; t = TREE_CHAIN (t))
{
tree var = TREE_VALUE (t);
if (TREE_CHAIN (t))
TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
else
TREE_CHAIN (var) = NULL_TREE;
}
return list ? TREE_VALUE (list) : NULL_TREE;
}
/* Remove barriers in REGION->EXIT's block. Note that this is only
valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
removed. */
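/* E.g., in
       #pragma omp parallel
       {
	 #pragma omp for	<- implicit barrier at the end
	 ...
       }			<- implicit barrier of the parallel
   the workshare's trailing barrier is redundant, since every thread
   waits at the end of the parallel region anyway. */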
static void
remove_exit_barrier (struct omp_region *region)
{
gimple_stmt_iterator gsi;
basic_block exit_bb;
edge_iterator ei;
edge e;
gimple stmt;
int any_addressable_vars = -1;
exit_bb = region->exit;
/* If the parallel region doesn't return, there is no REGION->EXIT
   block at all. */
if (! exit_bb)
return;
/* The last stmt in the block will be the parallel's GIMPLE_OMP_RETURN. The
   workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
   statements that can appear in between are extremely limited -- no
   memory operations at all. Here we allow nothing at all, so the
   only statement allowed to precede this GIMPLE_OMP_RETURN is a label. */
gsi = gsi_last_bb (exit_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
gsi_prev (&gsi);
if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
return;
FOR_EACH_EDGE (e, ei, exit_bb->preds)
{
gsi = gsi_last_bb (e->src);
if (gsi_end_p (gsi))
continue;
stmt = gsi_stmt (gsi);
if (gimple_code (stmt) == GIMPLE_OMP_RETURN
&& !gimple_omp_return_nowait_p (stmt))
{
/* OpenMP 3.0 tasks unfortunately prevent this optimization
   in many cases. If there could be tasks queued, the barrier
   might be needed to let the tasks run before some local
   variable of the parallel that the task uses as shared
   runs out of scope. The task can be spawned either
   from within the current function (this would be easy to check)
   or from some function it calls that gets passed an address
   of such a variable. */
if (any_addressable_vars < 0)
{
gimple parallel_stmt = last_stmt (region->entry);
tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
tree block;
any_addressable_vars = 0;
for (; local_decls; local_decls = TREE_CHAIN (local_decls))
if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
{
any_addressable_vars = 1;
break;
}
for (block = gimple_block (stmt);
!any_addressable_vars
&& block