/* Passes for transactional memory support.
Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>
and Aldy Hernandez <aldyh@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "tree-eh.h"
#include "calls.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-inline.h"
#include "demangle.h"
#include "output.h"
#include "trans-mem.h"
#include "langhooks.h"
#include "cfgloop.h"
#include "tree-ssa-address.h"
#include "stringpool.h"
#include "attribs.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "symtab-thunks.h"
#define A_RUNINSTRUMENTEDCODE 0x0001
#define A_RUNUNINSTRUMENTEDCODE 0x0002
#define A_SAVELIVEVARIABLES 0x0004
#define A_RESTORELIVEVARIABLES 0x0008
#define A_ABORTTRANSACTION 0x0010
#define AR_USERABORT 0x0001
#define AR_USERRETRY 0x0002
#define AR_TMCONFLICT 0x0004
#define AR_EXCEPTIONBLOCKABORT 0x0008
#define AR_OUTERABORT 0x0010
#define MODE_SERIALIRREVOCABLE 0x0000
/* The representation of a transaction changes several times during the
lowering process. In the beginning, in the front-end we have the
GENERIC tree TRANSACTION_EXPR. For example,
__transaction {
local++;
if (++global == 10)
__tm_abort;
}
During initial gimplification (gimplify.cc) the TRANSACTION_EXPR node is
trivially replaced with a GIMPLE_TRANSACTION node.
During pass_lower_tm, we examine the body of transactions looking
for aborts. Transactions that do not contain an abort may be
merged into an outer transaction. We also add a TRY-FINALLY node
to arrange for the transaction to be committed on any exit.
[??? Think about how this arrangement affects throw-with-commit
and throw-with-abort operations. In this case we want the TRY to
handle gotos, but not to catch any exceptions because the transaction
will already be closed.]
GIMPLE_TRANSACTION [label=NULL] {
try {
local = local + 1;
t0 = global;
t1 = t0 + 1;
global = t1;
if (t1 == 10)
__builtin___tm_abort ();
} finally {
__builtin___tm_commit ();
}
}
During pass_lower_eh, we create EH regions for the transactions,
intermixed with the regular EH stuff. This gives us a nice persistent
mapping (all the way through rtl) from transactional memory operation
back to the transaction, which allows us to get the abnormal edges
correct to model transaction aborts and restarts:
GIMPLE_TRANSACTION [label=over]
local = local + 1;
t0 = global;
t1 = t0 + 1;
global = t1;
if (t1 == 10)
__builtin___tm_abort ();
__builtin___tm_commit ();
over:
This is the end of all_lowering_passes, and so is what is present
during the IPA passes, and through all of the optimization passes.
During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
functions and mark functions for cloning.
At the end of gimple optimization, before exiting SSA form,
pass_tm_edges replaces statements that perform transactional
memory operations with the appropriate TM builtins, and swaps
function calls for their transactional clones. At this
point we introduce the abnormal transaction restart edges and
complete lowering of the GIMPLE_TRANSACTION node.
x = __builtin___tm_start (MAY_ABORT);
eh_label:
if (x & abort_transaction)
goto over;
local = local + 1;
t0 = __builtin___tm_load (global);
t1 = t0 + 1;
__builtin___tm_store (&global, t1);
if (t1 == 10)
__builtin___tm_abort ();
__builtin___tm_commit ();
over:
*/
static void *expand_regions (struct tm_region *,
void *(*callback)(struct tm_region *, void *),
void *, bool);
/* Return the attributes we want to examine for X, or NULL if it's not
something we examine. We look at function types, but also accept
function decls and pointers to function types, peeking through to
the underlying function type. */
static tree
get_attrs_for (const_tree x)
{
if (x == NULL_TREE)
return NULL_TREE;
switch (TREE_CODE (x))
{
case FUNCTION_DECL:
return TYPE_ATTRIBUTES (TREE_TYPE (x));
default:
if (TYPE_P (x))
return NULL_TREE;
x = TREE_TYPE (x);
if (TREE_CODE (x) != POINTER_TYPE)
return NULL_TREE;
/* FALLTHRU */
case POINTER_TYPE:
x = TREE_TYPE (x);
if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
return NULL_TREE;
/* FALLTHRU */
case FUNCTION_TYPE:
case METHOD_TYPE:
return TYPE_ATTRIBUTES (x);
}
}
/* Return true if X has been marked TM_PURE. */
bool
is_tm_pure (const_tree x)
{
unsigned flags;
switch (TREE_CODE (x))
{
case FUNCTION_DECL:
case FUNCTION_TYPE:
case METHOD_TYPE:
break;
default:
if (TYPE_P (x))
return false;
x = TREE_TYPE (x);
if (TREE_CODE (x) != POINTER_TYPE)
return false;
/* FALLTHRU */
case POINTER_TYPE:
x = TREE_TYPE (x);
if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
return false;
break;
}
flags = flags_from_decl_or_type (x);
return (flags & ECF_TM_PURE) != 0;
}
/* Return true if X has been marked TM_IRREVOCABLE. */
static bool
is_tm_irrevocable (tree x)
{
tree attrs = get_attrs_for (x);
if (attrs && lookup_attribute ("transaction_unsafe", attrs))
return true;
/* A call to the irrevocable builtin is, by definition,
irrevocable. */
if (TREE_CODE (x) == ADDR_EXPR)
x = TREE_OPERAND (x, 0);
if (TREE_CODE (x) == FUNCTION_DECL
&& fndecl_built_in_p (x, BUILT_IN_TM_IRREVOCABLE))
return true;
return false;
}
/* Return true if X has been marked TM_SAFE. */
bool
is_tm_safe (const_tree x)
{
if (flag_tm)
{
tree attrs = get_attrs_for (x);
if (attrs)
{
if (lookup_attribute ("transaction_safe", attrs))
return true;
if (lookup_attribute ("transaction_may_cancel_outer", attrs))
return true;
}
}
return false;
}
/* Return true if CALL is const, or tm_pure. */
static bool
is_tm_pure_call (gimple *call)
{
return (gimple_call_flags (call) & (ECF_CONST | ECF_TM_PURE)) != 0;
}
/* Return true if X has been marked TM_CALLABLE. */
static bool
is_tm_callable (tree x)
{
tree attrs = get_attrs_for (x);
if (attrs)
{
if (lookup_attribute ("transaction_callable", attrs))
return true;
if (lookup_attribute ("transaction_safe", attrs))
return true;
if (lookup_attribute ("transaction_may_cancel_outer", attrs))
return true;
}
return false;
}
/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER. */
bool
is_tm_may_cancel_outer (tree x)
{
tree attrs = get_attrs_for (x);
if (attrs)
return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
return false;
}
/* Return true for built-in functions that "end" a transaction. */
bool
is_tm_ending_fndecl (tree fndecl)
{
if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_TM_COMMIT:
case BUILT_IN_TM_COMMIT_EH:
case BUILT_IN_TM_ABORT:
case BUILT_IN_TM_IRREVOCABLE:
return true;
default:
break;
}
return false;
}
/* Return true if STMT is a built-in function call that "ends" a
transaction. */
bool
is_tm_ending (gimple *stmt)
{
tree fndecl;
if (gimple_code (stmt) != GIMPLE_CALL)
return false;
fndecl = gimple_call_fndecl (stmt);
return (fndecl != NULL_TREE
&& is_tm_ending_fndecl (fndecl));
}
/* Return true if STMT is a TM load. */
static bool
is_tm_load (gimple *stmt)
{
tree fndecl;
if (gimple_code (stmt) != GIMPLE_CALL)
return false;
fndecl = gimple_call_fndecl (stmt);
return (fndecl
&& fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
&& BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}
/* Same as above, but for simple TM loads, that is, not the
after-write, after-read, etc. optimized variants. */
static bool
is_tm_simple_load (gimple *stmt)
{
tree fndecl;
if (gimple_code (stmt) != GIMPLE_CALL)
return false;
fndecl = gimple_call_fndecl (stmt);
if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
{
enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
return (fcode == BUILT_IN_TM_LOAD_1
|| fcode == BUILT_IN_TM_LOAD_2
|| fcode == BUILT_IN_TM_LOAD_4
|| fcode == BUILT_IN_TM_LOAD_8
|| fcode == BUILT_IN_TM_LOAD_FLOAT
|| fcode == BUILT_IN_TM_LOAD_DOUBLE
|| fcode == BUILT_IN_TM_LOAD_LDOUBLE
|| fcode == BUILT_IN_TM_LOAD_M64
|| fcode == BUILT_IN_TM_LOAD_M128
|| fcode == BUILT_IN_TM_LOAD_M256);
}
return false;
}
/* Return true if STMT is a TM store. */
static bool
is_tm_store (gimple *stmt)
{
tree fndecl;
if (gimple_code (stmt) != GIMPLE_CALL)
return false;
fndecl = gimple_call_fndecl (stmt);
return (fndecl
&& fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
&& BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}
/* Same as above, but for simple TM stores, that is, not the
after-write, after-read, etc. optimized variants. */
static bool
is_tm_simple_store (gimple *stmt)
{
tree fndecl;
if (gimple_code (stmt) != GIMPLE_CALL)
return false;
fndecl = gimple_call_fndecl (stmt);
if (fndecl
&& fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
{
enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
return (fcode == BUILT_IN_TM_STORE_1
|| fcode == BUILT_IN_TM_STORE_2
|| fcode == BUILT_IN_TM_STORE_4
|| fcode == BUILT_IN_TM_STORE_8
|| fcode == BUILT_IN_TM_STORE_FLOAT
|| fcode == BUILT_IN_TM_STORE_DOUBLE
|| fcode == BUILT_IN_TM_STORE_LDOUBLE
|| fcode == BUILT_IN_TM_STORE_M64
|| fcode == BUILT_IN_TM_STORE_M128
|| fcode == BUILT_IN_TM_STORE_M256);
}
return false;
}
/* Return true if FNDECL is BUILT_IN_TM_ABORT. */
static bool
is_tm_abort (tree fndecl)
{
return (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_TM_ABORT));
}
/* Build a GENERIC tree for a user abort. This is called by front ends
while transforming the __tm_abort statement. */
tree
build_tm_abort_call (location_t loc, bool is_outer)
{
return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
build_int_cst (integer_type_node,
AR_USERABORT
| (is_outer ? AR_OUTERABORT : 0)));
}
/* Map for arbitrary function replacement under TM, as created
by the tm_wrap attribute. */
struct tm_wrapper_hasher : ggc_cache_ptr_hash<tree_map>
{
static inline hashval_t hash (tree_map *m) { return m->hash; }
static inline bool
equal (tree_map *a, tree_map *b)
{
return a->base.from == b->base.from;
}
static int
keep_cache_entry (tree_map *&m)
{
return ggc_marked_p (m->base.from);
}
};
static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map;
void
record_tm_replacement (tree from, tree to)
{
struct tree_map **slot, *h;
/* Do not inline wrapper functions that will get replaced in the TM
pass.
Suppose you have foo() that will get replaced by tmfoo(). Make
sure the inliner doesn't try to outsmart us and inline foo()
before we get a chance to do the TM replacement. */
DECL_UNINLINABLE (from) = 1;
if (tm_wrap_map == NULL)
tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32);
h = ggc_alloc<tree_map> ();
h->hash = htab_hash_pointer (from);
h->base.from = from;
h->to = to;
slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT);
*slot = h;
}
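/* An illustrative sketch of how this map gets populated (the
user-facing spelling of tm_wrap is the transaction_wrap attribute):
void foo (void);
void tmfoo (void) __attribute__((transaction_wrap (foo)));
The front end records the pair with record_tm_replacement (foo,
tmfoo), and calls to foo() inside a transaction are later redirected
to tmfoo() via find_tm_replacement_function below. */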
/* Return a TM-aware replacement function for DECL. */
static tree
find_tm_replacement_function (tree fndecl)
{
if (tm_wrap_map)
{
struct tree_map *h, in;
in.base.from = fndecl;
in.hash = htab_hash_pointer (fndecl);
h = tm_wrap_map->find_with_hash (&in, in.hash);
if (h)
return h->to;
}
/* ??? We may well want TM versions of most of the common <string.h>
functions. For now, only the three below are defined. */
/* Adjust expand_call_tm() attributes as necessary for the cases
handled here: */
if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_MEMCPY:
return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
case BUILT_IN_MEMMOVE:
return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
case BUILT_IN_MEMSET:
return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
default:
return NULL;
}
return NULL;
}
/* When appropriate, record TM replacement for memory allocation functions.
FROM is the FNDECL to wrap. */
void
tm_malloc_replacement (tree from)
{
const char *str;
tree to;
if (TREE_CODE (from) != FUNCTION_DECL)
return;
/* If we have a previous replacement, the user must be explicitly
wrapping malloc/calloc/free. They better know what they're
doing... */
if (find_tm_replacement_function (from))
return;
str = IDENTIFIER_POINTER (DECL_NAME (from));
if (!strcmp (str, "malloc"))
to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
else if (!strcmp (str, "calloc"))
to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
else if (!strcmp (str, "free"))
to = builtin_decl_explicit (BUILT_IN_TM_FREE);
else
return;
TREE_NOTHROW (to) = 0;
record_tm_replacement (from, to);
}
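/* For example, given an ordinary declaration of malloc, a call to it
inside a transaction is redirected to BUILT_IN_TM_MALLOC
(_ITM_malloc in libitm), whose allocation the runtime can release
if the transaction aborts. TREE_NOTHROW is cleared on the
replacement above because the transactional variants are not known
to be nothrow. */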
/* Diagnostics for tm_safe functions/regions. Called by the front end
once we've lowered the function to high-gimple. */
/* Subroutine of diagnose_tm_blocks, called through walk_gimple_seq.
Process exactly one statement. WI->INFO is set to non-null when in
the context of a tm_safe function, and null for a __transaction block. */
#define DIAG_TM_OUTER 1
#define DIAG_TM_SAFE 2
#define DIAG_TM_RELAXED 4
struct diagnose_tm
{
unsigned int summary_flags : 8;
unsigned int block_flags : 8;
unsigned int func_flags : 8;
unsigned int saw_volatile : 1;
gimple *stmt;
};
/* Return true if T is a volatile lvalue of some kind. */
static bool
volatile_lvalue_p (tree t)
{
return ((SSA_VAR_P (t) || REFERENCE_CLASS_P (t))
&& TREE_THIS_VOLATILE (TREE_TYPE (t)));
}
/* Tree callback function for diagnose_tm pass. */
static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees, void *data)
{
struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
if (TYPE_P (*tp))
*walk_subtrees = false;
else if (volatile_lvalue_p (*tp)
&& !d->saw_volatile)
{
d->saw_volatile = 1;
if (d->block_flags & DIAG_TM_SAFE)
error_at (gimple_location (d->stmt),
"invalid use of volatile lvalue inside transaction");
else if (d->func_flags & DIAG_TM_SAFE)
error_at (gimple_location (d->stmt),
"invalid use of volatile lvalue inside %<transaction_safe%> "
"function");
}
return NULL_TREE;
}
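/* As a sketch of what the check above rejects, compiling with
-fgnu-tm:
volatile int v;
__transaction_atomic { v = 1; }
yields "invalid use of volatile lvalue inside transaction"; the same
access in the body of a transaction_safe function yields the second
diagnostic instead. */
/* Return true if X is marked transaction_safe or tm_pure. */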
static inline bool
is_tm_safe_or_pure (const_tree x)
{
return is_tm_safe (x) || is_tm_pure (x);
}
static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
struct walk_stmt_info *wi)
{
gimple *stmt = gsi_stmt (*gsi);
struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
/* Save stmt for use in leaf analysis. */
d->stmt = stmt;
switch (gimple_code (stmt))
{
case GIMPLE_CALL:
{
tree fn = gimple_call_fn (stmt);
if ((d->summary_flags & DIAG_TM_OUTER) == 0
&& is_tm_may_cancel_outer (fn))
error_at (gimple_location (stmt),
"%<transaction_may_cancel_outer%> function call not within"
" outer transaction or %<transaction_may_cancel_outer%>");
if (d->summary_flags & DIAG_TM_SAFE)
{
bool is_safe, direct_call_p;
tree replacement;
if (TREE_CODE (fn) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
{
direct_call_p = true;
replacement = TREE_OPERAND (fn, 0);
replacement = find_tm_replacement_function (replacement);
if (replacement)
fn = replacement;
}
else
{
direct_call_p = false;
replacement = NULL_TREE;
}
if (is_tm_safe_or_pure (fn))
is_safe = true;
else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
{
/* A function explicitly marked transaction_callable as
opposed to transaction_safe is being defined to be
unsafe as part of its ABI, regardless of its contents. */
is_safe = false;
}
else if (direct_call_p)
{
if (IS_TYPE_OR_DECL_P (fn)
&& flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
is_safe = true;
else if (replacement)
{
/* ??? At present we've been considering replacements
merely transaction_callable, and therefore might
enter irrevocable. The tm_wrap attribute has not
yet made it into the new language spec. */
is_safe = false;
}
else
{
/* ??? Diagnostics for unmarked direct calls moved into
the IPA pass. Section 3.2 of the spec details how
functions not marked should be considered "implicitly
safe" based on having examined the function body. */
is_safe = true;
}
}
else
{
/* An unmarked indirect call. Consider it unsafe even
though optimization may yet figure out how to inline. */
is_safe = false;
}
if (!is_safe)
{
if (TREE_CODE (fn) == ADDR_EXPR)
fn = TREE_OPERAND (fn, 0);
if (d->block_flags & DIAG_TM_SAFE)
{
if (direct_call_p)
error_at (gimple_location (stmt),
"unsafe function call %qD within "
"atomic transaction", fn);
else
{
if ((!DECL_P (fn) || DECL_NAME (fn))
&& TREE_CODE (fn) != SSA_NAME)
error_at (gimple_location (stmt),
"unsafe function call %qE within "
"atomic transaction", fn);
else
error_at (gimple_location (stmt),
"unsafe indirect function call within "
"atomic transaction");
}
}
else
{
if (direct_call_p)
error_at (gimple_location (stmt),
"unsafe function call %qD within "
"%<transaction_safe%> function", fn);
else
{
if ((!DECL_P (fn) || DECL_NAME (fn))
&& TREE_CODE (fn) != SSA_NAME)
error_at (gimple_location (stmt),
"unsafe function call %qE within "
"%<transaction_safe%> function", fn);
else
error_at (gimple_location (stmt),
"unsafe indirect function call within "
"%<transaction_safe%> function");
}
}
}
}
}
break;
case GIMPLE_ASM:
/* ??? We ought to come up with a way to add attributes to
asm statements, and then add "transaction_safe" to it.
Either that or get the language spec to resurrect __tm_waiver. */
if (d->block_flags & DIAG_TM_SAFE)
error_at (gimple_location (stmt),
"%<asm%> not allowed in atomic transaction");
else if (d->func_flags & DIAG_TM_SAFE)
error_at (gimple_location (stmt),
"%<asm%> not allowed in %<transaction_safe%> function");
break;
case GIMPLE_TRANSACTION:
{
gtransaction *trans_stmt = as_a <gtransaction *> (stmt);
unsigned char inner_flags = DIAG_TM_SAFE;
if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_RELAXED)
{
if (d->block_flags & DIAG_TM_SAFE)
error_at (gimple_location (stmt),
"relaxed transaction in atomic transaction");
else if (d->func_flags & DIAG_TM_SAFE)
error_at (gimple_location (stmt),
"relaxed transaction in %<transaction_safe%> function");
inner_flags = DIAG_TM_RELAXED;
}
else if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_OUTER)
{
if (d->block_flags)
error_at (gimple_location (stmt),
"outer transaction in transaction");
else if (d->func_flags & DIAG_TM_OUTER)
error_at (gimple_location (stmt),
"outer transaction in "
"%<transaction_may_cancel_outer%> function");
else if (d->func_flags & DIAG_TM_SAFE)
error_at (gimple_location (stmt),
"outer transaction in %<transaction_safe%> function");
inner_flags |= DIAG_TM_OUTER;
}
*handled_ops_p = true;
if (gimple_transaction_body (trans_stmt))
{
struct walk_stmt_info wi_inner;
struct diagnose_tm d_inner;
memset (&d_inner, 0, sizeof (d_inner));
d_inner.func_flags = d->func_flags;
d_inner.block_flags = d->block_flags | inner_flags;
d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;
memset (&wi_inner, 0, sizeof (wi_inner));
wi_inner.info = &d_inner;
walk_gimple_seq (gimple_transaction_body (trans_stmt),
diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
}
}
break;
default:
break;
}
return NULL_TREE;
}
static unsigned int
diagnose_tm_blocks (void)
{
struct walk_stmt_info wi;
struct diagnose_tm d;
memset (&d, 0, sizeof (d));
if (is_tm_may_cancel_outer (current_function_decl))
d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
else if (is_tm_safe (current_function_decl))
d.func_flags = DIAG_TM_SAFE;
d.summary_flags = d.func_flags;
memset (&wi, 0, sizeof (wi));
wi.info = &d;
walk_gimple_seq (gimple_body (current_function_decl),
diagnose_tm_1, diagnose_tm_1_op, &wi);
return 0;
}
namespace {
const pass_data pass_data_diagnose_tm_blocks =
{
GIMPLE_PASS, /* type */
"*diagnose_tm_blocks", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_TRANS_MEM, /* tv_id */
PROP_gimple_any, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_diagnose_tm_blocks : public gimple_opt_pass
{
public:
pass_diagnose_tm_blocks (gcc::context *ctxt)
: gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
{}
/* opt_pass methods: */
virtual bool gate (function *) { return flag_tm; }
virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }
}; // class pass_diagnose_tm_blocks
} // anon namespace
gimple_opt_pass *
make_pass_diagnose_tm_blocks (gcc::context *ctxt)
{
return new pass_diagnose_tm_blocks (ctxt);
}
/* Instead of instrumenting thread private memory, we save the
addresses in a log which we later use to save/restore the values
at those addresses upon transaction start/restart.
The log is keyed by address; each element collects the individual
statements, possibly on different code paths, that perform the
store.
This log is later used to generate either plain save/restore of the
values upon transaction start/restart, or calls to the ITM_L*
logging functions.
So for something like:
struct large { int x[1000]; };
struct large lala = { 0 };
__transaction {
lala.x[i] = 123;
...
}
We can either save/restore:
lala = { 0 };
trxn = _ITM_startTransaction ();
if (trxn & a_saveLiveVariables)
tmp_lala1 = lala.x[i];
else if (trxn & a_restoreLiveVariables)
lala.x[i] = tmp_lala1;
or use the logging functions:
lala = { 0 };
trxn = _ITM_startTransaction ();
_ITM_LU4 (&lala.x[i]);
Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
far up the dominator tree as possible, so that one call shadows all
of the writes to a given location (thus reducing the total number
of logging calls), but not so high that it would be called on a
path that does not perform a write. */
/* One individual log entry. We may have multiple statements for the
same location if neither dominates the other (i.e., they are on
different execution paths). */
struct tm_log_entry
{
/* Address to save. */
tree addr;
/* Entry block for the transaction this address occurs in. */
basic_block entry_block;
/* Dominating statements the store occurs in. */
vec<gimple *> stmts;
/* Initially, while we are building the log, we place a nonzero
value here to mean that this address *will* be saved with a
save/restore sequence. Later, when generating the save sequence,
we replace it with the SSA temp generated for the save. */
};
/* Log entry hashtable helpers. */
struct log_entry_hasher : pointer_hash <tm_log_entry>
{
static inline hashval_t hash (const tm_log_entry *);
static inline bool equal (const tm_log_entry *, const tm_log_entry *);
static inline void remove (tm_log_entry *);
};
/* Htab support. Return hash value for a `tm_log_entry'. */
inline hashval_t
log_entry_hasher::hash (const tm_log_entry *log)
{
return iterative_hash_expr (log->addr, 0);
}
/* Htab support. Return true if two log entries are the same. */
inline bool
log_entry_hasher::equal (const tm_log_entry *log1, const tm_log_entry *log2)
{
/* FIXME:
rth: I suggest that we get rid of the component refs etc.
I.e. resolve the reference to base + offset.
We may need to actually finish a merge with mainline for this,
since we'd like to be presented with Richi's MEM_REF_EXPRs more
often than not. But in the meantime your tm_log_entry could save
the results of get_inner_reference.
See: g++.dg/tm/pr46653.C
*/
/* Special case plain equality because operand_equal_p() below will
return FALSE if the addresses are equal but they have
side-effects (e.g. a volatile address). */
if (log1->addr == log2->addr)
return true;
return operand_equal_p (log1->addr, log2->addr, 0);
}
/* Htab support. Free one tm_log_entry. */
inline void
log_entry_hasher::remove (tm_log_entry *lp)
{
lp->stmts.release ();
free (lp);
}
/* The actual log. */
static hash_table<log_entry_hasher> *tm_log;
/* Addresses to log with a save/restore sequence. These should be in
dominator order. */
static vec<tree> tm_log_save_addresses;
enum thread_memory_type
{
mem_non_local = 0,
mem_thread_local,
mem_transaction_local,
mem_max
};
struct tm_new_mem_map
{
/* SSA_NAME being dereferenced. */
tree val;
enum thread_memory_type local_new_memory;
};
/* Hashtable helpers. */
struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map>
{
static inline hashval_t hash (const tm_new_mem_map *);
static inline bool equal (const tm_new_mem_map *, const tm_new_mem_map *);
};
inline hashval_t
tm_mem_map_hasher::hash (const tm_new_mem_map *v)
{
return (intptr_t)v->val >> 4;
}
inline bool
tm_mem_map_hasher::equal (const tm_new_mem_map *v, const tm_new_mem_map *c)
{
return v->val == c->val;
}
/* Map for an SSA_NAME originally pointing to a non-aliased new piece
of memory (malloc, alloca, etc.). */
static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;
/* Initialize logging data structures. */
static void
tm_log_init (void)
{
tm_log = new hash_table<log_entry_hasher> (10);
tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
tm_log_save_addresses.create (5);
}
/* Free logging data structures. */
static void
tm_log_delete (void)
{
delete tm_log;
tm_log = NULL;
delete tm_new_mem_hash;
tm_new_mem_hash = NULL;
tm_log_save_addresses.release ();
}
/* Return true if MEM is a transaction-invariant address for the TM
region starting at REGION_ENTRY_BLOCK. */
static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
&& TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
{
basic_block def_bb;
def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
return def_bb != region_entry_block
&& dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
}
mem = strip_invariant_refs (mem);
return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}
/* Given an address ADDR in STMT, find it in the memory log or add it,
making sure to keep only the addresses highest in the dominator
tree.
ENTRY_BLOCK is the entry block for the transaction, or NULL if
unknown.
If we find the address in the log, make sure it's either the same
address, or an equivalent one that dominates ADDR.
If we find the address, but neither ADDR dominates the found
address nor the found one dominates ADDR, then the stores are on
different execution paths; add the new statement. */
static void
tm_log_add (basic_block entry_block, tree addr, gimple *stmt)
{
tm_log_entry **slot;
struct tm_log_entry l, *lp;
l.addr = addr;
slot = tm_log->find_slot (&l, INSERT);
if (!*slot)
{
tree type = TREE_TYPE (addr);
lp = XNEW (struct tm_log_entry);
lp->addr = addr;
*slot = lp;
/* Small invariant addresses can be handled as save/restores. */
if (entry_block
&& transaction_invariant_address_p (lp->addr, entry_block)
&& TYPE_SIZE_UNIT (type) != NULL
&& tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
&& ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
< param_tm_max_aggregate_size)
/* We must be able to copy this type normally. I.e., no
special constructors and the like. */
&& !TREE_ADDRESSABLE (type))
{
lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
lp->stmts.create (0);
lp->entry_block = entry_block;
/* Save addresses separately in dominator order so we don't
get confused by overlapping addresses in the save/restore
sequence. */
tm_log_save_addresses.safe_push (lp->addr);
}
else
{
/* Use the logging functions. */
lp->stmts.create (5);
lp->stmts.quick_push (stmt);
lp->save_var = NULL;
}
}
else
{
size_t i;
gimple *oldstmt;
lp = *slot;
/* If we're generating a save/restore sequence, we don't care
about statements. */
if (lp->save_var)
return;
for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
{
if (stmt == oldstmt)
return;
/* We already have a store to the same address, higher up the
dominator tree. Nothing to do. */
if (dominated_by_p (CDI_DOMINATORS,
gimple_bb (stmt), gimple_bb (oldstmt)))
return;
/* We should be processing blocks in dominator tree order. */
gcc_assert (!dominated_by_p (CDI_DOMINATORS,
gimple_bb (oldstmt), gimple_bb (stmt)));
}
/* Store is on a different code path. */
lp->stmts.safe_push (stmt);
}
}
/* Gimplify the address of X, which may be a TARGET_MEM_REF. Return
the SSA_NAME result, inserting any new statements before GSI. */
static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
if (TREE_CODE (x) == TARGET_MEM_REF)
x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
else
x = build_fold_addr_expr (x);
return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}
/* Instrument one address with the logging functions.
ADDR is the address to save.
STMT is the statement before which to place it. */
static void
tm_log_emit_stmt (tree addr, gimple *stmt)
{
tree type = TREE_TYPE (addr);
gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
gimple *log;
enum built_in_function code = BUILT_IN_TM_LOG;
if (type == float_type_node)
code = BUILT_IN_TM_LOG_FLOAT;
else if (type == double_type_node)
code = BUILT_IN_TM_LOG_DOUBLE;
else if (type == long_double_type_node)
code = BUILT_IN_TM_LOG_LDOUBLE;
else if (TYPE_SIZE (type) != NULL
&& tree_fits_uhwi_p (TYPE_SIZE (type)))
{
unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
if (TREE_CODE (type) == VECTOR_TYPE)
{
switch (type_size)
{
case 64:
code = BUILT_IN_TM_LOG_M64;
break;
case 128:
code = BUILT_IN_TM_LOG_M128;
break;
case 256:
code = BUILT_IN_TM_LOG_M256;
break;
default:
goto unhandled_vec;
}
if (!builtin_decl_explicit_p (code))
goto unhandled_vec;
}
else
{
unhandled_vec:
switch (type_size)
{
case 8:
code = BUILT_IN_TM_LOG_1;
break;
case 16:
code = BUILT_IN_TM_LOG_2;
break;
case 32:
code = BUILT_IN_TM_LOG_4;
break;
case 64:
code = BUILT_IN_TM_LOG_8;
break;
}
}
}
if (code != BUILT_IN_TM_LOG && !builtin_decl_explicit_p (code))
code = BUILT_IN_TM_LOG;
tree decl = builtin_decl_explicit (code);
addr = gimplify_addr (&gsi, addr);
if (code == BUILT_IN_TM_LOG)
log = gimple_build_call (decl, 2, addr, TYPE_SIZE_UNIT (type));
else
log = gimple_build_call (decl, 1, addr);
gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}
/* Go through the log and instrument the addresses that must be
instrumented with the logging functions. Leave the save/restore
addresses for later. */
static void
tm_log_emit (void)
{
hash_table<log_entry_hasher>::iterator hi;
struct tm_log_entry *lp;
FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
{
size_t i;
gimple *stmt;
if (dump_file)
{
fprintf (dump_file, "TM thread private mem logging: ");
print_generic_expr (dump_file, lp->addr);
fprintf (dump_file, "\n");
}
if (lp->save_var)
{
if (dump_file)
fprintf (dump_file, "DUMPING to variable\n");
continue;
}
else
{
if (dump_file)
fprintf (dump_file, "DUMPING with logging functions\n");
for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
tm_log_emit_stmt (lp->addr, stmt);
}
}
}
/* Emit the save sequence for the corresponding addresses in the log.
ENTRY_BLOCK is the entry block for the transaction.
BB is the basic block to insert the code in. */
static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
size_t i;
gimple_stmt_iterator gsi = gsi_last_bb (bb);
gimple *stmt;
struct tm_log_entry l, *lp;
for (i = 0; i < tm_log_save_addresses.length (); ++i)
{
l.addr = tm_log_save_addresses[i];
lp = *(tm_log->find_slot (&l, NO_INSERT));
gcc_assert (lp->save_var != NULL);
/* We only care about variables in the current transaction. */
if (lp->entry_block != entry_block)
continue;
stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));
/* Make sure we can create an SSA_NAME for this type. For
instance, aggregates aren't allowed, in which case the system
will create a VOP for us and everything will just work. */
if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
{
lp->save_var = make_ssa_name (lp->save_var, stmt);
gimple_assign_set_lhs (stmt, lp->save_var);
}
gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
}
}
/* Emit the restore sequence for the corresponding addresses in the log.
ENTRY_BLOCK is the entry block for the transaction.
BB is the basic block to insert the code in. */
static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
int i;
struct tm_log_entry l, *lp;
gimple_stmt_iterator gsi;
gimple *stmt;
for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
{
l.addr = tm_log_save_addresses[i];
lp = *(tm_log->find_slot (&l, NO_INSERT));
gcc_assert (lp->save_var != NULL);
/* We only care about variables in the current transaction. */
if (lp->entry_block != entry_block)
continue;
/* Restores are in LIFO order from the saves in case we have
overlaps. */
gsi = gsi_start_bb (bb);
stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
}
}
static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
struct walk_stmt_info *);
/* Evaluate an address X being dereferenced and determine if it
originally points to a non-aliased new chunk of memory (malloc,
alloca, etc.).
Return MEM_THREAD_LOCAL if it points to a thread-local address.
Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
Return MEM_NON_LOCAL otherwise.
ENTRY_BLOCK is the entry block to the transaction containing the
dereference of X. */
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
gimple *stmt = NULL;
enum tree_code code;
tm_new_mem_map **slot;
tm_new_mem_map elt, *elt_p;
tree val = x;
enum thread_memory_type retval = mem_transaction_local;
if (!entry_block
|| TREE_CODE (x) != SSA_NAME
/* Possible uninitialized use, or a function argument. In
either case, we don't care. */
|| SSA_NAME_IS_DEFAULT_DEF (x))
return mem_non_local;
/* Look in cache first. */
elt.val = x;
slot = tm_new_mem_hash->find_slot (&elt, INSERT);
elt_p = *slot;
if (elt_p)
return elt_p->local_new_memory;
/* Optimistically assume the memory is transaction local during
processing. This catches recursion into this variable. */
*slot = elt_p = XNEW (tm_new_mem_map);
elt_p->val = val;
elt_p->local_new_memory = mem_transaction_local;
/* Search DEF chain to find the original definition of this address. */
do
{
if (ptr_deref_may_alias_global_p (x, true))
{
/* Address escapes. This is not thread-private. */
retval = mem_non_local;
goto new_memory_ret;
}
stmt = SSA_NAME_DEF_STMT (x);
/* If the malloc call is outside the transaction, this is
thread-local. */
if (retval != mem_thread_local
&& !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
retval = mem_thread_local;
if (is_gimple_assign (stmt))
{
code = gimple_assign_rhs_code (stmt);
/* x = foo ==> foo */
if (code == SSA_NAME)
x = gimple_assign_rhs1 (stmt);
/* x = foo + n ==> foo */
else if (code == POINTER_PLUS_EXPR)
x = gimple_assign_rhs1 (stmt);
/* x = (cast*) foo ==> foo */
else if (code == VIEW_CONVERT_EXPR || CONVERT_EXPR_CODE_P (code))
x = gimple_assign_rhs1 (stmt);
/* x = c ? op1 : op2 ==> op1 or op2, just like a PHI */
else if (code == COND_EXPR)
{
tree op1 = gimple_assign_rhs2 (stmt);
tree op2 = gimple_assign_rhs3 (stmt);
enum thread_memory_type mem;
retval = thread_private_new_memory (entry_block, op1);
if (retval == mem_non_local)
goto new_memory_ret;
mem = thread_private_new_memory (entry_block, op2);
retval = MIN (retval, mem);
goto new_memory_ret;
}
else
{
retval = mem_non_local;
goto new_memory_ret;
}
}
else
{
if (gimple_code (stmt) == GIMPLE_PHI)
{
unsigned int i;
enum thread_memory_type mem;
tree phi_result = gimple_phi_result (stmt);
/* If any of the ancestors are non-local, we are sure to
be non-local. Otherwise we can avoid doing anything
and inherit what has already been generated. */
retval = mem_max;
for (i = 0; i < gimple_phi_num_args (stmt); ++i)
{
tree op = PHI_ARG_DEF (stmt, i);
/* Exclude self-assignment. */
if (phi_result == op)
continue;
mem = thread_private_new_memory (entry_block, op);
if (mem == mem_non_local)
{
retval = mem;
goto new_memory_ret;
}
retval = MIN (retval, mem);
}
goto new_memory_ret;
}
break;
}
}
while (TREE_CODE (x) == SSA_NAME);
if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
/* Thread-local or transaction-local. */
;
else
retval = mem_non_local;
new_memory_ret:
elt_p->local_new_memory = retval;
return retval;
}
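/* An illustrative sketch of the classification above:
p1 = malloc (sz); // allocated before the transaction
__transaction_relaxed {
p2 = malloc (sz); // mem_transaction_local
*p1 = 0; // mem_thread_local: allocated outside the transaction
*q = 0; // q may alias global memory: mem_non_local
}
assuming p1 and p2 do not otherwise escape. */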
/* Determine whether X has to be instrumented using a read
or write barrier.
ENTRY_BLOCK is the entry block for the region where STMT resides,
or NULL if unknown.
STMT is the statement in which X occurs. It is used for thread
private memory instrumentation. If no TPM instrumentation is
desired, STMT should be null. */
static bool
requires_barrier (basic_block entry_block, tree x, gimple *stmt)
{
tree orig = x;
while (handled_component_p (x))
x = TREE_OPERAND (x, 0);
switch (TREE_CODE (x))
{
case INDIRECT_REF:
case MEM_REF:
{
enum thread_memory_type ret;
ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
if (ret == mem_non_local)
return true;
if (stmt && ret == mem_thread_local)
/* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */
tm_log_add (entry_block, orig, stmt);
/* Transaction-locals require nothing at all. For malloc, a
transaction restart frees the memory and we reallocate.
For alloca, the stack pointer gets reset by the retry and
we reallocate. */
return false;
}
case TARGET_MEM_REF:
if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
return true;
x = TREE_OPERAND (TMR_BASE (x), 0);
if (TREE_CODE (x) == PARM_DECL)
return false;
gcc_assert (VAR_P (x));
/* FALLTHRU */
case PARM_DECL:
case RESULT_DECL:
case VAR_DECL:
if (DECL_BY_REFERENCE (x))
{
/* ??? This value is a pointer, but aggregate_value_p has been
jigged to return true which confuses needs_to_live_in_memory.
This ought to be cleaned up generically.
FIXME: Verify this still happens after the next mainline
merge. Testcase: g++.dg/tm/pr47554.C.
*/
return false;
}
if (is_global_var (x))
return !TREE_READONLY (x);
if (/* FIXME: This condition should actually go below in the
tm_log_add() call, however is_call_clobbered() depends on
aliasing info which is not available during
gimplification. Since requires_barrier() gets called
during lower_sequence_tm/gimplification, leave the call
to needs_to_live_in_memory until we eliminate
lower_sequence_tm altogether. */
needs_to_live_in_memory (x))
return true;
else
{
/* For local memory that doesn't escape (aka thread private
memory), we can either save the value at the beginning of
the transaction and restore on restart, or call a tm
function to dynamically save and restore on restart
(ITM_L*). */
if (stmt)
tm_log_add (entry_block, orig, stmt);
return false;
}
default:
return false;
}
}
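/* A sketch of the cases above for stores inside a transaction:
g = 1; // global variable: barrier required (unless read-only)
local = 2; // address-taken, non-escaping local: no barrier, but
// tm_log_add records it so the value can be restored
// across transaction restarts. */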
/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
a transaction region. */
static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
gimple *stmt = gsi_stmt (*gsi);
if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
*state |= GTMA_HAVE_LOAD;
if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
*state |= GTMA_HAVE_STORE;
}
/* Mark a GIMPLE_CALL as appropriate for being inside a transaction. */
static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
gimple *stmt = gsi_stmt (*gsi);
tree fn;
if (is_tm_pure_call (stmt))
return;
/* Check if this call is a transaction abort. */
fn = gimple_call_fndecl (stmt);
if (is_tm_abort (fn))
*state |= GTMA_HAVE_ABORT;
/* Note that the call may both load and store. */
*state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}
/* Iterate through the statements in the sequence, moving labels
(and thus edges) of transactions from "label_norm" to "label_uninst". */
static tree
make_tm_uninst (gimple_stmt_iterator *gsi, bool *handled_ops_p,
struct walk_stmt_info *)
{
gimple *stmt = gsi_stmt (*gsi);
if (gtransaction *txn = dyn_cast <gtransaction *> (stmt))
{
*handled_ops_p = true;
txn->label_uninst = txn->label_norm;
txn->label_norm = NULL;
}
else
*handled_ops_p = !gimple_has_substatements (stmt);
return NULL_TREE;
}
/* Lower a GIMPLE_TRANSACTION statement. */
static void
lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
{
gimple *g;
gtransaction *stmt = as_a <gtransaction *> (gsi_stmt (*gsi));
unsigned int *outer_state = (unsigned int *) wi->info;
unsigned int this_state = 0;
struct walk_stmt_info this_wi;
/* First, lower the body. The scanning that we do inside gives
us some idea of what we're dealing with. */
memset (&this_wi, 0, sizeof (this_wi));
this_wi.info = (void *) &this_state;
walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
lower_sequence_tm, NULL, &this_wi);
/* If there was absolutely nothing transaction-related inside the
transaction, we may elide it. Likewise if this is a nested
transaction and does not contain an abort. */
if (this_state == 0
|| (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
{
if (outer_state)
*outer_state |= this_state;
gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
GSI_SAME_STMT);
gimple_transaction_set_body (stmt, NULL);
gsi_remove (gsi, true);
wi->removed_stmt = true;
return;
}
/* Wrap the body of the transaction in a try-finally node so that
the commit call is always properly called. */
g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
if (flag_exceptions)
{
tree ptr;
gimple_seq n_seq, e_seq;
n_seq = gimple_seq_alloc_with_stmt (g);
e_seq = NULL;
g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
1, integer_zero_node);
ptr = create_tmp_var (ptr_type_node);
gimple_call_set_lhs (g, ptr);
gimple_seq_add_stmt (&e_seq, g);
g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
1, ptr);
gimple_seq_add_stmt (&e_seq, g);
g = gimple_build_eh_else (n_seq, e_seq);
}
g = gimple_build_try (gimple_transaction_body (stmt),
gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
/* For a (potentially) outer transaction, create two paths. */
gimple_seq uninst = NULL;
if (outer_state == NULL)
{
uninst = copy_gimple_seq_and_replace_locals (g);
/* In the uninstrumented copy, reset inner transactions to have only
an uninstrumented code path. */
memset (&this_wi, 0, sizeof (this_wi));
walk_gimple_seq (uninst, make_tm_uninst, NULL, &this_wi);
}
tree label1 = create_artificial_label (UNKNOWN_LOCATION);
gsi_insert_after (gsi, gimple_build_label (label1), GSI_CONTINUE_LINKING);
gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
gimple_transaction_set_label_norm (stmt, label1);
/* If the transaction calls abort or if this is an outer transaction,
add an "over" label afterwards. */
tree label3 = NULL;
if ((this_state & GTMA_HAVE_ABORT)
|| outer_state == NULL
|| (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
{
label3 = create_artificial_label (UNKNOWN_LOCATION);
gimple_transaction_set_label_over (stmt, label3);
}
if (uninst != NULL)
{
gsi_insert_after (gsi, gimple_build_goto (label3), GSI_CONTINUE_LINKING);
tree label2 = create_artificial_label (UNKNOWN_LOCATION);
gsi_insert_after (gsi, gimple_build_label (label2), GSI_CONTINUE_LINKING);
gsi_insert_seq_after (gsi, uninst, GSI_CONTINUE_LINKING);
gimple_transaction_set_label_uninst (stmt, label2);
}
if (label3 != NULL)
gsi_insert_after (gsi, gimple_build_label (label3), GSI_CONTINUE_LINKING);
gimple_transaction_set_body (stmt, NULL);
/* Record the set of operations found for use later. */
this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
gimple_transaction_set_subcode (stmt, this_state);
}
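/* After this lowering, the statement sequence for an outermost
transaction looks roughly like:
GIMPLE_TRANSACTION [label_norm=L1, label_uninst=L2, label_over=L3]
L1:
try {
<instrumented body>
} finally {
__builtin___tm_commit ();
}
goto L3;
L2:
try {
<uninstrumented copy of body>
} finally {
__builtin___tm_commit ();
}
L3:
where, with -fexceptions, the finally clause is an EH_ELSE that
calls __builtin___tm_commit_eh on the exception path. */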
/* Iterate through the statements in the sequence, lowering them all
as appropriate for being in a transaction. */
static tree
lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
struct walk_stmt_info *wi)
{
unsigned int *state = (unsigned int *) wi->info;
gimple *stmt = gsi_stmt (*gsi);
*handled_ops_p = true;
switch (gimple_code (stmt))
{
case GIMPLE_ASSIGN:
/* Only memory reads/writes need to be instrumented. */
if (gimple_assign_single_p (stmt))
examine_assign_tm (state, gsi);
break;
case GIMPLE_CALL:
examine_call_tm (state, gsi);
break;
case GIMPLE_ASM:
*state |= GTMA_MAY_ENTER_IRREVOCABLE;
break;
case GIMPLE_TRANSACTION:
lower_transaction (gsi, wi);
break;
default:
*handled_ops_p = !gimple_has_substatements (stmt);
break;
}
return NULL_TREE;
}
/* Iterate through the statements in the sequence, lowering them all
as appropriate for being outside of a transaction. */
static tree
lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
struct walk_stmt_info * wi)
{
gimple *stmt = gsi_stmt (*gsi);
if (gimple_code (stmt) == GIMPLE_TRANSACTION)
{
*handled_ops_p = true;
lower_transaction (gsi, wi);
}
else
*handled_ops_p = !gimple_has_substatements (stmt);
return NULL_TREE;
}
/* Main entry point for flattening GIMPLE_TRANSACTION constructs. After
this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
been moved out, and all the data required for constructing a proper
CFG has been recorded. */
static unsigned int
execute_lower_tm (void)
{
struct walk_stmt_info wi;
gimple_seq body;
/* Transactional clones aren't created until a later pass. */
gcc_assert (!decl_is_tm_clone (current_function_decl));
body = gimple_body (current_function_decl);
memset (&wi, 0, sizeof (wi));
walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
gimple_set_body (current_function_decl, body);
return 0;
}
namespace {
const pass_data pass_data_lower_tm =
{
GIMPLE_PASS, /* type */
"tmlower", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_TRANS_MEM, /* tv_id */
PROP_gimple_lcf, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_lower_tm : public gimple_opt_pass
{
public:
pass_lower_tm (gcc::context *ctxt)
: gimple_opt_pass (pass_data_lower_tm, ctxt)
{}
/* opt_pass methods: */
virtual bool gate (function *) { return flag_tm; }
virtual unsigned int execute (function *) { return execute_lower_tm (); }
}; // class pass_lower_tm
} // anon namespace
gimple_opt_pass *
make_pass_lower_tm (gcc::context *ctxt)
{
return new pass_lower_tm (ctxt);
}
/* Collect region information for each transaction. */
struct tm_region
{
public:
/* The field "transaction_stmt" is initially a gtransaction *,
but eventually gets lowered to a gcall * (to BUILT_IN_TM_START).
Helper method to get it as a gtransaction *, with code checking
in a checked build. */
gtransaction *
get_transaction_stmt () const
{
return as_a <gtransaction *> (transaction_stmt);
}
public:
/* Link to the next unnested transaction. */
struct tm_region *next;
/* Link to the next inner transaction. */
struct tm_region *inner;
/* Link to the next outer transaction. */
struct tm_region *outer;
/* The GIMPLE_TRANSACTION statement beginning this transaction.
After TM_MARK, this gets replaced by a call to
BUILT_IN_TM_START.
Hence this will be either a gtransaction * or a gcall *. */
gimple *transaction_stmt;
/* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
BUILT_IN_TM_START, this field is true if the transaction is an
outer transaction. */
bool original_transaction_was_outer;
/* Return value from BUILT_IN_TM_START. */
tree tm_state;
/* The entry block to this region. This will always be the first
block of the body of the transaction. */
basic_block entry_block;
/* The first block after an expanded call to _ITM_beginTransaction. */
basic_block restart_block;
/* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
These blocks are still a part of the region (i.e., the border is
inclusive). Note that this set is only complete for paths in the CFG
starting at ENTRY_BLOCK, and that there is no exit block recorded for
the edge to the "over" label. */
bitmap exit_blocks;
/* The set of all blocks that have a TM_IRREVOCABLE call. */
bitmap irr_blocks;
};
/* True if there are pending edge statements to be committed for the
current function being scanned in the tmmark pass. */
bool pending_edge_inserts_p;
static struct tm_region *all_tm_regions;
static bitmap_obstack tm_obstack;
/* A subroutine of tm_region_init. Record the existence of the
GIMPLE_TRANSACTION statement in a tree of tm_region elements. */
static struct tm_region *
tm_region_init_0 (struct tm_region *outer, basic_block bb,
gtransaction *stmt)
{
struct tm_region *region;
region = (struct tm_region *)
obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
if (outer)
{
region->next = outer->inner;
outer->inner = region;
}
else
{
region->next = all_tm_regions;
all_tm_regions = region;
}
region->inner = NULL;
region->outer = outer;
region->transaction_stmt = stmt;
region->original_transaction_was_outer = false;
region->tm_state = NULL;
/* There are either one or two edges out of the block containing
the GIMPLE_TRANSACTION, one to the actual region and one to the
"over" label if the region contains an abort. The former will
always be the one marked FALLTHRU. */
region->entry_block = FALLTHRU_EDGE (bb)->dest;
region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
return region;
}
/* A subroutine of tm_region_init. Record all the exit and
irrevocable blocks in BB into the region's exit_blocks and
irr_blocks bitmaps. Returns the new region being scanned. */
static struct tm_region *
tm_region_init_1 (struct tm_region *region, basic_block bb)
{
gimple_stmt_iterator gsi;
gimple *g;
if (!region
|| (!region->irr_blocks && !region->exit_blocks))
return region;
/* Check to see if this is the end of a region by seeing if it
contains a call to __builtin_tm_commit{,_eh}. Note that the
outermost region for DECL_IS_TM_CLONE need not collect this. */
for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
{
g = gsi_stmt (gsi);
if (gimple_code (g) == GIMPLE_CALL)
{
tree fn = gimple_call_fndecl (g);
if (fn && fndecl_built_in_p (fn, BUILT_IN_NORMAL))
{
if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
|| DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
&& region->exit_blocks)
{
bitmap_set_bit (region->exit_blocks, bb->index);
region = region->outer;
break;
}
if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
bitmap_set_bit (region->irr_blocks, bb->index);
}
}
}
return region;
}
/* Collect all of the transaction regions within the current function
and record them in ALL_TM_REGIONS. The REGION parameter may specify
an "outermost" region for use by tm clones. */
static void
tm_region_init (struct tm_region *region)
{
gimple *g;
edge_iterator ei;
edge e;
basic_block bb;
auto_vec<basic_block> queue;
bitmap visited_blocks = BITMAP_ALLOC (NULL);
struct tm_region *old_region;
auto_vec<tm_region *> bb_regions;
/* We could store this information in bb->aux, but we may get called
through get_all_tm_blocks() from another pass that may be already
using bb->aux. */
bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
all_tm_regions = region;
bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
queue.safe_push (bb);
bitmap_set_bit (visited_blocks, bb->index);
bb_regions[bb->index] = region;
do
{
bb = queue.pop ();
region = bb_regions[bb->index];
bb_regions[bb->index] = NULL;
/* Record exit and irrevocable blocks. */
region = tm_region_init_1 (region, bb);
/* Check for the last statement in the block beginning a new region. */
g = last_stmt (bb);
old_region = region;
if (g)
if (gtransaction *trans_stmt = dyn_cast <gtransaction *> (g))
region = tm_region_init_0 (region, bb, trans_stmt);
/* Process subsequent blocks. */
FOR_EACH_EDGE (e, ei, bb->succs)
if (!bitmap_bit_p (visited_blocks, e->dest->index))
{
bitmap_set_bit (visited_blocks, e->dest->index);
queue.safe_push (e->dest);
/* If the current block started a new region, make sure that only
the entry block of the new region is associated with this region.
Other successors are still part of the old region. */
if (old_region != region && e->dest != region->entry_block)
bb_regions[e->dest->index] = old_region;
else
bb_regions[e->dest->index] = region;
}
}
while (!queue.is_empty ());
BITMAP_FREE (visited_blocks);
}
/* The "gate" function for all transactional memory expansion and optimization
passes. We collect region information for each top-level transaction, and
if we don't find any, we skip all of the TM passes. Each region will have
all of the exit blocks recorded, and the originating statement. */
static bool
gate_tm_init (void)
{
if (!flag_tm)
return false;
calculate_dominance_info (CDI_DOMINATORS);
bitmap_obstack_initialize (&tm_obstack);
/* If the function is a TM_CLONE, then the entire function is the region. */
if (decl_is_tm_clone (current_function_decl))
{
struct tm_region *region = (struct tm_region *)
obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
memset (region, 0, sizeof (*region));
region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* For a clone, the entire function is the region. But even if
we don't need to record any exit blocks, we may need to
record irrevocable blocks. */
region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
tm_region_init (region);
}
else
{
tm_region_init (NULL);
/* If we didn't find any regions, cleanup and skip the whole tree
of tm-related optimizations. */
if (all_tm_regions == NULL)
{
bitmap_obstack_release (&tm_obstack);
return false;
}
}
return true;
}
namespace {
const pass_data pass_data_tm_init =
{
GIMPLE_PASS, /* type */
"*tminit", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_TRANS_MEM, /* tv_id */
( PROP_ssa | PROP_cfg ), /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_tm_init : public gimple_opt_pass
{
public:
pass_tm_init (gcc::context *ctxt)
: gimple_opt_pass (pass_data_tm_init, ctxt)
{}
/* opt_pass methods: */
virtual bool gate (function *) { return gate_tm_init (); }
}; // class pass_tm_init
} // anon namespace
gimple_opt_pass *
make_pass_tm_init (gcc::context *ctxt)
{
return new pass_tm_init (ctxt);
}
/* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction
region REGION. */
static inline void
transaction_subcode_ior (struct tm_region *region, unsigned flags)
{
if (region && region->transaction_stmt)
{
gtransaction *transaction_stmt = region->get_transaction_stmt ();
flags |= gimple_transaction_subcode (transaction_stmt);
gimple_transaction_set_subcode (transaction_stmt, flags);
}
}
/* Construct a memory load in a transactional context. Return the
gimple statement performing the load, or NULL if there is no
TM_LOAD builtin of the appropriate size to do the load.
LOC is the location to use for the new statement(s). */
static gcall *
build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
tree t, type = TREE_TYPE (rhs);
gcall *gcall;
built_in_function code;
if (type == float_type_node)
code = BUILT_IN_TM_LOAD_FLOAT;
else if (type == double_type_node)
code = BUILT_IN_TM_LOAD_DOUBLE;
else if (type == long_double_type_node)
code = BUILT_IN_TM_LOAD_LDOUBLE;
else
{
if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
return NULL;
unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
if (TREE_CODE (type) == VECTOR_TYPE)
{
switch (type_size)
{
case 64:
code = BUILT_IN_TM_LOAD_M64;
break;
case 128:
code = BUILT_IN_TM_LOAD_M128;
break;
case 256:
code = BUILT_IN_TM_LOAD_M256;
break;
default:
goto unhandled_vec;
}
if (!builtin_decl_explicit_p (code))
goto unhandled_vec;
}
else
{
unhandled_vec:
switch (type_size)
{
case 8:
code = BUILT_IN_TM_LOAD_1;
break;
case 16:
code = BUILT_IN_TM_LOAD_2;
break;
case 32:
code = BUILT_IN_TM_LOAD_4;
break;
case 64:
code = BUILT_IN_TM_LOAD_8;
break;
default:
return NULL;
}
}
}
tree decl = builtin_decl_explicit (code);
gcc_assert (decl);
t = gimplify_addr (gsi, rhs);
gcall = gimple_build_call (decl, 1, t);
gimple_set_location (gcall, loc);
t = TREE_TYPE (TREE_TYPE (decl));
if (useless_type_conversion_p (type, t))
{
gimple_call_set_lhs (gcall, lhs);
gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
}
else
{
gimple *g;
tree temp;
temp = create_tmp_reg (t);
gimple_call_set_lhs (gcall, temp);
gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
g = gimple_build_assign (lhs, t);
gsi_insert_before (gsi, g, GSI_SAME_STMT);
}
return gcall;
}
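/* For example, for "x = g" where g is a 32-bit global int, the
sequence built above is roughly:
t = BUILT_IN_TM_LOAD_4 (&g); // libitm entry point _ITM_RU4
x = t;
with the VIEW_CONVERT_EXPR path used when the builtin's return type
does not exactly match the type being loaded. */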
/* Similarly for storing TYPE in a transactional context. */
static gcall *
build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
tree t, fn, type = TREE_TYPE (rhs), simple_type;
gcall *gcall;
built_in_function code;
if (type == float_type_node)
code = BUILT_IN_TM_STORE_FLOAT;
else if (type == double_type_node)
code = BUILT_IN_TM_STORE_DOUBLE;
else if (type == long_double_type_node)
code = BUILT_IN_TM_STORE_LDOUBLE;
else
{
if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
return NULL;
unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
if (TREE_CODE (type) == VECTOR_TYPE)
{
switch (type_size)
{
case 64:
code = BUILT_IN_TM_STORE_M64;
break;
case 128:
code = BUILT_IN_TM_STORE_M128;
break;
case 256:
code = BUILT_IN_TM_STORE_M256;
break;
default:
goto unhandled_vec;
}
if (!builtin_decl_explicit_p (code))
goto unhandled_vec;
}
else
{
unhandled_vec:
switch (type_size)
{
case 8:
code = BUILT_IN_TM_STORE_1;
break;
case 16:
code = BUILT_IN_TM_STORE_2;
break;
case 32:
code = BUILT_IN_TM_STORE_4;
break;
case 64:
code = BUILT_IN_TM_STORE_8;
break;
default:
return NULL;
}
}
}
fn = builtin_decl_explicit (code);
gcc_assert (fn);
simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
if (TREE_CODE (rhs) == CONSTRUCTOR)
{
/* Handle the easy initialization to zero. */
if (!CONSTRUCTOR_ELTS (rhs))
rhs = build_int_cst (simple_type, 0);
else
{
/* ...otherwise punt to the caller and probably use
BUILT_IN_TM_MEMMOVE, because we can't wrap a
VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
valid gimple. */
return NULL;
}
}
else if (!useless_type_conversion_p (simple_type, type))
{
gimple *g;
tree temp;
temp = create_tmp_reg (simple_type);
t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
g = gimple_build_assign (temp, t);
gimple_set_location (g, loc);
gsi_insert_before (gsi, g, GSI_SAME_STMT);
rhs = temp;
}
t = gimplify_addr (gsi, lhs);
gcall = gimple_build_call (fn, 2, t, rhs);
gimple_set_location (gcall, loc);
gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
return gcall;
}
/* Expand an assignment statement into transactional builtins. */
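/* A hedged sketch of the fallback taken when no sized TM_LOAD/TM_STORE
   helper exists (e.g. a large structure copy with barriers required on
   both sides):

     big_lhs = big_rhs;

   becomes

     __builtin__ITM_memmoveRtWt (&big_lhs, &big_rhs, sizeof (big_lhs));  */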
static void
expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
{
gimple *stmt = gsi_stmt (*gsi);
location_t loc = gimple_location (stmt);
tree lhs = gimple_assign_lhs (stmt);
tree rhs = gimple_assign_rhs1 (stmt);
bool store_p = requires_barrier (region->entry_block, lhs, NULL);
bool load_p = requires_barrier (region->entry_block, rhs, NULL);
gimple *gcall = NULL;
if (!load_p && !store_p)
{
      /* Add thread private addresses to the log if applicable.  */
requires_barrier (region->entry_block, lhs, stmt);
gsi_next (gsi);
return;
}
if (load_p)
transaction_subcode_ior (region, GTMA_HAVE_LOAD);
if (store_p)
transaction_subcode_ior (region, GTMA_HAVE_STORE);
// Remove original load/store statement.
gsi_remove (gsi, true);
// Attempt to use a simple load/store helper function.
if (load_p && !store_p)
gcall = build_tm_load (loc, lhs, rhs, gsi);
else if (store_p && !load_p)
gcall = build_tm_store (loc, lhs, rhs, gsi);
// If gcall has not been set, then we do not have a simple helper
// function available for the type. This may be true of larger
// structures, vectors, and non-standard float types.
if (!gcall)
{
tree lhs_addr, rhs_addr, ltmp = NULL, copy_fn;
// If this is a type that we couldn't handle above, but it's
// in a register, we must spill it to memory for the copy.
if (is_gimple_reg (lhs))
{
ltmp = create_tmp_var (TREE_TYPE (lhs));
lhs_addr = build_fold_addr_expr (ltmp);
}
else
lhs_addr = gimplify_addr (gsi, lhs);
if (is_gimple_reg (rhs))
{
tree rtmp = create_tmp_var (TREE_TYPE (rhs));
TREE_ADDRESSABLE (rtmp) = 1;
rhs_addr = build_fold_addr_expr (rtmp);
gcall = gimple_build_assign (rtmp, rhs);
gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
}
else
rhs_addr = gimplify_addr (gsi, rhs);
// Choose the appropriate memory transfer function.
if (load_p && store_p)
{
// ??? Figure out if there's any possible overlap between
// the LHS and the RHS and if not, use MEMCPY.
copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
}
else if (load_p)
{
// Note that the store is non-transactional and cannot overlap.
copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RTWN);
}
else
{
// Note that the load is non-transactional and cannot overlap.
copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RNWT);
}
gcall = gimple_build_call (copy_fn, 3, lhs_addr, rhs_addr,
TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
gimple_set_location (gcall, loc);
gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
if (ltmp)
{
gcall = gimple_build_assign (lhs, ltmp);
gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
}
}
// Now that we have the load/store in its instrumented form, add
// thread private addresses to the log if applicable.
if (!store_p)
requires_barrier (region->entry_block, lhs, gcall);
}
/* Expand a call statement as appropriate for a transaction. That is,
either verify that the call does not affect the transaction, or
redirect the call to a clone that handles transactions, or change
the transaction state to IRREVOCABLE. Return true if the call is
one of the builtins that end a transaction. */
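/* A hedged sketch of the possible outcomes for a call inside a
   transaction (the clone name is hypothetical; the _ZGTt
   transactional-clone mangling prefix is shown only for illustration):

     foo (x);        // TM-pure or TM-safe: left alone
     _ZGTt3foo (x);  // redirected to a transactional clone when one exists
     foo (x);        // no clone: region marked GTMA_MAY_ENTER_IRREVOCABLE  */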
static bool
expand_call_tm (struct tm_region *region,
gimple_stmt_iterator *gsi)
{
gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
tree lhs = gimple_call_lhs (stmt);
tree fn_decl;
struct cgraph_node *node;
bool retval = false;
fn_decl = gimple_call_fndecl (stmt);
if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
|| fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
transaction_subcode_ior (region, GTMA_HAVE_STORE);
if (is_tm_pure_call (stmt))
return false;
if (fn_decl)
retval = is_tm_ending_fndecl (fn_decl);
if (!retval)
{
/* Assume all non-const/pure calls write to memory, except
transaction ending builtins. */
transaction_subcode_ior (region, GTMA_HAVE_STORE);
}
/* For indirect calls, we already generated a call into the runtime. */
if (!fn_decl)
{
tree fn = gimple_call_fn (stmt);
/* We are guaranteed never to go irrevocable on a safe or pure
call, and the pure call was handled above. */
if (is_tm_safe (fn))
return false;
else
transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
return false;
}
node = cgraph_node::get (fn_decl);
  /* All calls should have a cgraph node here.  */
if (!node)
{
/* We can have a nodeless call here if some pass after IPA-tm
added uninstrumented calls. For example, loop distribution
can transform certain loop constructs into __builtin_mem*
calls. In this case, see if we have a suitable TM
replacement and fill in the gaps. */
gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
gcc_assert (code == BUILT_IN_MEMCPY
|| code == BUILT_IN_MEMMOVE
|| code == BUILT_IN_MEMSET);
tree repl = find_tm_replacement_function (fn_decl);
if (repl)
{
gimple_call_set_fndecl (stmt, repl);
update_stmt (stmt);
node = cgraph_node::create (repl);
node->tm_may_enter_irr = false;
return expand_call_tm (region, gsi);
}
gcc_unreachable ();
}
if (node->tm_may_enter_irr)
transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
if (is_tm_abort (fn_decl))
{
transaction_subcode_ior (region, GTMA_HAVE_ABORT);
return true;
}
/* Instrument the store if needed.
If the assignment happens inside the function call (return slot
optimization), there is no instrumentation to be done, since
the callee should have done the right thing. */
if (lhs && requires_barrier (region->entry_block, lhs, stmt)
&& !gimple_call_return_slot_opt_p (stmt))
{
tree tmp = create_tmp_reg (TREE_TYPE (lhs));
location_t loc = gimple_location (stmt);
edge fallthru_edge = NULL;
gassign *assign_stmt;
/* Remember if the call was going to throw. */
if (stmt_can_throw_internal (cfun, stmt))
{
edge_iterator ei;
edge e;
basic_block bb = gimple_bb (stmt);
FOR_EACH_EDGE (e, ei, bb->succs)
if (e->flags & EDGE_FALLTHRU)
{
fallthru_edge = e;
break;
}
}
gimple_call_set_lhs (stmt, tmp);
update_stmt (stmt);
assign_stmt = gimple_build_assign (lhs, tmp);
gimple_set_location (assign_stmt, loc);
/* We cannot throw in the middle of a BB. If the call was going
to throw, place the instrumentation on the fallthru edge, so
the call remains the last statement in the block. */
if (fallthru_edge)
{
gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (assign_stmt);
gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
expand_assign_tm (region, &fallthru_gsi);
gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
pending_edge_inserts_p = true;
}
else
{
gsi_insert_after (gsi, assign_stmt, GSI_CONTINUE_LINKING);
expand_assign_tm (region, gsi);
}
transaction_subcode_ior (region, GTMA_HAVE_STORE);
}
return retval;
}
/* Expand all statements in BB as appropriate for being inside
a transaction. */
static void
expand_block_tm (struct tm_region *region, basic_block bb)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
{
gimple *stmt = gsi_stmt (gsi);
switch (gimple_code (stmt))
{
case GIMPLE_ASSIGN:
/* Only memory reads/writes need to be instrumented. */
if (gimple_assign_single_p (stmt)
&& !gimple_clobber_p (stmt))
{
expand_assign_tm (region, &gsi);
continue;
}
break;
case GIMPLE_CALL:
if (expand_call_tm (region, &gsi))
return;
break;
case GIMPLE_ASM:
gcc_unreachable ();
default:
break;
}
if (!gsi_end_p (gsi))
gsi_next (&gsi);
}
}
/* Return the list of basic-blocks in REGION.
   STOP_AT_IRREVOCABLE_P is true if the caller is not interested in
   blocks following a TM_IRREVOCABLE call.
   INCLUDE_UNINSTRUMENTED_P is true if we should include the
   uninstrumented code path blocks in the list of basic blocks
   returned, false otherwise.  */
static vec<basic_block>
get_tm_region_blocks (basic_block entry_block,
bitmap exit_blocks,
bitmap irr_blocks,
bitmap all_region_blocks,
bool stop_at_irrevocable_p,
bool include_uninstrumented_p = true)
{
vec<basic_block> bbs = vNULL;
unsigned i;
edge e;
edge_iterator ei;
bitmap visited_blocks = BITMAP_ALLOC (NULL);
i = 0;
bbs.safe_push (entry_block);
bitmap_set_bit (visited_blocks, entry_block->index);
do
{
basic_block bb = bbs[i++];
      if (exit_blocks
	  && bitmap_bit_p (exit_blocks, bb->index))
continue;
if (stop_at_irrevocable_p
&& irr_blocks
&& bitmap_bit_p (irr_blocks, bb->index))
continue;
FOR_EACH_EDGE (e, ei, bb->succs)
if ((include_uninstrumented_p
|| !(e->flags & EDGE_TM_UNINSTRUMENTED))
&& !bitmap_bit_p (visited_blocks, e->dest->index))
{
bitmap_set_bit (visited_blocks, e->dest->index);
bbs.safe_push (e->dest);
}
}
while (i < bbs.length ());
if (all_region_blocks)
bitmap_ior_into (all_region_blocks, visited_blocks);
BITMAP_FREE (visited_blocks);
return bbs;
}
// Callback data for collect_bb2reg.
struct bb2reg_stuff
{
vec<tm_region *> *bb2reg;
bool include_uninstrumented_p;
};
// Callback for expand_regions, collect innermost region data for each bb.
static void *
collect_bb2reg (struct tm_region *region, void *data)
{
struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
vec<tm_region *> *bb2reg = stuff->bb2reg;
vec<basic_block> queue;
unsigned int i;
basic_block bb;
queue = get_tm_region_blocks (region->entry_block,
region->exit_blocks,
region->irr_blocks,
NULL,
/*stop_at_irr_p=*/true,
stuff->include_uninstrumented_p);
  // We expect expand_regions to perform a post-order traversal of the region
  // tree.  Therefore the last region seen for any bb is the innermost.
FOR_EACH_VEC_ELT (queue, i, bb)
(*bb2reg)[bb->index] = region;
queue.release ();
return NULL;
}
// Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
// which a basic block belongs.  The uninstrumented code path blocks of a
// region are ignored if INCLUDE_UNINSTRUMENTED_P is false.
//
// ??? This data is very similar to the bb_regions array that is collected
// during tm_region_init. Or, rather, this data is similar to what could
// be used within tm_region_init. The actual computation in tm_region_init
// begins and ends with bb_regions entirely full of NULL pointers, due to
// the way in which pointers are swapped in and out of the array.
//
// ??? Our callers expect that blocks are not shared between transactions.
// When the optimizers get too smart and blocks are shared, then during
// the tm_mark phase we'll add log entries to only one of the two
// transactions, and in the tm_edge phase we'll add edges to the CFG that
// create invalid cycles; the symptom is SSA defs that do not dominate
// their uses.  Note that the optimizers were locally correct with their
// transformation, as we have no info within the program that suggests
// that the blocks cannot be shared.
//
// ??? There is currently a hack inside tree-ssa-pre.cc to work around the
// only known instance of this block sharing.
static vec<tm_region *>
get_bb_regions_instrumented (bool traverse_clones,
bool include_uninstrumented_p)
{
unsigned n = last_basic_block_for_fn (cfun);
struct bb2reg_stuff stuff;
vec<tm_region *> ret;
ret.create (n);
ret.safe_grow_cleared (n, true);
stuff.bb2reg = &ret;
stuff.include_uninstrumented_p = include_uninstrumented_p;
expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);
return ret;
}
/* Set the BB_IN_TRANSACTION flag on every basic block that appears
   within a transaction, thereby marking the gimple statements those
   blocks contain as transactional.  */
void
compute_transaction_bits (void)
{
struct tm_region *region;
vec<basic_block> queue;
unsigned int i;
basic_block bb;
  /* ??? Perhaps we need to abstract gate_tm_init further, because we
     certainly don't need it to calculate CDI_DOMINATOR info.  */
gate_tm_init ();
FOR_EACH_BB_FN (bb, cfun)
bb->flags &= ~BB_IN_TRANSACTION;
for (region = all_tm_regions; region; region = region->next)
{
queue = get_tm_region_blocks (region->entry_block,
region->exit_blocks,
region->irr_blocks,
NULL,
/*stop_at_irr_p=*/true);
for (i = 0; queue.iterate (i, &bb); ++i)
bb->flags |= BB_IN_TRANSACTION;
queue.release ();
}
if (all_tm_regions)
bitmap_obstack_release (&tm_obstack);
}
/* Replace the GIMPLE_TRANSACTION in this region with the corresponding
call to BUILT_IN_TM_START. */
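/* After expansion, the start of the region looks roughly like the
   following sketch (probabilities, basic-block plumbing, and the
   uninstrumented path selection are omitted; the flag names are the
   ones defined in this file):

     tm_state = __builtin__ITM_beginTransaction (PR_INSTRUMENTEDCODE | ...);
     if (tm_state & A_RESTORELIVEVARIABLES)
       <emit log restores>;
     if (tm_state & A_ABORTTRANSACTION)
       goto <abort/over label>;
     <instrumented body>  */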
static void *
expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
{
tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
basic_block transaction_bb = gimple_bb (region->transaction_stmt);
tree tm_state = region->tm_state;
tree tm_state_type = TREE_TYPE (tm_state);
edge abort_edge = NULL;
edge inst_edge = NULL;
edge uninst_edge = NULL;
edge fallthru_edge = NULL;
// Identify the various successors of the transaction start.
{
edge_iterator i;
edge e;
FOR_EACH_EDGE (e, i, transaction_bb->succs)
{
if (e->flags & EDGE_TM_ABORT)
abort_edge = e;
else if (e->flags & EDGE_TM_UNINSTRUMENTED)
uninst_edge = e;
else
inst_edge = e;
if (e->flags & EDGE_FALLTHRU)
fallthru_edge = e;
}
}
/* ??? There are plenty of bits here we're not computing. */
{
int subcode = gimple_transaction_subcode (region->get_transaction_stmt ());
int flags = 0;
if (subcode & GTMA_DOES_GO_IRREVOCABLE)
flags |= PR_DOESGOIRREVOCABLE;
if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
flags |= PR_HASNOIRREVOCABLE;
/* If the transaction does not have an abort in lexical scope and is not
marked as an outer transaction, then it will never abort. */
if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
flags |= PR_HASNOABORT;
if ((subcode & GTMA_HAVE_STORE) == 0)
flags |= PR_READONLY;
if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
flags |= PR_INSTRUMENTEDCODE;
if (uninst_edge)
flags |= PR_UNINSTRUMENTEDCODE;
if (subcode & GTMA_IS_OUTER)
region->original_transaction_was_outer = true;
tree t = build_int_cst (tm_state_type, flags);
gcall *call = gimple_build_call (tm_start, 1, t);
gimple_call_set_lhs (call, tm_state);
gimple_set_location (call, gimple_location (region->transaction_stmt));
// Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
gsi_insert_before (&gsi, call, GSI_SAME_STMT);
gsi_remove (&gsi, true);
region->transaction_stmt = call;
}
// Generate log saves.
if (!tm_log_save_addresses.is_empty ())
tm_log_emit_saves (region->entry_block, transaction_bb);
  // Initially there are no tests to perform on transaction restart.
  // Note that after this point, transaction_bb becomes the "most recent
  // block containing tests for the transaction".
region->restart_block = region->entry_block;
// Generate log restores.
if (!tm_log_save_addresses.is_empty ())
{
basic_block test_bb = create_empty_bb (transaction_bb);
basic_block code_bb = create_empty_bb (test_bb);
basic_block join_bb = create_empty_bb (code_bb);
add_bb_to_loop (test_bb, transaction_bb->loop_father);
add_bb_to_loop (code_bb, transaction_bb->loop_father);
add_bb_to_loop (join_bb, transaction_bb->loop_father);
if (region->restart_block == region->entry_block)
region->restart_block = test_bb;
tree t1 = create_tmp_reg (tm_state_type);
tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
t2 = build_int_cst (tm_state_type, 0);
stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
tm_log_emit_restores (region->entry_block, code_bb);
edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
redirect_edge_pred (fallthru_edge, join_bb);
join_bb->count = test_bb->count = transaction_bb->count;
ei->probability = profile_probability::always ();
et->probability = profile_probability::likely ();
ef->probability = profile_probability::unlikely ();
code_bb->count = et->count ();
transaction_bb = join_bb;
}
// If we have an ABORT edge, create a test to perform the abort.
if (abort_edge)
{
basic_block test_bb = create_empty_bb (transaction_bb);
add_bb_to_loop (test_bb, transaction_bb->loop_father);
if (region->restart_block == region->entry_block)
region->restart_block = test_bb;
tree t1 = create_tmp_reg (tm_state_type);
tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
t2 = build_int_cst (tm_state_type, 0);
stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
test_bb->count = transaction_bb->count;
ei->probability = profile_probability::always ();
      // The non-abort edge.  If both are live, choose one at random, as
      // we'll be fixing that up below.
redirect_edge_pred (fallthru_edge, test_bb);
fallthru_edge->flags = EDGE_FALSE_VALUE;
fallthru_edge->probability = profile_probability::very_likely ();
// Abort/over edge.
redirect_edge_pred (abort_edge, test_bb);
abort_edge->flags = EDGE_TRUE_VALUE;
abort_edge->probability = profile_probability::unlikely ();
transaction_bb = test_bb;
}
// If we have both instrumented and uninstrumented code paths, select one.
if (inst_edge && uninst_edge)
{
basic_block test_bb = create_empty_bb (transaction_bb);
add_bb_to_loop (test_bb, transaction_bb->loop_father);
if (region->restart_block == region->entry_block)
region->restart_block = test_bb;
tree t1 = create_tmp_reg (tm_state_type);
tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);
gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
t2 = build_int_cst (tm_state_type, 0);
stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
// Create the edge into test_bb first, as we want to copy values
// out of the fallthru edge.
edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
e->probability = fallthru_edge->probability;
test_bb->count = fallthru_edge->count ();
      // Now update the edges to the inst/uninst implementations.
      // For now assume that the paths are equally likely.  When using HTM,
      // we'll try the uninst path first and fall back to the inst path if
      // htm buffers are exceeded.  Without HTM we start with the inst path
      // and use the uninst path when falling back to serial mode.
redirect_edge_pred (inst_edge, test_bb);
inst_edge->flags = EDGE_FALSE_VALUE;
inst_edge->probability = profile_probability::even ();
redirect_edge_pred (uninst_edge, test_bb);
uninst_edge->flags = EDGE_TRUE_VALUE;
uninst_edge->probability = profile_probability::even ();
}
  // If we have no previous special cases, and we have PHIs at the beginning
  // of the atomic region, this means we have a loop at the beginning of the
  // atomic region that shares the first block.  This can cause problems when
  // the transaction restart abnormal edges are added in the tm_edges pass.
  // Solve this by adding a new empty block to receive the abnormal edges.
if (region->restart_block == region->entry_block
&& phi_nodes (region->entry_block))
{
basic_block empty_bb = create_empty_bb (transaction_bb);
region->restart_block = empty_bb;
add_bb_to_loop (empty_bb, transaction_bb->loop_father);
redirect_edge_pred (fallthru_edge, empty_bb);
make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
}
return NULL;
}
/* Generate the temporary to be used for the return value of
BUILT_IN_TM_START. */
static void *
generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
{
tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
region->tm_state =
create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
// Reset the subcode, post optimizations. We'll fill this in
// again as we process blocks.
if (region->exit_blocks)
{
gtransaction *transaction_stmt = region->get_transaction_stmt ();
unsigned int subcode = gimple_transaction_subcode (transaction_stmt);
if (subcode & GTMA_DOES_GO_IRREVOCABLE)
subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
| GTMA_MAY_ENTER_IRREVOCABLE
| GTMA_HAS_NO_INSTRUMENTATION);
else
subcode &= GTMA_DECLARATION_MASK;
gimple_transaction_set_subcode (transaction_stmt, subcode);
}
return NULL;
}
// Propagate flags from inner transactions outwards.
static void
propagate_tm_flags_out (struct tm_region *region)
{
if (region == NULL)
return;
propagate_tm_flags_out (region->inner);
if (region->outer && region->outer->transaction_stmt)
{
unsigned s
= gimple_transaction_subcode (region->get_transaction_stmt ());
s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
| GTMA_MAY_ENTER_IRREVOCABLE);
s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ());
gimple_transaction_set_subcode (region->outer->get_transaction_stmt (),
s);
}
propagate_tm_flags_out (region->next);
}
/* Entry point to the MARK phase of TM expansion.  Here we replace
   transactional memory statements with calls to builtins, replace
   function calls with their transactional clones (if available), and
   expand the GIMPLE_TRANSACTION nodes into calls to BUILT_IN_TM_START.
   The transaction restart back-edges are not added until the tm_edges
   pass.  */
static unsigned int
execute_tm_mark (void)
{
pending_edge_inserts_p = false;
expand_regions (all_tm_regions, generate_tm_state, NULL,
/*traverse_clones=*/true);
tm_log_init ();
vec<tm_region *> bb_regions
= get_bb_regions_instrumented (/*traverse_clones=*/true,
/*include_uninstrumented_p=*/false);
struct tm_region *r;
unsigned i;
// Expand memory operations into calls into the runtime.
// This collects log entries as well.
FOR_EACH_VEC_ELT (bb_regions, i, r)
{
if (r != NULL)
{
if (r->transaction_stmt)
{
unsigned sub
= gimple_transaction_subcode (r->get_transaction_stmt ());
/* If we're sure to go irrevocable, there won't be
anything to expand, since the run-time will go
irrevocable right away. */
if (sub & GTMA_DOES_GO_IRREVOCABLE
&& sub & GTMA_MAY_ENTER_IRREVOCABLE)
continue;
}
expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i));
}
}
bb_regions.release ();
// Propagate flags from inner transactions outwards.
propagate_tm_flags_out (all_tm_regions);
// Expand GIMPLE_TRANSACTIONs into calls into the runtime.
expand_regions (all_tm_regions, expand_transaction, NULL,
/*traverse_clones=*/false);
tm_log_emit ();
tm_log_delete ();
if (pending_edge_inserts_p)
gsi_commit_edge_inserts ();
free_dominance_info (CDI_DOMINATORS);
return 0;
}
namespace {
const pass_data pass_data_tm_mark =
{
GIMPLE_PASS, /* type */
"tmmark", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_TRANS_MEM, /* tv_id */
( PROP_ssa | PROP_cfg ), /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_update_ssa, /* todo_flags_finish */
};
class pass_tm_mark : public gimple_opt_pass
{
public:
pass_tm_mark (gcc::context *ctxt)
: gimple_opt_pass (pass_data_tm_mark, ctxt)
{}
/* opt_pass methods: */
virtual unsigned int execute (function *) { return execute_tm_mark (); }
}; // class pass_tm_mark
} // anon namespace
gimple_opt_pass *
make_pass_tm_mark (gcc::context *ctxt)
{
return new pass_tm_mark (ctxt);
}
/* Create an abnormal edge from STMT at ITER to DEST_BB, splitting the
   block as necessary.  Adjust *PNEXT as needed for the split block.  */
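/* For example, if STMT is a commit call in the middle of a block
   (an illustrative sketch):

     ...
     __builtin__ITM_commitTransaction ();
     ...

   the block is split after the call, *PNEXT is repointed at the new
   successor block, and an abnormal edge is wired from the call's block
   to DEST_BB so the runtime can transfer control there on restart.  */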
static inline void
split_bb_make_tm_edge (gimple *stmt, basic_block dest_bb,
gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
{
basic_block bb = gimple_bb (stmt);
if (!gsi_one_before_end_p (iter))
{
edge e = split_block (bb, stmt);
*pnext = gsi_start_bb (e->dest);
}
edge e = make_edge (bb, dest_bb, EDGE_ABNORMAL);
if (e)
e->probability = profile_probability::guessed_never ();
// Record the need for the edge for the benefit of the rtl passes.
if (cfun->gimple_df->tm_restart == NULL)
cfun->gimple_df->tm_restart
= hash_table<tm_restart_hasher>::create_ggc (31);
struct tm_restart_node dummy;
dummy.stmt = stmt;
dummy.label_or_list = gimple_block_label (dest_bb);
tm_restart_node **slot = cfun->gimple_df->tm_restart->find_slot (&dummy,
INSERT);
struct tm_restart_node *n = *slot;
if (n == NULL)
{
n = ggc_alloc<tm_restart_node> ();
*n = dummy;
}
else
{
tree old = n->label_or_list;
if (TREE_CODE (old) == LABEL_DECL)
old = tree_cons (NULL, old, NULL);
n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
}
}
/* Split block BB as necessary for every builtin function we added, and
wire up the abnormal back edges implied by the transaction restart. */
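/* A hedged sketch of the abnormal edges this creates for a nested
   transaction, where R1 and R2 stand for the outer and inner restart
   blocks respectively:

     tm_state.1 = __builtin__ITM_beginTransaction (...);  // outer, restart R1
     tm_state.2 = __builtin__ITM_beginTransaction (...);  // inner, restart R2
     __builtin__ITM_abortTransaction (AR_USERABORT);      // edges to R2 and R1
     __builtin__ITM_commitTransaction ();                 // edge to R1

   Each such call ends its block via split_bb_make_tm_edge above.  */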
static void
expand_block_edges (struct tm_region *const region, basic_block bb)
{
gimple_stmt_iterator gsi, next_gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
{
gimple *stmt = gsi_stmt (gsi);
gcall *call_stmt;
next_gsi = gsi;
gsi_next (&next_gsi);
// ??? Shouldn't we split for any non-pure, non-irrevocable function?
call_stmt = dyn_cast <gcall *> (stmt);
if ((!call_stmt)
|| (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0)
continue;
if (gimple_call_builtin_p (call_stmt, BUILT_IN_TM_ABORT))
{
	  // If we have a ``__transaction_cancel [[outer]]'', there is only
// one abnormal edge: to the transaction marked OUTER.
// All compiler-generated instances of BUILT_IN_TM_ABORT have a
// constant argument, which we can examine here. Users invoking
// TM_ABORT directly get what they deserve.
tree arg = gimple_call_arg (call_stmt, 0);
if (TREE_CODE (arg) == INTEGER_CST
&& (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0
&& !decl_is_tm_clone (current_function_decl))
{
// Find the GTMA_IS_OUTER transaction.
for (struct tm_region *o = region; o; o = o->outer)
if (o->original_transaction_was_outer)
{
split_bb_make_tm_edge (call_stmt, o->restart_block,
gsi, &next_gsi);
break;
}
// Otherwise, the front-end should have semantically checked
// outer aborts, but in either case the target region is not
// within this function.
continue;
}
	  // Non-outer TM aborts have an abnormal edge to the innermost
	  // transaction, the one being aborted.
split_bb_make_tm_edge (call_stmt, region->restart_block, gsi,
&next_gsi);
}
      // All TM builtins have an abnormal edge to the outer-most transaction.
      // We never restart inner transactions.  For tm clones, we know a priori
      // that the outer-most transaction is outside the function.
if (decl_is_tm_clone (current_function_decl))
continue;
if (cfun->gimple_df->tm_restart == NULL)
cfun->gimple_df->tm_restart
= hash_table<tm_restart_hasher>::create_ggc (31);
for (struct tm_region *o = region; o; o = o->outer)
if (!o->outer)
{
split_bb_make_tm_edge (call_stmt, o->restart_block, gsi, &next_gsi);
break;
}
// Delete any tail-call annotation that may have been added.
// The tail-call pass may have mis-identified the commit as being
// a candidate because we had not yet added this restart edge.
gimple_call_set_tail (call_stmt, false);
}
}
/* Entry point to the final expansion of transactional nodes. */
namespace {
const pass_data pass_data_tm_edges =
{
GIMPLE_PASS, /* type */
"tmedge", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_TRANS_MEM, /* tv_id */
( PROP_ssa | PROP_cfg ), /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_update_ssa, /* todo_flags_finish */
};
class pass_tm_edges : public gimple_opt_pass
{
public:
pass_tm_edges (gcc::context *ctxt)
: gimple_opt_pass (pass_data_tm_edges, ctxt)
{}
/* opt_pass methods: */
virtual unsigned int execute (function *);
};