blob: 5d4bc1979c75baacb04b51d5fbf655487651de6a [file] [log] [blame]
/* A pass for lowering gimple to HSAIL
Copyright (C) 2013-2019 Free Software Foundation, Inc.
Contributed by Martin Jambor <mjambor@suse.cz> and
Martin Liska <mliska@suse.cz>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "tm.h"
#include "is-a.h"
#include "hash-table.h"
#include "vec.h"
#include "tree.h"
#include "tree-pass.h"
#include "function.h"
#include "basic-block.h"
#include "cfg.h"
#include "fold-const.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "bitmap.h"
#include "dumpfile.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "stringpool.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "tree-dfa.h"
#include "ssa-iterators.h"
#include "cgraph.h"
#include "print-tree.h"
#include "symbol-summary.h"
#include "hsa-common.h"
#include "cfghooks.h"
#include "tree-cfg.h"
#include "cfgloop.h"
#include "cfganal.h"
#include "builtins.h"
#include "params.h"
#include "gomp-constants.h"
#include "internal-fn.h"
#include "builtins.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "attribs.h"
/* Print a warning message and set that we have seen an error. */
#define HSA_SORRY_ATV(location, message, ...) \
do \
{ \
hsa_fail_cfun (); \
auto_diagnostic_group d; \
if (warning_at (EXPR_LOCATION (hsa_cfun->m_decl), OPT_Whsa, \
HSA_SORRY_MSG)) \
inform (location, message, __VA_ARGS__); \
} \
while (false)
/* Same as previous, but highlight a location. */
#define HSA_SORRY_AT(location, message) \
do \
{ \
hsa_fail_cfun (); \
auto_diagnostic_group d; \
if (warning_at (EXPR_LOCATION (hsa_cfun->m_decl), OPT_Whsa, \
HSA_SORRY_MSG)) \
inform (location, message); \
} \
while (false)
/* Default number of threads used by kernel dispatch. */
#define HSA_DEFAULT_NUM_THREADS 64
/* Following structures are defined in the final version
   of HSA specification.  */

/* HSA queue packet is shadow structure, originally provided by AMD.
   Mirrors the AQL kernel dispatch packet layout; field names and order
   must match the runtime's expectations, so do not reorder.  */

struct hsa_queue_packet
{
  uint16_t header;
  uint16_t setup;
  uint16_t workgroup_size_x;
  uint16_t workgroup_size_y;
  uint16_t workgroup_size_z;
  uint16_t reserved0;
  uint32_t grid_size_x;
  uint32_t grid_size_y;
  uint32_t grid_size_z;
  uint32_t private_segment_size;
  uint32_t group_segment_size;
  uint64_t kernel_object;
  void *kernarg_address;
  uint64_t reserved2;
  uint64_t completion_signal;
};
/* HSA queue is shadow structure, originally provided by AMD.
   Mirrors the runtime's queue descriptor; layout must stay in sync
   with the HSA runtime, so do not reorder fields.  */

struct hsa_queue
{
  int type;
  uint32_t features;
  void *base_address;
  uint64_t doorbell_signal;
  uint32_t size;
  uint32_t reserved1;
  uint64_t id;
};
/* Obstack from which HSA instructions and operands of the current function
   are allocated; released wholesale in hsa_deinit_data_for_cfun.  */
static struct obstack hsa_obstack;

/* List of pointers to all instructions that come from an object allocator.  */
static vec <hsa_insn_basic *> hsa_instructions;

/* List of pointers to all operands that come from an object allocator.  */
static vec <hsa_op_base *> hsa_operands;
/* Default constructor of an HSA symbol: everything zero/none, automatic
   allocation, not yet emitted to BRIG.  */

hsa_symbol::hsa_symbol ()
  : m_decl (NULL_TREE), m_name (NULL), m_name_number (0),
    m_directive_offset (0), m_type (BRIG_TYPE_NONE),
    m_segment (BRIG_SEGMENT_NONE), m_linkage (BRIG_LINKAGE_NONE), m_dim (0),
    m_cst_value (NULL), m_global_scope_p (false), m_seen_error (false),
    m_allocation (BRIG_ALLOCATION_AUTOMATIC), m_emitted_to_brig (false)
{
}
/* Constructor of an HSA symbol with a known TYPE, SEGMENT, LINKAGE, scope
   (GLOBAL_SCOPE_P), ALLOCATION kind and alignment ALIGN.  The backing
   declaration, name and array dimension are filled in later.  */

hsa_symbol::hsa_symbol (BrigType16_t type, BrigSegment8_t segment,
			BrigLinkage8_t linkage, bool global_scope_p,
			BrigAllocation allocation, BrigAlignment8_t align)
  : m_decl (NULL_TREE), m_name (NULL), m_name_number (0),
    m_directive_offset (0), m_type (type), m_segment (segment),
    m_linkage (linkage), m_dim (0), m_cst_value (NULL),
    m_global_scope_p (global_scope_p), m_seen_error (false),
    m_allocation (allocation), m_emitted_to_brig (false), m_align (align)
{
}
/* Return the total size of the symbol in bytes, i.e. the size of one
   element (the type with the array bit masked off) multiplied by the
   array dimension, if the symbol is an array.  */

unsigned HOST_WIDE_INT
hsa_symbol::total_byte_size ()
{
  unsigned HOST_WIDE_INT elem_size
    = hsa_type_bit_size (m_type & ~BRIG_TYPE_ARRAY_MASK);
  gcc_assert (elem_size % BITS_PER_UNIT == 0);
  elem_size /= BITS_PER_UNIT;
  return m_dim ? elem_size * m_dim : elem_size;
}
/* Forward declaration. */
static BrigType16_t
hsa_type_for_tree_type (const_tree type, unsigned HOST_WIDE_INT *dim_p,
bool min32int);
/* Fill in those members of the symbol that are derived from its backing
   declaration DECL: the declaration itself, the HSA type (and array
   dimension m_dim, set by hsa_type_for_tree_type), and the alignment.
   On HSAIL-generation failure only marks the symbol as erroneous.  */

void
hsa_symbol::fillup_for_decl (tree decl)
{
  m_decl = decl;
  m_type = hsa_type_for_tree_type (TREE_TYPE (decl), &m_dim, false);
  if (hsa_seen_error ())
    {
      m_seen_error = true;
      return;
    }

  /* Never lower the alignment below the natural alignment of the type.  */
  m_align = MAX (m_align, hsa_natural_alignment (m_type));
}
/* Constructor of class representing global HSA function/kernel information and
   state.  FNDECL is function declaration, KERNEL_P is true if the function
   is going to become a HSA kernel.  If the function has body, SSA_NAMES_COUNT
   should be set to number of SSA names used in the function.
   MODIFIED_CFG is set to true in case we modified control-flow graph
   of the function.  */

hsa_function_representation::hsa_function_representation
  (tree fdecl, bool kernel_p, unsigned ssa_names_count, bool modified_cfg)
  : m_name (NULL),
    m_reg_count (0), m_input_args (vNULL),
    m_output_arg (NULL), m_spill_symbols (vNULL), m_global_symbols (vNULL),
    m_private_variables (vNULL), m_called_functions (vNULL),
    m_called_internal_fns (vNULL), m_hbb_count (0),
    m_in_ssa (true), m_kern_p (kernel_p), m_declaration_p (false),
    m_decl (fdecl), m_internal_fn (NULL), m_shadow_reg (NULL),
    m_kernel_dispatch_count (0), m_maximum_omp_data_size (0),
    m_seen_error (false), m_temp_symbol_count (0), m_ssa_map (),
    m_modified_cfg (modified_cfg)
{
  /* Size the local symbol hash table proportionally to the number of local
     declarations; half of them plus one is a heuristic starting size.  */
  int sym_init_len = (vec_safe_length (cfun->local_decls) / 2) + 1;
  m_local_symbols = new hash_table <hsa_noop_symbol_hasher> (sym_init_len);
  /* Pre-size the SSA-name -> register map so it can be indexed directly by
     SSA name version.  */
  m_ssa_map.safe_grow_cleared (ssa_names_count);
}
/* Constructor of class representing HSA function information that
   is derived for an internal function.  Such representation is always a
   declaration (no body, no local symbol table, no CFG).  */

hsa_function_representation::hsa_function_representation (hsa_internal_fn *fn)
  : m_reg_count (0), m_input_args (vNULL),
    m_output_arg (NULL), m_local_symbols (NULL),
    m_spill_symbols (vNULL), m_global_symbols (vNULL),
    m_private_variables (vNULL), m_called_functions (vNULL),
    m_called_internal_fns (vNULL), m_hbb_count (0),
    m_in_ssa (true), m_kern_p (false), m_declaration_p (true), m_decl (NULL),
    m_internal_fn (fn), m_shadow_reg (NULL), m_kernel_dispatch_count (0),
    m_maximum_omp_data_size (0), m_seen_error (false), m_temp_symbol_count (0),
    m_ssa_map () {}
/* Destructor of class holding function/kernel-wide information and state.
   Releases all symbols, arguments and auxiliary vectors owned by the
   representation.  */

hsa_function_representation::~hsa_function_representation ()
{
  /* Kernel names are deallocated at the end of BRIG output when deallocating
     hsa_decl_kernel_mapping.  */
  if (!m_kern_p || m_seen_error)
    free (m_name);

  for (unsigned i = 0; i < m_input_args.length (); i++)
    delete m_input_args[i];
  m_input_args.release ();

  delete m_output_arg;
  delete m_local_symbols;

  for (unsigned i = 0; i < m_spill_symbols.length (); i++)
    delete m_spill_symbols[i];
  m_spill_symbols.release ();

  hsa_symbol *sym;
  /* Fixed: the loop condition used to be "i < m_global_symbols.iterate
     (i, &sym)", comparing the index against iterate's boolean result,
     which terminated the loop after the first element and leaked the
     remaining global symbols.  iterate itself is the loop condition.  */
  for (unsigned i = 0; m_global_symbols.iterate (i, &sym); i++)
    /* NOTE(review): m_linkage is compared against an *allocation* enum
       value here; this matches the historical behavior but looks like it
       was meant to test m_allocation != BRIG_ALLOCATION_PROGRAM — confirm
       before changing, as it affects which symbols are deleted.  */
    if (sym->m_linkage != BRIG_ALLOCATION_PROGRAM)
      delete sym;
  m_global_symbols.release ();

  for (unsigned i = 0; i < m_private_variables.length (); i++)
    delete m_private_variables[i];
  m_private_variables.release ();
  m_called_functions.release ();
  m_ssa_map.release ();

  for (unsigned i = 0; i < m_called_internal_fns.length (); i++)
    delete m_called_internal_fns[i];
}
/* Return the register holding the kernel-dispatch shadow argument,
   creating the kernarg symbol and the load in the entry block on first
   use.  Non-kernel functions have no shadow argument, so return NULL
   for them (e.g. a function with kernel dispatch compiled without an
   optimization level will not be inlined and ends up here).  */

hsa_op_reg *
hsa_function_representation::get_shadow_reg ()
{
  if (!m_kern_p)
    return NULL;

  if (m_shadow_reg)
    return m_shadow_reg;

  /* Append the shadow argument to the kernel's input arguments.  */
  hsa_symbol *shadow = new hsa_symbol (BRIG_TYPE_U64, BRIG_SEGMENT_KERNARG,
				       BRIG_LINKAGE_FUNCTION);
  m_input_args.safe_push (shadow);
  shadow->m_name = "hsa_runtime_shadow";

  /* Load its value into a fresh register in the entry block.  */
  hsa_op_reg *reg = new hsa_op_reg (BRIG_TYPE_U64);
  hsa_op_address *shadow_addr = new hsa_op_address (shadow);
  hsa_insn_mem *load = new hsa_insn_mem (BRIG_OPCODE_LD, BRIG_TYPE_U64, reg,
					 shadow_addr);
  hsa_bb_for_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun))->append_insn (load);
  m_shadow_reg = reg;
  return reg;
}
/* Return true iff the shadow-argument register has already been
   created for this function.  */

bool
hsa_function_representation::has_shadow_reg_p ()
{
  return m_shadow_reg != NULL;
}
/* Create the HSA counterparts (hsa_bb, stored in bb->aux) for the two
   artificial CFG blocks — the entry and exit blocks — which are not
   visited by ordinary basic-block iteration.  */

void
hsa_function_representation::init_extra_bbs ()
{
  hsa_init_new_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  hsa_init_new_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
}
/* Recompute dominator information if the CFG of the function has been
   modified during HSAIL generation; otherwise do nothing.  */

void
hsa_function_representation::update_dominance ()
{
  if (!m_modified_cfg)
    return;

  /* Existing dominance info is stale — drop it and rebuild.  */
  free_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_DOMINATORS);
}
/* Allocate a fresh private-segment temporary symbol of the given TYPE,
   give it a unique number, register it among the function's private
   variables and return it.  */

hsa_symbol *
hsa_function_representation::create_hsa_temporary (BrigType16_t type)
{
  hsa_symbol *tmp = new hsa_symbol (type, BRIG_SEGMENT_PRIVATE,
				    BRIG_LINKAGE_FUNCTION);
  tmp->m_name_number = m_temp_symbol_count++;
  hsa_cfun->m_private_variables.safe_push (tmp);
  return tmp;
}
/* Return the BRIG linkage of the function: kernels, public functions and
   internal-function wrappers get program linkage, everything else gets
   module linkage.  */

BrigLinkage8_t
hsa_function_representation::get_linkage ()
{
  if (m_internal_fn)
    return BRIG_LINKAGE_PROGRAM;
  if (m_kern_p || TREE_PUBLIC (m_decl))
    return BRIG_LINKAGE_PROGRAM;
  return BRIG_LINKAGE_MODULE;
}
/* Hash map of simple OMP builtins. */
static hash_map <nofree_string_hash, omp_simple_builtin> *omp_simple_builtins
= NULL;
/* Warning messages for OMP builtins. */
#define HSA_WARN_LOCK_ROUTINE "support for HSA does not implement OpenMP " \
"lock routines"
#define HSA_WARN_TIMING_ROUTINE "support for HSA does not implement OpenMP " \
"timing routines"
#define HSA_WARN_MEMORY_ROUTINE "OpenMP device memory library routines have " \
"undefined semantics within target regions, support for HSA ignores them"
#define HSA_WARN_AFFINITY "Support for HSA does not implement OpenMP " \
"affinity feateres"
/* Initialize hash map with simple OMP builtins.  Each entry maps the name
   of an OpenMP library routine to a description of how to expand it
   trivially: an optional warning to emit, whether the call is to be
   removed as a no-op, and an optional constant return value.  The map is
   built lazily on first use.  */

static void
hsa_init_simple_builtins ()
{
  if (omp_simple_builtins != NULL)
    return;

  omp_simple_builtins
    = new hash_map <nofree_string_hash, omp_simple_builtin> ();

  omp_simple_builtin omp_builtins[] =
    {
      omp_simple_builtin ("omp_get_initial_device", NULL, false,
			  new hsa_op_immed (GOMP_DEVICE_HOST,
					    (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_is_initial_device", NULL, false,
			  new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_get_dynamic", NULL, false,
			  new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_set_dynamic", NULL, false, NULL),
      omp_simple_builtin ("omp_init_lock", HSA_WARN_LOCK_ROUTINE, true),
      omp_simple_builtin ("omp_init_lock_with_hint", HSA_WARN_LOCK_ROUTINE,
			  true),
      omp_simple_builtin ("omp_init_nest_lock_with_hint", HSA_WARN_LOCK_ROUTINE,
			  true),
      omp_simple_builtin ("omp_destroy_lock", HSA_WARN_LOCK_ROUTINE, true),
      omp_simple_builtin ("omp_set_lock", HSA_WARN_LOCK_ROUTINE, true),
      omp_simple_builtin ("omp_unset_lock", HSA_WARN_LOCK_ROUTINE, true),
      omp_simple_builtin ("omp_test_lock", HSA_WARN_LOCK_ROUTINE, true),
      omp_simple_builtin ("omp_get_wtime", HSA_WARN_TIMING_ROUTINE, true),
      omp_simple_builtin ("omp_get_wtick", HSA_WARN_TIMING_ROUTINE, true),
      omp_simple_builtin ("omp_target_alloc", HSA_WARN_MEMORY_ROUTINE, false,
			  new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_U64)),
      omp_simple_builtin ("omp_target_free", HSA_WARN_MEMORY_ROUTINE, false),
      omp_simple_builtin ("omp_target_is_present", HSA_WARN_MEMORY_ROUTINE,
			  false,
			  new hsa_op_immed (-1, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_target_memcpy", HSA_WARN_MEMORY_ROUTINE, false,
			  new hsa_op_immed (-1, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_target_memcpy_rect", HSA_WARN_MEMORY_ROUTINE,
			  false,
			  new hsa_op_immed (-1, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_target_associate_ptr", HSA_WARN_MEMORY_ROUTINE,
			  false,
			  new hsa_op_immed (-1, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_target_disassociate_ptr",
			  HSA_WARN_MEMORY_ROUTINE,
			  false,
			  new hsa_op_immed (-1, (BrigType16_t) BRIG_TYPE_S32)),
      /* Fixed the doubled "only" in the diagnostic text below ("only allows
	 only one active level").  */
      omp_simple_builtin ("omp_set_max_active_levels",
			  "Support for HSA allows only one active level, "
			  "call to omp_set_max_active_levels will be ignored "
			  "in the generated HSAIL",
			  false, NULL),
      omp_simple_builtin ("omp_get_max_active_levels", NULL, false,
			  new hsa_op_immed (1, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_in_final", NULL, false,
			  new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_get_proc_bind", HSA_WARN_AFFINITY, false,
			  new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_get_num_places", HSA_WARN_AFFINITY, false,
			  new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_get_place_num_procs", HSA_WARN_AFFINITY, false,
			  new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_get_place_proc_ids", HSA_WARN_AFFINITY, false,
			  NULL),
      omp_simple_builtin ("omp_get_place_num", HSA_WARN_AFFINITY, false,
			  new hsa_op_immed (-1, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_get_partition_num_places", HSA_WARN_AFFINITY,
			  false,
			  new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_get_partition_place_nums", HSA_WARN_AFFINITY,
			  false, NULL),
      omp_simple_builtin ("omp_set_default_device",
			  "omp_set_default_device has undefined semantics "
			  "within target regions, support for HSA ignores it",
			  false, NULL),
      omp_simple_builtin ("omp_get_default_device",
			  "omp_get_default_device has undefined semantics "
			  "within target regions, support for HSA ignores it",
			  false,
			  new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_get_num_devices",
			  "omp_get_num_devices has undefined semantics "
			  "within target regions, support for HSA ignores it",
			  false,
			  new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)),
      omp_simple_builtin ("omp_get_num_procs", NULL, true, NULL),
      omp_simple_builtin ("omp_get_cancellation", NULL, true, NULL),
      omp_simple_builtin ("omp_set_nested", NULL, true, NULL),
      omp_simple_builtin ("omp_get_nested", NULL, true, NULL),
      omp_simple_builtin ("omp_set_schedule", NULL, true, NULL),
      omp_simple_builtin ("omp_get_schedule", NULL, true, NULL),
      omp_simple_builtin ("omp_get_thread_limit", NULL, true, NULL),
      omp_simple_builtin ("omp_get_team_size", NULL, true, NULL),
      omp_simple_builtin ("omp_get_ancestor_thread_num", NULL, true, NULL),
      omp_simple_builtin ("omp_get_max_task_priority", NULL, true, NULL)
    };

  unsigned count = sizeof (omp_builtins) / sizeof (omp_simple_builtin);
  for (unsigned i = 0; i < count; i++)
    omp_simple_builtins->put (omp_builtins[i].m_name, omp_builtins[i]);
}
/* Allocate HSA structures that we need only while generating with this.
   Initializes compilation-unit-wide data (once per unit) and the obstack
   from which per-function instructions and operands are allocated.  */

static void
hsa_init_data_for_cfun ()
{
  hsa_init_compilation_unit_data ();
  gcc_obstack_init (&hsa_obstack);
}
/* Deinitialize HSA subsystem and free all allocated memory.
   Order matters: destructors of hsa_bb, operands and instructions must run
   before the obstack that backs their storage is released.  */

static void
hsa_deinit_data_for_cfun (void)
{
  basic_block bb;

  /* hsa_bb objects live in bb->aux; run their destructors explicitly
     (their storage is not heap-allocated).  */
  FOR_ALL_BB_FN (bb, cfun)
    if (bb->aux)
      {
	hsa_bb *hbb = hsa_bb_for_bb (bb);
	hbb->~hsa_bb ();
	bb->aux = NULL;
      }

  for (unsigned int i = 0; i < hsa_operands.length (); i++)
    hsa_destroy_operand (hsa_operands[i]);
  hsa_operands.release ();

  for (unsigned i = 0; i < hsa_instructions.length (); i++)
    hsa_destroy_insn (hsa_instructions[i]);
  hsa_instructions.release ();

  if (omp_simple_builtins != NULL)
    {
      delete omp_simple_builtins;
      omp_simple_builtins = NULL;
    }

  /* Release all obstack-backed storage in one go, then the function
     representation itself.  */
  obstack_free (&hsa_obstack, NULL);
  delete hsa_cfun;
}
/* Return the type which holds addresses in the given SEGMENT.  Flat and
   global-like segments use the machine-model pointer width; the
   group/private/spill/arg segments are always addressed with 32 bits.  */

static BrigType16_t
hsa_get_segment_addr_type (BrigSegment8_t segment)
{
  switch (segment)
    {
    case BRIG_SEGMENT_FLAT:
    case BRIG_SEGMENT_GLOBAL:
    case BRIG_SEGMENT_READONLY:
    case BRIG_SEGMENT_KERNARG:
      if (hsa_machine_large_p ())
	return BRIG_TYPE_U64;
      return BRIG_TYPE_U32;

    case BRIG_SEGMENT_GROUP:
    case BRIG_SEGMENT_PRIVATE:
    case BRIG_SEGMENT_SPILL:
    case BRIG_SEGMENT_ARG:
      return BRIG_TYPE_U32;

    case BRIG_SEGMENT_NONE:
    default:
      gcc_unreachable ();
    }
}
/* Return integer brig type according to provided SIZE in bytes.  If SIGN
   is set to true, return signed integer type.  Returns 0 for sizes that
   have no corresponding BRIG integer type.  */

static BrigType16_t
get_integer_type_by_bytes (unsigned size, bool sign)
{
  switch (size)
    {
    case 1:
      return sign ? BRIG_TYPE_S8 : BRIG_TYPE_U8;
    case 2:
      return sign ? BRIG_TYPE_S16 : BRIG_TYPE_U16;
    case 4:
      return sign ? BRIG_TYPE_S32 : BRIG_TYPE_U32;
    case 8:
      return sign ? BRIG_TYPE_S64 : BRIG_TYPE_U64;
    default:
      return 0;
    }
}
/* If T is an integral type narrower than 32 bits, return the 32-bit
   equivalent of the same signedness; otherwise return T unchanged.  */

static BrigType16_t
hsa_extend_inttype_to_32bit (BrigType16_t t)
{
  switch (t)
    {
    case BRIG_TYPE_U8:
    case BRIG_TYPE_U16:
      return BRIG_TYPE_U32;
    case BRIG_TYPE_S8:
    case BRIG_TYPE_S16:
      return BRIG_TYPE_S32;
    default:
      return t;
    }
}
/* Return HSA type for tree TYPE, which has to fit into BrigType16_t.  Pointers
   are assumed to use flat addressing.  If min32int is true, always expand
   integer types to one that has at least 32 bits.  On failure emits an HSA
   "sorry" diagnostic and returns BRIG_TYPE_NONE (or a partially-computed
   result for the vector-packing error paths).  */

static BrigType16_t
hsa_type_for_scalar_tree_type (const_tree type, bool min32int)
{
  HOST_WIDE_INT bsize;
  const_tree base;
  BrigType16_t res = BRIG_TYPE_NONE;

  gcc_checking_assert (TYPE_P (type));
  gcc_checking_assert (!AGGREGATE_TYPE_P (type));
  if (POINTER_TYPE_P (type))
    return hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT);

  /* For vectors and complex types translate the element type first.  */
  if (TREE_CODE (type) == VECTOR_TYPE)
    base = TREE_TYPE (type);
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      base = TREE_TYPE (type);
      /* Complex components live in registers, which must be at least
	 32 bits wide.  */
      min32int = true;
    }
  else
    base = type;

  if (!tree_fits_uhwi_p (TYPE_SIZE (base)))
    {
      HSA_SORRY_ATV (EXPR_LOCATION (type),
		     "support for HSA does not implement huge or "
		     "variable-sized type %qT", type);
      return res;
    }

  bsize = tree_to_uhwi (TYPE_SIZE (base));
  unsigned byte_size = bsize / BITS_PER_UNIT;
  if (INTEGRAL_TYPE_P (base))
    res = get_integer_type_by_bytes (byte_size, !TYPE_UNSIGNED (base));
  else if (SCALAR_FLOAT_TYPE_P (base))
    {
      switch (bsize)
	{
	case 16:
	  res = BRIG_TYPE_F16;
	  break;
	case 32:
	  res = BRIG_TYPE_F32;
	  break;
	case 64:
	  res = BRIG_TYPE_F64;
	  break;
	default:
	  break;
	}
    }

  if (res == BRIG_TYPE_NONE)
    {
      HSA_SORRY_ATV (EXPR_LOCATION (type),
		     "support for HSA does not implement type %qT", type);
      return res;
    }

  if (TREE_CODE (type) == VECTOR_TYPE)
    {
      HOST_WIDE_INT tsize = tree_to_uhwi (TYPE_SIZE (type));

      /* A single-element vector cannot be expressed as a packed type.  */
      if (bsize == tsize)
	{
	  HSA_SORRY_ATV (EXPR_LOCATION (type),
			 "support for HSA does not implement a vector type "
			 "where a type and unit size are equal: %qT", type);
	  return res;
	}

      /* OR in the packing bits corresponding to the total vector size.  */
      switch (tsize)
	{
	case 32:
	  res |= BRIG_TYPE_PACK_32;
	  break;
	case 64:
	  res |= BRIG_TYPE_PACK_64;
	  break;
	case 128:
	  res |= BRIG_TYPE_PACK_128;
	  break;
	default:
	  HSA_SORRY_ATV (EXPR_LOCATION (type),
			 "support for HSA does not implement type %qT", type);
	}
    }

  if (min32int)
    /* Registers/immediate operands can only be 32bit or more except for
       f16.  */
    res = hsa_extend_inttype_to_32bit (res);

  if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      /* Represent a complex value as one bit-type twice the width of its
	 (possibly widened) component type.  */
      unsigned bsize = 2 * hsa_type_bit_size (res);
      res = hsa_bittype_for_bitsize (bsize);
    }

  return res;
}
/* Returns the BRIG type we need to load/store entities of TYPE.

   HSA has non-intuitive constraints on load/store types.  If it's
   a bit-type it _must_ be B128, if it's not a bit-type it must be
   64bit max.  So for loading entities of 128 bits (e.g. vectors)
   we have to use B128, while for loading the rest we have to use the
   input type (??? or maybe also flattened to a equally sized non-vector
   unsigned type?).  */

static BrigType16_t
mem_type_for_type (BrigType16_t type)
{
  if ((type & BRIG_TYPE_PACK_MASK) == BRIG_TYPE_PACK_128)
    return BRIG_TYPE_B128;

  if (!hsa_btype_p (type) && !hsa_type_packed_p (type))
    return type;

  unsigned bitsize = hsa_type_bit_size (type);
  if (bitsize < 128)
    return hsa_uint_for_bitsize (bitsize);
  return hsa_bittype_for_bitsize (bitsize);
}
/* Return HSA type for tree TYPE.  If it cannot fit into BrigType16_t, some
   kind of array will be generated, setting DIM appropriately.  Otherwise, it
   will be set to zero.  Records/unions become byte arrays; arrays of scalars
   keep their element type with a collapsed total dimension; anything else is
   delegated to hsa_type_for_scalar_tree_type.  */

static BrigType16_t
hsa_type_for_tree_type (const_tree type, unsigned HOST_WIDE_INT *dim_p = NULL,
			bool min32int = false)
{
  gcc_checking_assert (TYPE_P (type));
  if (!tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
    {
      HSA_SORRY_ATV (EXPR_LOCATION (type), "support for HSA does not "
		     "implement huge or variable-sized type %qT", type);
      return BRIG_TYPE_NONE;
    }

  if (RECORD_OR_UNION_TYPE_P (type))
    {
      /* Aggregates are represented as opaque byte arrays of their size.  */
      if (dim_p)
	*dim_p = tree_to_uhwi (TYPE_SIZE_UNIT (type));
      return BRIG_TYPE_U8 | BRIG_TYPE_ARRAY;
    }

  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      /* We try to be nice and use the real base-type when this is an array of
	 scalars and only resort to an array of bytes if the type is more
	 complex.  */

      unsigned HOST_WIDE_INT dim = 1;

      /* Collapse multi-dimensional arrays into a single dimension.  */
      while (TREE_CODE (type) == ARRAY_TYPE)
	{
	  tree domain = TYPE_DOMAIN (type);
	  if (!TYPE_MIN_VALUE (domain)
	      || !TYPE_MAX_VALUE (domain)
	      || !tree_fits_shwi_p (TYPE_MIN_VALUE (domain))
	      || !tree_fits_shwi_p (TYPE_MAX_VALUE (domain)))
	    {
	      HSA_SORRY_ATV (EXPR_LOCATION (type),
			     "support for HSA does not implement array "
			     "%qT with unknown bounds", type);
	      return BRIG_TYPE_NONE;
	    }
	  HOST_WIDE_INT min = tree_to_shwi (TYPE_MIN_VALUE (domain));
	  HOST_WIDE_INT max = tree_to_shwi (TYPE_MAX_VALUE (domain));
	  dim = dim * (unsigned HOST_WIDE_INT) (max - min + 1);
	  type = TREE_TYPE (type);
	}

      BrigType16_t res;
      if (RECORD_OR_UNION_TYPE_P (type))
	{
	  /* Array of aggregates: fall back to a byte array of total size.  */
	  dim = dim * tree_to_uhwi (TYPE_SIZE_UNIT (type));
	  res = BRIG_TYPE_U8;
	}
      else
	res = hsa_type_for_scalar_tree_type (type, false);

      if (dim_p)
	*dim_p = dim;
      return res | BRIG_TYPE_ARRAY;
    }

  /* Scalar case: */
  if (dim_p)
    *dim_p = 0;

  return hsa_type_for_scalar_tree_type (type, min32int);
}
/* Returns true if converting from STYPE into DTYPE needs the _CVT
   opcode.  If false a normal _MOV is enough.  */

static bool
hsa_needs_cvt (BrigType16_t dtype, BrigType16_t stype)
{
  /* Bit-type destinations are reinterpreted, never converted.  */
  if (hsa_btype_p (dtype))
    return false;

  /* A real conversion is needed when crossing the float/int boundary or
     when the bit sizes differ.  */
  return (hsa_type_float_p (dtype) != hsa_type_float_p (stype)
	  || hsa_type_bit_size (dtype) != hsa_type_bit_size (stype));
}
/* Return declaration name if it exists or create one from UID if it does not.
If DECL is a local variable, make UID part of its name. */
const char *
hsa_get_declaration_name (tree decl)
{
if (!DECL_NAME (decl))
{
char buf[64];
snprintf (buf, 64, "__hsa_anon_%u", DECL_UID (decl));
size_t len = strlen (buf);
char *copy = (char *) obstack_alloc (&hsa_obstack, len + 1);
memcpy (copy, buf, len + 1);
return copy;
}
tree name_tree;
if (TREE_CODE (decl) == FUNCTION_DECL
|| (TREE_CODE (decl) == VAR_DECL && is_global_var (decl)))
name_tree = DECL_ASSEMBLER_NAME (decl);
else
name_tree = DECL_NAME (decl);
const char *name = IDENTIFIER_POINTER (name_tree);
/* User-defined assembly names have prepended asterisk symbol. */
if (name[0] == '*')
name++;
if ((TREE_CODE (decl) == VAR_DECL)
&& decl_function_context (decl))
{
size_t len = strlen (name);
char *buf = (char *) alloca (len + 32);
snprintf (buf, len + 32, "%s_%u", name, DECL_UID (decl));
len = strlen (buf);
char *copy = (char *) obstack_alloc (&hsa_obstack, len + 1);
memcpy (copy, buf, len + 1);
return copy;
}
else
return name;
}
/* Lookup or create the associated hsa_symbol structure with a given VAR_DECL
   or lookup the hsa_structure corresponding to a PARM_DECL.  Global variables
   are kept in the unit-wide hsa_global_variable_symbols table; everything
   else lives in the current function's local symbol table.  */

static hsa_symbol *
get_symbol_for_decl (tree decl)
{
  hsa_symbol **slot;
  hsa_symbol dummy (BRIG_TYPE_NONE, BRIG_SEGMENT_NONE, BRIG_LINKAGE_NONE);

  gcc_assert (TREE_CODE (decl) == PARM_DECL
	      || TREE_CODE (decl) == RESULT_DECL
	      || TREE_CODE (decl) == VAR_DECL
	      || TREE_CODE (decl) == CONST_DECL);

  dummy.m_decl = decl;

  bool is_in_global_vars = ((TREE_CODE (decl) == VAR_DECL)
			    && !decl_function_context (decl));

  if (is_in_global_vars)
    slot = hsa_global_variable_symbols->find_slot (&dummy, INSERT);
  else
    slot = hsa_cfun->m_local_symbols->find_slot (&dummy, INSERT);

  gcc_checking_assert (slot);
  if (*slot)
    {
      hsa_symbol *sym = (*slot);

      /* If the symbol is problematic, mark current function also as
	 problematic.  */
      if (sym->m_seen_error)
	hsa_fail_cfun ();

      /* PR hsa/70234: If a global variable was marked to be emitted,
	 but HSAIL generation of a function using the variable fails,
	 we should retry to emit the variable in context of a different
	 function.

	 Iterate elements whether a symbol is already in m_global_symbols
	 of not.  */
      if (is_in_global_vars && !sym->m_emitted_to_brig)
	{
	  for (unsigned i = 0; i < hsa_cfun->m_global_symbols.length (); i++)
	    if (hsa_cfun->m_global_symbols[i] == sym)
	      return *slot;
	  hsa_cfun->m_global_symbols.safe_push (sym);
	}

      return *slot;
    }
  else
    {
      hsa_symbol *sym;
      /* PARM_DECLs and RESULT_DECL should be already in m_local_symbols.  */
      gcc_assert (TREE_CODE (decl) == VAR_DECL
		  || TREE_CODE (decl) == CONST_DECL);
      BrigAlignment8_t align = hsa_object_alignment (decl);

      if (is_in_global_vars)
	{
	  gcc_checking_assert (TREE_CODE (decl) != CONST_DECL);
	  sym = new hsa_symbol (BRIG_TYPE_NONE, BRIG_SEGMENT_GLOBAL,
				BRIG_LINKAGE_PROGRAM, true,
				BRIG_ALLOCATION_PROGRAM, align);
	  hsa_cfun->m_global_symbols.safe_push (sym);
	  sym->fillup_for_decl (decl);
	  /* fillup_for_decl may have raised the alignment to the type's
	     natural alignment; HSA requires globals to be at least
	     naturally aligned.  */
	  if (sym->m_align > align)
	    {
	      sym->m_seen_error = true;
	      HSA_SORRY_ATV (EXPR_LOCATION (decl),
			     "HSA specification requires that %E is at least "
			     "naturally aligned", decl);
	    }
	}
      else
	{
	  /* As generation of efficient memory copy instructions relies
	     on alignment greater or equal to 8 bytes,
	     we need to increase alignment of all aggregate types.  */
	  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
	    align = MAX ((BrigAlignment8_t) BRIG_ALIGNMENT_8, align);

	  /* Pick segment and allocation kind from the declaration's
	     storage class and HSA-specific attributes.  */
	  BrigAllocation allocation = BRIG_ALLOCATION_AUTOMATIC;
	  BrigSegment8_t segment;
	  if (TREE_CODE (decl) == CONST_DECL)
	    {
	      segment = BRIG_SEGMENT_READONLY;
	      allocation = BRIG_ALLOCATION_AGENT;
	    }
	  else if (lookup_attribute ("hsa_group_segment",
				     DECL_ATTRIBUTES (decl)))
	    segment = BRIG_SEGMENT_GROUP;
	  else if (TREE_STATIC (decl))
	    {
	      segment = BRIG_SEGMENT_GLOBAL;
	      allocation = BRIG_ALLOCATION_PROGRAM;
	    }
	  else if (lookup_attribute ("hsa_global_segment",
				     DECL_ATTRIBUTES (decl)))
	    segment = BRIG_SEGMENT_GLOBAL;
	  else
	    segment = BRIG_SEGMENT_PRIVATE;

	  sym = new hsa_symbol (BRIG_TYPE_NONE, segment, BRIG_LINKAGE_FUNCTION,
				false, allocation, align);
	  sym->fillup_for_decl (decl);
	  hsa_cfun->m_private_variables.safe_push (sym);
	}

      sym->m_name = hsa_get_declaration_name (decl);
      *slot = sym;
      return sym;
    }
}
/* For a given HSA function declaration, return a host
   function declaration, or NULL when the GPU implementation has no
   bound host counterpart.  */

tree
hsa_get_host_function (tree decl)
{
  hsa_function_summary *s
    = hsa_summaries->get (cgraph_node::get_create (decl));
  gcc_assert (s->m_gpu_implementation_p);

  if (s->m_bound_function)
    return s->m_bound_function->decl;
  return NULL;
}
/* Return a newly-allocated, BRIG-sanitized name for function DECL.  If DECL
   is a GPU implementation bound to a host function, the host function's name
   is used instead.  (The previous comment here — "Return true if function
   DECL has a host equivalent function." — described a different function.)
   Caller owns the returned string (xstrdup'ed).  */

static char *
get_brig_function_name (tree decl)
{
  tree d = decl;

  hsa_function_summary *s = hsa_summaries->get (cgraph_node::get_create (d));
  if (s != NULL
      && s->m_gpu_implementation_p
      && s->m_bound_function)
    d = s->m_bound_function->decl;

  /* IPA split can create a function that has no host equivalent.  */
  if (d == NULL)
    d = decl;

  char *name = xstrdup (hsa_get_declaration_name (d));
  hsa_sanitize_name (name);

  return name;
}
/* Create a spill symbol of type TYPE, register it in the current
   function's spill-symbol list and return it.  */

hsa_symbol *
hsa_get_spill_symbol (BrigType16_t type)
{
  hsa_symbol *spill = new hsa_symbol (type, BRIG_SEGMENT_SPILL,
				      BRIG_LINKAGE_FUNCTION);
  hsa_cfun->m_spill_symbols.safe_push (spill);
  return spill;
}
/* Return the global read-only symbol holding STRING_CST, creating and
   caching it on first use so each distinct string constant is emitted
   only once per function.  */

hsa_symbol *
hsa_get_string_cst_symbol (tree string_cst)
{
  gcc_checking_assert (TREE_CODE (string_cst) == STRING_CST);

  /* Reuse a previously created symbol for the same constant.  */
  hsa_symbol **slot = hsa_cfun->m_string_constants_map.get (string_cst);
  if (slot)
    return *slot;

  hsa_op_immed *value = new hsa_op_immed (string_cst);
  hsa_symbol *sym = new hsa_symbol (value->m_type, BRIG_SEGMENT_GLOBAL,
				    BRIG_LINKAGE_MODULE, true,
				    BRIG_ALLOCATION_AGENT);
  sym->m_cst_value = value;
  sym->m_dim = TREE_STRING_LENGTH (string_cst);
  sym->m_name_number = hsa_cfun->m_global_symbols.length ();

  hsa_cfun->m_global_symbols.safe_push (sym);
  hsa_cfun->m_string_constants_map.put (string_cst, sym);
  return sym;
}
/* Make the type of a MOV instruction larger if mandated by HSAIL rules:
   integer mov types are widened to at least 32 bits, and small bit-types
   become B32.  */

static void
hsa_fixup_mov_insn_type (hsa_insn_basic *insn)
{
  BrigType16_t t = hsa_extend_inttype_to_32bit (insn->m_type);
  if (t == BRIG_TYPE_B8 || t == BRIG_TYPE_B16)
    t = BRIG_TYPE_B32;
  insn->m_type = t;
}
/* Constructor of the ancestor of all operands.  K is BRIG kind that identified
   what the operator is.  Every constructed operand registers itself in the
   file-wide hsa_operands vector so it can be destroyed in bulk later.  */

hsa_op_base::hsa_op_base (BrigKind16_t k)
  : m_next (NULL), m_brig_op_offset (0), m_kind (k)
{
  hsa_operands.safe_push (this);
}
/* Constructor of ancestor of all operands which have a type.  K is BRIG kind
   that identified what the operator is.  T is the type of the operator.  */

hsa_op_with_type::hsa_op_with_type (BrigKind16_t k, BrigType16_t t)
  : hsa_op_base (k), m_type (t)
{
}
/* Return an operand carrying this operand's value viewed as type DTYPE,
   appending a conversion (CVT) or a move (MOV) instruction to HBB when one
   is needed.  Returns this operand unchanged when no code is necessary.  */

hsa_op_with_type *
hsa_op_with_type::get_in_type (BrigType16_t dtype, hsa_bb *hbb)
{
  if (m_type == dtype)
    return this;

  hsa_op_reg *dest;

  if (hsa_needs_cvt (dtype, m_type))
    {
      /* A genuine conversion (float<->int or size change).  */
      dest = new hsa_op_reg (dtype);
      hbb->append_insn (new hsa_insn_cvt (dest, this));
    }
  else if (is_a <hsa_op_reg *> (this))
    {
      /* In the end, HSA registers do not really have types, only sizes, so if
	 the sizes match, we can use the register directly.  */
      gcc_checking_assert (hsa_type_bit_size (dtype)
			   == hsa_type_bit_size (m_type));
      return this;
    }
  else
    {
      /* Same-size reinterpretation of a non-register operand: move it into
	 a register first.  */
      dest = new hsa_op_reg (m_type);

      hsa_insn_basic *mov = new hsa_insn_basic (2, BRIG_OPCODE_MOV,
						dest->m_type, dest, this);
      hsa_fixup_mov_insn_type (mov);
      hbb->append_insn (mov);
      /* We cannot simply for instance: 'mov_u32 $_3, 48 (s32)' because
	 type of the operand must be same as type of the instruction.  */
      dest->m_type = dtype;
    }

  return dest;
}
/* If this operand has integer type smaller than 32 bits, extend it to 32 bits,
   adding instructions to HBB if needed.  Wider or non-integer operands are
   returned unchanged.  */

hsa_op_with_type *
hsa_op_with_type::extend_int_to_32bit (hsa_bb *hbb)
{
  BrigType16_t wide;
  switch (m_type)
    {
    case BRIG_TYPE_U8:
    case BRIG_TYPE_U16:
      wide = BRIG_TYPE_U32;
      break;
    case BRIG_TYPE_S8:
    case BRIG_TYPE_S16:
      wide = BRIG_TYPE_S32;
      break;
    default:
      return this;
    }
  return get_in_type (wide, hbb);
}
/* Constructor of class representing HSA immediate values.  TREE_VAL is the
   tree representation of the immediate value.  If min32int is true,
   always expand integer types to one that has at least 32 bits.  On
   HSAIL-generation failure the operand is left with its value unset.  */

hsa_op_immed::hsa_op_immed (tree tree_val, bool min32int)
  : hsa_op_with_type (BRIG_KIND_OPERAND_CONSTANT_BYTES,
		      hsa_type_for_tree_type (TREE_TYPE (tree_val), NULL,
					      min32int))
{
  if (hsa_seen_error ())
    return;

  /* Only genuine constants are representable: invariants (with pointers
     restricted to INTEGER_CSTs) or constructors of constants.  */
  gcc_checking_assert ((is_gimple_min_invariant (tree_val)
			&& (!POINTER_TYPE_P (TREE_TYPE (tree_val))
			    || TREE_CODE (tree_val) == INTEGER_CST))
		       || TREE_CODE (tree_val) == CONSTRUCTOR);
  m_tree_value = tree_val;

  /* Verify that all elements of a constructor are constants.  */
  if (TREE_CODE (m_tree_value) == CONSTRUCTOR)
    for (unsigned i = 0; i < CONSTRUCTOR_NELTS (m_tree_value); i++)
      {
	tree v = CONSTRUCTOR_ELT (m_tree_value, i)->value;
	if (!CONSTANT_CLASS_P (v))
	  {
	    HSA_SORRY_AT (EXPR_LOCATION (tree_val),
			  "HSA ctor should have only constants");
	    return;
	  }
      }
}
/* Constructor of class representing HSA immediate values.  INTEGER_VALUE is the
   integer representation of the immediate value.  TYPE is BRIG type, which
   must be an integer type.  */

hsa_op_immed::hsa_op_immed (HOST_WIDE_INT integer_value, BrigType16_t type)
  : hsa_op_with_type (BRIG_KIND_OPERAND_CONSTANT_BYTES, type),
    m_tree_value (NULL)
{
  gcc_assert (hsa_type_integer_p (type));
  m_int_value = integer_value;
}
/* Default constructor creating an empty immediate with no kind and no
   type.  */

hsa_op_immed::hsa_op_immed ()
  : hsa_op_with_type (BRIG_KIND_NONE, BRIG_TYPE_NONE)
{
}
/* New operator to allocate immediate operands from obstack.  Storage is
   reclaimed wholesale when the obstack is freed, not per object.  */

void *
hsa_op_immed::operator new (size_t size)
{
  return obstack_alloc (&hsa_obstack, size);
}
/* Destructor.  Nothing to release: the operand itself lives on the HSA
   obstack and its tree value is GC-managed.  */

hsa_op_immed::~hsa_op_immed ()
{
}

/* Change type of the immediate value to T.  */

void
hsa_op_immed::set_type (BrigType16_t t)
{
  m_type = t;
}
/* Constructor of class representing HSA registers and pseudo-registers.  T is
   the BRIG type of the new register.  Each register gets the next ordinal
   number from the current function's register counter.  */

hsa_op_reg::hsa_op_reg (BrigType16_t t)
  : hsa_op_with_type (BRIG_KIND_OPERAND_REGISTER, t), m_gimple_ssa (NULL_TREE),
    m_def_insn (NULL), m_spill_sym (NULL), m_order (hsa_cfun->m_reg_count++),
    m_lr_begin (0), m_lr_end (0), m_reg_class (0), m_hard_num (0)
{
}

/* New operator to allocate a register from obstack.  Storage is reclaimed
   wholesale when the obstack is freed, not per object.  */

void *
hsa_op_reg::operator new (size_t size)
{
  return obstack_alloc (&hsa_obstack, size);
}
/* Verify register operand. */
void
hsa_op_reg::verify_ssa ()
{
/* Verify that each HSA register has a definition assigned.
Exceptions are VAR_DECL and PARM_DECL that are a default
definition. */
gcc_checking_assert (m_def_insn
|| (m_gimple_ssa != NULL
&& (!SSA_NAME_VAR (m_gimple_ssa)
|| (TREE_CODE (SSA_NAME_VAR (m_gimple_ssa))
!= PARM_DECL))
&& SSA_NAME_IS_DEFAULT_DEF (m_gimple_ssa)));
/* Verify that every use of the register is really present
in an instruction. */
for (unsigned i = 0; i < m_uses.length (); i++)
{
hsa_insn_basic *use = m_uses[i];
bool is_visited = false;
for (unsigned j = 0; j < use->operand_count (); j++)
{
hsa_op_base *u = use->get_op (j);
hsa_op_address *addr; addr = dyn_cast <hsa_op_address *> (u);
if (addr && addr->m_reg)
u = addr->m_reg;
if (u == this)
{
bool r = !addr && use->op_output_p (j);
if (r)
{
error ("HSA SSA name defined by instruction that is supposed "
"to be using it");
debug_hsa_operand (this);
debug_hsa_insn (use);
internal_error ("HSA SSA verification failed");
}
is_visited = true;
}
}
if (!is_visited)
{
error ("HSA SSA name not among operands of instruction that is "
"supposed to use it");
debug_hsa_operand (this);
debug_hsa_insn (use);
internal_error ("HSA SSA verification failed");
}
}
}
/* Constructor of an address operand composed of symbol SYM, register R and an
   immediate byte OFFSET.  */

hsa_op_address::hsa_op_address (hsa_symbol *sym, hsa_op_reg *r,
				HOST_WIDE_INT offset)
  : hsa_op_base (BRIG_KIND_OPERAND_ADDRESS), m_symbol (sym), m_reg (r),
    m_imm_offset (offset)
{
}
/* Constructor of an address operand composed of symbol SYM and an immediate
   byte OFFSET, with no register part.  */

hsa_op_address::hsa_op_address (hsa_symbol *sym, HOST_WIDE_INT offset)
  : hsa_op_base (BRIG_KIND_OPERAND_ADDRESS), m_symbol (sym), m_reg (NULL),
    m_imm_offset (offset)
{
}
/* Constructor of an address operand composed of register R and an immediate
   byte OFFSET, with no symbol part.  */

hsa_op_address::hsa_op_address (hsa_op_reg *r, HOST_WIDE_INT offset)
  : hsa_op_base (BRIG_KIND_OPERAND_ADDRESS), m_symbol (NULL), m_reg (r),
    m_imm_offset (offset)
{
}
/* New operator to allocate address operands from obstack.  Storage is
   released together with HSA_OBSTACK.  */

void *
hsa_op_address::operator new (size_t size)
{
  return obstack_alloc (&hsa_obstack, size);
}
/* Constructor of an operand referring to HSAIL code.  The referenced
   directive offset is filled in later, during BRIG emission.  */

hsa_op_code_ref::hsa_op_code_ref () : hsa_op_base (BRIG_KIND_OPERAND_CODE_REF),
  m_directive_offset (0)
{
}
/* Constructor of an operand representing a code list.  Set it up so that it
   can contain ELEMENTS number of elements, all initially cleared.  */

hsa_op_code_list::hsa_op_code_list (unsigned elements)
  : hsa_op_base (BRIG_KIND_OPERAND_CODE_LIST)
{
  /* Reserve the final size up front; creating with capacity 1 and then
     growing to ELEMENTS would trigger a pointless reallocation.  */
  m_offsets.create (elements);
  m_offsets.safe_grow_cleared (elements);
}
/* New operator to allocate code list operands from obstack.  Storage is
   released together with HSA_OBSTACK.  */

void *
hsa_op_code_list::operator new (size_t size)
{
  return obstack_alloc (&hsa_obstack, size);
}
/* Constructor of an operand representing an operand list.
   Set it up so that it can contain ELEMENTS number of elements.  */

hsa_op_operand_list::hsa_op_operand_list (unsigned elements)
  : hsa_op_base (BRIG_KIND_OPERAND_OPERAND_LIST)
{
  m_offsets.create (elements);
  /* Use safe_grow_cleared, consistently with hsa_op_code_list, so that the
     offsets never hold uninitialized garbage before they are filled in
     during BRIG emission.  */
  m_offsets.safe_grow_cleared (elements);
}
/* New operator to allocate operand list operands from obstack.  Storage is
   released together with HSA_OBSTACK.  */

void *
hsa_op_operand_list::operator new (size_t size)
{
  return obstack_alloc (&hsa_obstack, size);
}
/* Destructor.  Release the heap-allocated offset vector; the operand object
   itself lives on the obstack.  */

hsa_op_operand_list::~hsa_op_operand_list ()
{
  m_offsets.release ();
}
/* Return the HSA pseudo-register corresponding to gimple SSA name SSA,
   creating and caching it in the SSA map on first request.  */

hsa_op_reg *
hsa_function_representation::reg_for_gimple_ssa (tree ssa)
{
  gcc_checking_assert (TREE_CODE (ssa) == SSA_NAME);

  unsigned ver = SSA_NAME_VERSION (ssa);
  hsa_op_reg *hreg = m_ssa_map[ver];
  if (hreg == NULL)
    {
      /* First time we see this SSA name: make a register of the
	 corresponding scalar type and remember it.  */
      BrigType16_t t = hsa_type_for_scalar_tree_type (TREE_TYPE (ssa), false);
      hreg = new hsa_op_reg (t);
      hreg->m_gimple_ssa = ssa;
      m_ssa_map[ver] = hreg;
    }
  return hreg;
}
/* Set the defining instruction of this register to INSN.  While the function
   is still in SSA form, each register may be defined only once; out of SSA,
   definitions are not tracked at all.  */

void
hsa_op_reg::set_definition (hsa_insn_basic *insn)
{
  if (!hsa_cfun->m_in_ssa)
    {
      m_def_insn = NULL;
      return;
    }

  gcc_checking_assert (!m_def_insn);
  m_def_insn = insn;
}
/* Constructor of the class which is the bases of all instructions and directly
   represents the most basic ones.  NOPS is the number of operands that the
   operand vector will contain (and which will be cleared).  OP is the opcode
   of the instruction.  This constructor does not set type.  The new
   instruction is registered in the global HSA_INSTRUCTIONS vector so it can
   be deallocated at the end of the pass.  */

hsa_insn_basic::hsa_insn_basic (unsigned nops, int opc)
  : m_prev (NULL),
    m_next (NULL), m_bb (NULL), m_opcode (opc), m_number (0),
    m_type (BRIG_TYPE_NONE), m_brig_offset (0)
{
  if (nops > 0)
    m_operands.safe_grow_cleared (nops);

  hsa_instructions.safe_push (this);
}
/* Make OP the operand number INDEX of operands of this instruction. If OP is a
register or an address containing a register, then either set the definition
of the register to this instruction if it an output operand or add this
instruction to the uses if it is an input one. */
void
hsa_insn_basic::set_op (int index, hsa_op_base *op)
{
/* Each address operand is always use. */
hsa_op_address *addr = dyn_cast <hsa_op_address *> (op);
if (addr && addr->m_reg)
addr->m_reg->m_uses.safe_push (this);
else
{
hsa_op_reg *reg = dyn_cast <hsa_op_reg *> (op);
if (reg)
{
if (op_output_p (index))
reg->set_definition (this);
else
reg->m_uses.safe_push (this);
}
}
m_operands[index] = op;
}
/* Get INDEX-th operand of the instruction.  */

hsa_op_base *
hsa_insn_basic::get_op (int index)
{
  return m_operands[index];
}
/* Get address of INDEX-th operand of the instruction, so that the caller can
   rewrite the operand slot in place.  */

hsa_op_base **
hsa_insn_basic::get_op_addr (int index)
{
  return &m_operands[index];
}
/* Get number of operands of the instruction.  */

unsigned int
hsa_insn_basic::operand_count ()
{
  return m_operands.length ();
}
/* Constructor of the class which is the bases of all instructions and directly
   represents the most basic ones.  NOPS is the number of operands that the
   operand vector will contain (and which will be cleared).  OPC is the opcode
   of the instruction, T is the type of the instruction.  Any non-NULL
   arguments among ARG0..ARG3 are installed as the first operands.  */

hsa_insn_basic::hsa_insn_basic (unsigned nops, int opc, BrigType16_t t,
				hsa_op_base *arg0, hsa_op_base *arg1,
				hsa_op_base *arg2, hsa_op_base *arg3)
  : m_prev (NULL), m_next (NULL), m_bb (NULL), m_opcode (opc), m_number (0),
    m_type (t), m_brig_offset (0)
{
  if (nops > 0)
    m_operands.safe_grow_cleared (nops);

  /* Install each provided argument into its operand slot, verifying the
     operand vector is big enough.  */
  hsa_op_base *args[4];
  args[0] = arg0;
  args[1] = arg1;
  args[2] = arg2;
  args[3] = arg3;
  for (unsigned i = 0; i < 4; i++)
    if (args[i] != NULL)
      {
	gcc_checking_assert (nops >= i + 1);
	set_op (i, args[i]);
      }

  hsa_instructions.safe_push (this);
}
/* New operator to allocate basic instruction from obstack.  Storage is
   released together with HSA_OBSTACK.  */

void *
hsa_insn_basic::operator new (size_t size)
{
  return obstack_alloc (&hsa_obstack, size);
}
/* Verify the instruction. */
void
hsa_insn_basic::verify ()
{
hsa_op_address *addr;
hsa_op_reg *reg;
/* Iterate all register operands and verify that the instruction
is set in uses of the register. */
for (unsigned i = 0; i < operand_count (); i++)
{
hsa_op_base *use = get_op (i);
if ((addr = dyn_cast <hsa_op_address *> (use)) && addr->m_reg)
{
gcc_assert (addr->m_reg->m_def_insn != this);
use = addr->m_reg;
}
if ((reg = dyn_cast <hsa_op_reg *> (use)) && !op_output_p (i))
{
unsigned j;
for (j = 0; j < reg->m_uses.length (); j++)
{
if (reg->m_uses[j] == this)
break;
}
if (j == reg->m_uses.length ())
{
error ("HSA instruction uses a register but is not among "
"recorded register uses");
debug_hsa_operand (reg);
debug_hsa_insn (this);
internal_error ("HSA instruction verification failed");
}
}
}
}
/* Constructor of an instruction representing a PHI node.  NOPS is the number
   of operands (equal to the number of predecessors).  DST is the destination
   register; this instruction becomes its definition.  */

hsa_insn_phi::hsa_insn_phi (unsigned nops, hsa_op_reg *dst)
  : hsa_insn_basic (nops, HSA_OPCODE_PHI), m_dest (dst)
{
  dst->set_definition (this);
}
/* Constructor of class representing instructions for control flow and
   synchronization.  WIDTH is the BRIG width modifier of the instruction;
   the remaining arguments are forwarded to the hsa_insn_basic
   constructor.  */

hsa_insn_br::hsa_insn_br (unsigned nops, int opc, BrigType16_t t,
			  BrigWidth8_t width, hsa_op_base *arg0,
			  hsa_op_base *arg1, hsa_op_base *arg2,
			  hsa_op_base *arg3)
  : hsa_insn_basic (nops, opc, t, arg0, arg1, arg2, arg3),
    m_width (width)
{
}
/* Constructor of class representing instruction for conditional jump, CTRL is
   the control register determining whether the jump will be carried out, the
   new instruction is automatically added to its uses list.  */

hsa_insn_cbr::hsa_insn_cbr (hsa_op_reg *ctrl)
  : hsa_insn_br (1, BRIG_OPCODE_CBR, BRIG_TYPE_B1, BRIG_WIDTH_1, ctrl)
{
}
/* Constructor of class representing instruction for switch jump, INDEX is
   the index register and JUMP_COUNT is the number of possible targets, used
   to size the label code list.  */

hsa_insn_sbr::hsa_insn_sbr (hsa_op_reg *index, unsigned jump_count)
  : hsa_insn_basic (1, BRIG_OPCODE_SBR, BRIG_TYPE_B1, index),
    m_width (BRIG_WIDTH_1), m_jump_table (vNULL),
    m_label_code_list (new hsa_op_code_list (jump_count))
{
}
/* Replace all occurrences of OLD_BB with NEW_BB in the statement's
   jump table.  */

void
hsa_insn_sbr::replace_all_labels (basic_block old_bb, basic_block new_bb)
{
  unsigned count = m_jump_table.length ();
  for (unsigned idx = 0; idx < count; idx++)
    {
      if (m_jump_table[idx] == old_bb)
	m_jump_table[idx] = new_bb;
    }
}
/* Destructor.  Release the heap-allocated jump table.  */

hsa_insn_sbr::~hsa_insn_sbr ()
{
  m_jump_table.release ();
}
/* Constructor of comparison instruction.  CMP is the comparison operation and
   T is the result type.  The destination and the two compared values are
   provided as ARG0, ARG1 and ARG2.  */

hsa_insn_cmp::hsa_insn_cmp (BrigCompareOperation8_t cmp, BrigType16_t t,
			    hsa_op_base *arg0, hsa_op_base *arg1,
			    hsa_op_base *arg2)
  : hsa_insn_basic (3 , BRIG_OPCODE_CMP, t, arg0, arg1, arg2), m_compare (cmp)
{
}
/* Constructor of classes representing memory accesses.  OPC is the opcode
   (must be BRIG_OPCODE_ST or BRIG_OPCODE_LD) and T is the type.  The
   instruction operands are provided as ARG0 and ARG1.  Alignment defaults to
   the natural alignment of T and the equivalence class to zero.  */

hsa_insn_mem::hsa_insn_mem (int opc, BrigType16_t t, hsa_op_base *arg0,
			    hsa_op_base *arg1)
  : hsa_insn_basic (2, opc, t, arg0, arg1),
    m_align (hsa_natural_alignment (t)), m_equiv_class (0)
{
  gcc_checking_assert (opc == BRIG_OPCODE_LD || opc == BRIG_OPCODE_ST);
}
/* Constructor for descendants allowing different opcodes and number of
   operands, it passes its arguments directly to hsa_insn_basic
   constructor.  The instruction operands are provided as ARG[0-3].  */

hsa_insn_mem::hsa_insn_mem (unsigned nops, int opc, BrigType16_t t,
			    hsa_op_base *arg0, hsa_op_base *arg1,
			    hsa_op_base *arg2, hsa_op_base *arg3)
  : hsa_insn_basic (nops, opc, t, arg0, arg1, arg2, arg3),
    m_align (hsa_natural_alignment (t)), m_equiv_class (0)
{
}
/* Constructor of class representing atomic instructions.  OPC is the
   principal opcode, AOP is the specific atomic operation opcode.  T is the
   type of the instruction, MEMORDER its memory ordering.  The memory scope is
   always set to system scope.  The instruction operands are provided as
   ARG[0-3].  */

hsa_insn_atomic::hsa_insn_atomic (int nops, int opc,
				  enum BrigAtomicOperation aop,
				  BrigType16_t t, BrigMemoryOrder memorder,
				  hsa_op_base *arg0,
				  hsa_op_base *arg1, hsa_op_base *arg2,
				  hsa_op_base *arg3)
  : hsa_insn_mem (nops, opc, t, arg0, arg1, arg2, arg3), m_atomicop (aop),
    m_memoryorder (memorder),
    m_memoryscope (BRIG_MEMORY_SCOPE_SYSTEM)
{
  /* Only the four atomic/signal opcodes may carry an atomic operation.  */
  gcc_checking_assert (opc == BRIG_OPCODE_ATOMICNORET ||
		       opc == BRIG_OPCODE_ATOMIC ||
		       opc == BRIG_OPCODE_SIGNAL ||
		       opc == BRIG_OPCODE_SIGNALNORET);
}
/* Constructor of class representing signal instructions.  OPC is the
   principal opcode, SOP is the specific signal operation opcode.  T is the
   type of the instruction, MEMORDER its memory ordering.  The instruction
   operands are provided as ARG[0-3].  */

hsa_insn_signal::hsa_insn_signal (int nops, int opc,
				  enum BrigAtomicOperation sop,
				  BrigType16_t t, BrigMemoryOrder memorder,
				  hsa_op_base *arg0, hsa_op_base *arg1,
				  hsa_op_base *arg2, hsa_op_base *arg3)
  : hsa_insn_basic (nops, opc, t, arg0, arg1, arg2, arg3),
    m_memory_order (memorder), m_signalop (sop)
{
}
/* Constructor of class representing segment conversion instructions.  OPC is
   the opcode which must be either BRIG_OPCODE_STOF or BRIG_OPCODE_FTOS.  DEST
   and SRCT are destination and source types respectively, SEG is the segment
   we are converting to or from.  The instruction operands are
   provided as ARG0 and ARG1.  */

hsa_insn_seg::hsa_insn_seg (int opc, BrigType16_t dest, BrigType16_t srct,
			    BrigSegment8_t seg, hsa_op_base *arg0,
			    hsa_op_base *arg1)
  : hsa_insn_basic (2, opc, dest, arg0, arg1), m_src_type (srct),
    m_segment (seg)
{
  gcc_checking_assert (opc == BRIG_OPCODE_STOF || opc == BRIG_OPCODE_FTOS);
}
/* Constructor of class representing a call instruction.  CALLEE is the tree
   representation of the function being called.  Argument and result code
   lists are filled in later.  */

hsa_insn_call::hsa_insn_call (tree callee)
  : hsa_insn_basic (0, BRIG_OPCODE_CALL), m_called_function (callee),
    m_output_arg (NULL), m_args_code_list (NULL), m_result_code_list (NULL)
{
}
/* Constructor of class representing a call of an HSA-internal function FN;
   there is no tree callee in this case.  */

hsa_insn_call::hsa_insn_call (hsa_internal_fn *fn)
  : hsa_insn_basic (0, BRIG_OPCODE_CALL), m_called_function (NULL),
    m_called_internal_fn (fn), m_output_arg (NULL), m_args_code_list (NULL),
    m_result_code_list (NULL)
{
}
/* Destructor.  Delete the input argument symbols and the output argument
   owned by the call, then release the argument vectors.  */

hsa_insn_call::~hsa_insn_call ()
{
  for (unsigned i = 0; i < m_input_args.length (); i++)
    delete m_input_args[i];

  delete m_output_arg;

  m_input_args.release ();
  m_input_arg_insns.release ();
}
/* Constructor of class representing the argument block required to invoke
   a call in HSAIL.  BRIG_KIND distinguishes the start and the end of the
   block; CALL is the call instruction the block belongs to.  */

hsa_insn_arg_block::hsa_insn_arg_block (BrigKind brig_kind,
					hsa_insn_call * call)
  : hsa_insn_basic (0, HSA_OPCODE_ARG_BLOCK), m_kind (brig_kind),
    m_call_insn (call)
{
}
/* Constructor of class representing a comment directive.  S is the comment
   text; a copy prefixed with "// " is stored so that it forms a valid HSAIL
   comment.  */

hsa_insn_comment::hsa_insn_comment (const char *s)
  : hsa_insn_basic (0, BRIG_KIND_DIRECTIVE_COMMENT)
{
  /* Three characters for the "// " prefix plus the terminating NUL.  */
  size_t len = strlen (s) + 4;
  char *text = XNEWVEC (char, len);
  text[0] = '/';
  text[1] = '/';
  text[2] = ' ';
  strcpy (text + 3, s);
  m_comment = text;
}
/* Destructor.  Free the owned comment string.  */

hsa_insn_comment::~hsa_insn_comment ()
{
  gcc_checking_assert (m_comment);
  free (m_comment);
  m_comment = NULL;
}
/* Constructor of class representing the queue instruction in HSAIL.  The
   instruction type is always U64; SEGMENT and MEMORY_ORDER qualify the queue
   operation OPCODE.  The instruction operands are provided as ARG[0-3].  */

hsa_insn_queue::hsa_insn_queue (int nops, int opcode, BrigSegment segment,
				BrigMemoryOrder memory_order,
				hsa_op_base *arg0, hsa_op_base *arg1,
				hsa_op_base *arg2, hsa_op_base *arg3)
  : hsa_insn_basic (nops, opcode, BRIG_TYPE_U64, arg0, arg1, arg2, arg3),
    m_segment (segment), m_memory_order (memory_order)
{
}
/* Constructor of class representing the source type instruction in HSAIL.
   DESTT is the destination type of the instruction while SRCT is the separate
   source type.  The instruction operands are provided as ARG[0-2].  */

hsa_insn_srctype::hsa_insn_srctype (int nops, BrigOpcode opcode,
				    BrigType16_t destt, BrigType16_t srct,
				    hsa_op_base *arg0, hsa_op_base *arg1,
				    hsa_op_base *arg2 = NULL)
  : hsa_insn_basic (nops, opcode, destt, arg0, arg1, arg2),
    m_source_type (srct)
{}
/* Constructor of class representing the packed instruction in HSAIL.  DESTT
   and SRCT are the destination and source types.  An operand list sized for
   the NOPS - 1 source operands is allocated as well.  */

hsa_insn_packed::hsa_insn_packed (int nops, BrigOpcode opcode,
				  BrigType16_t destt, BrigType16_t srct,
				  hsa_op_base *arg0, hsa_op_base *arg1,
				  hsa_op_base *arg2)
  : hsa_insn_srctype (nops, opcode, destt, srct, arg0, arg1, arg2)
{
  m_operand_list = new hsa_op_operand_list (nops - 1);
}
/* Constructor of class representing the convert instruction in HSAIL,
   converting SRC into DEST.  The instruction type is the destination
   type.  */

hsa_insn_cvt::hsa_insn_cvt (hsa_op_with_type *dest, hsa_op_with_type *src)
  : hsa_insn_basic (2, BRIG_OPCODE_CVT, dest->m_type, dest, src)
{
}
/* Constructor of class representing the alloca in HSAIL.  DEST must be a U32
   register receiving the resulting address, SIZE the requested allocation
   size and ALIGNMENT the requested alignment in bytes (zero means the
   default of 8-byte BRIG alignment).  */

hsa_insn_alloca::hsa_insn_alloca (hsa_op_with_type *dest,
				  hsa_op_with_type *size, unsigned alignment)
  : hsa_insn_basic (2, BRIG_OPCODE_ALLOCA, dest->m_type, dest, size),
    m_align (BRIG_ALIGNMENT_8)
{
  gcc_assert (dest->m_type == BRIG_TYPE_U32);
  if (alignment)
    m_align = hsa_alignment_encoding (alignment);
}
/* Append an instruction INSN to the end of the basic block, maintaining the
   doubly-linked instruction list and the first/last pointers.  */

void
hsa_bb::append_insn (hsa_insn_basic *insn)
{
  /* Instructions with a real opcode must already have their operands;
     the instruction must not be in any block yet.  */
  gcc_assert (insn->m_opcode != 0 || insn->operand_count () == 0);
  gcc_assert (!insn->m_bb);

  insn->m_bb = m_bb;
  insn->m_next = NULL;
  insn->m_prev = m_last_insn;
  if (m_last_insn != NULL)
    m_last_insn->m_next = insn;
  if (m_first_insn == NULL)
    m_first_insn = insn;
  m_last_insn = insn;
}
/* Append PHI instruction HPHI to the end of the basic block's separate list
   of PHI nodes.  */

void
hsa_bb::append_phi (hsa_insn_phi *hphi)
{
  hphi->m_bb = m_bb;
  hphi->m_next = NULL;
  hphi->m_prev = m_last_phi;
  if (m_last_phi != NULL)
    m_last_phi->m_next = hphi;
  if (m_first_phi == NULL)
    m_first_phi = hphi;
  m_last_phi = hphi;
}
/* Insert HSA instruction NEW_INSN immediately before an existing instruction
   OLD_INSN, updating the containing block's first-instruction pointer when
   needed.  */

static void
hsa_insert_insn_before (hsa_insn_basic *new_insn, hsa_insn_basic *old_insn)
{
  hsa_bb *hbb = hsa_bb_for_bb (old_insn->m_bb);
  hsa_insn_basic *pred = old_insn->m_prev;

  if (hbb->m_first_insn == old_insn)
    hbb->m_first_insn = new_insn;

  new_insn->m_prev = pred;
  new_insn->m_next = old_insn;
  old_insn->m_prev = new_insn;
  if (pred != NULL)
    pred->m_next = new_insn;
}
/* Append HSA instruction NEW_INSN immediately after an existing instruction
   OLD_INSN, updating the containing block's last-instruction pointer when
   needed.  */

static void
hsa_append_insn_after (hsa_insn_basic *new_insn, hsa_insn_basic *old_insn)
{
  hsa_bb *hbb = hsa_bb_for_bb (old_insn->m_bb);
  hsa_insn_basic *succ = old_insn->m_next;

  if (hbb->m_last_insn == old_insn)
    hbb->m_last_insn = new_insn;

  new_insn->m_prev = old_insn;
  new_insn->m_next = succ;
  old_insn->m_next = new_insn;
  if (succ != NULL)
    succ->m_prev = new_insn;
}
/* Return a register containing the calculated value of EXP which must be an
   expression consisting of PLUS_EXPRs, MULT_EXPRs, NOP_EXPRs, SSA_NAMEs and
   integer constants as returned by get_inner_reference.
   Newly generated HSA instructions will be appended to HBB.
   Perform all calculations in ADDRTYPE.  Recurses on the operands of
   PLUS_EXPR and MULT_EXPR.  */

static hsa_op_with_type *
gen_address_calculation (tree exp, hsa_bb *hbb, BrigType16_t addrtype)
{
  int opcode;

  /* Strip a conversion; the calculation is carried out in ADDRTYPE
     anyway.  */
  if (TREE_CODE (exp) == NOP_EXPR)
    exp = TREE_OPERAND (exp, 0);

  switch (TREE_CODE (exp))
    {
    case SSA_NAME:
      return hsa_cfun->reg_for_gimple_ssa (exp)->get_in_type (addrtype, hbb);

    case INTEGER_CST:
      {
	/* Constants need no instruction, only an immediate coerced to
	   ADDRTYPE.  */
	hsa_op_immed *imm = new hsa_op_immed (exp);
	if (addrtype != imm->m_type)
	  imm->m_type = addrtype;
	return imm;
      }

    case PLUS_EXPR:
      opcode = BRIG_OPCODE_ADD;
      break;

    case MULT_EXPR:
      opcode = BRIG_OPCODE_MUL;
      break;

    default:
      gcc_unreachable ();
    }

  /* Binary case: compute both operands recursively and combine them with
     the selected opcode.  */
  hsa_op_reg *res = new hsa_op_reg (addrtype);
  hsa_insn_basic *insn = new hsa_insn_basic (3, opcode, addrtype);
  insn->set_op (0, res);

  hsa_op_with_type *op1 = gen_address_calculation (TREE_OPERAND (exp, 0), hbb,
						   addrtype);
  hsa_op_with_type *op2 = gen_address_calculation (TREE_OPERAND (exp, 1), hbb,
						   addrtype);
  insn->set_op (1, op1);
  insn->set_op (2, op2);

  hbb->append_insn (insn);
  return res;
}
/* If R1 is NULL, just return R2, otherwise append an instruction adding them
   to HBB and return the register holding the result.  */

static hsa_op_reg *
add_addr_regs_if_needed (hsa_op_reg *r1, hsa_op_reg *r2, hsa_bb *hbb)
{
  gcc_checking_assert (r2);
  if (r1 == NULL)
    return r2;

  /* Both registers must already have compatible types.  */
  gcc_assert (!hsa_needs_cvt (r1->m_type, r2->m_type));
  hsa_op_reg *sum = new hsa_op_reg (r1->m_type);
  hsa_insn_basic *add = new hsa_insn_basic (3, BRIG_OPCODE_ADD, sum->m_type,
					    sum, r1, r2);
  hbb->append_insn (add);
  return sum;
}
/* Helper of gen_hsa_addr.  Update *SYMBOL, *ADDRTYPE, *REG and *OFFSET to
   reflect BASE which is the first operand of a MEM_REF or a TARGET_MEM_REF.  */

static void
process_mem_base (tree base, hsa_symbol **symbol, BrigType16_t *addrtype,
		  hsa_op_reg **reg, offset_int *offset, hsa_bb *hbb)
{
  switch (TREE_CODE (base))
    {
    case SSA_NAME:
      {
	/* A register base; convert the SSA register to the address type.  */
	gcc_assert (!*reg);
	hsa_op_with_type *r
	  = hsa_cfun->reg_for_gimple_ssa (base)->get_in_type (*addrtype, hbb);
	*reg = dyn_cast <hsa_op_reg *> (r);
	break;
      }

    case ADDR_EXPR:
      {
	/* A symbol base; only addresses of declarations are supported.  */
	tree decl = TREE_OPERAND (base, 0);
	if (!DECL_P (decl) || TREE_CODE (decl) == FUNCTION_DECL)
	  {
	    HSA_SORRY_AT (EXPR_LOCATION (base),
			  "support for HSA does not implement a memory "
			  "reference to a non-declaration type");
	    return;
	  }

	gcc_assert (!*symbol);
	*symbol = get_symbol_for_decl (decl);
	*addrtype = hsa_get_segment_addr_type ((*symbol)->m_segment);
	break;
      }

    case INTEGER_CST:
      /* A constant base simply folds into the immediate offset.  */
      *offset += wi::to_offset (base);
      break;

    default:
      gcc_unreachable ();
    }
}
/* Forward declaration of a function. */
static void
gen_hsa_addr_insns (tree val, hsa_op_reg *dest, hsa_bb *hbb);
/* Generate HSA address operand for a given tree memory reference REF. If
instructions need to be created to calculate the address, they will be added
to the end of HBB. If a caller provider OUTPUT_BITSIZE and OUTPUT_BITPOS,
the function assumes that the caller will handle possible
bit-field references. Otherwise if we reference a bit-field, sorry message
is displayed. */
static hsa_op_address *
gen_hsa_addr (tree ref, hsa_bb *hbb, HOST_WIDE_INT *output_bitsize = NULL,
	      HOST_WIDE_INT *output_bitpos = NULL)
{
  hsa_symbol *symbol = NULL;
  hsa_op_reg *reg = NULL;
  offset_int offset = 0;
  tree origref = ref;
  tree varoffset = NULL_TREE;
  BrigType16_t addrtype = hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT);
  HOST_WIDE_INT bitsize = 0, bitpos = 0;
  BrigType16_t flat_addrtype = hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT);

  /* String constants get their own dedicated symbol.  */
  if (TREE_CODE (ref) == STRING_CST)
    {
      symbol = hsa_get_string_cst_symbol (ref);
      goto out;
    }
  /* Bit-field references whose size or offset is not a whole number of
     bytes are not supported at all.  */
  else if (TREE_CODE (ref) == BIT_FIELD_REF
	   && (!multiple_p (bit_field_size (ref), BITS_PER_UNIT)
	       || !multiple_p (bit_field_offset (ref), BITS_PER_UNIT)))
    {
      HSA_SORRY_ATV (EXPR_LOCATION (origref),
		     "support for HSA does not implement "
		     "bit field references such as %E", ref);
      goto out;
    }

  /* Decompose component references into a base, a constant bit position/size
     and a variable offset.  */
  if (handled_component_p (ref))
    {
      machine_mode mode;
      int unsignedp, volatilep, preversep;
      poly_int64 pbitsize, pbitpos;
      tree new_ref;

      new_ref = get_inner_reference (ref, &pbitsize, &pbitpos, &varoffset,
				     &mode, &unsignedp, &preversep,
				     &volatilep);
      /* When this isn't true, the switch below will report an
	 appropriate error.  */
      if (pbitsize.is_constant () && pbitpos.is_constant ())
	{
	  bitsize = pbitsize.to_constant ();
	  bitpos = pbitpos.to_constant ();
	  ref = new_ref;
	  /* Fold the whole-byte part of the bit position into the byte
	     offset.  */
	  offset = bitpos;
	  offset = wi::rshift (offset, LOG2_BITS_PER_UNIT, SIGNED);
	}
    }

  switch (TREE_CODE (ref))
    {
    case ADDR_EXPR:
      {
	/* Materialize the address into a flat-address temporary stored in a
	   private-segment spill symbol so it can itself be addressed.  */
	addrtype = hsa_get_segment_addr_type (BRIG_SEGMENT_PRIVATE);
	symbol = hsa_cfun->create_hsa_temporary (flat_addrtype);
	hsa_op_reg *r = new hsa_op_reg (flat_addrtype);
	gen_hsa_addr_insns (ref, r, hbb);
	hbb->append_insn (new hsa_insn_mem (BRIG_OPCODE_ST, r->m_type,
					    r, new hsa_op_address (symbol)));

	break;
      }
    case SSA_NAME:
      {
	/* Taking the address of an SSA register: spill its value into a
	   private-segment temporary and use that symbol.  B1 values must be
	   widened first because they cannot live in memory.  */
	addrtype = hsa_get_segment_addr_type (BRIG_SEGMENT_PRIVATE);
	hsa_op_with_type *r = hsa_cfun->reg_for_gimple_ssa (ref);
	if (r->m_type == BRIG_TYPE_B1)
	  r = r->get_in_type (BRIG_TYPE_U32, hbb);
	symbol = hsa_cfun->create_hsa_temporary (r->m_type);

	hbb->append_insn (new hsa_insn_mem (BRIG_OPCODE_ST, r->m_type,
					    r, new hsa_op_address (symbol)));
	break;
      }
    case PARM_DECL:
    case VAR_DECL:
    case RESULT_DECL:
    case CONST_DECL:
      /* Plain declarations map directly to their HSA symbol.  */
      gcc_assert (!symbol);
      symbol = get_symbol_for_decl (ref);
      addrtype = hsa_get_segment_addr_type (symbol->m_segment);
      break;

    case MEM_REF:
      process_mem_base (TREE_OPERAND (ref, 0), &symbol, &addrtype, &reg,
			&offset, hbb);

      if (!integer_zerop (TREE_OPERAND (ref, 1)))
	offset += wi::to_offset (TREE_OPERAND (ref, 1));
      break;

    case TARGET_MEM_REF:
      process_mem_base (TMR_BASE (ref), &symbol, &addrtype, &reg, &offset, hbb);
      if (TMR_INDEX (ref))
	{
	  /* Add INDEX * STEP (if STEP is not one) into the register part of
	     the address.  */
	  hsa_op_reg *disp1;
	  hsa_op_base *idx = hsa_cfun->reg_for_gimple_ssa
	    (TMR_INDEX (ref))->get_in_type (addrtype, hbb);
	  if (TMR_STEP (ref) && !integer_onep (TMR_STEP (ref)))
	    {
	      disp1 = new hsa_op_reg (addrtype);
	      hsa_insn_basic *insn = new hsa_insn_basic (3, BRIG_OPCODE_MUL,
							 addrtype);

	      /* As step must respect addrtype, we overwrite the type
		 of an immediate value.  */
	      hsa_op_immed *step = new hsa_op_immed (TMR_STEP (ref));
	      step->m_type = addrtype;

	      insn->set_op (0, disp1);
	      insn->set_op (1, idx);
	      insn->set_op (2, step);
	      hbb->append_insn (insn);
	    }
	  else
	    disp1 = as_a <hsa_op_reg *> (idx);
	  reg = add_addr_regs_if_needed (reg, disp1, hbb);
	}
      if (TMR_INDEX2 (ref))
	{
	  /* The second index is either another register addend or a
	     constant folded into the offset.  */
	  if (TREE_CODE (TMR_INDEX2 (ref)) == SSA_NAME)
	    {
	      hsa_op_base *disp2 = hsa_cfun->reg_for_gimple_ssa
		(TMR_INDEX2 (ref))->get_in_type (addrtype, hbb);
	      reg = add_addr_regs_if_needed (reg, as_a <hsa_op_reg *> (disp2),
					     hbb);
	    }
	  else if (TREE_CODE (TMR_INDEX2 (ref)) == INTEGER_CST)
	    offset += wi::to_offset (TMR_INDEX2 (ref));
	  else
	    gcc_unreachable ();
	}
      offset += wi::to_offset (TMR_OFFSET (ref));
      break;
    case FUNCTION_DECL:
      HSA_SORRY_AT (EXPR_LOCATION (origref),
		    "support for HSA does not implement function pointers");
      goto out;
    default:
      HSA_SORRY_ATV (EXPR_LOCATION (origref), "support for HSA does "
		     "not implement memory access to %E", origref);
      goto out;
    }

  /* A variable offset from get_inner_reference is either folded into the
     constant offset or computed into the register part.  */
  if (varoffset)
    {
      if (TREE_CODE (varoffset) == INTEGER_CST)
	offset += wi::to_offset (varoffset);
      else
	{
	  hsa_op_base *off_op = gen_address_calculation (varoffset, hbb,
							 addrtype);
	  reg = add_addr_regs_if_needed (reg, as_a <hsa_op_reg *> (off_op),
					 hbb);
	}
    }

  /* A symbol-based address must use that symbol's segment address type; a
     symbol-less one must be flat.  */
  gcc_checking_assert ((symbol
			&& addrtype
			== hsa_get_segment_addr_type (symbol->m_segment))
		       || (!symbol
			   && addrtype
			   == hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT)));
out:
  HOST_WIDE_INT hwi_offset = offset.to_shwi ();

  /* Calculate remaining bitsize offset (if presented).  */
  bitpos %= BITS_PER_UNIT;
  /* If bitsize is a power of two that is greater or equal to BITS_PER_UNIT, it
     is not a reason to think this is a bit-field access.  */
  if (bitpos == 0
      && (bitsize >= BITS_PER_UNIT)
      && !(bitsize & (bitsize - 1)))
    bitsize = 0;

  if ((bitpos || bitsize) && (output_bitpos == NULL || output_bitsize == NULL))
    HSA_SORRY_ATV (EXPR_LOCATION (origref), "support for HSA does not "
		   "implement unhandled bit field reference such as %E", ref);

  if (output_bitsize != NULL && output_bitpos != NULL)
    {
      *output_bitsize = bitsize;
      *output_bitpos = bitpos;
    }

  return new hsa_op_address (symbol, reg, hwi_offset);
}
/* Generate HSA address operand for a given tree memory reference REF.  If
   instructions need to be created to calculate the address, they will be added
   to the end of HBB.  OUTPUT_ALIGN is alignment of the created address.  */

static hsa_op_address *
gen_hsa_addr_with_align (tree ref, hsa_bb *hbb, BrigAlignment8_t *output_align)
{
  hsa_op_address *addr = gen_hsa_addr (ref, hbb);
  bool symbol_plus_offset = (addr->m_symbol != NULL && addr->m_reg == NULL);

  if (!symbol_plus_offset)
    *output_align = hsa_object_alignment (ref);
  else
    {
      /* If the address consists only of a symbol and an offset, we
	 compute the alignment ourselves to take into account any alignment
	 promotions we might have done for the HSA symbol representation.  */
      unsigned align = hsa_byte_alignment (addr->m_symbol->m_align);
      unsigned misalign = addr->m_imm_offset & (align - 1);
      if (misalign != 0)
	align = least_bit_hwi (misalign);
      *output_align = hsa_alignment_encoding (BITS_PER_UNIT * align);
    }
  return addr;
}
/* Generate HSA address for a function call argument of given TYPE.
   INDEX is used to generate corresponding name of the arguments.
   Special value -1 represents fact that result value is created.  */

static hsa_op_address *
gen_hsa_addr_for_arg (tree tree_type, int index)
{
  hsa_symbol *sym = new hsa_symbol (BRIG_TYPE_NONE, BRIG_SEGMENT_ARG,
				    BRIG_LINKAGE_ARG);
  sym->m_type = hsa_type_for_tree_type (tree_type, &sym->m_dim);

  if (index == -1)
    {
      /* The function result gets a fixed name.  */
      sym->m_name = "res";
    }
  else
    {
      /* Actual call arguments are named by their position.  */
      sym->m_name = NULL;
      sym->m_name_number = index;
    }

  return new hsa_op_address (sym);
}
/* Generate HSA instructions that process all necessary conversions
   of an ADDR to flat addressing and place the result into DEST.
   Instructions are appended to HBB.  A plain LDA suffices for global (and
   register-only) addresses; other segments additionally need an STOF
   segment-to-flat conversion.  */

static void
convert_addr_to_flat_segment (hsa_op_address *addr, hsa_op_reg *dest,
			      hsa_bb *hbb)
{
  hsa_insn_basic *insn = new hsa_insn_basic (2, BRIG_OPCODE_LDA);
  insn->set_op (1, addr);
  if (addr->m_symbol && addr->m_symbol->m_segment != BRIG_SEGMENT_GLOBAL)
    {
      /* LDA produces segment-relative address, we need to convert
	 it to the flat one.  */
      hsa_op_reg *tmp;
      tmp = new hsa_op_reg (hsa_get_segment_addr_type
			    (addr->m_symbol->m_segment));
      hsa_insn_seg *seg;
      seg = new hsa_insn_seg (BRIG_OPCODE_STOF,
			      hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT),
			      tmp->m_type, addr->m_symbol->m_segment, dest,
			      tmp);

      /* The LDA writes the segment-relative temporary, the STOF then
	 widens it into DEST.  */
      insn->set_op (0, tmp);
      insn->m_type = tmp->m_type;
      hbb->append_insn (insn);
      hbb->append_insn (seg);
    }
  else
    {
      /* The LDA result is already a flat address.  */
      insn->set_op (0, dest);
      insn->m_type = hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT);
      hbb->append_insn (insn);
    }
}
/* Generate HSA instructions that calculate address of VAL including all
   necessary conversions to flat addressing and place the result into DEST.
   Instructions are appended to HBB.  */

static void
gen_hsa_addr_insns (tree val, hsa_op_reg *dest, hsa_bb *hbb)
{
  /* Handle cases like tmp = NULL, where we just emit a move instruction
     to a register.  */
  if (TREE_CODE (val) == INTEGER_CST)
    {
      hsa_op_immed *c = new hsa_op_immed (val);
      hsa_insn_basic *insn = new hsa_insn_basic (2, BRIG_OPCODE_MOV,
						 dest->m_type, dest, c);
      hbb->append_insn (insn);
      return;
    }

  hsa_op_address *addr;

  gcc_assert (dest->m_type == hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT));
  if (TREE_CODE (val) == ADDR_EXPR)
    val = TREE_OPERAND (val, 0);
  addr = gen_hsa_addr (val, hbb);

  if (TREE_CODE (val) == CONST_DECL
      && is_gimple_reg_type (TREE_TYPE (val)))
    {
      gcc_assert (addr->m_symbol
		  && addr->m_symbol->m_segment == BRIG_SEGMENT_READONLY);

      /* CONST_DECLs are in readonly segment which however does not have
	 addresses convertible to flat segments.  So copy it to a private one
	 and take address of that.  */
      BrigType16_t csttype
	= mem_type_for_type (hsa_type_for_scalar_tree_type (TREE_TYPE (val),
							    false));
      hsa_op_reg *r = new hsa_op_reg (csttype);
      hbb->append_insn (new hsa_insn_mem (BRIG_OPCODE_LD, csttype, r,
					  new hsa_op_address (addr->m_symbol)));
      hsa_symbol *copysym = hsa_cfun->create_hsa_temporary (csttype);
      hbb->append_insn (new hsa_insn_mem (BRIG_OPCODE_ST, csttype, r,
					  new hsa_op_address (copysym)));
      addr->m_symbol = copysym;
    }
  else if (addr->m_symbol && addr->m_symbol->m_segment == BRIG_SEGMENT_READONLY)
    {
      /* Non-register-type readonly data cannot be copied out this way.  */
      HSA_SORRY_ATV (EXPR_LOCATION (val), "support for HSA does "
		     "not implement taking addresses of complex "
		     "CONST_DECLs such as %E", val);
      return;
    }

  convert_addr_to_flat_segment (addr, dest, hbb);
}
/* Return an HSA register or HSA immediate value operand corresponding to
   gimple operand OP.  */

static hsa_op_with_type *
hsa_reg_or_immed_for_gimple_op (tree op, hsa_bb *hbb)
{
  /* SSA names already have a register.  */
  if (TREE_CODE (op) == SSA_NAME)
    return hsa_cfun->reg_for_gimple_ssa (op);

  /* Non-pointer invariants become immediates.  */
  if (!POINTER_TYPE_P (TREE_TYPE (op)))
    return new hsa_op_immed (op);

  /* Pointer constants require computing a flat address into a fresh
     register.  */
  hsa_op_reg *addr_reg
    = new hsa_op_reg (hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT));
  gen_hsa_addr_insns (op, addr_reg, hbb);
  return addr_reg;
}
/* Create a simple movement instruction with register destination DEST and
   register or immediate source SRC and append it to the end of HBB.  The
   source and destination sizes must match, except that sub-32-bit
   destinations may be fed from 32-bit immediates.  */

void
hsa_build_append_simple_mov (hsa_op_reg *dest, hsa_op_base *src, hsa_bb *hbb)
{
  /* Moves of packed data between registers need to adhere to the same type
     rules like when dealing with memory.  */
  BrigType16_t tp = mem_type_for_type (dest->m_type);
  hsa_insn_basic *insn = new hsa_insn_basic (2, BRIG_OPCODE_MOV, tp, dest, src);
  hsa_fixup_mov_insn_type (insn);
  unsigned dest_size = hsa_type_bit_size (dest->m_type);
  if (hsa_op_reg *sreg = dyn_cast <hsa_op_reg *> (src))
    gcc_assert (dest_size == hsa_type_bit_size (sreg->m_type));
  else
    {
      unsigned imm_size
	= hsa_type_bit_size (as_a <hsa_op_immed *> (src)->m_type);
      gcc_assert ((dest_size == imm_size)
		  /* Eventually < 32bit registers will be promoted to 32bit.  */
		  || (dest_size < 32 && imm_size == 32));
    }
  hbb->append_insn (insn);
}
/* Generate HSAIL instructions loading a bit field into register DEST.
   VALUE_REG is a register of a SSA name that is used in the bit field
   reference.  To identify a bit field BITPOS is offset to the loaded memory
   and BITSIZE is number of bits of the bit field.
   Add instructions to HBB.  The field is extracted by shifting it left to
   the top of the (at least 32-bit) register and then right to its bottom.  */

static void
gen_hsa_insns_for_bitfield (hsa_op_reg *dest, hsa_op_reg *value_reg,
			    HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
			    hsa_bb *hbb)
{
  unsigned type_bitsize
    = hsa_type_bit_size (hsa_extend_inttype_to_32bit (dest->m_type));
  /* Shift left so the field's top bit reaches the register's top bit,
     discarding any higher bits...  */
  unsigned left_shift = type_bitsize - (bitsize + bitpos);
  /* ...then shift right so the field lands at bit zero.  */
  unsigned right_shift = left_shift + bitpos;

  if (left_shift)
    {
      hsa_op_reg *value_reg_2
	= new hsa_op_reg (hsa_extend_inttype_to_32bit (dest->m_type));
      hsa_op_immed *c = new hsa_op_immed (left_shift, BRIG_TYPE_U32);

      hsa_insn_basic *lshift
	= new hsa_insn_basic (3, BRIG_OPCODE_SHL, value_reg_2->m_type,
			      value_reg_2, value_reg, c);

      hbb->append_insn (lshift);

      value_reg = value_reg_2;
    }

  if (right_shift)
    {
      hsa_op_reg *value_reg_2
	= new hsa_op_reg (hsa_extend_inttype_to_32bit (dest->m_type));
      hsa_op_immed *c = new hsa_op_immed (right_shift, BRIG_TYPE_U32);

      hsa_insn_basic *rshift
	= new hsa_insn_basic (3, BRIG_OPCODE_SHR, value_reg_2->m_type,
			      value_reg_2, value_reg, c);

      hbb->append_insn (rshift);

      value_reg = value_reg_2;
    }

  /* Finally move the extracted value into DEST, converting the type if
     needed.  */
  hsa_insn_basic *assignment
    = new hsa_insn_basic (2, BRIG_OPCODE_MOV, dest->m_type, NULL, value_reg);
  hsa_fixup_mov_insn_type (assignment);
  hbb->append_insn (assignment);
  assignment->set_output_in_type (dest, 0, hbb);
}
/* Generate HSAIL instructions loading a bit field into register DEST.  ADDR is
   prepared memory address which is used to load the bit field.  To identify a
   bit field BITPOS is offset to the loaded memory and BITSIZE is number of
   bits of the bit field.  Add instructions to HBB.  Load must be performed in
   alignment ALIGN.  */

static void
gen_hsa_insns_for_bitfield_load (hsa_op_reg *dest, hsa_op_address *addr,
				 HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
				 hsa_bb *hbb, BrigAlignment8_t align)
{
  /* Load the underlying (at least 32-bit wide) word first...  */
  BrigType16_t load_type = hsa_extend_inttype_to_32bit (dest->m_type);
  hsa_op_reg *loaded = new hsa_op_reg (dest->m_type);
  hsa_insn_mem *ld = new hsa_insn_mem (BRIG_OPCODE_LD, load_type, loaded,
				       addr);
  ld->set_align (align);
  hbb->append_insn (ld);
  /* ...and then extract the requested bits from it.  */
  gen_hsa_insns_for_bitfield (dest, loaded, bitsize, bitpos, hbb);
}
/* Return the alignment that may be assumed for the base memory access we
   issue in order to implement bit-field memory reference REF.  */

static BrigAlignment8_t
hsa_bitmemref_alignment (tree ref)
{
  unsigned HOST_WIDE_INT offset_in_bits = 0;

  /* Walk down the reference chain towards the base object, accumulating
     the bit offsets of all bit-field components on the way.  */
  for (;;)
    {
      if (TREE_CODE (ref) == BIT_FIELD_REF)
	{
	  tree pos = TREE_OPERAND (ref, 2);
	  /* A position we cannot represent forces the weakest answer.  */
	  if (!tree_fits_uhwi_p (pos))
	    return BRIG_ALIGNMENT_1;
	  offset_in_bits += tree_to_uhwi (pos);
	}
      else if (TREE_CODE (ref) == COMPONENT_REF
	       && DECL_BIT_FIELD (TREE_OPERAND (ref, 1)))
	offset_in_bits += int_bit_position (TREE_OPERAND (ref, 1));
      else
	break;
      ref = TREE_OPERAND (ref, 0);
    }

  /* Split the accumulated offset into whole bytes and leftover bits; only
     the byte-aligned part can improve on the base object's alignment.  */
  unsigned HOST_WIDE_INT leftover_bits = offset_in_bits % BITS_PER_UNIT;
  unsigned HOST_WIDE_INT byte_aligned_bits = offset_in_bits - leftover_bits;
  BrigAlignment8_t base_align = hsa_object_alignment (ref);
  if (byte_aligned_bits == 0)
    return base_align;
  return MIN (base_align,
	      hsa_alignment_encoding (least_bit_hwi (byte_aligned_bits)));
}
/* Generate HSAIL instructions loading something into register DEST.  RHS is
   tree representation of the loaded data, which are loaded as type TYPE.  Add
   instructions to HBB.  Handles SSA copies, invariants/addresses, complex
   constants, real/imag part extraction, bit-field references and plain
   memory loads; anything else raises an HSA sorry diagnostic.  */

static void
gen_hsa_insns_for_load (hsa_op_reg *dest, tree rhs, tree type, hsa_bb *hbb)
{
  /* The destination SSA name will give us the type.  */
  if (TREE_CODE (rhs) == VIEW_CONVERT_EXPR)
    rhs = TREE_OPERAND (rhs, 0);

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      hsa_op_reg *src = hsa_cfun->reg_for_gimple_ssa (rhs);
      hsa_build_append_simple_mov (dest, src, hbb);
    }
  else if (is_gimple_min_invariant (rhs)
	   || TREE_CODE (rhs) == ADDR_EXPR)
    {
      if (POINTER_TYPE_P (TREE_TYPE (rhs)))
	{
	  if (dest->m_type != hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT))
	    {
	      HSA_SORRY_ATV (EXPR_LOCATION (rhs),
			     "support for HSA does not implement conversion "
			     "of %E to the requested non-pointer type.", rhs);
	      return;
	    }

	  gen_hsa_addr_insns (rhs, dest, hbb);
	}
      else if (TREE_CODE (rhs) == COMPLEX_CST)
	{
	  /* Move the real and imaginary constants into registers and
	     COMBINE them into the destination.  */
	  hsa_op_immed *real_part = new hsa_op_immed (TREE_REALPART (rhs));
	  hsa_op_immed *imag_part = new hsa_op_immed (TREE_IMAGPART (rhs));

	  hsa_op_reg *real_part_reg
	    = new hsa_op_reg (hsa_type_for_scalar_tree_type (TREE_TYPE (type),
							     true));
	  hsa_op_reg *imag_part_reg
	    = new hsa_op_reg (hsa_type_for_scalar_tree_type (TREE_TYPE (type),
							     true));

	  hsa_build_append_simple_mov (real_part_reg, real_part, hbb);
	  hsa_build_append_simple_mov (imag_part_reg, imag_part, hbb);

	  BrigType16_t src_type = hsa_bittype_for_type (real_part_reg->m_type);

	  hsa_insn_packed *insn
	    = new hsa_insn_packed (3, BRIG_OPCODE_COMBINE, dest->m_type,
				   src_type, dest, real_part_reg,
				   imag_part_reg);
	  hbb->append_insn (insn);
	}
      else
	{
	  hsa_op_immed *imm = new hsa_op_immed (rhs);
	  hsa_build_append_simple_mov (dest, imm, hbb);
	}
    }
  else if (TREE_CODE (rhs) == REALPART_EXPR || TREE_CODE (rhs) == IMAGPART_EXPR)
    {
      /* Load the whole complex value, EXPAND it into its two halves and
	 move the requested half into DEST.  */
      tree pack_type = TREE_TYPE (TREE_OPERAND (rhs, 0));

      hsa_op_reg *packed_reg
	= new hsa_op_reg (hsa_type_for_scalar_tree_type (pack_type, true));

      tree complex_rhs = TREE_OPERAND (rhs, 0);
      gen_hsa_insns_for_load (packed_reg, complex_rhs, TREE_TYPE (complex_rhs),
			      hbb);

      hsa_op_reg *real_reg
	= new hsa_op_reg (hsa_type_for_scalar_tree_type (type, true));

      hsa_op_reg *imag_reg
	= new hsa_op_reg (hsa_type_for_scalar_tree_type (type, true));

      /* This holds a BRIG type, not a BRIG kind; it was previously declared
	 BrigKind16_t, which only compiled because both are uint16_t.  */
      BrigType16_t brig_type = packed_reg->m_type;
      hsa_insn_packed *packed
	= new hsa_insn_packed (3, BRIG_OPCODE_EXPAND,
			       hsa_bittype_for_type (real_reg->m_type),
			       brig_type, real_reg, imag_reg, packed_reg);
      hbb->append_insn (packed);

      hsa_op_reg *source = TREE_CODE (rhs) == REALPART_EXPR ?
	real_reg : imag_reg;

      hsa_insn_basic *insn = new hsa_insn_basic (2, BRIG_OPCODE_MOV,
						 dest->m_type, NULL, source);
      hsa_fixup_mov_insn_type (insn);
      hbb->append_insn (insn);

      insn->set_output_in_type (dest, 0, hbb);
    }
  else if (TREE_CODE (rhs) == BIT_FIELD_REF
	   && TREE_CODE (TREE_OPERAND (rhs, 0)) == SSA_NAME)
    {
      /* Bit field taken directly from an SSA register: extract in place.  */
      tree ssa_name = TREE_OPERAND (rhs, 0);
      HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs, 1));
      HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs, 2));

      hsa_op_reg *imm_value = hsa_cfun->reg_for_gimple_ssa (ssa_name);
      gen_hsa_insns_for_bitfield (dest, imm_value, bitsize, bitpos, hbb);
    }
  else if (DECL_P (rhs) || TREE_CODE (rhs) == MEM_REF
	   || TREE_CODE (rhs) == TARGET_MEM_REF
	   || handled_component_p (rhs))
    {
      HOST_WIDE_INT bitsize, bitpos;

      /* Load from memory.  */
      hsa_op_address *addr;
      addr = gen_hsa_addr (rhs, hbb, &bitsize, &bitpos);

      /* Handle load of a bit field.  */
      if (bitsize > 64)
	{
	  HSA_SORRY_AT (EXPR_LOCATION (rhs),
			"support for HSA does not implement load from a bit "
			"field bigger than 64 bits");
	  return;
	}

      if (bitsize || bitpos)
	gen_hsa_insns_for_bitfield_load (dest, addr, bitsize, bitpos, hbb,
					 hsa_bitmemref_alignment (rhs));
      else
	{
	  BrigType16_t mtype;
	  /* Not dest->m_type, that's possibly extended.  */
	  mtype = mem_type_for_type (hsa_type_for_scalar_tree_type (type,
								    false));
	  hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_LD, mtype, dest,
						addr);
	  mem->set_align (hsa_object_alignment (rhs));
	  hbb->append_insn (mem);
	}
    }
  else
    HSA_SORRY_ATV (EXPR_LOCATION (rhs),
		   "support for HSA does not implement loading "
		   "of expression %E",
		   rhs);
}
/* Return the number of bits necessary to represent a bit field that starts
   at bit BITPOS and is BITSIZE bits wide, i.e. the last bit position rounded
   up to the nearest natural integer width (8, 16, 32 or 64).  */

static unsigned
get_bitfield_size (unsigned bitpos, unsigned bitsize)
{
  unsigned last_bit = bitpos + bitsize;
  if (last_bit <= 8)
    return 8;
  if (last_bit <= 16)
    return 16;
  if (last_bit <= 32)
    return 32;
  if (last_bit <= 64)
    return 64;
  /* Callers reject bit fields wider than 64 bits beforehand.  */
  gcc_unreachable ();
  return 0;
}
/* Generate HSAIL instructions storing into memory.  LHS is the destination of
   the store, SRC is the source operand.  Add instructions to HBB.  Stores to
   bit fields are implemented as a read-modify-write sequence on the word
   containing the field.  */

static void
gen_hsa_insns_for_store (tree lhs, hsa_op_base *src, hsa_bb *hbb)
{
  HOST_WIDE_INT bitsize = 0, bitpos = 0;
  BrigAlignment8_t req_align;
  BrigType16_t mtype;
  mtype = mem_type_for_type (hsa_type_for_scalar_tree_type (TREE_TYPE (lhs),
							    false));
  hsa_op_address *addr;
  addr = gen_hsa_addr (lhs, hbb, &bitsize, &bitpos);

  /* Handle store to a bit field.  */
  if (bitsize > 64)
    {
      HSA_SORRY_AT (EXPR_LOCATION (lhs),
		    "support for HSA does not implement store to a bit field "
		    "bigger than 64 bits");
      return;
    }

  /* Width of the containing word, rounded up to a natural integer size.  */
  unsigned type_bitsize = get_bitfield_size (bitpos, bitsize);

  /* HSAIL does not support MOV insn with 16-bits integers.  */
  if (type_bitsize < 32)
    type_bitsize = 32;

  /* Read-modify-write path: taken when the store does not cover a whole
     naturally-sized word (a real bit field).  */
  if (bitpos || (bitsize && type_bitsize != bitsize))
    {
      unsigned HOST_WIDE_INT mask = 0;
      BrigType16_t mem_type
	= get_integer_type_by_bytes (type_bitsize / BITS_PER_UNIT,
				     !TYPE_UNSIGNED (TREE_TYPE (lhs)));

      /* Build a mask that has ones everywhere except for the bits of the
	 field itself.  */
      for (unsigned i = 0; i < type_bitsize; i++)
	if (i < bitpos || i >= bitpos + bitsize)
	  mask |= ((unsigned HOST_WIDE_INT)1 << i);

      hsa_op_reg *value_reg = new hsa_op_reg (mem_type);
      req_align = hsa_bitmemref_alignment (lhs);
      /* Load value from memory.  */
      hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_LD, mem_type,
					    value_reg, addr);
      mem->set_align (req_align);
      hbb->append_insn (mem);

      /* AND the loaded value with prepared mask.  */
      hsa_op_reg *cleared_reg = new hsa_op_reg (mem_type);

      BrigType16_t t
	= get_integer_type_by_bytes (type_bitsize / BITS_PER_UNIT, false);
      hsa_op_immed *c = new hsa_op_immed (mask, t);

      hsa_insn_basic *clearing
	= new hsa_insn_basic (3, BRIG_OPCODE_AND, mem_type, cleared_reg,
			      value_reg, c);
      hbb->append_insn (clearing);

      /* Shift to left a value that is going to be stored.  */
      hsa_op_reg *new_value_reg = new hsa_op_reg (mem_type);

      hsa_insn_basic *basic = new hsa_insn_basic (2, BRIG_OPCODE_MOV, mem_type,
						  new_value_reg, src);
      hsa_fixup_mov_insn_type (basic);
      hbb->append_insn (basic);

      if (bitpos)
	{
	  hsa_op_reg *shifted_value_reg = new hsa_op_reg (mem_type);
	  c = new hsa_op_immed (bitpos, BRIG_TYPE_U32);

	  hsa_insn_basic *basic
	    = new hsa_insn_basic (3, BRIG_OPCODE_SHL, mem_type,
				  shifted_value_reg, new_value_reg, c);
	  hbb->append_insn (basic);

	  new_value_reg = shifted_value_reg;
	}

      /* OR the prepared value with prepared chunk loaded from memory.  */
      hsa_op_reg *prepared_reg= new hsa_op_reg (mem_type);
      basic = new hsa_insn_basic (3, BRIG_OPCODE_OR, mem_type, prepared_reg,
				  new_value_reg, cleared_reg);
      hbb->append_insn (basic);

      /* From here on the merged word is what gets stored.  */
      src = prepared_reg;
      mtype = mem_type;
    }
  else
    req_align = hsa_object_alignment (lhs);

  hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_ST, mtype, src, addr);
  mem->set_align (req_align);

  /* The HSAIL verifier has another constraint: if the source is an immediate
     then it must match the destination type.  If it's a register the low bits
     will be used for sub-word stores.  We're always allocating new operands so
     we can modify the above in place.  */
  if (hsa_op_immed *imm = dyn_cast <hsa_op_immed *> (src))
    {
      if (!hsa_type_packed_p (imm->m_type))
	imm->m_type = mem->m_type;
      else
	{
	  /* ...and all vector immediates apparently need to be vectors of
	     unsigned bytes.  */
	  unsigned bs = hsa_type_bit_size (imm->m_type);
	  gcc_assert (bs == hsa_type_bit_size (mem->m_type));
	  switch (bs)
	    {
	    case 32:
	      imm->m_type = BRIG_TYPE_U8X4;
	      break;
	    case 64:
	      imm->m_type = BRIG_TYPE_U8X8;
	      break;
	    case 128:
	      imm->m_type = BRIG_TYPE_U8X16;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}
    }

  hbb->append_insn (mem);
}
/* Generate memory copy instructions that are going to be used
   for copying a SRC memory to TARGET memory,
   represented by pointer in a register.  SIZE is the number of bytes to
   copy.  MIN_ALIGN is minimal alignment of provided HSA addresses.  */

static void
gen_hsa_memory_copy (hsa_bb *hbb, hsa_op_address *target, hsa_op_address *src,
		     unsigned size, BrigAlignment8_t min_align)
{
  unsigned pos = 0;
  unsigned max_chunk = hsa_byte_alignment (min_align);

  while (size > 0)
    {
      /* Use the widest power-of-two chunk that fits the remaining size,
	 capped by the guaranteed alignment.  */
      unsigned chunk;
      if (size >= 8)
	chunk = 8;
      else if (size >= 4)
	chunk = 4;
      else if (size >= 2)
	chunk = 2;
      else
	chunk = 1;
      if (chunk > max_chunk)
	chunk = max_chunk;

      BrigType16_t t = get_integer_type_by_bytes (chunk, false);
      hsa_op_reg *tmp = new hsa_op_reg (t);

      hsa_op_address *load_addr
	= new hsa_op_address (src->m_symbol, src->m_reg,
			      src->m_imm_offset + pos);
      hbb->append_insn (new hsa_insn_mem (BRIG_OPCODE_LD, t, tmp, load_addr));

      hsa_op_address *store_addr
	= new hsa_op_address (target->m_symbol, target->m_reg,
			      target->m_imm_offset + pos);
      hbb->append_insn (new hsa_insn_mem (BRIG_OPCODE_ST, t, tmp, store_addr));

      pos += chunk;
      size -= chunk;
    }
}
/* Create a memset mask that is created by copying a CONSTANT byte value
   to an integer of BYTE_SIZE bytes.  */

static unsigned HOST_WIDE_INT
build_memset_value (unsigned HOST_WIDE_INT constant, unsigned byte_size)
{
  if (constant == 0)
    return 0;

  /* Use an unsigned accumulator: replicating a byte with the top bit set
     (e.g. 0xff into 8 bytes) would otherwise OR shifted values into a
     signed variable, relying on implementation-defined conversion.  */
  unsigned HOST_WIDE_INT v = constant;

  for (unsigned i = 1; i < byte_size; i++)
    v |= constant << (8 * i);
  return v;
}
/* Generate memory set instructions that are going to be used
   for setting a CONSTANT byte value to TARGET memory of SIZE bytes.
   MIN_ALIGN is minimal alignment of provided HSA addresses.  */

static void
gen_hsa_memory_set (hsa_bb *hbb, hsa_op_address *target,
		    unsigned HOST_WIDE_INT constant,
		    unsigned size, BrigAlignment8_t min_align)
{
  unsigned pos = 0;
  unsigned max_chunk = hsa_byte_alignment (min_align);

  while (size > 0)
    {
      /* Pick the widest power-of-two chunk still fitting the remaining
	 size, capped by the guaranteed alignment.  */
      unsigned chunk;
      if (size >= 8)
	chunk = 8;
      else if (size >= 4)
	chunk = 4;
      else if (size >= 2)
	chunk = 2;
      else
	chunk = 1;
      if (chunk > max_chunk)
	chunk = max_chunk;

      hsa_op_address *chunk_addr
	= new hsa_op_address (target->m_symbol, target->m_reg,
			      target->m_imm_offset + pos);
      BrigType16_t t = get_integer_type_by_bytes (chunk, false);
      /* Store the byte value replicated across the whole chunk.  */
      HOST_WIDE_INT repl = build_memset_value (constant, chunk);
      hsa_insn_mem *st = new hsa_insn_mem (BRIG_OPCODE_ST, t,
					   new hsa_op_immed (repl, t),
					   chunk_addr);
      hbb->append_insn (st);

      pos += chunk;
      size -= chunk;
    }
}
/* Generate HSAIL instructions for a single assignment
   of an empty constructor to an ADDR_LHS.  Constructor is passed as a
   tree RHS and all instructions are appended to HBB.  ALIGN is
   alignment of the address.  */

void
gen_hsa_ctor_assignment (hsa_op_address *addr_lhs, tree rhs, hsa_bb *hbb,
			 BrigAlignment8_t align)
{
  /* Only the empty (all-zero) constructor is implemented.  */
  if (CONSTRUCTOR_NELTS (rhs))
    {
      HSA_SORRY_AT (EXPR_LOCATION (rhs),
		    "support for HSA does not implement load from constructor");
      return;
    }

  unsigned byte_size = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (rhs)));
  gen_hsa_memory_set (hbb, addr_lhs, 0, byte_size, align);
}
/* Generate HSA instructions for a single assignment of RHS to LHS.
   HBB is the basic block they will be appended to.  */

static void
gen_hsa_insns_for_single_assignment (tree lhs, tree rhs, hsa_bb *hbb)
{
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      /* Destination is a register: emit a load/move into it.  */
      hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs);
      if (hsa_seen_error ())
	return;

      gen_hsa_insns_for_load (dest, rhs, TREE_TYPE (lhs), hbb);
      return;
    }

  if (TREE_CODE (rhs) == SSA_NAME
      || (is_gimple_min_invariant (rhs) && TREE_CODE (rhs) != STRING_CST))
    {
      /* Store to memory.  */
      hsa_op_base *store_src = hsa_reg_or_immed_for_gimple_op (rhs, hbb);
      if (hsa_seen_error ())
	return;

      gen_hsa_insns_for_store (lhs, store_src, hbb);
      return;
    }

  /* Remaining cases are memory-to-memory: constructor assignment or an
     aggregate copy.  */
  BrigAlignment8_t lhs_align;
  hsa_op_address *addr_lhs = gen_hsa_addr_with_align (lhs, hbb, &lhs_align);

  if (TREE_CODE (rhs) == CONSTRUCTOR)
    gen_hsa_ctor_assignment (addr_lhs, rhs, hbb, lhs_align);
  else
    {
      BrigAlignment8_t rhs_align;
      hsa_op_address *addr_rhs = gen_hsa_addr_with_align (rhs, hbb,
							  &rhs_align);
      unsigned size = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (rhs)));
      gen_hsa_memory_copy (hbb, addr_lhs, addr_rhs, size,
			   MIN (lhs_align, rhs_align));
    }
}
/* Prepend before INSN a load from spill symbol of SPILL_REG.  Return the
   register into which we loaded.  If this required another register to convert
   from a B1 type, return it in *PTMP2, otherwise store NULL into it.  We
   assume we are out of SSA so the returned register does not have its
   definition set.  */

hsa_op_reg *
hsa_spill_in (hsa_insn_basic *insn, hsa_op_reg *spill_reg, hsa_op_reg **ptmp2)
{
  hsa_symbol *sym = spill_reg->m_spill_sym;
  hsa_op_reg *result = new hsa_op_reg (sym->m_type);
  hsa_insn_mem *load
    = new hsa_insn_mem (BRIG_OPCODE_LD, sym->m_type, result,
			new hsa_op_address (sym));
  hsa_insert_insn_before (load, insn);

  *ptmp2 = NULL;
  if (spill_reg->m_type == BRIG_TYPE_B1)
    {
      /* B1 registers are spilled via a wider type; convert the loaded
	 value back to B1 through an extra register.  */
      *ptmp2 = result;
      result = new hsa_op_reg (spill_reg->m_type);
      hsa_insert_insn_before (new hsa_insn_cvt (result, *ptmp2), insn);
    }
  return result;
}
/* Append after INSN a store to spill symbol of SPILL_REG.  Return the register
   from which we stored.  If this required another register to convert to a B1
   type, return it in *PTMP2, otherwise store NULL into it.  We assume we are
   out of SSA so the returned register does not have its use updated.  */

hsa_op_reg *
hsa_spill_out (hsa_insn_basic *insn, hsa_op_reg *spill_reg, hsa_op_reg **ptmp2)
{
  hsa_symbol *spill_sym = spill_reg->m_spill_sym;
  /* Register holding the value to store, in the spill symbol's type.  */
  hsa_op_reg *reg = new hsa_op_reg (spill_sym->m_type);
  hsa_op_address *addr = new hsa_op_address (spill_sym);

  hsa_op_reg *returnreg;

  *ptmp2 = NULL;
  returnreg = reg;

  if (spill_reg->m_type == BRIG_TYPE_B1)
    {
      /* B1 values cannot be stored directly: retype RETURNREG (= REG) back
	 to B1, convert it into a fresh register of the spill symbol's wider
	 type, and store that one instead.  INSN is rebound to the conversion
	 so the store below is appended after it.  */
      hsa_insn_basic *cvtinsn;
      *ptmp2 = new hsa_op_reg (spill_sym->m_type);
      reg->m_type = spill_reg->m_type;
      cvtinsn = new hsa_insn_cvt (*ptmp2, returnreg);
      hsa_append_insn_after (cvtinsn, insn);
      insn = cvtinsn;
      reg = *ptmp2;
    }

  hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_ST, spill_sym->m_type, reg,
					addr);
  hsa_append_insn_after (mem, insn);
  return returnreg;
}
/* Generate a comparison instruction that will compare LHS and RHS with
   comparison specified by CODE and put result into register DEST.  DEST has to
   have its type set already but must not have its definition set yet.
   Generated instructions will be added to HBB.  */

static void
gen_hsa_cmp_insn_from_gimple (enum tree_code code, tree lhs, tree rhs,
			      hsa_op_reg *dest, hsa_bb *hbb)
{
  /* Map the GIMPLE comparison code to the corresponding BRIG compare
     operation; the UN* codes map to the unordered ("U"-suffixed)
     variants.  */
  BrigCompareOperation8_t compare;

  switch (code)
    {
    case LT_EXPR:
      compare = BRIG_COMPARE_LT;
      break;
    case LE_EXPR:
      compare = BRIG_COMPARE_LE;
      break;
    case GT_EXPR:
      compare = BRIG_COMPARE_GT;
      break;
    case GE_EXPR:
      compare = BRIG_COMPARE_GE;
      break;
    case EQ_EXPR:
      compare = BRIG_COMPARE_EQ;
      break;
    case NE_EXPR:
      compare = BRIG_COMPARE_NE;
      break;
    case UNORDERED_EXPR:
      compare = BRIG_COMPARE_NAN;
      break;
    case ORDERED_EXPR:
      compare = BRIG_COMPARE_NUM;
      break;
    case UNLT_EXPR:
      compare = BRIG_COMPARE_LTU;
      break;
    case UNLE_EXPR:
      compare = BRIG_COMPARE_LEU;
      break;
    case UNGT_EXPR:
      compare = BRIG_COMPARE_GTU;
      break;
    case UNGE_EXPR:
      compare = BRIG_COMPARE_GEU;
      break;
    case UNEQ_EXPR:
      compare = BRIG_COMPARE_EQU;
      break;
    case LTGT_EXPR:
      compare = BRIG_COMPARE_NEU;
      break;
    default:
      HSA_SORRY_ATV (EXPR_LOCATION (lhs),
		     "support for HSA does not implement comparison tree "
		     "code %s\n", get_tree_code_name (code));
      return;
    }

  /* CMP instruction returns e.g. 0xffffffff (for a 32-bit with integer)
     as a result of comparison.  */

  BrigType16_t dest_type = hsa_type_integer_p (dest->m_type)
    ? (BrigType16_t) BRIG_TYPE_B1 : dest->m_type;

  hsa_insn_cmp *cmp = new hsa_insn_cmp (compare, dest_type);
  /* Both operands are run through extend_int_to_32bit before being
     attached to the compare instruction.  */
  hsa_op_with_type *op1 = hsa_reg_or_immed_for_gimple_op (lhs, hbb);
  cmp->set_op (1, op1->extend_int_to_32bit (hbb));
  hsa_op_with_type *op2 = hsa_reg_or_immed_for_gimple_op (rhs, hbb);
  cmp->set_op (2, op2->extend_int_to_32bit (hbb));
  hbb->append_insn (cmp);
  cmp->set_output_in_type (dest, 0, hbb);
}
/* Generate an unary instruction with OPCODE and append it to a basic block
   HBB.  The instruction uses DEST as a destination and OP1
   as a single operand.  */

static void
gen_hsa_unary_operation (BrigOpcode opcode, hsa_op_reg *dest,
			 hsa_op_with_type *op1, hsa_bb *hbb)
{
  gcc_checking_assert (dest);
  hsa_insn_basic *insn;

  /* A MOV between types that need a conversion is emitted as CVT
     instead (and skips the 32-bit extension below).  */
  if (opcode == BRIG_OPCODE_MOV && hsa_needs_cvt (dest->m_type, op1->m_type))
    {
      insn = new hsa_insn_cvt (dest, op1);
      hbb->append_insn (insn);
      return;
    }

  op1 = op1->extend_int_to_32bit (hbb);
  if (opcode == BRIG_OPCODE_FIRSTBIT || opcode == BRIG_OPCODE_LASTBIT)
    {
      /* FIRSTBIT/LASTBIT produce a U32 result and take a separate source
	 type, which is forced to an integer type if OP1's is not.  */
      BrigType16_t srctype = hsa_type_integer_p (op1->m_type) ? op1->m_type
	: hsa_unsigned_type_for_type (op1->m_type);
      insn = new hsa_insn_srctype (2, opcode, BRIG_TYPE_U32, srctype, NULL,
				   op1);
    }
  else
    {
      BrigType16_t optype = hsa_extend_inttype_to_32bit (dest->m_type);
      insn = new hsa_insn_basic (2, opcode, optype, NULL, op1);

      if (opcode == BRIG_OPCODE_MOV)
	hsa_fixup_mov_insn_type (insn);
      else if (opcode == BRIG_OPCODE_ABS || opcode == BRIG_OPCODE_NEG)
	{
	  /* ABS and NEG only exist in _s form :-/  */
	  if (insn->m_type == BRIG_TYPE_U32)
	    insn->m_type = BRIG_TYPE_S32;
	  else if (insn->m_type == BRIG_TYPE_U64)
	    insn->m_type = BRIG_TYPE_S64;
	}
    }

  hbb->append_insn (insn);
  insn->set_output_in_type (dest, 0, hbb);
}
/* Generate a binary instruction with OPCODE and append it to a basic block
HBB. The instruction uses DEST as a destination and operands OP1
and OP2. */
static void
gen_hsa_binary_operation (int opcode, hsa_op_reg *dest,
hsa_op_with_type *op1, hsa_op_with_type *op2,
hsa_bb *hbb)
{
gcc_checking_assert (dest);
BrigType16_t optype = hsa_extend_inttype_to_32bit (dest->m_type);
op1 = op1->extend_int_to_32bit (hbb);
op2 = op2->extend_int_to_32bit (hbb);
if ((opcode == BRIG_OPCODE_SHL || opcode == BRIG_OPCODE_SHR)
&& is_a <hsa_op_immed *> (op2))
{
hsa_op_immed *i = dyn_cast <hsa_op_immed *> (op2);
i->set_type (BRIG_TYPE_U32);
}
if ((opcode == BRIG_OPCODE_OR
|| opcode == BRIG_OPCODE_XOR
|| opcode == BRIG_OPCODE_AND)
&& is_a <hsa_op_immed *> (op2))
{
hsa_op_immed *i = dyn_cast <hsa_op_immed *> (op2);
i->set_type (hsa_unsigned_type_for_type (i->m_type));
}
hsa_insn_basic *insn = new hsa_insn_basic (3, opcode, optype, NULL,
op1, op2);
hbb->append_insn (insn);
insn->set_output_in_type (dest, 0, hbb);
}
/* Generate HSA instructions for a single assignment. HBB is the basic block
they will be appended to. */
static void
gen_hsa_insns_for_operation_assignment (gimple *assign, hsa_bb *hbb)
{
tree_code code = gimple_assign_rhs_code (assign);
gimple_rhs_class rhs_class = get_gimple_rhs_class (gimple_expr_code (assign));
tree lhs = gimple_assign_lhs (assign);
tree rhs1 = gimple_assign_rhs1 (assign);
tree rhs2 = gimple_assign_rhs2 (assign);
tree rhs3 = gimple_assign_rhs3 (assign);
BrigOpcode opcode;
switch (code)
{
CASE_CONVERT:
case FLOAT_EXPR:
/* The opcode is changed to BRIG_OPCODE_CVT if BRIG types
needs a conversion. */
opcode = BRIG_OPCODE_MOV;
break;
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
opcode = BRIG_OPCODE_ADD;
break;
case MINUS_EXPR:
opcode = BRIG_OPCODE_SUB;
break;
case MULT_EXPR:
opcode = BRIG_OPCODE_MUL;
break;
case MULT_HIGHPART_EXPR: