/* Expands front end tree to back end RTL for GCC.
Copyright (C) 1987-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* This file handles the generation of rtl code from tree structure
at the level of the function as a whole.
It creates the rtl expressions for parameters and auto variables
and has full responsibility for allocating stack slots.
`expand_function_start' is called at the beginning of a function,
before the function body is parsed, and `expand_function_end' is
called after parsing the body.
Call `assign_stack_local' to allocate a stack slot for a local variable.
This is usually done during the RTL generation for the function body,
but it can also be done in the reload pass when a pseudo-register does
not get a hard register. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple-expr.h"
#include "cfghooks.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "opts.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "rtl-error.h"
#include "hard-reg-set.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "except.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "expr.h"
#include "optabs-tree.h"
#include "output.h"
#include "langhooks.h"
#include "common/common-target.h"
#include "gimplify.h"
#include "tree-pass.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "cfgexpand.h"
#include "shrink-wrap.h"
#include "toplev.h"
#include "rtl-iter.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "stringpool.h"
#include "attribs.h"
#include "gimple.h"
#include "options.h"
#include "function-abi.h"
#include "value-range.h"
#include "gimple-range.h"
/* So we can assign to cfun in this file. */
#undef cfun
#ifndef STACK_ALIGNMENT_NEEDED
#define STACK_ALIGNMENT_NEEDED 1
#endif
#define STACK_BYTES (STACK_BOUNDARY / BITS_PER_UNIT)
/* Round a value down to the largest multiple of the required alignment
that is less than or equal to it. Avoid using division in case the
value is negative. Assume the alignment is a power of two. */
#define FLOOR_ROUND(VALUE,ALIGN) ((VALUE) & ~((ALIGN) - 1))
/* Similar, but round up to the next multiple of the alignment. */
#define CEIL_ROUND(VALUE,ALIGN) (((VALUE) + (ALIGN) - 1) & ~((ALIGN)- 1))
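/* Worked example (illustrative, for ALIGN a power of two):
     FLOOR_ROUND (37, 16) == 37 & ~15 == 32
     CEIL_ROUND (37, 16) == (37 + 15) & ~15 == 48
   Both also behave for negative values, e.g. FLOOR_ROUND (-5, 16) == -16,
   which naive division-based rounding would get wrong. */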
/* Nonzero once virtual register instantiation has been done.
assign_stack_local uses frame_pointer_rtx when this is nonzero.
calls.cc:emit_library_call_value_1 uses it to set up
post-instantiation libcalls. */
int virtuals_instantiated;
/* Assign unique numbers to labels generated for profiling, debugging, etc. */
static GTY(()) int funcdef_no;
/* These variables hold pointers to functions to create and destroy
target specific, per-function data structures. */
struct machine_function * (*init_machine_status) (void);
/* The currently compiled function. */
struct function *cfun = 0;
/* These hashes record the prologue and epilogue insns. */
struct insn_cache_hasher : ggc_cache_ptr_hash<rtx_def>
{
static hashval_t hash (rtx x) { return htab_hash_pointer (x); }
static bool equal (rtx a, rtx b) { return a == b; }
};
static GTY((cache))
hash_table<insn_cache_hasher> *prologue_insn_hash;
static GTY((cache))
hash_table<insn_cache_hasher> *epilogue_insn_hash;
hash_table<used_type_hasher> *types_used_by_vars_hash = NULL;
vec<tree, va_gc> *types_used_by_cur_var_decl;
/* Forward declarations. */
static class temp_slot *find_temp_slot_from_address (rtx);
static void pad_to_arg_alignment (struct args_size *, int, struct args_size *);
static void pad_below (struct args_size *, machine_mode, tree);
static void reorder_blocks_1 (rtx_insn *, tree, vec<tree> *);
static int all_blocks (tree, tree *);
static tree *get_block_vector (tree, int *);
extern tree debug_find_var_in_block_tree (tree, tree);
/* We always define `record_insns' even if it's not used so that we
can always export `prologue_epilogue_contains'. */
static void record_insns (rtx_insn *, rtx, hash_table<insn_cache_hasher> **)
ATTRIBUTE_UNUSED;
static bool contains (const rtx_insn *, hash_table<insn_cache_hasher> *);
static void prepare_function_start (void);
static void do_clobber_return_reg (rtx, void *);
static void do_use_return_reg (rtx, void *);
/* Stack of nested functions. */
/* Keep track of the cfun stack. */
static vec<function *> function_context_stack;
/* Save the current context for compilation of a nested function.
This is called from language-specific code. */
void
push_function_context (void)
{
if (cfun == 0)
allocate_struct_function (NULL, false);
function_context_stack.safe_push (cfun);
set_cfun (NULL);
}
/* Restore the last saved context, at the end of a nested function.
This function is called from language-specific code. */
void
pop_function_context (void)
{
struct function *p = function_context_stack.pop ();
set_cfun (p);
current_function_decl = p->decl;
/* Reset variables that have known state during rtx generation. */
virtuals_instantiated = 0;
generating_concat_p = 1;
}
/* Clear out all parts of the state in F that can safely be discarded
after the function has been parsed, but not compiled, to let
garbage collection reclaim the memory. */
void
free_after_parsing (struct function *f)
{
f->language = 0;
}
/* Clear out all parts of the state in F that can safely be discarded
after the function has been compiled, to let garbage collection
reclaim the memory. */
void
free_after_compilation (struct function *f)
{
prologue_insn_hash = NULL;
epilogue_insn_hash = NULL;
free (crtl->emit.regno_pointer_align);
memset (crtl, 0, sizeof (struct rtl_data));
f->eh = NULL;
f->machine = NULL;
f->cfg = NULL;
f->curr_properties &= ~PROP_cfg;
regno_reg_rtx = NULL;
}
/* Return size needed for stack frame based on slots so far allocated.
This size counts from zero. It is not rounded to PREFERRED_STACK_BOUNDARY;
the caller may have to do that. */
poly_int64
get_frame_size (void)
{
if (FRAME_GROWS_DOWNWARD)
return -frame_offset;
else
return frame_offset;
}
/* Issue an error message and return TRUE if frame OFFSET overflows in
the signed target pointer arithmetic for function FUNC. Otherwise
return FALSE. */
bool
frame_offset_overflow (poly_int64 offset, tree func)
{
poly_uint64 size = FRAME_GROWS_DOWNWARD ? -offset : offset;
unsigned HOST_WIDE_INT limit
= ((HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (Pmode) - 1))
/* Leave room for the fixed part of the frame. */
- 64 * UNITS_PER_WORD);
if (!coeffs_in_range_p (size, 0U, limit))
{
unsigned HOST_WIDE_INT hwisize;
if (size.is_constant (&hwisize))
error_at (DECL_SOURCE_LOCATION (func),
"total size of local objects %wu exceeds maximum %wu",
hwisize, limit);
else
error_at (DECL_SOURCE_LOCATION (func),
"total size of local objects exceeds maximum %wu",
limit);
return true;
}
return false;
}
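/* For concreteness (an illustrative configuration, not a target
   guarantee): with a 32-bit Pmode and UNITS_PER_WORD == 4, the limit
   above works out to
     (HOST_WIDE_INT_1U << 31) - 64 * 4 == 0x7fffff00
   bytes, so any frame whose local objects total more than that is
   diagnosed here rather than silently wrapping. */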
/* Return the minimum spill slot alignment for a register of mode MODE. */
unsigned int
spill_slot_alignment (machine_mode mode ATTRIBUTE_UNUSED)
{
return STACK_SLOT_ALIGNMENT (NULL_TREE, mode, GET_MODE_ALIGNMENT (mode));
}
/* Return stack slot alignment in bits for TYPE and MODE. */
static unsigned int
get_stack_local_alignment (tree type, machine_mode mode)
{
unsigned int alignment;
if (mode == BLKmode)
alignment = BIGGEST_ALIGNMENT;
else
alignment = GET_MODE_ALIGNMENT (mode);
/* Allow the front end to (possibly) increase the alignment of this
stack slot. */
if (! type)
type = lang_hooks.types.type_for_mode (mode, 0);
return STACK_SLOT_ALIGNMENT (type, mode, alignment);
}
/* Determine whether it is possible to fit a stack slot of size SIZE and
alignment ALIGNMENT into an area in the stack frame that starts at
frame offset START and has a length of LENGTH. If so, store the frame
offset to be used for the stack slot in *POFFSET and return true;
return false otherwise. This function will extend the frame size when
given a start/length pair that lies at the end of the frame. */
static bool
try_fit_stack_local (poly_int64 start, poly_int64 length,
poly_int64 size, unsigned int alignment,
poly_int64_pod *poffset)
{
poly_int64 this_frame_offset;
int frame_off, frame_alignment, frame_phase;
/* Calculate how many bytes the start of local variables is off from
stack alignment. */
frame_alignment = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
frame_off = targetm.starting_frame_offset () % frame_alignment;
frame_phase = frame_off ? frame_alignment - frame_off : 0;
/* Round the frame offset to the specified alignment. */
if (FRAME_GROWS_DOWNWARD)
this_frame_offset
= (aligned_lower_bound (start + length - size - frame_phase, alignment)
+ frame_phase);
else
this_frame_offset
= aligned_upper_bound (start - frame_phase, alignment) + frame_phase;
/* See if it fits. If this space is at the edge of the frame,
consider extending the frame to make it fit. Our caller relies on
this when allocating a new slot. */
if (maybe_lt (this_frame_offset, start))
{
if (known_eq (frame_offset, start))
frame_offset = this_frame_offset;
else
return false;
}
else if (maybe_gt (this_frame_offset + size, start + length))
{
if (known_eq (frame_offset, start + length))
frame_offset = this_frame_offset + size;
else
return false;
}
*poffset = this_frame_offset;
return true;
}
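/* Worked example (illustrative), assuming frame_phase == 0 and
   FRAME_GROWS_DOWNWARD: fitting SIZE == 8 with ALIGNMENT == 16 into a
   free area with START == -40 and LENGTH == 24 computes
     this_frame_offset = aligned_lower_bound (-40 + 24 - 8, 16) == -32,
   and since the byte range [-32, -24) lies inside [-40, -16), *POFFSET
   becomes -32 without extending the frame. */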
/* Create a new frame_space structure describing free space in the stack
frame beginning at START and ending at END, and chain it into the
function's frame_space_list. */
static void
add_frame_space (poly_int64 start, poly_int64 end)
{
class frame_space *space = ggc_alloc<frame_space> ();
space->next = crtl->frame_space_list;
crtl->frame_space_list = space;
space->start = start;
space->length = end - start;
}
/* Allocate a stack slot of SIZE bytes and return a MEM rtx for it
with machine mode MODE.
ALIGN controls the amount of alignment for the address of the slot:
0 means according to MODE,
-1 means use BIGGEST_ALIGNMENT and round size to multiple of that,
-2 means use BITS_PER_UNIT,
positive specifies alignment boundary in bits.
KIND has ASLK_REDUCE_ALIGN bit set if it is OK to reduce
alignment and ASLK_RECORD_PAD bit set if we should remember
extra space we allocated for alignment purposes. When we are
called from assign_stack_temp_for_type, it is not set so we don't
track the same stack slot in two independent lists.
We do not round to stack_boundary here. */
rtx
assign_stack_local_1 (machine_mode mode, poly_int64 size,
int align, int kind)
{
rtx x, addr;
poly_int64 bigend_correction = 0;
poly_int64 slot_offset = 0, old_frame_offset;
unsigned int alignment, alignment_in_bits;
if (align == 0)
{
alignment = get_stack_local_alignment (NULL, mode);
alignment /= BITS_PER_UNIT;
}
else if (align == -1)
{
alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
size = aligned_upper_bound (size, alignment);
}
else if (align == -2)
alignment = 1; /* BITS_PER_UNIT / BITS_PER_UNIT */
else
alignment = align / BITS_PER_UNIT;
alignment_in_bits = alignment * BITS_PER_UNIT;
/* Ignore alignment if it exceeds MAX_SUPPORTED_STACK_ALIGNMENT. */
if (alignment_in_bits > MAX_SUPPORTED_STACK_ALIGNMENT)
{
alignment_in_bits = MAX_SUPPORTED_STACK_ALIGNMENT;
alignment = MAX_SUPPORTED_STACK_ALIGNMENT / BITS_PER_UNIT;
}
if (SUPPORTS_STACK_ALIGNMENT)
{
if (crtl->stack_alignment_estimated < alignment_in_bits)
{
if (!crtl->stack_realign_processed)
crtl->stack_alignment_estimated = alignment_in_bits;
else
{
/* If stack is realigned and stack alignment value
hasn't been finalized, it is OK not to increase
stack_alignment_estimated. The bigger alignment
requirement is recorded in stack_alignment_needed
below. */
gcc_assert (!crtl->stack_realign_finalized);
if (!crtl->stack_realign_needed)
{
/* It is OK to reduce the alignment as long as the
requested size is 0 or the estimated stack
alignment >= mode alignment. */
gcc_assert ((kind & ASLK_REDUCE_ALIGN)
|| known_eq (size, 0)
|| (crtl->stack_alignment_estimated
>= GET_MODE_ALIGNMENT (mode)));
alignment_in_bits = crtl->stack_alignment_estimated;
alignment = alignment_in_bits / BITS_PER_UNIT;
}
}
}
}
if (crtl->stack_alignment_needed < alignment_in_bits)
crtl->stack_alignment_needed = alignment_in_bits;
if (crtl->max_used_stack_slot_alignment < alignment_in_bits)
crtl->max_used_stack_slot_alignment = alignment_in_bits;
if (mode != BLKmode || maybe_ne (size, 0))
{
if (kind & ASLK_RECORD_PAD)
{
class frame_space **psp;
for (psp = &crtl->frame_space_list; *psp; psp = &(*psp)->next)
{
class frame_space *space = *psp;
if (!try_fit_stack_local (space->start, space->length, size,
alignment, &slot_offset))
continue;
*psp = space->next;
if (known_gt (slot_offset, space->start))
add_frame_space (space->start, slot_offset);
if (known_lt (slot_offset + size, space->start + space->length))
add_frame_space (slot_offset + size,
space->start + space->length);
goto found_space;
}
}
}
else if (!STACK_ALIGNMENT_NEEDED)
{
slot_offset = frame_offset;
goto found_space;
}
old_frame_offset = frame_offset;
if (FRAME_GROWS_DOWNWARD)
{
frame_offset -= size;
try_fit_stack_local (frame_offset, size, size, alignment, &slot_offset);
if (kind & ASLK_RECORD_PAD)
{
if (known_gt (slot_offset, frame_offset))
add_frame_space (frame_offset, slot_offset);
if (known_lt (slot_offset + size, old_frame_offset))
add_frame_space (slot_offset + size, old_frame_offset);
}
}
else
{
frame_offset += size;
try_fit_stack_local (old_frame_offset, size, size, alignment, &slot_offset);
if (kind & ASLK_RECORD_PAD)
{
if (known_gt (slot_offset, old_frame_offset))
add_frame_space (old_frame_offset, slot_offset);
if (known_lt (slot_offset + size, frame_offset))
add_frame_space (slot_offset + size, frame_offset);
}
}
found_space:
/* On a big-endian machine, if we are allocating more space than we will use,
use the least significant bytes of those that are allocated. */
if (mode != BLKmode)
{
/* The slot size can sometimes be smaller than the mode size;
e.g. the rs6000 port allocates slots with a vector mode
that have the size of only one element. However, the slot
size must always be ordered wrt to the mode size, in the
same way as for a subreg. */
gcc_checking_assert (ordered_p (GET_MODE_SIZE (mode), size));
if (BYTES_BIG_ENDIAN && maybe_lt (GET_MODE_SIZE (mode), size))
bigend_correction = size - GET_MODE_SIZE (mode);
}
/* If we have already instantiated virtual registers, return the actual
address relative to the frame pointer. */
if (virtuals_instantiated)
addr = plus_constant (Pmode, frame_pointer_rtx,
trunc_int_for_mode
(slot_offset + bigend_correction
+ targetm.starting_frame_offset (), Pmode));
else
addr = plus_constant (Pmode, virtual_stack_vars_rtx,
trunc_int_for_mode
(slot_offset + bigend_correction,
Pmode));
x = gen_rtx_MEM (mode, addr);
set_mem_align (x, alignment_in_bits);
MEM_NOTRAP_P (x) = 1;
vec_safe_push (stack_slot_list, x);
if (frame_offset_overflow (frame_offset, current_function_decl))
frame_offset = 0;
return x;
}
/* Wrap up assign_stack_local_1 with the last parameter as ASLK_RECORD_PAD. */
rtx
assign_stack_local (machine_mode mode, poly_int64 size, int align)
{
return assign_stack_local_1 (mode, size, align, ASLK_RECORD_PAD);
}
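/* A minimal usage sketch (hypothetical caller, illustrative mode):
     rtx slot = assign_stack_local (DImode, GET_MODE_SIZE (DImode), 0);
   returns a MEM addressed off virtual_stack_vars_rtx (or
   frame_pointer_rtx once virtuals_instantiated), where ALIGN == 0
   requests alignment according to DImode. */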
/* In order to evaluate some expressions, such as function calls returning
structures in memory, we need to temporarily allocate stack locations.
We record each allocated temporary in the following structure.
Associated with each temporary slot is a nesting level. When we pop up
one level, all temporaries associated with the previous level are freed.
Normally, all temporaries are freed after the execution of the statement
in which they were created. However, if we are inside a ({...}) grouping,
the result may be in a temporary and hence must be preserved. If the
result could be in a temporary, we preserve it if we can determine which
one it is in. If we cannot determine which temporary may contain the
result, all temporaries are preserved. A temporary is preserved by
pretending it was allocated at the previous nesting level. */
class GTY(()) temp_slot {
public:
/* Points to next temporary slot. */
class temp_slot *next;
/* Points to previous temporary slot. */
class temp_slot *prev;
/* The rtx used to reference the slot. */
rtx slot;
/* The size, in units, of the slot. */
poly_int64 size;
/* The type of the object in the slot, or zero if it doesn't correspond
to a type. We use this to determine whether a slot can be reused.
It can be reused if objects of the type of the new slot will always
conflict with objects of the type of the old slot. */
tree type;
/* The alignment (in bits) of the slot. */
unsigned int align;
/* Nonzero if this temporary is currently in use. */
char in_use;
/* Nesting level at which this slot is being used. */
int level;
/* The offset of the slot from the frame_pointer, including extra space
for alignment. This info is for combine_temp_slots. */
poly_int64 base_offset;
/* The size of the slot, including extra space for alignment. This
info is for combine_temp_slots. */
poly_int64 full_size;
};
/* Entry for the below hash table. */
struct GTY((for_user)) temp_slot_address_entry {
hashval_t hash;
rtx address;
class temp_slot *temp_slot;
};
struct temp_address_hasher : ggc_ptr_hash<temp_slot_address_entry>
{
static hashval_t hash (temp_slot_address_entry *);
static bool equal (temp_slot_address_entry *, temp_slot_address_entry *);
};
/* A table of addresses that represent a stack slot. The table is a mapping
from address RTXen to a temp slot. */
static GTY(()) hash_table<temp_address_hasher> *temp_slot_address_table;
static size_t n_temp_slots_in_use;
/* Removes temporary slot TEMP from LIST. */
static void
cut_slot_from_list (class temp_slot *temp, class temp_slot **list)
{
if (temp->next)
temp->next->prev = temp->prev;
if (temp->prev)
temp->prev->next = temp->next;
else
*list = temp->next;
temp->prev = temp->next = NULL;
}
/* Inserts temporary slot TEMP to LIST. */
static void
insert_slot_to_list (class temp_slot *temp, class temp_slot **list)
{
temp->next = *list;
if (*list)
(*list)->prev = temp;
temp->prev = NULL;
*list = temp;
}
/* Returns the list of used temp slots at LEVEL. */
static class temp_slot **
temp_slots_at_level (int level)
{
if (level >= (int) vec_safe_length (used_temp_slots))
vec_safe_grow_cleared (used_temp_slots, level + 1, true);
return &(*used_temp_slots)[level];
}
/* Returns the maximal temporary slot level. */
static int
max_slot_level (void)
{
if (!used_temp_slots)
return -1;
return used_temp_slots->length () - 1;
}
/* Moves temporary slot TEMP to LEVEL. */
static void
move_slot_to_level (class temp_slot *temp, int level)
{
cut_slot_from_list (temp, temp_slots_at_level (temp->level));
insert_slot_to_list (temp, temp_slots_at_level (level));
temp->level = level;
}
/* Make temporary slot TEMP available. */
static void
make_slot_available (class temp_slot *temp)
{
cut_slot_from_list (temp, temp_slots_at_level (temp->level));
insert_slot_to_list (temp, &avail_temp_slots);
temp->in_use = 0;
temp->level = -1;
n_temp_slots_in_use--;
}
/* Compute the hash value for an address -> temp slot mapping.
The value is cached on the mapping entry. */
static hashval_t
temp_slot_address_compute_hash (struct temp_slot_address_entry *t)
{
int do_not_record = 0;
return hash_rtx (t->address, GET_MODE (t->address),
&do_not_record, NULL, false);
}
/* Return the hash value for an address -> temp slot mapping. */
hashval_t
temp_address_hasher::hash (temp_slot_address_entry *t)
{
return t->hash;
}
/* Compare two address -> temp slot mapping entries. */
bool
temp_address_hasher::equal (temp_slot_address_entry *t1,
temp_slot_address_entry *t2)
{
return exp_equiv_p (t1->address, t2->address, 0, true);
}
/* Add ADDRESS as an alias of TEMP_SLOT to the address -> temp slot mapping. */
static void
insert_temp_slot_address (rtx address, class temp_slot *temp_slot)
{
struct temp_slot_address_entry *t = ggc_alloc<temp_slot_address_entry> ();
t->address = copy_rtx (address);
t->temp_slot = temp_slot;
t->hash = temp_slot_address_compute_hash (t);
*temp_slot_address_table->find_slot_with_hash (t, t->hash, INSERT) = t;
}
/* Remove an address -> temp slot mapping entry if the temp slot is
not in use anymore. Callback for remove_unused_temp_slot_addresses. */
int
remove_unused_temp_slot_addresses_1 (temp_slot_address_entry **slot, void *)
{
const struct temp_slot_address_entry *t = *slot;
if (! t->temp_slot->in_use)
temp_slot_address_table->clear_slot (slot);
return 1;
}
/* Remove all mappings of addresses to unused temp slots. */
static void
remove_unused_temp_slot_addresses (void)
{
/* Use quicker clearing if there aren't any active temp slots. */
if (n_temp_slots_in_use)
temp_slot_address_table->traverse
<void *, remove_unused_temp_slot_addresses_1> (NULL);
else
temp_slot_address_table->empty ();
}
/* Find the temp slot corresponding to the object at address X. */
static class temp_slot *
find_temp_slot_from_address (rtx x)
{
class temp_slot *p;
struct temp_slot_address_entry tmp, *t;
/* First try the easy way:
See if X exists in the address -> temp slot mapping. */
tmp.address = x;
tmp.temp_slot = NULL;
tmp.hash = temp_slot_address_compute_hash (&tmp);
t = temp_slot_address_table->find_with_hash (&tmp, tmp.hash);
if (t)
return t->temp_slot;
/* If we have a sum involving a register, see if it points to a temp
slot. */
if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
&& (p = find_temp_slot_from_address (XEXP (x, 0))) != 0)
return p;
else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 1))
&& (p = find_temp_slot_from_address (XEXP (x, 1))) != 0)
return p;
/* Last resort: Address is a virtual stack var address. */
poly_int64 offset;
if (strip_offset (x, &offset) == virtual_stack_vars_rtx)
{
int i;
for (i = max_slot_level (); i >= 0; i--)
for (p = *temp_slots_at_level (i); p; p = p->next)
if (known_in_range_p (offset, p->base_offset, p->full_size))
return p;
}
return NULL;
}
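/* For illustration: an address such as (plus (reg R) (const_int 16))
   resolves through the recursive PLUS cases when R alone is recorded as
   pointing into a slot, while (plus virtual_stack_vars_rtx
   (const_int -24)) resolves through the final strip_offset scan over
   the slots' base_offset/full_size ranges. */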
/* Allocate a temporary stack slot and record it for possible later
reuse.
MODE is the machine mode to be given to the returned rtx.
SIZE is the size in units of the space required. We do no rounding here
since assign_stack_local will do any required rounding.
TYPE is the type that will be used for the stack slot. */
rtx
assign_stack_temp_for_type (machine_mode mode, poly_int64 size, tree type)
{
unsigned int align;
class temp_slot *p, *best_p = 0, *selected = NULL, **pp;
rtx slot;
gcc_assert (known_size_p (size));
align = get_stack_local_alignment (type, mode);
/* Try to find an available, already-allocated temporary of the proper
mode which meets the size and alignment requirements. Choose the
smallest one with the closest alignment.
If assign_stack_temp is called outside of the tree->rtl expansion,
we cannot reuse the stack slots (that may still refer to
VIRTUAL_STACK_VARS_REGNUM). */
if (!virtuals_instantiated)
{
for (p = avail_temp_slots; p; p = p->next)
{
if (p->align >= align
&& known_ge (p->size, size)
&& GET_MODE (p->slot) == mode
&& objects_must_conflict_p (p->type, type)
&& (best_p == 0
|| (known_eq (best_p->size, p->size)
? best_p->align > p->align
: known_ge (best_p->size, p->size))))
{
if (p->align == align && known_eq (p->size, size))
{
selected = p;
cut_slot_from_list (selected, &avail_temp_slots);
best_p = 0;
break;
}
best_p = p;
}
}
}
/* Make our best, if any, the one to use. */
if (best_p)
{
selected = best_p;
cut_slot_from_list (selected, &avail_temp_slots);
/* If there are enough aligned bytes left over, make them into a new
temp_slot so that the extra bytes don't get wasted. Do this only
for BLKmode slots, so that we can be sure of the alignment. */
if (GET_MODE (best_p->slot) == BLKmode)
{
int alignment = best_p->align / BITS_PER_UNIT;
poly_int64 rounded_size = aligned_upper_bound (size, alignment);
if (known_ge (best_p->size - rounded_size, alignment))
{
p = ggc_alloc<temp_slot> ();
p->in_use = 0;
p->size = best_p->size - rounded_size;
p->base_offset = best_p->base_offset + rounded_size;
p->full_size = best_p->full_size - rounded_size;
p->slot = adjust_address_nv (best_p->slot, BLKmode, rounded_size);
p->align = best_p->align;
p->type = best_p->type;
insert_slot_to_list (p, &avail_temp_slots);
vec_safe_push (stack_slot_list, p->slot);
best_p->size = rounded_size;
best_p->full_size = rounded_size;
}
}
}
/* If we still didn't find one, make a new temporary. */
if (selected == 0)
{
poly_int64 frame_offset_old = frame_offset;
p = ggc_alloc<temp_slot> ();
/* We are passing an explicit alignment request to assign_stack_local.
One side effect of that is assign_stack_local will not round SIZE
to ensure the frame offset remains suitably aligned.
So for requests which depended on the rounding of SIZE, we go ahead
and round it now. We also make sure ALIGNMENT is at least
BIGGEST_ALIGNMENT. */
gcc_assert (mode != BLKmode || align == BIGGEST_ALIGNMENT);
p->slot = assign_stack_local_1 (mode,
(mode == BLKmode
? aligned_upper_bound (size,
(int) align
/ BITS_PER_UNIT)
: size),
align, 0);
p->align = align;
/* The following slot size computation is necessary because we don't
know the actual size of the temporary slot until assign_stack_local
has performed all the frame alignment and size rounding for the
requested temporary. Note that extra space added for alignment
can be either above or below this stack slot depending on which
way the frame grows. We include the extra space if and only if it
is above this slot. */
if (FRAME_GROWS_DOWNWARD)
p->size = frame_offset_old - frame_offset;
else
p->size = size;
/* Now define the fields used by combine_temp_slots. */
if (FRAME_GROWS_DOWNWARD)
{
p->base_offset = frame_offset;
p->full_size = frame_offset_old - frame_offset;
}
else
{
p->base_offset = frame_offset_old;
p->full_size = frame_offset - frame_offset_old;
}
selected = p;
}
p = selected;
p->in_use = 1;
p->type = type;
p->level = temp_slot_level;
n_temp_slots_in_use++;
pp = temp_slots_at_level (p->level);
insert_slot_to_list (p, pp);
insert_temp_slot_address (XEXP (p->slot, 0), p);
/* Create a new MEM rtx to avoid clobbering MEM flags of old slots. */
slot = gen_rtx_MEM (mode, XEXP (p->slot, 0));
vec_safe_push (stack_slot_list, slot);
/* If we know the alias set for the memory that will be used, use
it. If there's no TYPE, then we don't know anything about the
alias set for the memory. */
set_mem_alias_set (slot, type ? get_alias_set (type) : 0);
set_mem_align (slot, align);
/* If a type is specified, set the relevant flags. */
if (type != 0)
MEM_VOLATILE_P (slot) = TYPE_VOLATILE (type);
MEM_NOTRAP_P (slot) = 1;
return slot;
}
/* Allocate a temporary stack slot and record it for possible later
reuse. First two arguments are same as in preceding function. */
rtx
assign_stack_temp (machine_mode mode, poly_int64 size)
{
return assign_stack_temp_for_type (mode, size, NULL_TREE);
}
/* Assign a temporary.
If TYPE_OR_DECL is a decl, then we are doing it on behalf of the decl,
and that decl should be used in error messages. In either case, we
allocate a temporary of the given type.
MEMORY_REQUIRED is 1 if the result must be addressable stack memory;
it is 0 if a register is OK.
DONT_PROMOTE is 1 if we should not promote values in register
to wider modes. */
rtx
assign_temp (tree type_or_decl, int memory_required,
int dont_promote ATTRIBUTE_UNUSED)
{
tree type, decl;
machine_mode mode;
#ifdef PROMOTE_MODE
int unsignedp;
#endif
if (DECL_P (type_or_decl))
decl = type_or_decl, type = TREE_TYPE (decl);
else
decl = NULL, type = type_or_decl;
mode = TYPE_MODE (type);
#ifdef PROMOTE_MODE
unsignedp = TYPE_UNSIGNED (type);
#endif
/* Allocating temporaries of TREE_ADDRESSABLE type must be done in the front
end. See also create_tmp_var for the gimplification-time check. */
gcc_assert (!TREE_ADDRESSABLE (type) && COMPLETE_TYPE_P (type));
if (mode == BLKmode || memory_required)
{
poly_int64 size;
rtx tmp;
/* Unfortunately, we don't yet know how to allocate variable-sized
temporaries. However, sometimes we can find a fixed upper limit on
the size, so try that instead. */
if (!poly_int_tree_p (TYPE_SIZE_UNIT (type), &size))
size = max_int_size_in_bytes (type);
/* Zero sized arrays are a GNU C extension. Set size to 1 to avoid
problems with allocating the stack space. */
if (known_eq (size, 0))
size = 1;
/* The size of the temporary may be too large to fit into an integer. */
/* ??? Not sure this should happen except for user silliness, so limit
this to things that aren't compiler-generated temporaries. The
rest of the time we'll die in assign_stack_temp_for_type. */
if (decl
&& !known_size_p (size)
&& TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST)
{
error ("size of variable %q+D is too large", decl);
size = 1;
}
tmp = assign_stack_temp_for_type (mode, size, type);
return tmp;
}
#ifdef PROMOTE_MODE
if (! dont_promote)
mode = promote_mode (type, mode, &unsignedp);
#endif
return gen_reg_rtx (mode);
}
/* Combine temporary stack slots which are adjacent on the stack.
This allows for better use of already allocated stack space. This is only
done for BLKmode slots because we can be sure that we won't have alignment
problems in this case. */
static void
combine_temp_slots (void)
{
class temp_slot *p, *q, *next, *next_q;
int num_slots;
/* We can't combine slots, because the information about which slot
is in which alias set will be lost. */
if (flag_strict_aliasing)
return;
/* If there are a lot of temp slots, don't do anything unless
expensive optimizations are enabled. */
if (! flag_expensive_optimizations)
for (p = avail_temp_slots, num_slots = 0; p; p = p->next, num_slots++)
if (num_slots > 100 || (num_slots > 10 && optimize == 0))
return;
for (p = avail_temp_slots; p; p = next)
{
int delete_p = 0;
next = p->next;
if (GET_MODE (p->slot) != BLKmode)
continue;
for (q = p->next; q; q = next_q)
{
int delete_q = 0;
next_q = q->next;
if (GET_MODE (q->slot) != BLKmode)
continue;
if (known_eq (p->base_offset + p->full_size, q->base_offset))
{
/* Q comes after P; combine Q into P. */
p->size += q->size;
p->full_size += q->full_size;
delete_q = 1;
}
else if (known_eq (q->base_offset + q->full_size, p->base_offset))
{
/* P comes after Q; combine P into Q. */
q->size += p->size;
q->full_size += p->full_size;
delete_p = 1;
break;
}
if (delete_q)
cut_slot_from_list (q, &avail_temp_slots);
}
/* Either delete P or advance past it. */
if (delete_p)
cut_slot_from_list (p, &avail_temp_slots);
}
}
/* Indicate that NEW_RTX is an alternate way of referring to the temp
slot that previously was known by OLD_RTX. */
void
update_temp_slot_address (rtx old_rtx, rtx new_rtx)
{
class temp_slot *p;
if (rtx_equal_p (old_rtx, new_rtx))
return;
p = find_temp_slot_from_address (old_rtx);
/* If we didn't find one, see if OLD_RTX is a PLUS. If so, and
NEW_RTX is a register, see if one operand of the PLUS is a
temporary location. If so, NEW_RTX points into it. Otherwise,
if both OLD_RTX and NEW_RTX are a PLUS and there is a register
in common between them, try a recursive call on those
values. */
if (p == 0)
{
if (GET_CODE (old_rtx) != PLUS)
return;
if (REG_P (new_rtx))
{
update_temp_slot_address (XEXP (old_rtx, 0), new_rtx);
update_temp_slot_address (XEXP (old_rtx, 1), new_rtx);
return;
}
else if (GET_CODE (new_rtx) != PLUS)
return;
if (rtx_equal_p (XEXP (old_rtx, 0), XEXP (new_rtx, 0)))
update_temp_slot_address (XEXP (old_rtx, 1), XEXP (new_rtx, 1));
else if (rtx_equal_p (XEXP (old_rtx, 1), XEXP (new_rtx, 0)))
update_temp_slot_address (XEXP (old_rtx, 0), XEXP (new_rtx, 1));
else if (rtx_equal_p (XEXP (old_rtx, 0), XEXP (new_rtx, 1)))
update_temp_slot_address (XEXP (old_rtx, 1), XEXP (new_rtx, 0));
else if (rtx_equal_p (XEXP (old_rtx, 1), XEXP (new_rtx, 1)))
update_temp_slot_address (XEXP (old_rtx, 0), XEXP (new_rtx, 0));
return;
}
/* Otherwise add an alias for the temp's address. */
insert_temp_slot_address (new_rtx, p);
}
/* If X could be a reference to a temporary slot, mark that slot as
belonging to the level one higher than the current level. If X
matched one of our slots, just mark that one. Otherwise, we can't
easily predict which it is, so upgrade all of them.
This is called when an ({...}) construct occurs and a statement
returns a value in memory. */
void
preserve_temp_slots (rtx x)
{
class temp_slot *p = 0, *next;
if (x == 0)
return;
/* If X is a register that is being used as a pointer, see if we have
a temporary slot we know it points to. */
if (REG_P (x) && REG_POINTER (x))
p = find_temp_slot_from_address (x);
/* If X is not in memory or is at a constant address, it cannot be in
a temporary slot. */
if (p == 0 && (!MEM_P (x) || CONSTANT_P (XEXP (x, 0))))
return;
/* First see if we can find a match. */
if (p == 0)
p = find_temp_slot_from_address (XEXP (x, 0));
if (p != 0)
{
if (p->level == temp_slot_level)
move_slot_to_level (p, temp_slot_level - 1);
return;
}
/* Otherwise, preserve all non-kept slots at this level. */
for (p = *temp_slots_at_level (temp_slot_level); p; p = next)
{
next = p->next;
move_slot_to_level (p, temp_slot_level - 1);
}
}
/* Free all temporaries used so far. This is normally called at the
end of generating code for a statement. */
void
free_temp_slots (void)
{
class temp_slot *p, *next;
bool some_available = false;
for (p = *temp_slots_at_level (temp_slot_level); p; p = next)
{
next = p->next;
make_slot_available (p);
some_available = true;
}
if (some_available)
{
remove_unused_temp_slot_addresses ();
combine_temp_slots ();
}
}
/* Push deeper into the nesting level for stack temporaries. */
void
push_temp_slots (void)
{
temp_slot_level++;
}
/* Pop a temporary nesting level. All slots in use in the current level
are freed. */
void
pop_temp_slots (void)
{
free_temp_slots ();
temp_slot_level--;
}
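/* A typical (hypothetical) expansion sequence using these levels:
     push_temp_slots ();
     rtx tmp = assign_stack_temp (BLKmode, size);
     ... expand code that stores its result in TMP ...
     preserve_temp_slots (result);  // keep TMP if RESULT may be in it
     pop_temp_slots ();             // frees everything not preserved
   Slots moved to a shallower level by preserve_temp_slots survive the
   pop; the rest become available for reuse. */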
/* Initialize temporary slots. */
void
init_temp_slots (void)
{
/* We have not allocated any temporaries yet. */
avail_temp_slots = 0;
vec_alloc (used_temp_slots, 0);
temp_slot_level = 0;
n_temp_slots_in_use = 0;
/* Set up the table to map addresses to temp slots. */
if (! temp_slot_address_table)
temp_slot_address_table = hash_table<temp_address_hasher>::create_ggc (32);
else
temp_slot_address_table->empty ();
}
/* Functions and data structures to keep track of the values hard regs
had at the start of the function. */
/* Private type used by get_hard_reg_initial_reg, get_hard_reg_initial_val,
and has_hard_reg_initial_val. */
struct GTY(()) initial_value_pair {
rtx hard_reg;
rtx pseudo;
};
/* ??? This could be a VEC but there is currently no way to define an
opaque VEC type. This could be worked around by defining struct
initial_value_pair in function.h. */
struct GTY(()) initial_value_struct {
int num_entries;
int max_entries;
initial_value_pair * GTY ((length ("%h.num_entries"))) entries;
};
/* If a pseudo represents an initial hard reg (or expression), return
it, else return NULL_RTX. */
rtx
get_hard_reg_initial_reg (rtx reg)
{
struct initial_value_struct *ivs = crtl->hard_reg_initial_vals;
int i;
if (ivs == 0)
return NULL_RTX;
for (i = 0; i < ivs->num_entries; i++)
if (rtx_equal_p (ivs->entries[i].pseudo, reg))
return ivs->entries[i].hard_reg;
return NULL_RTX;
}
/* Make sure that there's a pseudo register of mode MODE that stores the
initial value of hard register REGNO. Return an rtx for such a pseudo. */
rtx
get_hard_reg_initial_val (machine_mode mode, unsigned int regno)
{
struct initial_value_struct *ivs;
rtx rv;
rv = has_hard_reg_initial_val (mode, regno);
if (rv)
return rv;
ivs = crtl->hard_reg_initial_vals;
if (ivs == 0)
{
ivs = ggc_alloc<initial_value_struct> ();
ivs->num_entries = 0;
ivs->max_entries = 5;
ivs->entries = ggc_vec_alloc<initial_value_pair> (5);
crtl->hard_reg_initial_vals = ivs;
}
if (ivs->num_entries >= ivs->max_entries)
{
ivs->max_entries += 5;
ivs->entries = GGC_RESIZEVEC (initial_value_pair, ivs->entries,
ivs->max_entries);
}
ivs->entries[ivs->num_entries].hard_reg = gen_rtx_REG (mode, regno);
ivs->entries[ivs->num_entries].pseudo = gen_reg_rtx (mode);
return ivs->entries[ivs->num_entries++].pseudo;
}
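/* A hypothetical backend use, to name the value a hard register had on
   entry (LR_REGNUM is a placeholder for a target-specific regno):
     rtx lr = get_hard_reg_initial_val (Pmode, LR_REGNUM);
   The first call creates the pseudo; emit_initial_value_sets later
   emits the copy from the hard register at the function's entry. */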
/* See if get_hard_reg_initial_val has been used to create a pseudo
for the initial value of hard register REGNO in mode MODE. Return
the associated pseudo if so, otherwise return NULL. */
rtx
has_hard_reg_initial_val (machine_mode mode, unsigned int regno)
{
struct initial_value_struct *ivs;
int i;
ivs = crtl->hard_reg_initial_vals;
if (ivs != 0)
for (i = 0; i < ivs->num_entries; i++)
if (GET_MODE (ivs->entries[i].hard_reg) == mode
&& REGNO (ivs->entries[i].hard_reg) == regno)
return ivs->entries[i].pseudo;
return NULL_RTX;
}
unsigned int
emit_initial_value_sets (void)
{
struct initial_value_struct *ivs = crtl->hard_reg_initial_vals;
int i;
rtx_insn *seq;
if (ivs == 0)
return 0;
start_sequence ();
for (i = 0; i < ivs->num_entries; i++)
emit_move_insn (ivs->entries[i].pseudo, ivs->entries[i].hard_reg);
seq = get_insns ();
end_sequence ();
emit_insn_at_entry (seq);
return 0;
}
/* Store the hard-reg/pseudo-reg initial values pair for entry I in
*HREG and *PREG; return TRUE if I is a valid entry, FALSE otherwise. */
bool
initial_value_entry (int i, rtx *hreg, rtx *preg)
{
struct initial_value_struct *ivs = crtl->hard_reg_initial_vals;
if (!ivs || i >= ivs->num_entries)
return false;
*hreg = ivs->entries[i].hard_reg;
*preg = ivs->entries[i].pseudo;
return true;
}
/* These routines are responsible for converting virtual register references
to the actual hard register references once RTL generation is complete.
The following five variables are used for communication between the
routines. They contain the offsets of the virtual registers from their
respective hard registers. */
static poly_int64 in_arg_offset;
static poly_int64 var_offset;
static poly_int64 dynamic_offset;
static poly_int64 out_arg_offset;
static poly_int64 cfa_offset;
/* In most machines, the stack pointer register is equivalent to the bottom
of the stack. */
#ifndef STACK_POINTER_OFFSET
#define STACK_POINTER_OFFSET 0
#endif
#if defined (REG_PARM_STACK_SPACE) && !defined (INCOMING_REG_PARM_STACK_SPACE)
#define INCOMING_REG_PARM_STACK_SPACE REG_PARM_STACK_SPACE
#endif
/* If not defined, pick an appropriate default for the offset of dynamically
allocated memory depending on the value of ACCUMULATE_OUTGOING_ARGS,
INCOMING_REG_PARM_STACK_SPACE, and OUTGOING_REG_PARM_STACK_SPACE. */
#ifndef STACK_DYNAMIC_OFFSET
/* The bottom of the stack points to the actual arguments. If
REG_PARM_STACK_SPACE is defined, this includes the space for the register
parameters. However, if OUTGOING_REG_PARM_STACK_SPACE is not defined,
stack space for register parameters is not pushed by the caller, but
rather part of the fixed stack areas and hence not included in
`crtl->outgoing_args_size'. Nevertheless, we must allow
for it when allocating stack dynamic objects. */
#ifdef INCOMING_REG_PARM_STACK_SPACE
#define STACK_DYNAMIC_OFFSET(FNDECL) \
((ACCUMULATE_OUTGOING_ARGS \
? (crtl->outgoing_args_size \
+ (OUTGOING_REG_PARM_STACK_SPACE ((!(FNDECL) ? NULL_TREE : TREE_TYPE (FNDECL))) ? 0 \
: INCOMING_REG_PARM_STACK_SPACE (FNDECL))) \
: 0) + (STACK_POINTER_OFFSET))
#else
#define STACK_DYNAMIC_OFFSET(FNDECL) \
((ACCUMULATE_OUTGOING_ARGS ? crtl->outgoing_args_size : poly_int64 (0)) \
+ (STACK_POINTER_OFFSET))
#endif
#endif
/* Given a piece of RTX and a pointer to a poly_int64, if the RTX
is a virtual register, return the equivalent hard register and set the
offset indirectly through the pointer. Otherwise, return NULL_RTX. */
static rtx
instantiate_new_reg (rtx x, poly_int64_pod *poffset)
{
rtx new_rtx;
poly_int64 offset;
if (x == virtual_incoming_args_rtx)
{
if (stack_realign_drap)
{
/* Replace virtual_incoming_args_rtx with internal arg
pointer if DRAP is used to realign stack. */
new_rtx = crtl->args.internal_arg_pointer;
offset = 0;
}
else
new_rtx = arg_pointer_rtx, offset = in_arg_offset;
}
else if (x == virtual_stack_vars_rtx)
new_rtx = frame_pointer_rtx, offset = var_offset;
else if (x == virtual_stack_dynamic_rtx)
new_rtx = stack_pointer_rtx, offset = dynamic_offset;
else if (x == virtual_outgoing_args_rtx)
new_rtx = stack_pointer_rtx, offset = out_arg_offset;
else if (x == virtual_cfa_rtx)
{
#ifdef FRAME_POINTER_CFA_OFFSET
new_rtx = frame_pointer_rtx;
#else
new_rtx = arg_pointer_rtx;
#endif
offset = cfa_offset;
}
else if (x == virtual_preferred_stack_boundary_rtx)
{
new_rtx = GEN_INT (crtl->preferred_stack_boundary / BITS_PER_UNIT);
offset = 0;
}
else
return NULL_RTX;
*poffset = offset;
return new_rtx;
}
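/* For example (illustrative offset): if var_offset == -16, then a use of
     (plus virtual_stack_vars_rtx (const_int 8))
   is instantiated by our callers as
     (plus frame_pointer_rtx (const_int -8)). */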
/* A subroutine of instantiate_virtual_regs. Instantiate any virtual
registers present inside of *LOC. The expression is simplified,
as much as possible, but is not to be considered "valid" in any sense
implied by the target. Return true if any change is made. */
static bool
instantiate_virtual_regs_in_rtx (rtx *loc)
{
if (!*loc)
return false;
bool changed = false;
subrtx_ptr_iterator::array_type array;
FOR_EACH_SUBRTX_PTR (iter, array, loc, NONCONST)
{
rtx *loc = *iter;
if (rtx x = *loc)
{
rtx new_rtx;
poly_int64 offset;
switch (GET_CODE (x))
{
case REG:
new_rtx = instantiate_new_reg (x, &offset);
if (new_rtx)
{
*loc = plus_constant (GET_MODE (x), new_rtx, offset);
changed = true;
}
iter.skip_subrtxes ();
break;
case PLUS:
new_rtx = instantiate_new_reg (XEXP (x, 0), &offset);
if (new_rtx)
{
XEXP (x, 0) = new_rtx;
*loc = plus_constant (GET_MODE (x), x, offset, true);
changed = true;
iter.skip_subrtxes ();
break;
}
/* FIXME -- from old code */
/* If we have (plus (subreg (virtual-reg)) (const_int)), we know
we can commute the PLUS and SUBREG because pointers into the
frame are well-behaved. */
break;
default:
break;
}
}
}
return changed;
}
/* A subroutine of instantiate_virtual_regs_in_insn. Return true if X
matches the predicate for insn CODE operand OPERAND. */
static int
safe_insn_predicate (int code, int operand, rtx x)
{
return code < 0 || insn_operand_matches ((enum insn_code) code, operand, x);
}
/* A subroutine of instantiate_virtual_regs. Instantiate any virtual
registers present inside of insn. The result will be a valid insn. */
static void
instantiate_virtual_regs_in_insn (rtx_insn *insn)
{
poly_int64 offset;
int insn_code, i;
bool any_change = false;
rtx set, new_rtx, x;
rtx_insn *seq;
/* There are some special cases to be handled first. */
set = single_set (insn);
if (set)
{
/* We're allowed to assign to a virtual register. This is interpreted
to mean that the underlying register gets assigned the inverse
transformation. This is used, for example, in the handling of
non-local gotos. */
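/* For instance (illustrative), an insn like
   (set virtual_stack_dynamic_rtx (reg R))
   is rewritten below as sp = R - dynamic_offset, undoing the offset
   that a read of the virtual register would have added. */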
new_rtx = instantiate_new_reg (SET_DEST (set), &offset);
if (new_rtx)
{
start_sequence ();
instantiate_virtual_regs_in_rtx (&SET_SRC (set));
x = simplify_gen_binary (PLUS, GET_MODE (new_rtx), SET_SRC (set),
gen_int_mode (-offset, GET_MODE (new_rtx)));
x = force_operand (x, new_rtx);
if (x != new_rtx)
emit_move_insn (new_rtx, x);
seq = get_insns ();
end_sequence ();
emit_insn_before (seq, insn);
delete_insn (insn);
return;
}
/* Handle a straight copy from a virtual register by generating a
new add insn. The difference between this and falling through
to the generic case is avoiding a new pseudo and eliminating a
move insn in the initial rtl stream. */
new_rtx = instantiate_new_reg (SET_SRC (set), &offset);
if (new_rtx
&& maybe_ne (offset, 0)
&& REG_P (SET_DEST (set))
&& REGNO (SET_DEST (set)) > LAST_VIRTUAL_REGISTER)
{
start_sequence ();
x = expand_simple_binop (GET_MODE (SET_DEST (set)), PLUS, new_rtx,
gen_int_mode (offset,
GET_MODE (SET_DEST (set))),
SET_DEST (set), 1, OPTAB_LIB_WIDEN);
if (x != SET_DEST (set))
emit_move_insn (SET_DEST (set), x);
seq = get_insns ();
end_sequence ();
emit_insn_before (seq, insn);
delete_insn (insn);
return;
}
extract_insn (insn);
insn_code = INSN_CODE (insn);
/* Handle a plus involving a virtual register by determining if the
operands remain valid if they're modified in place. */
poly_int64 delta;
if (GET_CODE (SET_SRC (set)) == PLUS
&& recog_data.n_operands >= 3
&& recog_data.operand_loc[1] == &XEXP (SET_SRC (set), 0)
&& recog_data.operand_loc[2] == &XEXP (SET_SRC (set), 1)
&& poly_int_rtx_p (recog_data.operand[2], &delta)
&& (new_rtx = instantiate_new_reg (recog_data.operand[1], &offset)))
{
offset += delta;
/* If the sum is zero, then replace with a plain move. */
if (known_eq (offset, 0)
&& REG_P (SET_DEST (set))
&& REGNO (SET_DEST (set)) > LAST_VIRTUAL_REGISTER)
{
start_sequence ();
emit_move_insn (SET_DEST (set), new_rtx);
seq = get_insns ();
end_sequence ();
emit_insn_before (seq, insn);
delete_insn (insn);
return;
}
x = gen_int_mode (offset, recog_data.operand_mode[2]);
/* Using validate_change and apply_change_group here leaves
recog_data in an invalid state. Since we know exactly what
we want to check, do those two by hand. */
if (safe_insn_predicate (insn_code, 1, new_rtx)
&& safe_insn_predicate (insn_code, 2, x))
{
*recog_data.operand_loc[1] = recog_data.operand[1] = new_rtx;
*recog_data.operand_loc[2] = recog_data.operand[2] = x;
any_change = true;
/* Fall through into the regular operand fixup loop in
order to take care of operands other than 1 and 2. */
}
}
}
else
{
extract_insn (insn);
insn_code = INSN_CODE (insn);
}
/* In the general case, we expect virtual registers to appear only in
operands, and then only as either bare registers or inside memories. */
for (i = 0; i < recog_data.n_operands; ++i)
{
x = recog_data.operand[i];
switch (GET_CODE (x))
{
case MEM:
{
rtx addr = XEXP (x, 0);
if (!instantiate_virtual_regs_in_rtx (&addr))
continue;
start_sequence ();
x = replace_equiv_address (x, addr, true);
/* It may happen that the address with the virtual reg
was valid (e.g. based on the virtual stack reg, which might
be acceptable to the predicates with all offsets), whereas
the address now isn't anymore, for instance when the address
still has an offset but the base reg is no longer
virtual-stack-reg. Below we would do a force_reg on the whole
operand, but this insn might actually only accept memory. Hence,
before doing that last resort, try to reload the address into
a register, so this operand stays a MEM. */
if (!safe_insn_predicate (insn_code, i, x))
{
addr = force_reg (GET_MODE (addr), addr);
x = replace_equiv_address (x, addr, true);
}
seq = get_insns ();
end_sequence ();
if (seq)
emit_insn_before (seq, insn);
}
break;
case REG:
new_rtx = instantiate_new_reg (x, &offset);
if (new_rtx == NULL)
continue;
if (known_eq (offset, 0))
x = new_rtx;
else
{
start_sequence ();
/* Careful, special mode predicates may have stuff in
insn_data[insn_code].operand[i].mode that isn't useful
to us for computing a new value. */
/* ??? Recognize address_operand and/or "p" constraints
to see if (plus new offset) is valid before we put
this through expand_simple_binop. */
x = expand_simple_binop (GET_MODE (x), PLUS, new_rtx,
gen_int_mode (offset, GET_MODE (x)),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
seq = get_insns ();
end_sequence ();
emit_insn_before (seq, insn);
}
break;
case SUBREG:
new_rtx = instantiate_new_reg (SUBREG_REG (x), &offset);
if (new_rtx == NULL)
continue;
if (maybe_ne (offset, 0))
{
start_sequence ();
new_rtx = expand_simple_binop
(GET_MODE (new_rtx), PLUS, new_rtx,
gen_int_mode (offset, GET_MODE (new_rtx)),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
seq = get_insns ();
end_sequence ();
emit_insn_before (seq, insn);
}
x = simplify_gen_subreg (recog_data.operand_mode[i], new_rtx,
GET_MODE (new_rtx), SUBREG_BYTE (x));
gcc_assert (x);
break;
default:
continue;
}
/* At this point, X contains the new value for the operand.
Validate the new value vs the insn predicate. Note that
asm insns will have insn_code -1 here. */
if (!safe_insn_predicate (insn_code, i, x))
{
start_sequence ();
if (REG_P (x))
{
gcc_assert (REGNO (x) <= LAST_VIRTUAL_REGISTER);
x = copy_to_reg (x);
}
else
x = force_reg (insn_data[insn_code].operand[i].mode, x);
seq = get_insns ();
end_sequence ();
if (seq)
emit_insn_before (seq, insn);
}
*recog_data.operand_loc[i] = recog_data.operand[i] = x;
any_change = true;
}
if (any_change)
{
/* Propagate operand changes into the duplicates. */
for (i = 0; i < recog_data.n_dups; ++i)
*recog_data.dup_loc[i]
= copy_rtx (recog_data.operand[(unsigned)recog_data.dup_num[i]]);
/* Force re-recognition of the instruction for validation. */
INSN_CODE (insn) = -1;
}
if (asm_noperands (PATTERN (insn)) >= 0)
{
if (!check_asm_operands (PATTERN (insn)))
{
error_for_asm (insn, "impossible constraint in %<asm%>");
/* For asm goto, instead of fixing up all the edges
just clear the template and clear input and output operands
and strip away clobbers. */
if (JUMP_P (insn))
{
rtx asm_op = extract_asm_operands (PATTERN (insn));
PATTERN (insn) = asm_op;
PUT_MODE (asm_op, VOIDmode);
ASM_OPERANDS_TEMPLATE (asm_op) = ggc_strdup ("");
ASM_OPERANDS_OUTPUT_CONSTRAINT (asm_op) = "";
ASM_OPERANDS_OUTPUT_IDX (asm_op) = 0;
ASM_OPERANDS_INPUT_VEC (asm_op) = rtvec_alloc (0);
ASM_OPERANDS_INPUT_CONSTRAINT_VEC (asm_op) = rtvec_alloc (0);
}
else
delete_insn (insn);
}
}
else
{
if (recog_memoized (insn) < 0)
fatal_insn_not_found (insn);
}
}
/* Subroutine of instantiate_decls. Given RTL representing a decl,
do any instantiation required. */
void
instantiate_decl_rtl (rtx x)
{
rtx addr;
if (x == 0)
return;
/* If this is a CONCAT, recurse for the pieces. */
if (GET_CODE (x) == CONCAT)
{
instantiate_decl_rtl (XEXP (x, 0));
instantiate_decl_rtl (XEXP (x, 1));
return;
}
/* If this is not a MEM, no need to do anything. Similarly if the
address is a constant or a register that is not a virtual register. */
if (!MEM_P (x))
return;
addr = XEXP (x, 0);
if (CONSTANT_P (addr)
|| (REG_P (addr)
&& (REGNO (addr) < FIRST_VIRTUAL_REGISTER
|| REGNO (addr) > LAST_VIRTUAL_REGISTER)))
return;
instantiate_virtual_regs_in_rtx (&XEXP (x, 0));
}
/* Helper for instantiate_decls called via walk_tree: Process all decls
in the given DECL_VALUE_EXPR. */
static tree
instantiate_expr (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
tree t = *tp;
if (! EXPR_P (t))
{
*walk_subtrees = 0;
if (DECL_P (t))
{
if (DECL_RTL_SET_P (t))
instantiate_decl_rtl (DECL_RTL (t));
if (TREE_CODE (t) == PARM_DECL && DECL_NAMELESS (t)
&& DECL_INCOMING_RTL (t))
instantiate_decl_rtl (DECL_INCOMING_RTL (t));
if ((VAR_P (t) || TREE_CODE (t) == RESULT_DECL)
&& DECL_HAS_VALUE_EXPR_P (t))
{
tree v = DECL_VALUE_EXPR (t);
walk_tree (&v, instantiate_expr, NULL, NULL);
}
}
}
return NULL;
}
/* Subroutine of instantiate_decls: Process all decls in the given
BLOCK node and all its subblocks. */
static void
instantiate_decls_1 (tree let)
{
tree t;
for (t = BLOCK_VARS (let); t; t = DECL_CHAIN (t))
{
if (DECL_RTL_SET_P (t))
instantiate_decl_rtl (DECL_RTL (t));
if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t))
{
tree v = DECL_VALUE_EXPR (t);
walk_tree (&v, instantiate_expr, NULL, NULL);
}
}
/* Process all subblocks. */
for (t = BLOCK_SUBBLOCKS (let); t; t = BLOCK_CHAIN (t))
instantiate_decls_1 (t);
}
/* Scan all decls in FNDECL (both variables and parameters) and instantiate
all virtual registers in their DECL_RTL's. */
static void
instantiate_decls (tree fndecl)
{
tree decl;
unsigned ix;
/* Process all parameters of the function. */
for (decl = DECL_ARGUMENTS (fndecl); decl; decl = DECL_CHAIN (decl))
{
instantiate_decl_rtl (DECL_RTL (decl));
instantiate_decl_rtl (DECL_INCOMING_RTL (decl));
if (DECL_HAS_VALUE_EXPR_P (decl))
{
tree v = DECL_VALUE_EXPR (decl);
walk_tree (&v, instantiate_expr, NULL, NULL);
}
}
if ((decl = DECL_RESULT (fndecl))
&& TREE_CODE (decl) == RESULT_DECL)
{
if (DECL_RTL_SET_P (decl))
instantiate_decl_rtl (DECL_RTL (decl));
if (DECL_HAS_VALUE_EXPR_P (decl))
{
tree v = DECL_VALUE_EXPR (decl);
walk_tree (&v, instantiate_expr, NULL, NULL);
}
}
/* Process the saved static chain if it exists. */
decl = DECL_STRUCT_FUNCTION (fndecl)->static_chain_decl;
if (decl && DECL_HAS_VALUE_EXPR_P (decl))
instantiate_decl_rtl (DECL_RTL (DECL_VALUE_EXPR (decl)));
/* Now process all variables defined in the function or its subblocks. */
if (DECL_INITIAL (fndecl))
instantiate_decls_1 (DECL_INITIAL (fndecl));
FOR_EACH_LOCAL_DECL (cfun, ix, decl)
if (DECL_RTL_SET_P (decl))
instantiate_decl_rtl (DECL_RTL (decl));
vec_free (cfun->local_decls);
}
/* Pass through the INSNS of function FNDECL and convert virtual register
references to hard register references. */
static unsigned int
instantiate_virtual_regs (void)
{
rtx_insn *insn;
/* Compute the offsets to use for this function. */
in_arg_offset = FIRST_PARM_OFFSET (current_function_decl);
var_offset = targetm.starting_frame_offset ();
dynamic_offset = STACK_DYNAMIC_OFFSET (current_function_decl);
out_arg_offset = STACK_POINTER_OFFSET;
#ifdef FRAME_POINTER_CFA_OFFSET
cfa_offset = FRAME_POINTER_CFA_OFFSET (current_function_decl);
#else
cfa_offset = ARG_POINTER_CFA_OFFSET (current_function_decl);
#endif
/* Initialize recognition, indicating that volatile is OK. */
init_recog ();
/* Scan through all the insns, instantiating every virtual register still
present. */
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
/* These patterns in the instruction stream can never be recognized.
Fortunately, they shouldn't contain virtual registers either. */
if (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER
|| GET_CODE (PATTERN (insn)) == ASM_INPUT
|| DEBUG_MARKER_INSN_P (insn))
continue;
else if (DEBUG_BIND_INSN_P (insn))
instantiate_virtual_regs_in_rtx (INSN_VAR_LOCATION_PTR (insn));
else
instantiate_virtual_regs_in_insn (insn);
if (insn->deleted ())
continue;
instantiate_virtual_regs_in_rtx (&REG_NOTES (insn));
/* Instantiate any virtual registers in CALL_INSN_FUNCTION_USAGE. */
if (CALL_P (insn))
instantiate_virtual_regs_in_rtx (&CALL_INSN_FUNCTION_USAGE (insn));
}
/* Instantiate the virtual registers in the DECLs for debugging purposes. */
instantiate_decls (current_function_decl);
targetm.instantiate_decls ();
/* Indicate that, from now on, assign_stack_local should use
frame_pointer_rtx. */
virtuals_instantiated = 1;
return 0;
}
namespace {
const pass_data pass_data_instantiate_virtual_regs =
{
RTL_PASS, /* type */
"vregs", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_NONE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_instantiate_virtual_regs : public rtl_opt_pass
{
public:
pass_instantiate_virtual_regs (gcc::context *ctxt)
: rtl_opt_pass (pass_data_instantiate_virtual_regs, ctxt)
{}
/* opt_pass methods: */
virtual unsigned int execute (function *)
{
return instantiate_virtual_regs ();
}
}; // class pass_instantiate_virtual_regs
} // anon namespace
rtl_opt_pass *
make_pass_instantiate_virtual_regs (gcc::context *ctxt)
{
return new pass_instantiate_virtual_regs (ctxt);
}
/* Return 1 if EXP is an aggregate type (or a value with aggregate type).
This means a type for which function calls must pass an address to the
function or get an address back from the function.
EXP may be a type node or an expression (whose type is tested). */
int
aggregate_value_p (const_tree exp, const_tree fntype)
{
const_tree type = (TYPE_P (exp)) ? exp : TREE_TYPE (exp);
int i, regno, nregs;
rtx reg;
if (fntype)
switch (TREE_CODE (fntype))
{
case CALL_EXPR:
{
tree fndecl = get_callee_fndecl (fntype);
if (fndecl)
fntype = TREE_TYPE (fndecl);
else if (CALL_EXPR_FN (fntype))
fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (fntype)));
else
/* For internal functions, assume nothing needs to be
returned in memory. */
return 0;
}
break;
case FUNCTION_DECL:
fntype = TREE_TYPE (fntype);
break;
case FUNCTION_TYPE:
case METHOD_TYPE:
break;
case IDENTIFIER_NODE:
fntype = NULL_TREE;
break;
default:
/* We don't expect other tree types here. */
gcc_unreachable ();
}
if (VOID_TYPE_P (type))
return 0;
/* If a record should be passed the same as its first (and only) member,
don't pass it as an aggregate. */
if (TREE_CODE (type) == RECORD_TYPE && TYPE_TRANSPARENT_AGGR (type))
return aggregate_value_p (first_field (type), fntype);
/* If the front end has decided that this needs to be passed by
reference, do so. */
if ((TREE_CODE (exp) == PARM_DECL || TREE_CODE (exp) == RESULT_DECL)
&& DECL_BY_REFERENCE (exp))
return 1;
/* Function types that are TREE_ADDRESSABLE force return in memory. */
if (fntype && TREE_ADDRESSABLE (fntype))
return 1;
/* Types that are TREE_ADDRESSABLE must be constructed in memory,
and thus can't be returned in registers. */
if (TREE_ADDRESSABLE (type))
return 1;
if (TYPE_EMPTY_P (type))
return 0;
if (flag_pcc_struct_return && AGGREGATE_TYPE_P (type))
return 1;
if (targetm.calls.return_in_memory (type, fntype))
return 1;
/* Make sure we have suitable call-clobbered regs to return
the value in; if not, we must return it in memory. */
reg = hard_function_value (type, 0, fntype, 0);
/* If we have something other than a REG (e.g. a PARALLEL), then assume
it is OK. */
if (!REG_P (reg))
return 0;
/* Use the default ABI if the type of the function isn't known.
The scheme for handling interoperability between different ABIs
requires us to be able to tell when we're calling a function with
a nondefault ABI. */
const predefined_function_abi &abi = (fntype
? fntype_abi (fntype)
: default_function_abi);
regno = REGNO (reg);
nregs = hard_regno_nregs (regno, TYPE_MODE (type));
for (i = 0; i < nregs; i++)
if (!fixed_regs[regno + i] && !abi.clobbers_full_reg_p (regno + i))
return 1;
return 0;
}
/* Return true if we should assign DECL a pseudo register; false if it
should live on the local stack. */
bool
use_register_for_decl (const_tree decl)
{
if (TREE_CODE (decl) == SSA_NAME)
{
/* We often try to use the SSA_NAME, instead of its underlying
decl, to get type information and guide decisions, to avoid
differences of behavior between anonymous and named
variables, but in this one case we have to go for the actual
variable if there is one. The main reason is that, at least
at -O0, we want to place user variables on the stack, but we
don't mind using pseudos for anonymous or ignored temps.
Should we take the SSA_NAME, we'd conclude all SSA_NAMEs
should go in pseudos, whereas their corresponding variables
might have to go on the stack. So, disregarding the decl
here would negatively impact debug info at -O0, enable
coalescing between SSA_NAMEs that ought to get different
stack/pseudo assignments, and get the incoming argument
processing thoroughly confused by PARM_DECLs expected to live
in stack slots but assigned to pseudos. */
if (!SSA_NAME_VAR (decl))
return TYPE_MODE (TREE_TYPE (decl)) != BLKmode
&& !(flag_float_store && FLOAT_TYPE_P (TREE_TYPE (decl)));
decl = SSA_NAME_VAR (decl);
}
/* Honor volatile. */
if (TREE_SIDE_EFFECTS (decl))
return false;
/* Honor addressability. */
if (TREE_ADDRESSABLE (decl))
return false;
/* RESULT_DECLs are a bit special in that they're assigned without
regard to use_register_for_decl, but we generally only store in
them. If we coalesce their SSA NAMEs, we'd better return a
result that matches the assignment in expand_function_start. */
if (TREE_CODE (decl) == RESULT_DECL)
{
/* If it's not an aggregate, we're going to use a REG or a
PARALLEL containing a REG. */
if (!aggregate_value_p (decl, current_function_decl))
return true;
/* If expand_function_start determines the return value, we'll
use MEM if it's not by reference. */
if (cfun->returns_pcc_struct
|| (targetm.calls.struct_value_rtx
(TREE_TYPE (current_function_decl), 1)))
return DECL_BY_REFERENCE (decl);
/* Otherwise, we're taking an extra all.function_result_decl
argument. It's set up in assign_parms_augmented_arg_list,
under the (negated) conditions above, and then it's used to
set up the RESULT_DECL rtl in assign_parms, after looping
over all parameters. Now, if the RESULT_DECL is not by
reference, we'll use a MEM either way. */
if (!DECL_BY_REFERENCE (decl))
return false;
/* Otherwise, if RESULT_DECL is DECL_BY_REFERENCE, it will take
the function_result_decl's assignment. Since it's a pointer,
we can short-circuit a number of the tests below, and we must
duplicate them because we don't have the function_result_decl
to test. */
if (!targetm.calls.allocate_stack_slots_for_args ())
return true;
/* We don't set DECL_IGNORED_P for the function_result_decl. */
if (optimize)
return true;
if (cfun->tail_call_marked)
return true;
/* We don't set DECL_REGISTER for the function_result_decl. */
return false;
}
/* Only register-like things go in registers. */
if (DECL_MODE (decl) == BLKmode)
return false;
/* If -ffloat-store specified, don't put explicit float variables
into registers. */
/* ??? This should be checked after DECL_ARTIFICIAL, but tree-ssa
propagates values across these stores, and it probably shouldn't. */
if (flag_float_store && FLOAT_TYPE_P (TREE_TYPE (decl)))
return false;
if (!targetm.calls.allocate_stack_slots_for_args ())
return true;
/* If we're not interested in tracking debugging information for
this decl, then we can certainly put it in a register. */
if (DECL_IGNORED_P (decl))
return true;
if (optimize)
return true;
/* Thunks force a tail call even at -O0 so we need to avoid creating a
dangling reference in case the parameter is passed by reference. */
if (TREE_CODE (decl) == PARM_DECL && cfun->tail_call_marked)
return true;
if (!DECL_REGISTER (decl))
return false;
/* When not optimizing, disregard register keyword for types that
could have methods, otherwise the methods won't be callable from
the debugger. */
if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (decl)))
return false;
return true;
}
/* Structures to communicate between the subroutines of assign_parms.
The first holds data persistent across all parameters, the second
is cleared out for each parameter. */
struct assign_parm_data_all
{
/* When INIT_CUMULATIVE_ARGS gets revamped, allocating CUMULATIVE_ARGS
should become a job of the target or otherwise be encapsulated. */
CUMULATIVE_ARGS args_so_far_v;
cumulative_args_t args_so_far;
struct args_size stack_args_size;
tree function_result_decl;
tree orig_fnargs;
rtx_insn *first_conversion_insn;
rtx_insn *last_conversion_insn;
HOST_WIDE_INT pretend_args_size;
HOST_WIDE_INT extra_pretend_bytes;
int reg_parm_stack_space;
};
struct assign_parm_data_one
{
tree nominal_type;
function_arg_info arg;
rtx entry_parm;
rtx stack_parm;
machine_mode nominal_mode;
machine_mode passed_mode;
struct locate_and_pad_arg_data locate;
int partial;
};
/* A subroutine of assign_parms. Initialize ALL. */
static void
assign_parms_initialize_all (struct assign_parm_data_all *all)
{
tree fntype ATTRIBUTE_UNUSED;
memset (all, 0, sizeof (*all));
fntype = TREE_TYPE (current_function_decl);
#ifdef INIT_CUMULATIVE_INCOMING_ARGS
INIT_CUMULATIVE_INCOMING_ARGS (all->args_so_far_v, fntype, NULL_RTX);
#else
INIT_CUMULATIVE_ARGS (all->args_so_far_v, fntype, NULL_RTX,
current_function_decl, -1);
#endif
all->args_so_far = pack_cumulative_args (&all->args_so_far_v);
#ifdef INCOMING_REG_PARM_STACK_SPACE
all->reg_parm_stack_space
= INCOMING_REG_PARM_STACK_SPACE (current_function_decl);
#endif
}
/* If ARGS contains entries with complex types, split each such entry
into two entries of the component type. The vector is modified in
place; a synthetic decl for the imaginary part is inserted after the
entry for the real part. */
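/* For example, assuming a target whose split_complex_arg hook accepts
COMPLEX_TYPE arguments, a PARM_DECL of type _Complex double is
rewritten in place as a double holding the real part, and a synthetic
PARM_DECL for the imaginary double is inserted immediately after
it. */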
static void
split_complex_args (vec<tree> *args)
{
unsigned i;
tree p;
FOR_EACH_VEC_ELT (*args, i, p)
{
tree type = TREE_TYPE (p);
if (TREE_CODE (type) == COMPLEX_TYPE
&& targetm.calls.split_complex_arg (type))
{
tree decl;
tree subtype = TREE_TYPE (type);
bool addressable = TREE_ADDRESSABLE (p);
/* Rewrite the PARM_DECL's type with its component. */
p = copy_node (p);
TREE_TYPE (p) = subtype;
DECL_ARG_TYPE (p) = TREE_TYPE (DECL_ARG_TYPE (p));
SET_DECL_MODE (p, VOIDmode);
DECL_SIZE (p) = NULL;
DECL_SIZE_UNIT (p) = NULL;
/* If this arg must go in memory, put it in a pseudo here.
We can't allow it to go in memory as per normal parms,
because the usual place might not have the imag part
adjacent to the real part. */
DECL_ARTIFICIAL (p) = addressable;
DECL_IGNORED_P (p) = addressable;
TREE_ADDRESSABLE (p) = 0;
layout_decl (p, 0);
(*args)[i] = p;
/* Build a second synthetic decl. */
decl = build_decl (EXPR_LOCATION (p),
PARM_DECL, NULL_TREE, subtype);
DECL_ARG_TYPE (decl) = DECL_ARG_TYPE (p);
DECL_ARTIFICIAL (decl) = addressable;
DECL_IGNORED_P (decl) = addressable;
layout_decl (decl, 0);
args->safe_insert (++i, decl);
}
}
}
/* A subroutine of assign_parms. Adjust the parameter list to incorporate
the hidden struct return argument, and (abi willing) complex args.
Return the new parameter list. */
static vec<tree>
assign_parms_augmented_arg_list (struct assign_parm_data_all *all)
{
tree fndecl = current_function_decl;
tree fntype = TREE_TYPE (fndecl);
vec<tree> fnargs = vNULL;
tree arg;
for (arg = DECL_ARGUMENTS (fndecl); arg; arg = DECL_CHAIN (arg))
fnargs.safe_push (arg);
all->orig_fnargs = DECL_ARGUMENTS (fndecl);
/* If struct value address is treated as the first argument, make it so. */
if (aggregate_value_p (DECL_RESULT (fndecl), fndecl)
&& ! cfun->returns_pcc_struct
&& targetm.calls.struct_value_rtx (TREE_TYPE (fndecl), 1) == 0)
{
tree type = build_pointer_type (TREE_TYPE (fntype));
tree decl;
decl = build_decl (DECL_SOURCE_LOCATION (fndecl),
PARM_DECL, get_identifier (".result_ptr"), type);
DECL_ARG_TYPE (decl) = type;
DECL_ARTIFICIAL (decl) = 1;
DECL_NAMELESS (decl) = 1;
TREE_CONSTANT (decl) = 1;
/* We don't set DECL_IGNORED_P or DECL_REGISTER here. If this
changes, the end of the RESULT_DECL handling block in
use_register_for_decl must be adjusted to match. */
DECL_CHAIN (decl) = all->orig_fnargs;
all->orig_fnargs = decl;
fnargs.safe_insert (0, decl);
all->function_result_decl = decl;
}
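/* As an illustration: for a function such as
struct big f (void);
on a target where struct_value_rtx is null and -fpcc-struct-return is
not in effect, the augmented list behaves as if the source had been
void f (struct big *.result_ptr);
with the synthetic pointer parameter prepended. */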
/* If the target wants to split complex arguments into scalars, do so. */
if (targetm.calls.split_complex_arg)
split_complex_args (&fnargs);
return fnargs;
}
/* A subroutine of assign_parms. Examine PARM and pull out type and mode
data for the parameter. Incorporate ABI specifics such as pass-by-
reference and type promotion. */
static void
assign_parm_find_data_types (struct assign_parm_data_all *all, tree parm,
struct assign_parm_data_one *data)
{
int unsignedp;
#ifndef BROKEN_VALUE_INITIALIZATION
*data = assign_parm_data_one ();
#else
/* Old versions of GCC used to miscompile the above by only initializing
the members with explicit constructors and copying garbage
to the other members. */
assign_parm_data_one zero_data = {};
*data = zero_data;
#endif
/* NAMED_ARG is a misnomer. We really mean 'non-variadic'. */
if (!cfun->stdarg)
data->arg.named = 1; /* No variadic parms. */
else if (DECL_CHAIN (parm))
data->arg.named = 1; /* Not the last non-variadic parm. */
else if (targetm.calls.strict_argument_naming (all->args_so_far))
data->arg.named = 1; /* Only variadic ones are unnamed. */
else
data->arg.named = 0; /* Treat as variadic. */
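/* E.g., for int f (int a, int b, ...): A is named because it still has
a DECL_CHAIN; B is the last non-variadic parm, so it is treated as
named only if the target's strict_argument_naming hook says so. */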
data->nominal_type = TREE_TYPE (parm);
data->arg.type = DECL_ARG_TYPE (parm);
/* Look out for errors propagating this far. Also, if the parameter's
type is void then its value doesn't matter. */
if (TREE_TYPE (parm) == error_mark_node
/* This can happen after weird syntax errors
or if an enum type is defined among the parms. */
|| TREE_CODE (parm) != PARM_DECL
|| data->arg.type == NULL
|| VOID_TYPE_P (data->nominal_type))
{
data->nominal_type = data->arg.type = void_type_node;
data->nominal_mode = data->passed_mode = data->arg.mode = VOIDmode;
return;
}
/* Find mode of arg as it is passed, and mode of arg as it should be
during execution of this function. */
data->passed_mode = data->arg.mode = TYPE_MODE (data->arg.type);
data->nominal_mode = TYPE_MODE (data->nominal_type);
/* If the parm is to be passed as a transparent union or record, use the
type of the first field for the tests below. We have already verified
that the modes are the same. */
if (RECORD_OR_UNION_TYPE_P (data->arg.type)
&& TYPE_TRANSPARENT_AGGR (data->arg.type))
data->arg.type = TREE_TYPE (first_field (data->arg.type));
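/* For instance, given
union u { int *p; } __attribute__ ((transparent_union));
a parameter declared as union u is treated from here on as if it had
the type of its first field, int *. */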
/* See if this arg was passed by invisible reference. */
if (apply_pass_by_reference_rules (&all->args_so_far_v, data->arg))
{
data->nominal_type = data->arg.type;
data->passed_mode = data->nominal_mode = data->arg.mode;
}
/* Find mode as it is passed by the ABI. */
unsignedp = TYPE_UNSIGNED (data->arg.type);
data->arg.mode
= promote_function_mode (data->arg.type, data->arg.mode, &unsignedp,
TREE_TYPE (current_function_decl), 0);
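/* E.g., on a hypothetical 64-bit target whose ABI promotes sub-word
integer arguments to full register width, a 32-bit int argument
leaves data->nominal_mode as SImode while data->arg.mode becomes
DImode. */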
}
/* A subroutine of assign_parms. Invoke setup_incoming_varargs. */
static void
assign_parms_setup_varargs (struct assign_parm_data_all *all,
struct assign_parm_data_one *data, bool no_rtl)
{
int varargs_pretend_bytes = 0;
function_arg_info last_named_arg = data->arg;
last_named_arg.named = true;
targetm.calls.setup_incoming_varargs (all->args_so_far, last_named_arg,
&varargs_pretend_bytes, no_rtl);
/* If the back-end has requested extra stack space, record how much is
needed. Do not change pretend_args_size otherwise since it may be
nonzero from an earlier partial argument. */
if (varargs_pretend_bytes > 0)
all->pretend_args_size = varargs_pretend_bytes;
}
/* A subroutine of assign_parms. Set DATA->ENTRY_PARM corresponding to
the incoming location of the current parameter. */
static void
assign_parm_find_entry_rtl (struct assign_parm_data_all *all,
struct assign_parm_data_one *data)
{
HOST_WIDE_INT pretend_bytes = 0;
rtx entry_parm;
bool in_regs;
if (data->arg.mode == VOIDmode)
{
data->entry_parm = data->stack_parm = const0_rtx;
return;
}
targetm.calls.warn_parameter_passing_abi (all->args_so_far,
data->arg.type);
entry_parm = targetm.calls.function_incoming_arg (all->args_so_far,
data->arg);
if (entry_parm == 0)
data->arg.mode = data->passed_mode;
/* Determine parm's home in the stack, in case it arrives in the stack
or we should pretend it did. Compute the stack position and rtx where
the argument arrives and its size.
There is one complexity here: If this was a parameter that would
have been passed in registers, but wasn't only because it is
__builtin_va_alist, we want locate_and_pad_parm to treat it as if
it came in a register so that REG_PARM_STACK_SPACE isn't skipped.
In this case, we call the function_incoming_arg hook with NAMED set to
1 instead of 0 as it was the previous time. */
in_regs = (entry_parm != 0);
#ifdef STACK_PARMS_IN_REG_PARM_AREA
in_regs = true;
#endif
if (!in_regs && !data->arg.named)
{
if (targetm.calls.pretend_outgoing_varargs_named (all->args_so_far))
{
rtx tem;
function_arg_info named_arg = data->arg;
named_arg.named = true;
tem = targetm.calls.function_incoming_arg (all->args_so_far,
named_arg);
in_regs = tem != NULL;
}
}
/* If this parameter was passed both in registers and in the stack, use
the copy on the stack. */
if (targetm.calls.must_pass_in_stack (data->arg))
entry_parm = 0;
if (entry_parm)
{
int partial;
partial = targetm.calls.arg_partial_bytes (all->args_so_far, data->arg);
data->partial = partial;
/* The caller might already have allocated stack space for the
register parameters. */
if (partial != 0 && all->reg_parm_stack_space == 0)
{
/* Part of this argument is passed in registers and part
is passed on the stack. Ask the prologue code to extend
the stack part so that we can recreate the full value.
PRETEND_BYTES is the size of the registers we need to store.
CURRENT_FUNCTION_PRETEND_ARGS_SIZE is the amount of extra
stack space that the prologue should allocate.
Internally, GCC assumes that the argument pointer is aligned
to STACK_BOUNDARY bits. This is used both for alignment
optimizations (see init_emit) and to locate arguments that are
aligned to more than PARM_BOUNDARY bits. We must preserve this
invariant by rounding CURRENT_FUNCTION_PRETEND_ARGS_SIZE up to
a stack boundary. */
/* We assume at most one partial arg, and it must be the first
argument on the stack. */
gcc_assert (!all->extra_pretend_bytes && !all->pretend_args_size);
pretend_bytes = partial;
all->pretend_args_size = CEIL_ROUND (pretend_bytes, STACK_BYTES);
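/* E.g., assuming STACK_BYTES == 16, a 12-byte register part rounds up
as CEIL_ROUND (12, 16) == (12 + 15) & ~15 == 16. */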
/* We want to align relative to the actual stack pointer, so
don't include this in the stack size until later. */
all->extra_pretend_bytes = all->pretend_args_size;
}
}
locate_and_pad_parm (data->arg.mode, data->arg.type, in_regs,
all->reg_parm_stack_space,
entry_parm ? data->partial : 0, current_function_decl,
&all->stack_args_size, &data->locate);
/* Update parm_stack_boundary if this parameter is passed in the
stack. */
if (!in_regs && crtl->parm_stack_boundary < data->locate.boundary)
crtl->parm_stack_boundary = data->locate.boundary;
/* Adjust offsets to include the pretend args. */
pretend_bytes = all->extra_pretend_bytes - pretend_bytes;
data->locate.slot_offset.constant += pretend_bytes;
data->locate.offset.constant += pretend_bytes;
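/* Continuing the hypothetical above (partial == 12 rounded up to 16):
this argument's own offsets move by 16 - 12 == 4, while every later
argument, whose local PRETEND_BYTES is 0, is shifted by the full
16 bytes recorded in EXTRA_PRETEND_BYTES. */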
data->entry_parm = entry_parm;
}
/* A subroutine of assign_parms. If there is actually space on the stack
for this parm, count it in stack_args_size and return true. */
static bool
assign_parm_is_stack_parm (struct assign_parm_data_all *all,
struct assign_parm_data_one *data)
{
/* Trivially true if we've no incoming register. */
if (data->entry_parm == NULL)
;
/* Also true if we're partially in registers and partially not,
since we've arranged to drop the entire argument on the stack. */
else if (data->partial != 0)
;
/* Also true if the target says that it's passed in both registers
and on the stack. */
else if (GET_CODE (data->entry_parm) == PARALLEL
&& XEXP (XVECEXP (data->entry_parm, 0, 0), 0) == NULL_RTX)
;
/* Also true if the target says that there's stack allocated for
all register parameters. */
else if (all->reg_parm_stack_space > 0)
;
/* Otherwise, no, this parameter has no ABI defined stack slot. */
else
return false;
all->stack_args_size.constant += data->locate.size.constant;
if (data->locate.size.var)
ADD_PARM_SIZE (all->stack_args_size, data->locate.size.var);
return true;
}
/* A subroutine of assign_parms. Given that this parameter is allocated
stack space by the ABI, find it. */
static void
assign_parm_find_stack_rtl (tree parm, struct assign_parm_data_one *data)
{
rtx offset_rtx, stack_parm;
unsigned int align, boundary;
/* If we're passing this arg using a reg, make its stack home the
aligned stack slot. */
if (data->entry_parm)
offset_rtx = ARGS_SIZE_RTX (data->locate.slot_offset);
else
offset_rtx = ARGS_SIZE_RTX (data->locate.offset);
stack_parm = crtl->args.internal_arg_pointer;
if (offset_rtx != const0_rtx)
stack_parm = gen_rtx_PLUS (Pmode, stack_parm, offset_rtx);
stack_parm = gen_rtx_MEM (data->arg.mode, stack_parm);
if (!data->arg.pass_by_reference)
{
set_mem_attributes (stack_parm, parm, 1);
/* set_mem_attributes could set MEM_SIZE to the passed mode's size,
while promoted mode's size is needed. */
if (data->arg.mode != BLKmode
&& data->arg.mode != DECL_MODE (parm))
{
set_mem_size (stack_parm, GET_MODE_SIZE (data->arg.mode));
if (MEM_EXPR (stack_parm) && MEM_OFFSET_KNOWN_P (stack_parm))
{
poly_int64 offset = subreg_lowpart_offset (DECL_MODE (parm),
data->arg.mode);
if (maybe_ne (offset, 0))
set_mem_offset (stack_parm, MEM_OFFSET (stack_parm) - offset);
}
}
}
boundary = data->locate.boundary;
align = BITS_PER_UNIT;
/* If we're padding upward, we know that the alignment of the slot
is TARGET_FUNCTION_ARG_BOUNDARY. If we're using slot_offset, we're
intentionally forcing upward padding. Otherwise we have to come
up with a guess at the alignment based on OFFSET_RTX. */
poly_int64 offset;
if (data->locate.where_pad == PAD_NONE || data->entry_parm)
align = boundary;
else if (data->locate.where_pad == PAD_UPWARD)
{
align = boundary;
/* If the argument offset is actually more aligned than the nominal
stack slot boundary, take advantage of that excess alignment.
Don't make any assumptions if STACK_POINTER_OFFSET is in use. */
if (poly_int_rtx_p (offset_rtx, &offset)
&& known_eq (STACK_POINTER_OFFSET, 0))
{
unsigned int offset_align = known_alignment (offset) * BITS_PER_UNIT;
if (offset_align == 0 || offset_align > STACK_BOUNDARY)
offset_align = STACK_BOUNDARY;
align = MAX (align, offset_align);
}
}
else if (poly_int_rtx_p (offset_rtx, &offset))
{
align = least_bit_hwi (boundary);
unsigned int offset_align = known_alignment (offset) * BITS_PER_UNIT;
if (offset_align != 0)
align = MIN (align, offset_align);
}
set_mem_align (stack_parm, align);
if (data->entry_parm)
set_reg_attrs_for_parm (data->entry_parm, stack_parm);
data->stack_parm = stack_parm;
}
/* A subroutine of assign_parms. Adjust DATA->ENTRY_PARM such that it's
always valid and contiguous. */
static void
assign_parm_adjust_entry_rtl (struct assign_parm_data_one *data)
{
rtx entry_parm = data->entry_parm;
rtx stack_parm = data->stack_parm;
/* If this parm was passed part in regs and part in memory, pretend it
arrived entirely in memory by pushing the register-part onto the stack.
In the special case of a DImode or DFmode that is split, we could put
it together in a pseudoreg directly, but for now that's not worth
bothering with. */
if (data->partial != 0)
{
/* Handle calls that pass values in multiple non-contiguous
locations. The Irix 6 ABI has examples of this. */
if (GET_CODE (entry_parm) == PARALLEL)
emit_group_store (validize_mem (copy_rtx (stack_parm)), entry_parm,
data->arg.type, int_size_in_bytes (data->arg.type));
else
{
gcc_assert (data->partial % UNITS_PER_WORD == 0);
move_block_from_reg (REGNO (entry_parm),
validize_mem (copy_rtx (stack_parm)),
data->partial / UNITS_PER_WORD);
}
entry_parm = stack_parm;
}
/* If we didn't decide this parm came in a register, by default it came
on the stack. */
else if (entry_parm == NULL)
entry_parm = stack_parm;
/* When an argument is passed in multiple locations, we can't make use
of this information, but we can save some copying if the whole argument
is passed in a single register. */
else if (GET_CODE (entry_parm) == PARALLEL
&& data->nominal_mode != BLKmode
&& data->passed_mode != BLKmode)
{
size_t i, len = XVECLEN (entry_parm, 0);
for (i = 0; i < len; i++)
if (XEXP (XVECEXP (entry_parm, 0, i), 0) != NULL_RTX
&& REG_P (XEXP (XVECEXP (entry_parm, 0, i), 0))
&& (GET_MODE (XEXP (XVECEXP (entry_parm, 0, i), 0))
== data->passed_mode)
&& INTVAL (XEXP (XVECEXP (entry_parm, 0, i), 1)) == 0)
{
entry_parm = XEXP (XVECEXP (entry_parm, 0, i), 0);
break;
}
}
data->entry_parm = entry_parm;
}
/* A subroutine of assign_parms. Reconstitute any values which were
passed in multiple registers and would fit in a single register. */
static void
assign_parm_remove_parallels (struct assign_parm_data_one *data)
{
rtx entry_parm = data->entry_parm;
/* Convert the PARALLEL to a REG of the same mode as the parallel.
This can be done with register operations rather than on the
stack, even if we will store the reconstituted parameter on the
stack later. */
if (GET_CODE (entry_parm) == PARALLEL && GET_MODE (entry_parm) != BLKmode)
{
rtx parmreg = gen_reg_rtx (GET_MODE (entry_parm));
emit_group_store (parmreg, entry_parm, data->arg.type,
GET_MODE_SIZE (GET_MODE (entry_parm)));
entry_parm = parmreg;
}
data->entry_parm = entry_parm;
}
/* A subroutine of assign_parms. Adjust DATA->STACK_PARM such that it's
always valid and properly aligned. */
static void
assign_parm_adjust_stack_rtl (struct assign_parm_data_one *data)
{
rtx stack_parm = data->stack_parm;
/* If we can't trust the parm stack slot to be aligned enough for its
ultimate type, don't use that slot after entry. We'll make another
stack slot, if we need one. */
if (stack_parm
&& ((GET_MODE_ALIGNMENT (data->nominal_mode) > MEM_ALIGN (stack_parm)
&& ((optab_handler (movmisalign_optab, data->nominal_mode)
!= CODE_FOR_nothing)
|| targetm.slow_unaligned_access (data->nominal_mode,
MEM_ALIGN (stack_parm))))
|| (data->nominal_type
&& TYPE_ALIGN (data->nominal_type) > MEM_ALIGN (stack_parm)
&& MEM_ALIGN (stack_parm) < PREFERRED_STACK_BOUNDARY)))
stack_parm = NULL;
/* If parm was passed in memory, and we need to convert it on entry,
don't store it back in that same slot. */
else if (data->entry_parm == stack_parm
&& data->nominal_mode != BLKmode
&& data->nominal_mode != data->passed_mode)
stack_parm = NULL;
/* If stack protection is in effect for this function, don't leave any
pointers in their passed stack slots. */
else if (crtl->stack_protect_guard
&& (flag_stack_protect == SPCT_FLAG_ALL
|| data->arg.pass_by_reference
|| POINTER_TYPE_P (data->nominal_type)))
stack_parm = NULL;
data->stack_parm = stack_parm;
}
/* A subroutine of assign_parms. Return true if the current parameter
should be stored as a BLKmode in the current frame. */
static bool
assign_parm_setup_block_p (struct assign_parm_data_one *data)
{
if (data->nominal_mode == BLKmode)
return true;
if (GET_MODE (data->entry_parm) == BLKmode)
return true;
#ifdef BLOCK_REG_PADDING
/* Only assign_parm_setup_block knows how to deal with register arguments
that are padded at the least significant end. */
if (REG_P (data->entry_parm)
&& known_lt (GET_MODE_SIZE (data->arg.mode), UNITS_PER_WORD)
&& (BLOCK_REG_PADDING (data->passed_mode, data->arg.type, 1)
== (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)))
return true;
#endif
return false;
}
/* A subroutine of assign_parms. Arrange for the parameter to be
present and valid in DATA->STACK_PARM. */
static void
assign_parm_setup_block (struct assign_parm_data_all *all,
tree parm, struct assign_parm_data_one *data)
{
rtx entry_parm = data->entry_parm;
rtx stack_parm = data->stack_parm;
rtx target_reg = NULL_RTX;
bool in_conversion_seq = false;
HOST_WIDE_INT size;
HOST_WIDE_INT size_stored;
if (GET_CODE (entry_parm) == PARALLEL)
entry_parm = emit_group_move_into_temps (entry_parm);
/* If we want the parameter in a pseudo, don't use a stack slot. */
if (is_gimple_reg (parm) && use_register_for_decl (parm))
{
tree def = ssa_default_def (cfun, parm);
gcc_assert (def);
machine_mode mode = promote_ssa_mode (def, NULL);
rtx reg = gen_reg_rtx (mode);
if (GET_CODE (reg) != CONCAT)
stack_parm = reg;
else
{
target_reg = reg;
/* Avoid allocating a stack slot, if there isn't one
preallocated by the ABI. It might seem like we should
always prefer a pseudo, but converting between
floating-point and integer modes goes through the stack
on various machines, so it's better to use the reserved
stack slot than to risk wasting it and allocating more
for the conversion. */
if (stack_parm == NULL_RTX)
{
int save = generating_concat_p;
generating_concat_p = 0;
stack_parm = gen_reg_rtx (mode);
generating_concat_p = save;
}
}
data->stack_parm = NULL;
}
size = int_size_in_bytes (data->arg.type);
size_stored = CEIL_ROUND (size, UNITS_PER_WORD);
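/* E.g., a 10-byte aggregate on a target with 8-byte words is stored in
CEIL_ROUND (10, 8) == 16 bytes, an integral number of words. */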
if (stack_parm == 0)
{
HOST_WIDE_INT parm_align
= (STRICT_ALIGNMENT
? MAX (DECL_ALIGN (parm), BITS_PER_WORD) : DECL_ALIGN (parm));
SET_DECL_ALIGN (parm, parm_align);
if (DECL_ALIGN (parm) > MAX_SUPPORTED_STACK_ALIGNMENT)
{
rtx allocsize = gen_int_mode (size_stored, Pmode);
get_dynamic_stack_size (&allocsize, 0, DECL_ALIGN (parm), NULL);
stack_parm = assign_stack_local (BLKmode, UINTVAL (allocsize),
MAX_SUPPORTED_STACK_ALIGNMENT);
rtx addr = align_dynamic_address (XEXP (stack_parm, 0),
DECL_ALIGN (parm));
mark_reg_pointer (addr, DECL_ALIGN (parm));
stack_parm = gen_rtx_MEM (GET_MODE (stack_parm), addr);
MEM_NOTRAP_P (stack_parm) = 1;
}
else
stack_parm = assign_stack_local (BLKmode, size_stored,
DECL_ALIGN (parm));
if (known_eq (GET_MODE_SIZE (GET_MODE (entry_parm)), size))
PUT_MODE (stack_parm, GET_MODE (entry_parm));
set_mem_attributes (stack_parm, parm, 1);
}
/* If a BLKmode arrives in registers, copy it to a stack slot. Handle
calls that pass values in multiple non-contiguous locations. */
if (REG_P (entry_parm) || GET_CODE (entry_parm) == PARALLEL)
{
rtx mem;
/* Note that we will be storing an integral number of words.
So we have to be careful to ensure that we allocate an
integral number of words. We do this above when we call
assign_stack_local if space was not allocated in the argument
list. If it was, this will not work if PARM_BOUNDARY is not
a multiple of BITS_PER_WORD. It isn't clear how to fix this
if it becomes a problem. The exception is when BLKmode arrives
with arguments not conforming to word_mode. */
if (data->stack_parm == 0)
;
else if (GET_CODE (entry_parm) == PARALLEL)
;
else
gcc_assert (!size || !(PARM_BOUNDARY % BITS_PER_WORD));
mem = validize_mem (copy_rtx (stack_parm));
/* Handle values in multiple non-contiguous locations. */
if (GET_CODE (entry_parm) == PARALLEL && !MEM_P (mem))
emit_group_store (mem, entry_parm, data->arg.type, size);
else if (GET_CODE (entry_parm) == PARALLEL)
{
push_to_sequence2 (all->first_conversion_insn,
all->last_conversion_insn);
emit_group_store (mem, entry_parm, data->arg.type, size);
all->first_conversion_insn = get_insns ();
all->last_conversion_insn = get_last_insn ();
end_sequence ();
in_conversion_seq = true;
}
else if (size == 0)
;
/* If SIZE is that of a mode no bigger than a word, just use
that mode's store operation. */
else if (size <= UNITS_PER_WORD)
{
unsigned int bits = size * BITS_PER_UNIT;
machine_mode mode = int_mode_for_size (bits, 0).else_blk ();
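/* E.g., for size == 2 this yields HImode on typical targets; if no
integer mode of exactly BITS bits exists, MODE is BLKmode and the
fallback paths below apply. */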
if (mode != BLKmode
#ifdef BLOCK_REG_PADDING
&& (size == UNITS_PER_WORD
|| (BLOCK_REG_PADDING (mode, data->arg.type, 1)
!= (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)))
#endif
)
{
rtx reg;
/* We are really truncating a word_mode value containing
SIZE bytes into a value of mode MODE. If such an
operation requires no actual instructions, we can refer
to the value directly in mode MODE, otherwise we must
start with the register in word_mode and explicitly
convert it. */
if (mode == word_mode
|| TRULY_NOOP_TRUNCATION_MODES_P (mode, word_mode))
reg = gen_rtx_REG (mode, REGNO (entry_parm));
else
{
reg = gen_rtx_REG (word_mode, REGNO (entry_parm));
reg = convert_to_mode (mode, copy_to_reg (reg), 1);
}
/* We use adjust_address to get a new MEM with the mode
changed. adjust_address is better than change_address
for this purpose because adjust_address does not lose
the MEM_EXPR associated with the MEM.
If the MEM_EXPR is lost, then optimizations like DSE
assume the MEM escapes and thus is not subject to DSE. */
emit_move_insn (adjust_address (mem, mode, 0), reg);
}
#ifdef BLOCK_REG_PADDING
/* Storing the register in memory as a full word, as
move_block_from_reg below would do, and then using the
MEM in a smaller mode, has the effect of shifting right
if BYTES_BIG_ENDIAN. If we're bypassing memory, the
shifting must be explicit. */
else if (!MEM_P (mem))
{
rtx x;
/* If the assert below fails, we should have taken the
mode != BLKmode path above, unless we have downward
padding of smaller-than-word arguments on a machine
with little-endian bytes, which would likely require
additional changes to work correctly. */
gcc_checking_assert (BYTES_BIG_ENDIAN
&& (BLOCK_REG_PADDING (mode,
data->arg.type, 1)
== PAD_UPWARD));
int by = (UNITS_PER_WORD - size) * BITS_PER_UNIT;
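/* E.g., with 8-byte words and SIZE == 3, BY == 40: the three
significant bytes sit at the most significant end of the big-endian
word, so shifting right by 40 bits moves them into the low end where
MODE expects them. */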
x = gen_rtx_REG (word_mode, REGNO (entry_parm));
x = expand_shift (RSHIFT_EXPR, word_mode, x, by,
NULL_RTX, 1);
x = force_reg (word_mode, x);
x = gen_lowpart_SUBREG (GET_MODE (mem), x);
emit_move_insn (mem, x);
}
#endif
/* Blocks smaller than a word on a BYTES_BIG_ENDIAN
machine must be aligned to the left before storing
to memory. Note that the previous test doesn't
handle all cases (e.g. SIZE == 3). */
else if (size != UNITS_PER_WORD
#ifdef BLOCK_REG_PADDING
&& (BLOCK_REG_PADDING (mode, data->arg.type, 1)
== PAD_DOWNWARD)
#else
&& BYTES_BIG_ENDIAN
#endif
)
{
rtx tem, x;
int by = (UNITS_PER_WORD - size) * BITS_PER_UNIT;
rtx reg = gen_rtx_REG (word_mode, REGNO (entry_parm));
x = expand_shift (LSHIFT_EXPR, word_mode, reg, by, NULL_RTX, 1);
tem = change_address (mem, word_mode, 0);
emit_move_insn (tem, x);
}
else
move_block_from_reg (REGNO (entry_parm), mem,
size_stored / UNITS_PER_WORD);
}
else if (!MEM_P (mem))
{
gcc_checking_assert (size > UNITS_PER_WORD);
#ifdef BLOCK_REG_PADDING
gcc_checking_assert (BLOCK_REG_PADDING (GET_MODE (mem),
data->arg.type, 0)
== PAD_UPWARD);
#endif
emit_move_insn (mem, entry_parm);
}
else
move_block_from_reg (REGNO (entry_parm), mem,
size_stored / UNITS_PER_WORD);
}
else if (data->stack_parm == 0 && !TYPE_EMPTY_P (data->arg.type))
{
push_to_sequence2 (all->first_conversion_insn, all->last_conversion_insn);
emit_block_move (stack_parm, data->entry_parm, GEN_INT (size),
BLOCK_OP_NORMAL);
all->first_conversion_insn = get_insns ();
all->last_conversion_insn = get_last_insn ();
end_sequence ();
in_conversion_seq = true;
}
if (target_reg)
{
if (!in_conversion_seq)
emit_move_insn (target_reg, stack_parm);
else
{
push_to_sequence2 (all->first_conversion_insn,
all->last_conversion_insn);
emit_move_insn (target_reg, stack_parm);
all->first_conversion_insn = get_insns ();
all->last_conversion_insn = get_last_insn ();
end_sequence ();
}
stack_parm = target_reg;
}
data->stack_parm = stack_parm;
set_parm_rtl (parm, stack_parm);
}
/* A subroutine of assign_parms. Allocate a pseudo to hold the current
parameter. Get it there. Perform all ABI specified conversions. */
static void
assign_parm_setup_reg (struct assign_parm_data_all *all, tree parm,
struct assign_parm_data_one *data)
{
rtx parmreg, validated_mem;
rtx equiv_stack_parm;
machine_mode promoted_nominal_mode;
int unsignedp = TYPE_UNSIGNED (TREE_TYPE (parm));
bool did_conversion = false;
bool need_conversion, moved;
enum insn_code icode;
rtx rtl;
/* Store the parm in a pseudoregister during the function, but we may
need to do it in a wider mode. Using 2 here makes the result
consistent with promote_decl_mode and thus expand_expr_real_1. */
promoted_nominal_mode
= promote_function_mode (data->nominal_type, data->nominal_mode, &unsignedp,
TREE_TYPE (current_function_decl), 2);
parmreg = gen_reg_rtx (promoted_nominal_mode);
if (!DECL_ARTIFICIAL (parm))
mark_user_reg (parmreg);
/* If this was an item that we received a pointer to,
set rtl appropriately. */
if (data->arg.pass_by_reference)
{
rtl = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (data->arg.type)), parmreg);
set_mem_attributes (rtl, parm, 1);
}
else
rtl = parmreg;
assign_parm_remove_parallels (data);
/* Copy the value into the register, thus bridging between
assign_parm_find_data_types and expand_expr_real_1. */
equiv_stack_parm = data->stack_parm;
validated_mem = validize_mem (copy_rtx (data->entry_parm));
need_conversion = (data->nominal_mode != data->passed_mode
|| promoted_nominal_mode != data->arg.mode);
moved = false;
if (need_conversion
&& GET_MODE_CLASS (data->nominal_mode) == MODE_INT
&& data->nominal_mode == data->passed_mode
&& data->nominal_mode == GET_MODE (data->entry_parm))
{
/* ENTRY_PARM has been converted to PROMOTED_MODE, its
mode, by the caller. We now have to convert it to
NOMINAL_MODE, if different. However, PARMREG may be in
a different mode than NOMINAL_MODE if it is being stored
promoted.
If ENTRY_PARM is a hard register, it might be in a register
not valid for operating in its mode (e.g., an odd-numbered
register for a DFmode). In that case, moves are the only
thing valid, so we can't do a convert from there. This
occurs when the calling sequence allows such misaligned
usages.
In addition, the conversion may involve a call, which could
clobber parameters which haven't been copied to pseudo
registers yet.
First, we try to emit an insn which performs the necessary
conversion. We verify that this insn does not clobber any
hard registers. */
rtx op0, op1;
icode = can_extend_p (promoted_nominal_mode, data->passed_mode,
unsignedp);
op0 = parmreg;
op1 = validated_mem;