/* Target machine subroutines for Altera Nios II.
Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Jonah Graham (jgraham@altera.com),
Will Reece (wreece@altera.com), and Jeff DaSilva (jdasilva@altera.com).
Contributed by Mentor Graphics, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#define IN_TARGET_CODE 1
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "stringpool.h"
#include "attribs.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "explow.h"
#include "calls.h"
#include "varasm.h"
#include "expr.h"
#include "toplev.h"
#include "langhooks.h"
#include "stor-layout.h"
#include "builtins.h"
#include "tree-pass.h"
#include "xregex.h"
#include "opts.h"
/* This file should be included last. */
#include "target-def.h"
/* Forward function declarations. */
static bool nios2_symbolic_constant_p (rtx);
static bool prologue_saved_reg_p (unsigned);
static void nios2_load_pic_register (void);
static void nios2_register_custom_code (unsigned int, enum nios2_ccs_code, int);
static const char *nios2_unspec_reloc_name (int);
static void nios2_register_builtin_fndecl (unsigned, tree);
static rtx nios2_ldst_parallel (bool, bool, bool, rtx, int,
unsigned HOST_WIDE_INT, bool);
static int nios2_address_cost (rtx, machine_mode, addr_space_t, bool);
/* Threshold for data being put into the small data/bss area, instead
of the normal data area (references to the small data/bss area take
1 instruction and use the global pointer; references to the normal
data area take 2 instructions). */
unsigned HOST_WIDE_INT nios2_section_threshold = NIOS2_DEFAULT_GVALUE;
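/* For illustration (assumed typical assembly, not emitted from this
file directly): a small-data reference is a single gp-relative load,
ldw r2, %gprel(x)(gp)
while an ordinary data reference needs a hiadj/lo pair,
movhi r2, %hiadj(x)
ldw r2, %lo(x)(r2) */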
struct GTY (()) machine_function
{
/* Current frame information, to be filled in by nios2_compute_frame_layout
with register save masks, and offsets for the current function. */
/* Mask of registers to save. */
unsigned int save_mask;
/* Number of bytes that the entire frame takes up. */
int total_size;
/* Number of bytes that variables take up. */
int var_size;
/* Number of bytes that outgoing arguments take up. */
int args_size;
/* Number of bytes needed to store registers in frame. */
int save_reg_size;
/* Number of bytes used to store callee-saved registers. */
int callee_save_reg_size;
/* Offset from new stack pointer to store registers. */
int save_regs_offset;
/* Offset from save_regs_offset to store frame pointer register. */
int fp_save_offset;
/* != 0 if function has a variable argument list. */
int uses_anonymous_args;
/* != 0 if frame layout already calculated. */
int initialized;
};
/* State to track the assignment of custom codes to FPU/custom builtins. */
static enum nios2_ccs_code custom_code_status[256];
static int custom_code_index[256];
/* Set to true if any conflicts (re-use of a code between 0-255) are found. */
static bool custom_code_conflict = false;
/* State for command-line options. */
regex_t nios2_gprel_sec_regex;
regex_t nios2_r0rel_sec_regex;
/* Definition of builtin function types for nios2. */
#define N2_FTYPES \
N2_FTYPE(1, (SF)) \
N2_FTYPE(1, (VOID)) \
N2_FTYPE(2, (DF, DF)) \
N2_FTYPE(3, (DF, DF, DF)) \
N2_FTYPE(2, (DF, SF)) \
N2_FTYPE(2, (DF, SI)) \
N2_FTYPE(2, (DF, UI)) \
N2_FTYPE(2, (SF, DF)) \
N2_FTYPE(2, (SF, SF)) \
N2_FTYPE(3, (SF, SF, SF)) \
N2_FTYPE(2, (SF, SI)) \
N2_FTYPE(2, (SF, UI)) \
N2_FTYPE(2, (SI, CVPTR)) \
N2_FTYPE(2, (SI, DF)) \
N2_FTYPE(3, (SI, DF, DF)) \
N2_FTYPE(2, (SI, SF)) \
N2_FTYPE(3, (SI, SF, SF)) \
N2_FTYPE(2, (SI, SI)) \
N2_FTYPE(3, (SI, SI, SI)) \
N2_FTYPE(3, (SI, VPTR, SI)) \
N2_FTYPE(2, (UI, CVPTR)) \
N2_FTYPE(2, (UI, DF)) \
N2_FTYPE(2, (UI, SF)) \
N2_FTYPE(2, (VOID, DF)) \
N2_FTYPE(2, (VOID, SF)) \
N2_FTYPE(2, (VOID, SI)) \
N2_FTYPE(3, (VOID, SI, SI)) \
N2_FTYPE(2, (VOID, VPTR)) \
N2_FTYPE(3, (VOID, VPTR, SI))
#define N2_FTYPE_OP1(R) N2_FTYPE_ ## R ## _VOID
#define N2_FTYPE_OP2(R, A1) N2_FTYPE_ ## R ## _ ## A1
#define N2_FTYPE_OP3(R, A1, A2) N2_FTYPE_ ## R ## _ ## A1 ## _ ## A2
/* Expand ftcode enumeration. */
enum nios2_ftcode {
#define N2_FTYPE(N,ARGS) N2_FTYPE_OP ## N ARGS,
N2_FTYPES
#undef N2_FTYPE
N2_FTYPE_MAX
};
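/* For example, the N2_FTYPE (2, (SF, SI)) entry in N2_FTYPES expands
via N2_FTYPE_OP2 (SF, SI) to the enumerator N2_FTYPE_SF_SI, giving
each (return, args...) signature a unique ftcode value. */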
/* Return the tree function type, based on the ftcode. */
static tree
nios2_ftype (enum nios2_ftcode ftcode)
{
static tree types[(int) N2_FTYPE_MAX];
tree N2_TYPE_SF = float_type_node;
tree N2_TYPE_DF = double_type_node;
tree N2_TYPE_SI = integer_type_node;
tree N2_TYPE_UI = unsigned_type_node;
tree N2_TYPE_VOID = void_type_node;
static const_tree N2_TYPE_CVPTR, N2_TYPE_VPTR;
if (!N2_TYPE_CVPTR)
{
/* const volatile void *. */
N2_TYPE_CVPTR
= build_pointer_type (build_qualified_type (void_type_node,
(TYPE_QUAL_CONST
| TYPE_QUAL_VOLATILE)));
/* volatile void *. */
N2_TYPE_VPTR
= build_pointer_type (build_qualified_type (void_type_node,
TYPE_QUAL_VOLATILE));
}
if (types[(int) ftcode] == NULL_TREE)
switch (ftcode)
{
#define N2_FTYPE_ARGS1(R) N2_TYPE_ ## R
#define N2_FTYPE_ARGS2(R,A1) N2_TYPE_ ## R, N2_TYPE_ ## A1
#define N2_FTYPE_ARGS3(R,A1,A2) N2_TYPE_ ## R, N2_TYPE_ ## A1, N2_TYPE_ ## A2
#define N2_FTYPE(N,ARGS) \
case N2_FTYPE_OP ## N ARGS: \
types[(int) ftcode] \
= build_function_type_list (N2_FTYPE_ARGS ## N ARGS, NULL_TREE); \
break;
N2_FTYPES
#undef N2_FTYPE
default: gcc_unreachable ();
}
return types[(int) ftcode];
}
/* Definition of FPU instruction descriptions. */
struct nios2_fpu_insn_info
{
const char *name;
int num_operands, *optvar;
int opt, no_opt;
#define N2F_DF 0x1
#define N2F_DFREQ 0x2
#define N2F_UNSAFE 0x4
#define N2F_FINITE 0x8
#define N2F_NO_ERRNO 0x10
unsigned int flags;
enum insn_code icode;
enum nios2_ftcode ftcode;
};
/* Base macro for defining FPU instructions. */
#define N2FPU_INSN_DEF_BASE(insn, nop, flags, icode, args) \
{ #insn, nop, &nios2_custom_ ## insn, OPT_mcustom_##insn##_, \
OPT_mno_custom_##insn, flags, CODE_FOR_ ## icode, \
N2_FTYPE_OP ## nop args }
/* Arithmetic and math functions; 2 or 3 operand FP operations. */
#define N2FPU_OP2(mode) (mode, mode)
#define N2FPU_OP3(mode) (mode, mode, mode)
#define N2FPU_INSN_DEF(code, icode, nop, flags, m, M) \
N2FPU_INSN_DEF_BASE (f ## code ## m, nop, flags, \
icode ## m ## f ## nop, N2FPU_OP ## nop (M ## F))
#define N2FPU_INSN_SF(code, nop, flags) \
N2FPU_INSN_DEF (code, code, nop, flags, s, S)
#define N2FPU_INSN_DF(code, nop, flags) \
N2FPU_INSN_DEF (code, code, nop, flags | N2F_DF, d, D)
/* Compare instructions, 3 operand FP operation with a SI result. */
#define N2FPU_CMP_DEF(code, flags, m, M) \
N2FPU_INSN_DEF_BASE (fcmp ## code ## m, 3, flags, \
nios2_s ## code ## m ## f, (SI, M ## F, M ## F))
#define N2FPU_CMP_SF(code) N2FPU_CMP_DEF (code, 0, s, S)
#define N2FPU_CMP_DF(code) N2FPU_CMP_DEF (code, N2F_DF, d, D)
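/* For example, N2FPU_INSN_SF (add, 3, 0) in the table below expands
through N2FPU_INSN_DEF (add, add, 3, 0, s, S) to
N2FPU_INSN_DEF_BASE (fadds, 3, 0, addsf3, (SF, SF, SF)), yielding
{ "fadds", 3, &nios2_custom_fadds, OPT_mcustom_fadds_,
OPT_mno_custom_fadds, 0, CODE_FOR_addsf3, N2_FTYPE_OP3 (SF, SF, SF) }. */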
/* The order of definition needs to be kept consistent with
enum n2fpu_code in nios2-opts.h. */
struct nios2_fpu_insn_info nios2_fpu_insn[] =
{
/* Single precision instructions. */
N2FPU_INSN_SF (add, 3, 0),
N2FPU_INSN_SF (sub, 3, 0),
N2FPU_INSN_SF (mul, 3, 0),
N2FPU_INSN_SF (div, 3, 0),
/* Due to textual difference between min/max and smin/smax. */
N2FPU_INSN_DEF (min, smin, 3, N2F_FINITE, s, S),
N2FPU_INSN_DEF (max, smax, 3, N2F_FINITE, s, S),
N2FPU_INSN_SF (neg, 2, 0),
N2FPU_INSN_SF (abs, 2, 0),
N2FPU_INSN_SF (sqrt, 2, 0),
N2FPU_INSN_SF (sin, 2, N2F_UNSAFE),
N2FPU_INSN_SF (cos, 2, N2F_UNSAFE),
N2FPU_INSN_SF (tan, 2, N2F_UNSAFE),
N2FPU_INSN_SF (atan, 2, N2F_UNSAFE),
N2FPU_INSN_SF (exp, 2, N2F_UNSAFE),
N2FPU_INSN_SF (log, 2, N2F_UNSAFE),
/* Single precision compares. */
N2FPU_CMP_SF (eq), N2FPU_CMP_SF (ne),
N2FPU_CMP_SF (lt), N2FPU_CMP_SF (le),
N2FPU_CMP_SF (gt), N2FPU_CMP_SF (ge),
/* Double precision instructions. */
N2FPU_INSN_DF (add, 3, 0),
N2FPU_INSN_DF (sub, 3, 0),
N2FPU_INSN_DF (mul, 3, 0),
N2FPU_INSN_DF (div, 3, 0),
/* Due to textual difference between min/max and smin/smax. */
N2FPU_INSN_DEF (min, smin, 3, N2F_FINITE, d, D),
N2FPU_INSN_DEF (max, smax, 3, N2F_FINITE, d, D),
N2FPU_INSN_DF (neg, 2, 0),
N2FPU_INSN_DF (abs, 2, 0),
N2FPU_INSN_DF (sqrt, 2, 0),
N2FPU_INSN_DF (sin, 2, N2F_UNSAFE),
N2FPU_INSN_DF (cos, 2, N2F_UNSAFE),
N2FPU_INSN_DF (tan, 2, N2F_UNSAFE),
N2FPU_INSN_DF (atan, 2, N2F_UNSAFE),
N2FPU_INSN_DF (exp, 2, N2F_UNSAFE),
N2FPU_INSN_DF (log, 2, N2F_UNSAFE),
/* Double precision compares. */
N2FPU_CMP_DF (eq), N2FPU_CMP_DF (ne),
N2FPU_CMP_DF (lt), N2FPU_CMP_DF (le),
N2FPU_CMP_DF (gt), N2FPU_CMP_DF (ge),
/* Conversion instructions. */
N2FPU_INSN_DEF_BASE (floatis, 2, 0, floatsisf2, (SF, SI)),
N2FPU_INSN_DEF_BASE (floatus, 2, 0, floatunssisf2, (SF, UI)),
N2FPU_INSN_DEF_BASE (floatid, 2, 0, floatsidf2, (DF, SI)),
N2FPU_INSN_DEF_BASE (floatud, 2, 0, floatunssidf2, (DF, UI)),
N2FPU_INSN_DEF_BASE (round, 2, N2F_NO_ERRNO, lroundsfsi2, (SI, SF)),
N2FPU_INSN_DEF_BASE (fixsi, 2, 0, fix_truncsfsi2, (SI, SF)),
N2FPU_INSN_DEF_BASE (fixsu, 2, 0, fixuns_truncsfsi2, (UI, SF)),
N2FPU_INSN_DEF_BASE (fixdi, 2, 0, fix_truncdfsi2, (SI, DF)),
N2FPU_INSN_DEF_BASE (fixdu, 2, 0, fixuns_truncdfsi2, (UI, DF)),
N2FPU_INSN_DEF_BASE (fextsd, 2, 0, extendsfdf2, (DF, SF)),
N2FPU_INSN_DEF_BASE (ftruncds, 2, 0, truncdfsf2, (SF, DF)),
/* X, Y access instructions. */
N2FPU_INSN_DEF_BASE (fwrx, 2, N2F_DFREQ, nios2_fwrx, (VOID, DF)),
N2FPU_INSN_DEF_BASE (fwry, 2, N2F_DFREQ, nios2_fwry, (VOID, SF)),
N2FPU_INSN_DEF_BASE (frdxlo, 1, N2F_DFREQ, nios2_frdxlo, (SF)),
N2FPU_INSN_DEF_BASE (frdxhi, 1, N2F_DFREQ, nios2_frdxhi, (SF)),
N2FPU_INSN_DEF_BASE (frdy, 1, N2F_DFREQ, nios2_frdy, (SF))
};
/* Some macros for ease of access. */
#define N2FPU(code) nios2_fpu_insn[(int) code]
#define N2FPU_ENABLED_P(code) (N2FPU_N(code) >= 0)
#define N2FPU_N(code) (*N2FPU(code).optvar)
#define N2FPU_NAME(code) (N2FPU(code).name)
#define N2FPU_ICODE(code) (N2FPU(code).icode)
#define N2FPU_FTCODE(code) (N2FPU(code).ftcode)
#define N2FPU_FINITE_P(code) (N2FPU(code).flags & N2F_FINITE)
#define N2FPU_UNSAFE_P(code) (N2FPU(code).flags & N2F_UNSAFE)
#define N2FPU_NO_ERRNO_P(code) (N2FPU(code).flags & N2F_NO_ERRNO)
#define N2FPU_DOUBLE_P(code) (N2FPU(code).flags & N2F_DF)
#define N2FPU_DOUBLE_REQUIRED_P(code) (N2FPU(code).flags & N2F_DFREQ)
/* Same as above, but for cases where using only the op part is shorter. */
#define N2FPU_OP(op) N2FPU(n2fpu_ ## op)
#define N2FPU_OP_NAME(op) N2FPU_NAME(n2fpu_ ## op)
#define N2FPU_OP_ENABLED_P(op) N2FPU_ENABLED_P(n2fpu_ ## op)
/* Export the FPU insn enabled predicate to nios2.md. */
bool
nios2_fpu_insn_enabled (enum n2fpu_code code)
{
return N2FPU_ENABLED_P (code);
}
/* Return true if COND comparison for mode MODE is enabled under current
settings. */
static bool
nios2_fpu_compare_enabled (enum rtx_code cond, machine_mode mode)
{
if (mode == SFmode)
switch (cond)
{
case EQ: return N2FPU_OP_ENABLED_P (fcmpeqs);
case NE: return N2FPU_OP_ENABLED_P (fcmpnes);
case GT: return N2FPU_OP_ENABLED_P (fcmpgts);
case GE: return N2FPU_OP_ENABLED_P (fcmpges);
case LT: return N2FPU_OP_ENABLED_P (fcmplts);
case LE: return N2FPU_OP_ENABLED_P (fcmples);
default: break;
}
else if (mode == DFmode)
switch (cond)
{
case EQ: return N2FPU_OP_ENABLED_P (fcmpeqd);
case NE: return N2FPU_OP_ENABLED_P (fcmpned);
case GT: return N2FPU_OP_ENABLED_P (fcmpgtd);
case GE: return N2FPU_OP_ENABLED_P (fcmpged);
case LT: return N2FPU_OP_ENABLED_P (fcmpltd);
case LE: return N2FPU_OP_ENABLED_P (fcmpled);
default: break;
}
return false;
}
/* Stack layout and calling conventions. */
#define NIOS2_STACK_ALIGN(LOC) \
(((LOC) + ((PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT) - 1)) \
& ~((PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT) - 1))
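/* For illustration, assuming PREFERRED_STACK_BOUNDARY is 32 bits
(4 bytes): NIOS2_STACK_ALIGN (5) == 8 and NIOS2_STACK_ALIGN (8) == 8;
the macro rounds LOC up to the next multiple of the boundary. */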
/* Return the bytes needed to compute the frame pointer from the current
stack pointer. */
static int
nios2_compute_frame_layout (void)
{
unsigned int regno;
unsigned int save_mask = 0;
int total_size;
int var_size;
int out_args_size;
int save_reg_size;
int callee_save_reg_size;
if (cfun->machine->initialized)
return cfun->machine->total_size;
/* Calculate space needed for gp registers. */
save_reg_size = 0;
for (regno = 0; regno <= LAST_GP_REG; regno++)
if (prologue_saved_reg_p (regno))
{
save_mask |= 1 << regno;
save_reg_size += 4;
}
/* If we are saving any callee-save register, then assume
push.n/pop.n should be used. Make sure RA is saved, and that the
callee-saved registers form a contiguous range starting at r16. */
if (TARGET_HAS_CDX && save_reg_size != 0)
{
if ((save_mask & (1 << RA_REGNO)) == 0)
{
save_mask |= 1 << RA_REGNO;
save_reg_size += 4;
}
for (regno = 23; regno >= 16; regno--)
if ((save_mask & (1 << regno)) != 0)
{
/* Starting from the highest-numbered callee-saved
register that is used, make sure all regs down
to r16 are saved, to maintain a contiguous range
for push.n/pop.n. */
unsigned int i;
for (i = regno - 1; i >= 16; i--)
if ((save_mask & (1 << i)) == 0)
{
save_mask |= 1 << i;
save_reg_size += 4;
}
break;
}
}
callee_save_reg_size = save_reg_size;
/* If we call eh_return, we need to save the EH data registers. */
if (crtl->calls_eh_return)
{
unsigned i;
unsigned r;
for (i = 0; (r = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
if (!(save_mask & (1 << r)))
{
save_mask |= 1 << r;
save_reg_size += 4;
}
}
cfun->machine->fp_save_offset = 0;
if (save_mask & (1 << HARD_FRAME_POINTER_REGNUM))
{
int fp_save_offset = 0;
for (regno = 0; regno < HARD_FRAME_POINTER_REGNUM; regno++)
if (save_mask & (1 << regno))
fp_save_offset += 4;
cfun->machine->fp_save_offset = fp_save_offset;
}
var_size = NIOS2_STACK_ALIGN (get_frame_size ());
out_args_size = NIOS2_STACK_ALIGN (crtl->outgoing_args_size);
total_size = var_size + out_args_size;
save_reg_size = NIOS2_STACK_ALIGN (save_reg_size);
total_size += save_reg_size;
total_size += NIOS2_STACK_ALIGN (crtl->args.pretend_args_size);
/* Save other computed information. */
cfun->machine->save_mask = save_mask;
cfun->machine->total_size = total_size;
cfun->machine->var_size = var_size;
cfun->machine->args_size = out_args_size;
cfun->machine->save_reg_size = save_reg_size;
cfun->machine->callee_save_reg_size = callee_save_reg_size;
cfun->machine->initialized = reload_completed;
cfun->machine->save_regs_offset = out_args_size + var_size;
return total_size;
}
/* Generate save/restore of register REGNO at SP + OFFSET. Used by the
prologue/epilogue expand routines. */
static void
save_reg (int regno, unsigned offset)
{
rtx reg = gen_rtx_REG (SImode, regno);
rtx addr = plus_constant (Pmode, stack_pointer_rtx, offset, false);
rtx_insn *insn = emit_move_insn (gen_frame_mem (Pmode, addr), reg);
RTX_FRAME_RELATED_P (insn) = 1;
}
static void
restore_reg (int regno, unsigned offset)
{
rtx reg = gen_rtx_REG (SImode, regno);
rtx addr = plus_constant (Pmode, stack_pointer_rtx, offset, false);
rtx_insn *insn = emit_move_insn (reg, gen_frame_mem (Pmode, addr));
/* Tag epilogue unwind note. */
add_reg_note (insn, REG_CFA_RESTORE, reg);
RTX_FRAME_RELATED_P (insn) = 1;
}
/* This routine tests for the base register update SET in load/store
multiple RTL insns, used in pop_operation_p and ldstwm_operation_p. */
static bool
base_reg_adjustment_p (rtx set, rtx *base_reg, rtx *offset)
{
if (GET_CODE (set) == SET
&& REG_P (SET_DEST (set))
&& GET_CODE (SET_SRC (set)) == PLUS
&& REG_P (XEXP (SET_SRC (set), 0))
&& rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
&& CONST_INT_P (XEXP (SET_SRC (set), 1)))
{
*base_reg = XEXP (SET_SRC (set), 0);
*offset = XEXP (SET_SRC (set), 1);
return true;
}
return false;
}
/* Do the CFA note work for push/pop prologue/epilogue instructions. */
static void
nios2_create_cfa_notes (rtx_insn *insn, bool epilogue_p)
{
int i = 0;
rtx base_reg, offset, elt, pat = PATTERN (insn);
if (epilogue_p)
{
elt = XVECEXP (pat, 0, 0);
if (GET_CODE (elt) == RETURN)
i++;
elt = XVECEXP (pat, 0, i);
if (base_reg_adjustment_p (elt, &base_reg, &offset))
{
add_reg_note (insn, REG_CFA_ADJUST_CFA, copy_rtx (elt));
i++;
}
for (; i < XVECLEN (pat, 0); i++)
{
elt = SET_DEST (XVECEXP (pat, 0, i));
gcc_assert (REG_P (elt));
add_reg_note (insn, REG_CFA_RESTORE, elt);
}
}
else
{
/* Tag each of the prologue sets. */
for (i = 0; i < XVECLEN (pat, 0); i++)
RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
}
}
/* Temp regno used inside prologue/epilogue. */
#define TEMP_REG_NUM 8
/* Emit conditional trap for checking stack limit. SIZE is the number of
additional bytes required.
GDB prologue analysis depends on this generating a direct comparison
to the SP register, so the adjustment to add SIZE needs to be done on
the other operand to the comparison. Use TEMP_REG_NUM as a temporary,
if necessary. */
static void
nios2_emit_stack_limit_check (int size)
{
rtx sum = NULL_RTX;
if (GET_CODE (stack_limit_rtx) == SYMBOL_REF)
{
/* This generates a %hiadj/%lo pair with the constant size
add handled by the relocations. */
sum = gen_rtx_REG (Pmode, TEMP_REG_NUM);
emit_move_insn (sum, plus_constant (Pmode, stack_limit_rtx, size));
}
else if (!REG_P (stack_limit_rtx))
sorry ("Unknown form for stack limit expression");
else if (size == 0)
sum = stack_limit_rtx;
else if (SMALL_INT (size))
{
sum = gen_rtx_REG (Pmode, TEMP_REG_NUM);
emit_move_insn (sum, plus_constant (Pmode, stack_limit_rtx, size));
}
else
{
sum = gen_rtx_REG (Pmode, TEMP_REG_NUM);
emit_move_insn (sum, gen_int_mode (size, Pmode));
emit_insn (gen_add2_insn (sum, stack_limit_rtx));
}
emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx, sum),
stack_pointer_rtx, sum, GEN_INT (3)));
}
static rtx_insn *
nios2_emit_add_constant (rtx reg, HOST_WIDE_INT immed)
{
rtx_insn *insn;
if (SMALL_INT (immed))
insn = emit_insn (gen_add2_insn (reg, gen_int_mode (immed, Pmode)));
else
{
rtx tmp = gen_rtx_REG (Pmode, TEMP_REG_NUM);
emit_move_insn (tmp, gen_int_mode (immed, Pmode));
insn = emit_insn (gen_add2_insn (reg, tmp));
}
return insn;
}
static rtx_insn *
nios2_adjust_stack (int sp_adjust, bool epilogue_p)
{
enum reg_note note_kind = REG_NOTE_MAX;
rtx_insn *insn = NULL;
if (sp_adjust)
{
if (SMALL_INT (sp_adjust))
insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
gen_int_mode (sp_adjust, Pmode)));
else
{
rtx tmp = gen_rtx_REG (Pmode, TEMP_REG_NUM);
emit_move_insn (tmp, gen_int_mode (sp_adjust, Pmode));
insn = emit_insn (gen_add2_insn (stack_pointer_rtx, tmp));
/* Attach a note indicating what happened. */
if (!epilogue_p)
note_kind = REG_FRAME_RELATED_EXPR;
}
if (epilogue_p)
note_kind = REG_CFA_ADJUST_CFA;
if (note_kind != REG_NOTE_MAX)
{
rtx cfa_adj = gen_rtx_SET (stack_pointer_rtx,
plus_constant (Pmode, stack_pointer_rtx,
sp_adjust));
add_reg_note (insn, note_kind, cfa_adj);
}
RTX_FRAME_RELATED_P (insn) = 1;
}
return insn;
}
void
nios2_expand_prologue (void)
{
unsigned int regno;
int total_frame_size, save_offset;
int sp_offset; /* offset from base_reg to final stack value. */
int save_regs_base; /* offset from base_reg to register save area. */
rtx_insn *insn;
total_frame_size = nios2_compute_frame_layout ();
if (flag_stack_usage_info)
current_function_static_stack_size = total_frame_size;
/* When R2 CDX push.n/stwm is available, arrange for the stack frame
to be built using them. */
if (TARGET_HAS_CDX
&& (cfun->machine->save_reg_size != 0
|| cfun->machine->uses_anonymous_args))
{
unsigned int regmask = cfun->machine->save_mask;
unsigned int callee_save_regs = regmask & 0xffff0000;
unsigned int caller_save_regs = regmask & 0x0000ffff;
int push_immed = 0;
int pretend_args_size = NIOS2_STACK_ALIGN (crtl->args.pretend_args_size);
rtx stack_mem =
gen_frame_mem (SImode, plus_constant (Pmode, stack_pointer_rtx, -4));
/* Check that there is room for the entire stack frame before doing
any SP adjustments or pushes. */
if (crtl->limit_stack)
nios2_emit_stack_limit_check (total_frame_size);
if (pretend_args_size)
{
if (cfun->machine->uses_anonymous_args)
{
/* Emit a stwm to push a copy of the argument registers onto
the stack for va_arg processing. */
unsigned int r, mask = 0, n = pretend_args_size / 4;
for (r = LAST_ARG_REGNO - n + 1; r <= LAST_ARG_REGNO; r++)
mask |= (1 << r);
insn = emit_insn (nios2_ldst_parallel
(false, false, false, stack_mem,
-pretend_args_size, mask, false));
/* Tag first SP adjustment as frame-related. */
RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
RTX_FRAME_RELATED_P (insn) = 1;
}
else
nios2_adjust_stack (-pretend_args_size, false);
}
if (callee_save_regs)
{
/* Emit a push.n to save registers and optionally allocate
push_immed extra bytes on the stack. */
int sp_adjust;
if (caller_save_regs)
/* Can't allocate extra stack space yet. */
push_immed = 0;
else if (cfun->machine->save_regs_offset <= 60)
/* Stack adjustment fits entirely in the push.n. */
push_immed = cfun->machine->save_regs_offset;
else if (frame_pointer_needed
&& cfun->machine->fp_save_offset == 0)
/* Deferring the entire stack adjustment until later
allows us to use a mov.n instead of a 32-bit addi
instruction to set the frame pointer. */
push_immed = 0;
else
/* Splitting the stack adjustment between the push.n
and an explicit adjustment makes it more likely that
we can use spdeci.n for the explicit part. */
push_immed = 60;
sp_adjust = -(cfun->machine->callee_save_reg_size + push_immed);
insn = emit_insn (nios2_ldst_parallel (false, false, false,
stack_mem, sp_adjust,
callee_save_regs, false));
nios2_create_cfa_notes (insn, false);
RTX_FRAME_RELATED_P (insn) = 1;
}
if (caller_save_regs)
{
/* Emit a stwm to save the EH data regs, r4-r7. */
int caller_save_size = (cfun->machine->save_reg_size
- cfun->machine->callee_save_reg_size);
gcc_assert ((caller_save_regs & ~0xf0) == 0);
insn = emit_insn (nios2_ldst_parallel
(false, false, false, stack_mem,
-caller_save_size, caller_save_regs, false));
nios2_create_cfa_notes (insn, false);
RTX_FRAME_RELATED_P (insn) = 1;
}
save_regs_base = push_immed;
sp_offset = -(cfun->machine->save_regs_offset - push_immed);
}
/* The non-CDX cases decrement the stack pointer, to prepare for individual
register saves to the stack. */
else if (!SMALL_INT (total_frame_size))
{
/* We need an intermediary point, this will point at the spill block. */
nios2_adjust_stack (cfun->machine->save_regs_offset - total_frame_size,
false);
save_regs_base = 0;
sp_offset = -cfun->machine->save_regs_offset;
if (crtl->limit_stack)
nios2_emit_stack_limit_check (cfun->machine->save_regs_offset);
}
else if (total_frame_size)
{
nios2_adjust_stack (-total_frame_size, false);
save_regs_base = cfun->machine->save_regs_offset;
sp_offset = 0;
if (crtl->limit_stack)
nios2_emit_stack_limit_check (0);
}
else
save_regs_base = sp_offset = 0;
/* Save the registers individually in the non-CDX case. */
if (!TARGET_HAS_CDX)
{
save_offset = save_regs_base + cfun->machine->save_reg_size;
for (regno = LAST_GP_REG; regno > 0; regno--)
if (cfun->machine->save_mask & (1 << regno))
{
save_offset -= 4;
save_reg (regno, save_offset);
}
}
/* Set the hard frame pointer. */
if (frame_pointer_needed)
{
int fp_save_offset = save_regs_base + cfun->machine->fp_save_offset;
insn =
(fp_save_offset == 0
? emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx)
: emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
stack_pointer_rtx,
gen_int_mode (fp_save_offset, Pmode))));
RTX_FRAME_RELATED_P (insn) = 1;
}
/* Allocate sp_offset more bytes in the stack frame. */
nios2_adjust_stack (sp_offset, false);
/* Load the PIC register if needed. */
if (crtl->uses_pic_offset_table)
nios2_load_pic_register ();
/* If we are profiling, make sure no instructions are scheduled before
the call to mcount. */
if (crtl->profile)
emit_insn (gen_blockage ());
}
void
nios2_expand_epilogue (bool sibcall_p)
{
rtx_insn *insn;
rtx cfa_adj;
int total_frame_size;
int sp_adjust, save_offset;
unsigned int regno;
if (!sibcall_p && nios2_can_use_return_insn ())
{
emit_jump_insn (gen_return ());
return;
}
emit_insn (gen_blockage ());
total_frame_size = nios2_compute_frame_layout ();
if (frame_pointer_needed)
{
/* Recover the stack pointer. */
insn =
(cfun->machine->fp_save_offset == 0
? emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx)
: emit_insn (gen_add3_insn
(stack_pointer_rtx, hard_frame_pointer_rtx,
gen_int_mode (-cfun->machine->fp_save_offset, Pmode))));
cfa_adj = plus_constant (Pmode, stack_pointer_rtx,
(total_frame_size
- cfun->machine->save_regs_offset));
add_reg_note (insn, REG_CFA_DEF_CFA, cfa_adj);
RTX_FRAME_RELATED_P (insn) = 1;
save_offset = 0;
sp_adjust = total_frame_size - cfun->machine->save_regs_offset;
}
else if (!SMALL_INT (total_frame_size))
{
nios2_adjust_stack (cfun->machine->save_regs_offset, true);
save_offset = 0;
sp_adjust = total_frame_size - cfun->machine->save_regs_offset;
}
else
{
save_offset = cfun->machine->save_regs_offset;
sp_adjust = total_frame_size;
}
if (!TARGET_HAS_CDX)
{
/* Generate individual register restores. */
save_offset += cfun->machine->save_reg_size;
for (regno = LAST_GP_REG; regno > 0; regno--)
if (cfun->machine->save_mask & (1 << regno))
{
save_offset -= 4;
restore_reg (regno, save_offset);
}
nios2_adjust_stack (sp_adjust, true);
}
else if (cfun->machine->save_reg_size == 0)
{
/* Nothing to restore, just recover the stack position. */
nios2_adjust_stack (sp_adjust, true);
}
else
{
/* Emit CDX pop.n/ldwm to restore registers and optionally return. */
unsigned int regmask = cfun->machine->save_mask;
unsigned int callee_save_regs = regmask & 0xffff0000;
unsigned int caller_save_regs = regmask & 0x0000ffff;
int callee_save_size = cfun->machine->callee_save_reg_size;
int caller_save_size = cfun->machine->save_reg_size - callee_save_size;
int pretend_args_size = NIOS2_STACK_ALIGN (crtl->args.pretend_args_size);
bool ret_p = (!pretend_args_size && !crtl->calls_eh_return
&& !sibcall_p);
if (!ret_p || caller_save_size > 0)
sp_adjust = save_offset;
else
sp_adjust = (save_offset > 60 ? save_offset - 60 : 0);
save_offset -= sp_adjust;
nios2_adjust_stack (sp_adjust, true);
if (caller_save_regs)
{
/* Emit a ldwm to restore EH data regs. */
rtx stack_mem = gen_frame_mem (SImode, stack_pointer_rtx);
insn = emit_insn (nios2_ldst_parallel
(true, true, true, stack_mem,
caller_save_size, caller_save_regs, false));
RTX_FRAME_RELATED_P (insn) = 1;
nios2_create_cfa_notes (insn, true);
}
if (callee_save_regs)
{
int sp_adjust = save_offset + callee_save_size;
rtx stack_mem;
if (ret_p)
{
/* Emit a pop.n to restore regs and return. */
stack_mem =
gen_frame_mem (SImode,
gen_rtx_PLUS (Pmode, stack_pointer_rtx,
gen_int_mode (sp_adjust - 4,
Pmode)));
insn =
emit_jump_insn (nios2_ldst_parallel (true, false, false,
stack_mem, sp_adjust,
callee_save_regs, ret_p));
RTX_FRAME_RELATED_P (insn) = 1;
/* No need to attach CFA notes since we cannot step over
a return. */
return;
}
else
{
/* If no return, we have to use the ldwm form. */
stack_mem = gen_frame_mem (SImode, stack_pointer_rtx);
insn =
emit_insn (nios2_ldst_parallel (true, true, true,
stack_mem, sp_adjust,
callee_save_regs, ret_p));
RTX_FRAME_RELATED_P (insn) = 1;
nios2_create_cfa_notes (insn, true);
}
}
if (pretend_args_size)
nios2_adjust_stack (pretend_args_size, true);
}
/* Add in the __builtin_eh_return stack adjustment. */
if (crtl->calls_eh_return)
emit_insn (gen_add2_insn (stack_pointer_rtx, EH_RETURN_STACKADJ_RTX));
if (!sibcall_p)
emit_jump_insn (gen_simple_return ());
}
bool
nios2_expand_return (void)
{
/* If CDX is available, generate a pop.n instruction to do both
the stack pop and return. */
if (TARGET_HAS_CDX)
{
int total_frame_size = nios2_compute_frame_layout ();
int sp_adjust = (cfun->machine->save_regs_offset
+ cfun->machine->callee_save_reg_size);
gcc_assert (sp_adjust == total_frame_size);
if (sp_adjust != 0)
{
rtx mem =
gen_frame_mem (SImode,
plus_constant (Pmode, stack_pointer_rtx,
sp_adjust - 4, false));
rtx_insn *insn =
emit_jump_insn (nios2_ldst_parallel (true, false, false,
mem, sp_adjust,
cfun->machine->save_mask,
true));
RTX_FRAME_RELATED_P (insn) = 1;
/* No need to create CFA notes since we can't step over
a return. */
return true;
}
}
return false;
}
/* Implement RETURN_ADDR_RTX. Note, we do not support moving
back to a previous frame. */
rtx
nios2_get_return_address (int count)
{
if (count != 0)
return const0_rtx;
return get_hard_reg_initial_val (Pmode, RA_REGNO);
}
/* Emit code to change the current function's return address to
ADDRESS. SCRATCH is available as a scratch register, if needed.
ADDRESS and SCRATCH are both word-mode GPRs. */
void
nios2_set_return_address (rtx address, rtx scratch)
{
nios2_compute_frame_layout ();
if (cfun->machine->save_mask & (1 << RA_REGNO))
{
unsigned offset = cfun->machine->save_reg_size - 4;
rtx base;
if (frame_pointer_needed)
base = hard_frame_pointer_rtx;
else
{
base = stack_pointer_rtx;
offset += cfun->machine->save_regs_offset;
if (!SMALL_INT (offset))
{
emit_move_insn (scratch, gen_int_mode (offset, Pmode));
emit_insn (gen_add2_insn (scratch, base));
base = scratch;
offset = 0;
}
}
if (offset)
base = plus_constant (Pmode, base, offset);
emit_move_insn (gen_rtx_MEM (Pmode, base), address);
}
else
emit_move_insn (gen_rtx_REG (Pmode, RA_REGNO), address);
}
/* Implement FUNCTION_PROFILER macro. */
void
nios2_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
{
fprintf (file, "\tmov\tr8, ra\n");
if (flag_pic == 1)
{
fprintf (file, "\tnextpc\tr2\n");
fprintf (file, "\t1: movhi\tr3, %%hiadj(_gp_got - 1b)\n");
fprintf (file, "\taddi\tr3, r3, %%lo(_gp_got - 1b)\n");
fprintf (file, "\tadd\tr2, r2, r3\n");
fprintf (file, "\tldw\tr2, %%call(_mcount)(r2)\n");
fprintf (file, "\tcallr\tr2\n");
}
else if (flag_pic == 2)
{
fprintf (file, "\tnextpc\tr2\n");
fprintf (file, "\t1: movhi\tr3, %%hiadj(_gp_got - 1b)\n");
fprintf (file, "\taddi\tr3, r3, %%lo(_gp_got - 1b)\n");
fprintf (file, "\tadd\tr2, r2, r3\n");
fprintf (file, "\tmovhi\tr3, %%call_hiadj(_mcount)\n");
fprintf (file, "\taddi\tr3, r3, %%call_lo(_mcount)\n");
fprintf (file, "\tadd\tr3, r2, r3\n");
fprintf (file, "\tldw\tr2, 0(r3)\n");
fprintf (file, "\tcallr\tr2\n");
}
else
fprintf (file, "\tcall\t_mcount\n");
fprintf (file, "\tmov\tra, r8\n");
}
/* Dump stack layout. */
static void
nios2_dump_frame_layout (FILE *file)
{
fprintf (file, "\t%s Current Frame Info\n", ASM_COMMENT_START);
fprintf (file, "\t%s total_size = %d\n", ASM_COMMENT_START,
cfun->machine->total_size);
fprintf (file, "\t%s var_size = %d\n", ASM_COMMENT_START,
cfun->machine->var_size);
fprintf (file, "\t%s args_size = %d\n", ASM_COMMENT_START,
cfun->machine->args_size);
fprintf (file, "\t%s save_reg_size = %d\n", ASM_COMMENT_START,
cfun->machine->save_reg_size);
fprintf (file, "\t%s initialized = %d\n", ASM_COMMENT_START,
cfun->machine->initialized);
fprintf (file, "\t%s save_regs_offset = %d\n", ASM_COMMENT_START,
cfun->machine->save_regs_offset);
fprintf (file, "\t%s is_leaf = %d\n", ASM_COMMENT_START,
crtl->is_leaf);
fprintf (file, "\t%s frame_pointer_needed = %d\n", ASM_COMMENT_START,
frame_pointer_needed);
fprintf (file, "\t%s pretend_args_size = %d\n", ASM_COMMENT_START,
crtl->args.pretend_args_size);
}
/* Return true if REGNO should be saved in the prologue. */
static bool
prologue_saved_reg_p (unsigned regno)
{
gcc_assert (GP_REG_P (regno));
if (df_regs_ever_live_p (regno) && !call_used_or_fixed_reg_p (regno))
return true;
if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
return true;
if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
return true;
if (regno == RA_REGNO && df_regs_ever_live_p (RA_REGNO))
return true;
return false;
}
/* Implement TARGET_CAN_ELIMINATE. */
static bool
nios2_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
if (to == STACK_POINTER_REGNUM)
return !frame_pointer_needed;
return true;
}
/* Implement INITIAL_ELIMINATION_OFFSET macro. */
int
nios2_initial_elimination_offset (int from, int to)
{
int offset;
nios2_compute_frame_layout ();
/* Set OFFSET to the offset from the stack pointer. */
switch (from)
{
case FRAME_POINTER_REGNUM:
/* This is the high end of the local variable storage, not the
hard frame pointer. */
offset = cfun->machine->args_size + cfun->machine->var_size;
break;
case ARG_POINTER_REGNUM:
offset = cfun->machine->total_size;
offset -= crtl->args.pretend_args_size;
break;
default:
gcc_unreachable ();
}
/* If we are asked for the frame pointer offset, then adjust OFFSET
by the offset from the frame pointer to the stack pointer. */
if (to == HARD_FRAME_POINTER_REGNUM)
offset -= (cfun->machine->save_regs_offset
+ cfun->machine->fp_save_offset);
return offset;
}
/* Return nonzero if this function is known to have a null epilogue.
This allows the optimizer to omit jumps to jumps if no stack
was created. */
int
nios2_can_use_return_insn (void)
{
int total_frame_size;
if (!reload_completed || crtl->profile)
return 0;
total_frame_size = nios2_compute_frame_layout ();
/* If CDX is available, check if we can return using a
single pop.n instruction. */
if (TARGET_HAS_CDX
&& !frame_pointer_needed
&& cfun->machine->save_regs_offset <= 60
&& (cfun->machine->save_mask & 0x80000000) != 0
&& (cfun->machine->save_mask & 0xffff) == 0
&& crtl->args.pretend_args_size == 0)
return true;
return total_frame_size == 0;
}
/* Check and signal some warnings/errors on FPU insn options. */
static void
nios2_custom_check_insns (void)
{
unsigned int i, j;
bool errors = false;
for (i = 0; i < ARRAY_SIZE (nios2_fpu_insn); i++)
if (N2FPU_ENABLED_P (i) && N2FPU_DOUBLE_P (i))
{
for (j = 0; j < ARRAY_SIZE (nios2_fpu_insn); j++)
if (N2FPU_DOUBLE_REQUIRED_P (j) && ! N2FPU_ENABLED_P (j))
{
error ("switch %<-mcustom-%s%> is required for "
"double-precision floating-point", N2FPU_NAME (j));
errors = true;
}
break;
}
if (errors || custom_code_conflict)
fatal_error (input_location,
"conflicting use of %<-mcustom%> switches, "
"target attributes, "
"and/or %<__builtin_custom_%> functions");
}
static void
nios2_set_fpu_custom_code (enum n2fpu_code code, int n, bool override_p)
{
if (override_p || N2FPU_N (code) == -1)
N2FPU_N (code) = n;
nios2_register_custom_code (n, CCS_FPU, (int) code);
}
/* Type to represent a standard FPU config. */
struct nios2_fpu_config
{
const char *name;
bool set_sp_constants;
int code[n2fpu_code_num];
};
#define NIOS2_FPU_CONFIG_NUM 4
static struct nios2_fpu_config custom_fpu_config[NIOS2_FPU_CONFIG_NUM];
static void
nios2_init_fpu_configs (void)
{
struct nios2_fpu_config* cfg;
int i = 0;
#define NEXT_FPU_CONFIG \
do { \
cfg = &custom_fpu_config[i++]; \
memset (cfg, -1, sizeof (struct nios2_fpu_config));\
} while (0)
NEXT_FPU_CONFIG;
cfg->name = "60-1";
cfg->set_sp_constants = true;
cfg->code[n2fpu_fmuls] = 252;
cfg->code[n2fpu_fadds] = 253;
cfg->code[n2fpu_fsubs] = 254;
NEXT_FPU_CONFIG;
cfg->name = "60-2";
cfg->set_sp_constants = true;
cfg->code[n2fpu_fmuls] = 252;
cfg->code[n2fpu_fadds] = 253;
cfg->code[n2fpu_fsubs] = 254;
cfg->code[n2fpu_fdivs] = 255;
NEXT_FPU_CONFIG;
cfg->name = "72-3";
cfg->set_sp_constants = true;
cfg->code[n2fpu_floatus] = 243;
cfg->code[n2fpu_fixsi] = 244;
cfg->code[n2fpu_floatis] = 245;
cfg->code[n2fpu_fcmpgts] = 246;
cfg->code[n2fpu_fcmples] = 249;
cfg->code[n2fpu_fcmpeqs] = 250;
cfg->code[n2fpu_fcmpnes] = 251;
cfg->code[n2fpu_fmuls] = 252;
cfg->code[n2fpu_fadds] = 253;
cfg->code[n2fpu_fsubs] = 254;
cfg->code[n2fpu_fdivs] = 255;
NEXT_FPU_CONFIG;
cfg->name = "fph2";
cfg->code[n2fpu_fabss] = 224;
cfg->code[n2fpu_fnegs] = 225;
cfg->code[n2fpu_fcmpnes] = 226;
cfg->code[n2fpu_fcmpeqs] = 227;
cfg->code[n2fpu_fcmpges] = 228;
cfg->code[n2fpu_fcmpgts] = 229;
cfg->code[n2fpu_fcmples] = 230;
cfg->code[n2fpu_fcmplts] = 231;
cfg->code[n2fpu_fmaxs] = 232;
cfg->code[n2fpu_fmins] = 233;
cfg->code[n2fpu_round] = 248;
cfg->code[n2fpu_fixsi] = 249;
cfg->code[n2fpu_floatis] = 250;
cfg->code[n2fpu_fsqrts] = 251;
cfg->code[n2fpu_fmuls] = 252;
cfg->code[n2fpu_fadds] = 253;
cfg->code[n2fpu_fsubs] = 254;
cfg->code[n2fpu_fdivs] = 255;
#undef NEXT_FPU_CONFIG
gcc_assert (i == NIOS2_FPU_CONFIG_NUM);
}
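/* For example, -mcustom-fpu-cfg=60-1 selects the "60-1" entry above:
it assigns custom instruction codes 252/253/254 to fmuls/fadds/fsubs
and, because set_sp_constants is true, also turns on
-fsingle-precision-constant (see nios2_handle_custom_fpu_cfg). */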
static struct nios2_fpu_config *
nios2_match_custom_fpu_cfg (const char *cfgname, const char *endp)
{
int i;
for (i = 0; i < NIOS2_FPU_CONFIG_NUM; i++)
{
bool match = !(endp != NULL
? strncmp (custom_fpu_config[i].name, cfgname,
endp - cfgname)
: strcmp (custom_fpu_config[i].name, cfgname));
if (match)
return &custom_fpu_config[i];
}
return NULL;
}
/* Use CFGNAME to look up an FPU config; ENDP, if not NULL, marks the
end of the string. OVERRIDE is true if loaded config codes should
overwrite the current state. */
static void
nios2_handle_custom_fpu_cfg (const char *cfgname, const char *endp,
bool override)
{
struct nios2_fpu_config *cfg = nios2_match_custom_fpu_cfg (cfgname, endp);
if (cfg)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE (nios2_fpu_insn); i++)
if (cfg->code[i] >= 0)
nios2_set_fpu_custom_code ((enum n2fpu_code) i, cfg->code[i],
override);
if (cfg->set_sp_constants)
flag_single_precision_constant = 1;
}
else
warning (0, "ignoring unrecognized switch %<-mcustom-fpu-cfg%> "
"value %<%s%>", cfgname);
/* Guard against errors in the standard configurations. */
nios2_custom_check_insns ();
}
/* Check individual FPU insn options, and register custom code. */
static void
nios2_handle_custom_fpu_insn_option (int fpu_insn_index)
{
int param = N2FPU_N (fpu_insn_index);
if (param >= 0 && param <= 255)
nios2_register_custom_code (param, CCS_FPU, fpu_insn_index);
/* Valid values are 0-255, but also allow -1 so that the
-mno-custom-<opt> switches work. */
else if (param != -1)
error ("switch %<-mcustom-%s%> value %d must be between 0 and 255",
N2FPU_NAME (fpu_insn_index), param);
}
/* Allocate a chunk of memory for per-function machine-dependent data. */
static struct machine_function *
nios2_init_machine_status (void)
{
return ggc_cleared_alloc<machine_function> ();
}
/* Implement TARGET_OPTION_OVERRIDE. */
static void
nios2_option_override (void)
{
unsigned int i;
#ifdef SUBTARGET_OVERRIDE_OPTIONS
SUBTARGET_OVERRIDE_OPTIONS;
#endif
/* Check for unsupported options. */
if (flag_pic && !TARGET_LINUX_ABI)
sorry ("position-independent code requires the Linux ABI");
if (flag_pic && stack_limit_rtx
&& GET_CODE (stack_limit_rtx) == SYMBOL_REF)
sorry ("PIC support for %<-fstack-limit-symbol%>");
/* Function to allocate machine-dependent function status. */
init_machine_status = &nios2_init_machine_status;
nios2_section_threshold
= (OPTION_SET_P (g_switch_value)
? g_switch_value : NIOS2_DEFAULT_GVALUE);
if (nios2_gpopt_option == gpopt_unspecified)
{
/* Default to -mgpopt unless -fpic or -fPIC. */
if (flag_pic)
nios2_gpopt_option = gpopt_none;
else
nios2_gpopt_option = gpopt_local;
}
/* GP-relative and r0-relative addressing don't make sense for PIC. */
if (flag_pic)
{
if (nios2_gpopt_option != gpopt_none)
error ("%<-mgpopt%> not supported with PIC");
if (nios2_gprel_sec)
error ("%<-mgprel-sec=%> not supported with PIC");
if (nios2_r0rel_sec)
error ("%<-mr0rel-sec=%> not supported with PIC");
}
/* Process -mgprel-sec= and -mr0rel-sec=. */
if (nios2_gprel_sec)
{
if (regcomp (&nios2_gprel_sec_regex, nios2_gprel_sec,
REG_EXTENDED | REG_NOSUB))
error ("%<-mgprel-sec=%> argument is not a valid regular expression");
}
if (nios2_r0rel_sec)
{
if (regcomp (&nios2_r0rel_sec_regex, nios2_r0rel_sec,
REG_EXTENDED | REG_NOSUB))
error ("%<-mr0rel-sec=%> argument is not a valid regular expression");
}
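/* As an assumed usage example: a switch such as
-mgprel-sec=\.frequent.* is compiled to a POSIX extended regex here,
and sections whose names match it are then accessed gp-relatively. */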
/* If we don't have mul, we don't have mulx either! */
if (!TARGET_HAS_MUL && TARGET_HAS_MULX)
target_flags &= ~MASK_HAS_MULX;
/* Optional BMX and CDX instructions only make sense for R2. */
if (!TARGET_ARCH_R2)
{
if (TARGET_HAS_BMX)
error ("BMX instructions are only supported with R2 architecture");
if (TARGET_HAS_CDX)
error ("CDX instructions are only supported with R2 architecture");
}
/* R2 is little-endian only. */
if (TARGET_ARCH_R2 && TARGET_BIG_ENDIAN)
error ("R2 architecture is little-endian only");
/* Initialize default FPU configurations. */
nios2_init_fpu_configs ();
/* Set up default handling for floating point custom instructions.
Putting things in this order means that the -mcustom-fpu-cfg=
switch will always be overridden by individual -mcustom-fadds=
switches, regardless of the order in which they were specified
on the command line.
This prioritization of individual -mcustom-<insn>= options over
the -mcustom-fpu-cfg= switch is maintained for compatibility. */
if (nios2_custom_fpu_cfg_string && *nios2_custom_fpu_cfg_string)
nios2_handle_custom_fpu_cfg (nios2_custom_fpu_cfg_string, NULL, false);
/* Handle options for individual FPU insns. */
for (i = 0; i < ARRAY_SIZE (nios2_fpu_insn); i++)
nios2_handle_custom_fpu_insn_option (i);
nios2_custom_check_insns ();
/* Save the initial options in case the user does function specific
options. */
target_option_default_node = target_option_current_node
= build_target_option_node (&global_options, &global_options_set);
}
/* Return true if CST is a constant within range of movi/movui/movhi. */
static bool
nios2_simple_const_p (const_rtx cst)
{
if (!CONST_INT_P (cst))
return false;
HOST_WIDE_INT val = INTVAL (cst);
return SMALL_INT (val) || SMALL_INT_UNSIGNED (val) || UPPER16_INT (val);
}
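/* Illustrative assumption about the range macros used above:
SMALL_INT accepts 16-bit signed values (movi), SMALL_INT_UNSIGNED
16-bit unsigned values (movui), and UPPER16_INT values whose low
16 bits are zero (movhi). */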
/* Compute a (partial) cost for rtx X. Return true if the complete
cost has been computed, and false if subexpressions should be
scanned. In either case, *TOTAL contains the cost result. */
static bool
nios2_rtx_costs (rtx x, machine_mode mode,
int outer_code,
int opno,
int *total, bool speed)
{
int code = GET_CODE (x);
switch (code)
{
case CONST_INT:
if (INTVAL (x) == 0 || nios2_simple_const_p (x))
{
*total = COSTS_N_INSNS (0);
return true;
}
else
{
/* High + lo_sum. */
*total = COSTS_N_INSNS (1);
return true;
}
case LABEL_REF:
case SYMBOL_REF:
case CONST:
case CONST_DOUBLE:
if (gprel_constant_p (x) || r0rel_constant_p (x))
{
*total = COSTS_N_INSNS (1);
return true;
}
else
{
/* High + lo_sum. */
*total = COSTS_N_INSNS (1);
return true;
}
case HIGH:
{
/* This is essentially a constant. */
*total = COSTS_N_INSNS (0);
return true;
}
case LO_SUM:
{
*total = COSTS_N_INSNS (0);
return true;
}
case AND:
{
/* Recognize 'nor' insn pattern. */
if (GET_CODE (XEXP (x, 0)) == NOT
&& GET_CODE (XEXP (x, 1)) == NOT)
{
*total = COSTS_N_INSNS (1);
return true;
}
return false;
}
/* For insns that have an execution latency (3 cycles), don't
penalize by the full amount since we can often schedule
to avoid it. */
case MULT:
{
if (!TARGET_HAS_MUL)
*total = COSTS_N_INSNS (5); /* Guess? */
else if (speed)
*total = COSTS_N_INSNS (2); /* Latency adjustment. */
else
*total = COSTS_N_INSNS (1);
if (TARGET_HAS_MULX && GET_MODE (x) == DImode)
{
enum rtx_code c0 = GET_CODE (XEXP (x, 0));
enum rtx_code c1 = GET_CODE (XEXP (x, 1));
if ((c0 == SIGN_EXTEND && c1 == SIGN_EXTEND)
|| (c0 == ZERO_EXTEND && c1 == ZERO_EXTEND))
/* This is the <mul>sidi3 pattern, which expands into 4 insns,
2 multiplies and 2 moves. */
{
*total = *total * 2 + COSTS_N_INSNS (2);
return true;
}
}
return false;
}
case DIV:
{
if (!TARGET_HAS_DIV)
*total = COSTS_N_INSNS (5); /* Guess? */
else if (speed)
*total = COSTS_N_INSNS (2); /* Latency adjustment. */
else
*total = COSTS_N_INSNS (1);
return false;
}
case ASHIFT:
case ASHIFTRT:
case LSHIFTRT:
case ROTATE:
{
if (!speed)
*total = COSTS_N_INSNS (1);
else
*total = COSTS_N_INSNS (2); /* Latency adjustment. */
return false;
}
case ZERO_EXTRACT:
if (TARGET_HAS_BMX)
{
*total = COSTS_N_INSNS (1);
return true;
}
return false;
case SIGN_EXTEND:
{
if (MEM_P (XEXP (x, 0)))
*total = COSTS_N_INSNS (1);
else
*total = COSTS_N_INSNS (3);
return false;
}
case MEM:
{
rtx addr = XEXP (x, 0);
/* Account for cost of different addressing modes. */
*total = nios2_address_cost (addr, mode, ADDR_SPACE_GENERIC, speed);
if (outer_code == SET && opno == 0)
/* Stores execute in 1 cycle accounted for by
the outer SET. */
;
else if (outer_code == SET || outer_code == SIGN_EXTEND
|| outer_code == ZERO_EXTEND)
/* Latency adjustment. */
{
if (speed)
*total += COSTS_N_INSNS (1);
}
else
/* This is going to have to be split into a load. */
*total += COSTS_N_INSNS (speed ? 2 : 1);
return true;
}
default:
return false;
}
}
/* Implement TARGET_PREFERRED_RELOAD_CLASS. */
static reg_class_t
nios2_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t regclass)
{
return regclass == NO_REGS ? GENERAL_REGS : regclass;
}
/* Emit a call to __tls_get_addr. TI is the argument to this function.
RET is an RTX for the return value location. The entire insn sequence
is returned. */
static GTY(()) rtx nios2_tls_symbol;
static rtx
nios2_call_tls_get_addr (rtx ti)
{
rtx arg = gen_rtx_REG (Pmode, FIRST_ARG_REGNO);
rtx ret = gen_rtx_REG (Pmode, FIRST_RETVAL_REGNO);
rtx fn;
rtx_insn *insn;
if (!nios2_tls_symbol)
nios2_tls_symbol = init_one_libfunc ("__tls_get_addr");
emit_move_insn (arg, ti);
fn = gen_rtx_MEM (QImode, nios2_tls_symbol);
insn = emit_call_insn (gen_call_value (ret, fn, const0_rtx));
RTL_CONST_CALL_P (insn) = 1;
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), ret);
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), arg);
return ret;
}
/* Return true for large offsets requiring hiadj/lo relocation pairs. */
static bool
nios2_large_offset_p (int unspec)
{
gcc_assert (nios2_unspec_reloc_name (unspec) != NULL);
if (flag_pic == 2
/* FIXME: TLS GOT offset relocations will eventually also get this
treatment, after binutils support for those is also completed. */
&& (unspec == UNSPEC_PIC_SYM || unspec == UNSPEC_PIC_CALL_SYM))
return true;
/* 'gotoff' offsets are always hiadj/lo. */
if (unspec == UNSPEC_PIC_GOTOFF_SYM)
return true;
return false;
}
/* Return true for conforming unspec relocations. Also used in
constraints.md and predicates.md. */
bool
nios2_unspec_reloc_p (rtx op)
{
return (GET_CODE (op) == CONST
&& GET_CODE (XEXP (op, 0)) == UNSPEC
&& ! nios2_large_offset_p (XINT (XEXP (op, 0), 1)));
}
static bool
nios2_large_unspec_reloc_p (rtx op)
{
return (GET_CODE (op) == CONST
&& GET_CODE (XEXP (op, 0)) == UNSPEC
&& nios2_large_offset_p (XINT (XEXP (op, 0), 1)));
}
/* Helper to generate unspec constant. */
static rtx
nios2_unspec_offset (rtx loc, int unspec)
{
return gen_rtx_CONST (Pmode, gen_rtx_UNSPEC (Pmode, gen_rtvec (1, loc),
unspec));
}
/* Generate GOT pointer based address with large offset. */
static rtx
nios2_large_got_address (rtx offset, rtx tmp)
{
if (!tmp)
tmp = gen_reg_rtx (Pmode);
emit_move_insn (tmp, offset);
return gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
}
/* Generate a GOT pointer based address. */
static rtx
nios2_got_address (rtx loc, int unspec)
{
rtx offset = nios2_unspec_offset (loc, unspec);
crtl->uses_pic_offset_table = 1;
if (nios2_large_offset_p (unspec))
return force_reg (Pmode, nios2_large_got_address (offset, NULL_RTX));
return gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);
}
/* Generate the code to access LOC, a thread local SYMBOL_REF. The
return value will be a valid address and move_operand (either a REG
or a LO_SUM). */
static rtx
nios2_legitimize_tls_address (rtx loc)
{
rtx tmp, mem, tp;
enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
switch (model)
{
case TLS_MODEL_GLOBAL_DYNAMIC:
tmp = gen_reg_rtx (Pmode);
emit_move_insn (tmp, nios2_got_address (loc, UNSPEC_ADD_TLS_GD));
return nios2_call_tls_get_addr (tmp);
case TLS_MODEL_LOCAL_DYNAMIC:
tmp = gen_reg_rtx (Pmode);
emit_move_insn (tmp, nios2_got_address (loc, UNSPEC_ADD_TLS_LDM));
return gen_rtx_PLUS (Pmode, nios2_call_tls_get_addr (tmp),
nios2_unspec_offset (loc, UNSPEC_ADD_TLS_LDO));
case TLS_MODEL_INITIAL_EXEC:
tmp = gen_reg_rtx (Pmode);
mem = gen_const_mem (Pmode, nios2_got_address (loc, UNSPEC_LOAD_TLS_IE));
emit_move_insn (tmp, mem);
tp = gen_rtx_REG (Pmode, TP_REGNO);
return gen_rtx_PLUS (Pmode, tp, tmp);
case TLS_MODEL_LOCAL_EXEC:
tp = gen_rtx_REG (Pmode, TP_REGNO);
return gen_rtx_PLUS (Pmode, tp,
nios2_unspec_offset (loc, UNSPEC_ADD_TLS_LE));
default:
gcc_unreachable ();
}
}
/* Divide Support
If -O3 is used, we want to output a table lookup for
divides between small numbers (both num and den >= 0
and < 0x10). The overhead of this method in the worst
case is 40 bytes in the text section (10 insns) and
256 bytes in the data section. Additional divides do
not incur additional penalties in the data section.
Code speed is improved for small divides by about 5x
when using this method in the worst case (~9 cycles
vs ~45). And in the worst case divides not within the
table are penalized by about 10% (~5 cycles vs ~45).
However in the typical case the penalty is not as bad
because doing the long divide in only 45 cycles is
quite optimistic.
??? would be nice to have some benchmarks other
than Dhrystone to back this up.
This bit of expansion is to create this instruction
sequence as rtl.
or $8, $4, $5
slli $9, $4, 4
cmpgeui $3, $8, 16
beq $3, $0, .L3
or $10, $9, $5
add $12, $11, divide_table
ldbu $2, 0($12)
br .L1
.L3:
call slow_div
.L1:
# continue here with result in $2
??? Ideally I would like the libcall block to contain all
of this code, but I don't know how to do that. What it
means is that if the divide can be eliminated, it may not
completely disappear.
??? The __divsi3_table label should ideally be moved out
of this block and into a global. If it is placed into the
sdata section we can save even more cycles by doing things
gp relative. */
void
nios2_emit_expensive_div (rtx *operands, machine_mode mode)
{
rtx or_result, shift_left_result;
rtx lookup_value;
rtx_code_label *lab1, *lab3;
rtx_insn *insns;
rtx libfunc;
rtx final_result;
rtx_insn *tmp;
rtx table;
/* It may look a little generic, but only SImode is supported for now. */
gcc_assert (mode == SImode);
libfunc = optab_libfunc (sdiv_optab, SImode);
lab1 = gen_label_rtx ();
lab3 = gen_label_rtx ();
or_result = expand_simple_binop (SImode, IOR,
operands[1], operands[2],
0, 0, OPTAB_LIB_WIDEN);
emit_cmp_and_jump_insns (or_result, GEN_INT (15), GTU, 0,
GET_MODE (or_result), 0, lab3);
JUMP_LABEL (get_last_insn ()) = lab3;
shift_left_result = expand_simple_binop (SImode, ASHIFT,
operands[1], GEN_INT (4),
0, 0, OPTAB_LIB_WIDEN);
lookup_value = expand_simple_binop (SImode, IOR,
shift_left_result, operands[2],
0, 0, OPTAB_LIB_WIDEN);
table = gen_rtx_PLUS (SImode, lookup_value,
gen_rtx_SYMBOL_REF (SImode, "__divsi3_table"));
convert_move (operands[0], gen_rtx_MEM (QImode, table), 1);
tmp = emit_jump_insn (gen_jump (lab1));
JUMP_LABEL (tmp) = lab1;
emit_barrier ();
emit_label (lab3);
LABEL_NUSES (lab3) = 1;
start_sequence ();
final_result = emit_library_call_value (libfunc, NULL_RTX,
LCT_CONST, SImode,
operands[1], SImode,
operands[2], SImode);
insns = get_insns ();
end_sequence ();
emit_libcall_block (insns, operands[0], final_result,
gen_rtx_DIV (SImode, operands[1], operands[2]));
emit_label (lab1);
LABEL_NUSES (lab1) = 1;
}
/* Branches and compares. */
/* Return in *ALT_CODE and *ALT_OP, an alternate equivalent constant
comparison, e.g. >= 1 into > 0. */
static void
nios2_alternate_compare_const (enum rtx_code code, rtx op,
enum rtx_code *alt_code, rtx *alt_op,
machine_mode mode)
{
gcc_assert (CONST_INT_P (op));
HOST_WIDE_INT opval = INTVAL (op);
enum rtx_code scode = signed_condition (code);
bool dec_p = (scode == LT || scode == GE);
if (code == EQ || code == NE)
{
*alt_code = code;
*alt_op = op;
return;
}
*alt_op = (dec_p
? gen_int_mode (opval - 1, mode)
: gen_int_mode (opval + 1, mode));
/* The required conversion between [>,>=] and [<,<=] is captured
by a reverse + swap of condition codes. */
*alt_code = reverse_condition (swap_condition (code));
{
/* Test if the incremented/decremented value crosses the over/underflow
boundary. Supposedly, such boundary cases should already be transformed
into always-true/false or EQ conditions, so use an assertion here. */
unsigned HOST_WIDE_INT alt_opval = INTVAL (*alt_op);
if (code == scode)
alt_opval ^= (1 << (GET_MODE_BITSIZE (mode) - 1));
alt_opval &= GET_MODE_MASK (mode);
gcc_assert (dec_p ? alt_opval != GET_MODE_MASK (mode) : alt_opval != 0);
}
}
/* Return true if the constant comparison is supported by nios2. */
static bool
nios2_valid_compare_const_p (enum rtx_code code, rtx op)
{
gcc_assert (CONST_INT_P (op));
switch (code)
{
case EQ: case NE: case GE: case LT:
return SMALL_INT (INTVAL (op));
case GEU: case LTU:
return SMALL_INT_UNSIGNED (INTVAL (op));
default:
return false;
}
}
/* Checks if the FPU comparison in *CMP, *OP1, and *OP2 can be supported in
the current configuration. Perform modifications if MODIFY_P is true.
Returns true if FPU compare can be done. */
bool
nios2_validate_fpu_compare (machine_mode mode, rtx *cmp, rtx *op1, rtx *op2,
bool modify_p)
{
bool rev_p = false;
enum rtx_code code = GET_CODE (*cmp);
if (!nios2_fpu_compare_enabled (code, mode))
{
code = swap_condition (code);
if (nios2_fpu_compare_enabled (code, mode))
rev_p = true;
else
return false;
}
if (modify_p)
{
if (rev_p)
{
rtx tmp = *op1;
*op1 = *op2;
*op2 = tmp;
}
*op1 = force_reg (mode, *op1);
*op2 = force_reg (mode, *op2);
*cmp = gen_rtx_fmt_ee (code, mode, *op1, *op2);
}
return true;
}
/* Checks and modifies the comparison in *CMP, *OP1, and *OP2 into valid
nios2-supported form. Returns true on success. */
bool
nios2_validate_compare (machine_mode mode, rtx *cmp, rtx *op1, rtx *op2)
{
enum rtx_code code = GET_CODE (*cmp);
enum rtx_code alt_code;
rtx alt_op2;
if (GET_MODE_CLASS (mode) == MODE_FLOAT)
return nios2_validate_fpu_compare (mode, cmp, op1, op2, true);
if (CONST_INT_P (*op2) && *op2 != const0_rtx)
{
/* Create alternate constant compare. */
nios2_alternate_compare_const (code, *op2, &alt_code, &alt_op2, mode);
/* If the alternate op2 is zero (0), we can use it directly, possibly
swapping the compare code. */
if (alt_op2 == const0_rtx)
{
code = alt_code;
*op2 = alt_op2;
goto check_rebuild_cmp;
}
/* Check if either constant compare can be used. */
if (nios2_valid_compare_const_p (code, *op2))
return true;
else if (nios2_valid_compare_const_p (alt_code, alt_op2))
{
code = alt_code;
*op2 = alt_op2;
goto rebuild_cmp;
}
/* We have to force op2 into a register now. Try to pick one
with a lower cost. */
if (! nios2_simple_const_p (*op2)
&& nios2_simple_const_p (alt_op2))
{
code = alt_code;
*op2 = alt_op2;
}
*op2 = force_reg (mode, *op2);
}
else if (!reg_or_0_operand (*op2, mode))
*op2 = force_reg (mode, *op2);
check_rebuild_cmp:
if (code == GT || code == GTU || code == LE || code == LEU)
{
rtx t = *op1; *op1 = *op2; *op2 = t;
code = swap_condition (code);
}
rebuild_cmp:
*cmp = gen_rtx_fmt_ee (code, mode, *op1, *op2);
return true;
}
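/* Worked example: (GTU x 5) has no direct nios2 compare form;
nios2_alternate_compare_const rewrites it as (GEU x 6), which
SMALL_INT_UNSIGNED accepts, so the compare is rebuilt for cmpgeui.
The check_rebuild_cmp code above likewise swaps operands for the
unsupported GT/GTU/LE/LEU shapes. */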
/* Addressing modes and constants. */
/* Symbol references and other 32-bit constants are split into
high/lo_sum pairs during the split1 pass. After that, they are not
considered legitimate addresses.
This function returns true if in a pre-split context where these
constants are allowed. */
static bool
nios2_large_constant_allowed (void)
{
/* The reload_completed check is for the benefit of
nios2_asm_output_mi_thunk and perhaps other places that try to
emulate a post-reload pass. */
return !(cfun->curr_properties & PROP_rtl_split_insns) && !reload_completed;
}
/* Return true if X is constant expression with a reference to an
"ordinary" symbol; not GOT-relative, not GP-relative, not TLS. */
static bool
nios2_symbolic_constant_p (rtx x)
{
rtx base, offset;
if (flag_pic)
return false;
if (GET_CODE (x) == LABEL_REF)
return true;
else if (CONSTANT_P (x))
{
split_const (x, &base, &offset);
return (SYMBOL_REF_P (base)
&& !SYMBOL_REF_TLS_MODEL (base)
&& !gprel_constant_p (base)
&& !r0rel_constant_p (base)
&& SMALL_INT (INTVAL (offset)));
}
return false;
}
/* Return true if X is an expression of the form
(PLUS reg large_constant). */
static bool
nios2_plus_large_constant_p (rtx x)
{
return (GET_CODE (x) == PLUS
&& REG_P (XEXP (x, 0))
&& nios2_large_constant_p (XEXP (x, 1)));
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P. */
static bool
nios2_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
rtx base, offset;
split_const (x, &base, &offset);
return GET_CODE (base) != SYMBOL_REF || !SYMBOL_REF_TLS_MODEL (base);
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
static bool
nios2_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
return nios2_legitimate_constant_p (mode, x) == false;
}
/* Return true if register REGNO is a valid base register.
STRICT_P is true if REG_OK_STRICT is in effect. */
bool
nios2_regno_ok_for_base_p (int regno, bool strict_p)
{
if (!HARD_REGISTER_NUM_P (regno))
{
if (!strict_p)
return true;
if (!reg_renumber)
return false;
regno = reg_renumber[regno];
}
/* The fake registers will be eliminated to either the stack or
hard frame pointer, both of which are usually valid base registers.
Reload deals with the cases where the eliminated form isn't valid. */
return (GP_REG_P (regno)
|| regno == FRAME_POINTER_REGNUM
|| regno == ARG_POINTER_REGNUM);
}
/* Return true if OFFSET is permitted in a load/store address expression.
Normally any 16-bit value is permitted, but on R2 if we may be emitting
the IO forms of these instructions we must restrict the offset to fit
in a 12-bit field instead. */
static bool
nios2_valid_addr_offset_p (rtx offset)
{
return (CONST_INT_P (offset)
&& ((TARGET_ARCH_R2 && (TARGET_BYPASS_CACHE
|| TARGET_BYPASS_CACHE_VOLATILE))
? SMALL_INT12 (INTVAL (offset))
: SMALL_INT (INTVAL (offset))));
}
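/* E.g., with -mbypass-cache on R2, nios2_valid_addr_offset_p accepts
an offset of 2047, which still fits the 12-bit immediate field of the
ldwio/stwio encodings, but rejects 2048, even though it would fit the
ordinary 16-bit forms.  */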
/* Return true if the address expression formed by BASE + OFFSET is
valid. */
static bool
nios2_valid_addr_expr_p (rtx base, rtx offset, bool strict_p)
{
if (!strict_p && GET_CODE (base) == SUBREG)
base = SUBREG_REG (base);
return (REG_P (base)
&& nios2_regno_ok_for_base_p (REGNO (base), strict_p)
&& (offset == NULL_RTX
|| nios2_valid_addr_offset_p (offset)
|| (nios2_large_constant_allowed ()
&& nios2_symbolic_constant_p (offset))
|| nios2_unspec_reloc_p (offset)));
}
/* Implement TARGET_LEGITIMATE_ADDRESS_P. */
static bool
nios2_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
rtx operand, bool strict_p)
{
switch (GET_CODE (operand))
{
/* Direct. */
case SYMBOL_REF:
if (SYMBOL_REF_TLS_MODEL (operand))
return false;
/* Else, fall through. */
case CONST:
if (gprel_constant_p (operand) || r0rel_constant_p (operand))
return true;
/* Else, fall through. */
case LABEL_REF:
if (nios2_large_constant_allowed ()
&& nios2_symbolic_constant_p (operand))
return true;
return false;
case CONST_INT:
if (r0rel_constant_p (operand))
return true;
return nios2_large_constant_allowed ();
case CONST_DOUBLE:
return false;
/* Register indirect. */
case REG:
return nios2_regno_ok_for_base_p (REGNO (operand), strict_p);
/* Register indirect with displacement. */
case PLUS:
{
rtx op0 = XEXP (operand, 0);
rtx op1 = XEXP (operand, 1);
if (nios2_valid_addr_expr_p (op0, op1, strict_p)
|| nios2_valid_addr_expr_p (op1, op0, strict_p))
return true;
}
break;
/* %lo(constant)(reg)
This requires a 16-bit relocation and isn't valid with R2
io-variant load/stores. */
case LO_SUM:
if (TARGET_ARCH_R2
&& (TARGET_BYPASS_CACHE || TARGET_BYPASS_CACHE_VOLATILE))
return false;
else
{
rtx op0 = XEXP (operand, 0);
rtx op1 = XEXP (operand, 1);
return (REG_P (op0)
&& nios2_regno_ok_for_base_p (REGNO (op0), strict_p)
&& nios2_large_constant_p (op1));
}
default:
break;
}
return false;
}
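/* As examples of what nios2_legitimate_address_p accepts: (reg r4)
and (plus (reg r4) (const_int 100)) are always legitimate;
(plus (reg r4) (symbol_ref "x")) is accepted only while
nios2_large_constant_allowed () holds; and (lo_sum (reg r4)
(symbol_ref "x")), the post-split form, is rejected when the R2
io-variant load/stores are in effect.  */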
/* Implement TARGET_ADDRESS_COST.
Experimentation has shown that we get better code by penalizing the
(plus reg symbolic_constant) and (plus reg (const ...)) forms
but giving (plus reg symbol_ref) address modes the same cost as those
that don't require splitting. Also, from a theoretical point of view:
- This is in line with the recommendation in the GCC internals
documentation to make address forms involving multiple
registers more expensive than single-register forms.
- OTOH it still encourages fwprop1 to propagate constants into
address expressions more aggressively.
- We should discourage splitting (symbol + offset) into hi/lo pairs
to allow CSE'ing the symbol when it's used with more than one offset,
but not so heavily as to avoid this addressing mode at all. */
static int
nios2_address_cost (rtx address,
machine_mode mode ATTRIBUTE_UNUSED,
addr_space_t as ATTRIBUTE_UNUSED,
bool speed ATTRIBUTE_UNUSED)
{
if (nios2_plus_large_constant_p (address))
return COSTS_N_INSNS (1);
if (nios2_large_constant_p (address))
{
if (GET_CODE (address) == CONST)
return COSTS_N_INSNS (1);
else
return COSTS_N_INSNS (0);
}
return COSTS_N_INSNS (0);
}
/* Return true if X is a MEM whose address expression involves a large (32-bit)
constant. */
bool
nios2_large_constant_memory_operand_p (rtx x)
{
rtx addr;
if (GET_CODE (x) != MEM)
return false;
addr = XEXP (x, 0);
return (nios2_large_constant_p (addr)
|| nios2_plus_large_constant_p (addr));
}
/* Return true if X is something that needs to be split into a
high/lo_sum pair. */
bool
nios2_large_constant_p (rtx x)
{
return (nios2_symbolic_constant_p (x)
|| nios2_large_unspec_reloc_p (x)
|| (CONST_INT_P (x) && !SMALL_INT (INTVAL (x))));
}
/* Given an RTX X that satisfies nios2_large_constant_p, split it into
high and lo_sum parts using TEMP as a scratch register. Emit the high
instruction and return the lo_sum expression.
Also handle special cases involving constant integers. */
rtx
nios2_split_large_constant (rtx x, rtx temp)
{
if (CONST_INT_P (x))
{
HOST_WIDE_INT val = INTVAL (x);
if (SMALL_INT (val))
return x;
else if (SMALL_INT_UNSIGNED (val) || UPPER16_INT (val))
{
emit_move_insn (temp, x);
return temp;
}
else
{
HOST_WIDE_INT high = (val + 0x8000) & ~0xffff;
HOST_WIDE_INT low = val - high;
emit_move_insn (temp, gen_int_mode (high, Pmode));
return gen_rtx_PLUS (Pmode, temp, gen_int_mode (low, Pmode));
}
}
emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (Pmode, copy_rtx (x))));
return gen_rtx_LO_SUM (Pmode, temp, copy_rtx (x));
}
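/* As a worked example of the CONST_INT case in
nios2_split_large_constant: 0x1234a678 is not a 16-bit signed or
unsigned value and its low half is nonzero, so it splits into
temp <- (const_int 0x12350000)
(plus temp (const_int -0x5988))
where rounding the high part by 0x8000 guarantees that the remainder
fits in a signed 16-bit immediate
(0x12350000 - 0x5988 == 0x1234a678).  */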
/* Split an RTX of the form
(plus op0 op1)
where op1 is a large constant into
(set temp (high op1))
(set temp (plus op0 temp))
(lo_sum temp op1)
returning the lo_sum expression as the value. */
static rtx
nios2_split_plus_large_constant (rtx op0, rtx op1)
{
rtx temp = gen_reg_rtx (Pmode);
op0 = force_reg (Pmode, op0);
emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (Pmode, copy_rtx (op1))));
emit_insn (gen_rtx_SET (temp, gen_rtx_PLUS (Pmode, op0, temp)));
return gen_rtx_LO_SUM (Pmode, temp, copy_rtx (op1));
}
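/* The sequence emitted by nios2_split_plus_large_constant for, say,
OP0 = r4 and OP1 = (symbol_ref "x") looks roughly like
movhi  tmp, %hiadj(x)
add    tmp, r4, tmp
with the returned (lo_sum tmp x) later printed as %lo(x)(tmp) when it
is used as a memory address.  */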
/* Given a MEM OP with an address that includes a splittable symbol or
other large constant, emit some instructions to do the split and
return a new MEM. */
rtx
nios2_split_large_constant_memory_operand (rtx op)
{
rtx addr = XEXP (op, 0);
if (nios2_large_constant_p (addr))
addr = nios2_split_large_constant (addr, gen_reg_rtx (Pmode));
else if (nios2_plus_large_constant_p (addr))
addr = nios2_split_plus_large_constant (XEXP (addr, 0), XEXP (addr, 1));
else
gcc_unreachable ();
return replace_equiv_address (op, addr, false);
}
/* Return true if SECTION is a small section name. */
static bool
nios2_small_section_name_p (const char *section)
{
return (strcmp (section, ".sbss") == 0
|| startswith (section, ".sbss.")
|| strcmp (section, ".sdata") == 0
|| startswith (section, ".sdata.")
|| (nios2_gprel_sec
&& regexec (&nios2_gprel_sec_regex, section, 0, NULL, 0) == 0));
}
/* Return true if SECTION is a r0-relative section name. */
static bool
nios2_r0rel_section_name_p (const char *section)
{
return (nios2_r0rel_sec
&& regexec (&nios2_r0rel_sec_regex, section, 0, NULL, 0) == 0);
}
/* Return true if EXP should be placed in the small data section. */
static bool
nios2_in_small_data_p (const_tree exp)
{
/* We want to merge strings, so we never consider them small data. */
if (TREE_CODE (exp) == STRING_CST)
return false;
if (TREE_CODE (exp) == VAR_DECL)
{
if (DECL_SECTION_NAME (exp))
{
const char *section = DECL_SECTION_NAME (exp);
if (nios2_small_section_name_p (section))
return true;
}
else if (flexible_array_type_p (TREE_TYPE (exp))
&& (!TREE_PUBLIC (exp) || DECL_EXTERNAL (exp)))
{
/* We really should not consider any objects of any flexibly-sized
type to be small data, but pre-GCC 10 did not test
for this and just fell through to the next case. Thus older
code compiled with -mgpopt=global could contain GP-relative
accesses to objects defined in this compilation unit with
external linkage. We retain the possible small-data treatment
of such definitions for backward ABI compatibility, but
no longer generate GP-relative accesses for external
references (so that the ABI could be changed in the future
with less potential impact), or for objects with internal
linkage. */
return false;
}
else
{
HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
/* If this is an incomplete type with size 0, then we can't put it
in sdata because it might be too big when completed. */
if (size > 0
&& (unsigned HOST_WIDE_INT) size <= nios2_section_threshold)
return true;
}
}
return false;
}
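/* E.g., with a -G threshold of 8 (the usual default),
nios2_in_small_data_p accepts a 4-byte "static int counter;" as a
small-data candidate, while a 100-byte array falls through to the
ordinary data sections.  */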
/* Return true if symbol is in small data section. */
static bool
nios2_symbol_ref_in_small_data_p (rtx sym)
{
tree decl;
gcc_assert (GET_CODE (sym) == SYMBOL_REF);
decl = SYMBOL_REF_DECL (sym);
/* TLS variables are not accessed through the GP. */
if (SYMBOL_REF_TLS_MODEL (sym) != 0)
return false;
/* On Nios II R2, there is no GP-relative relocation that can be
used with "io" instructions. So, if we are implicitly generating
those instructions, we cannot emit GP-relative accesses. */
if (TARGET_ARCH_R2
&& (TARGET_BYPASS_CACHE || TARGET_BYPASS_CACHE_VOLATILE))
return false;
/* If the user has explicitly placed the symbol in a small data section
via an attribute, generate gp-relative addressing even if the symbol
is external, weak, or larger than we'd automatically put in the
small data section. OTOH, if the symbol is located in some
non-small-data section, we can't use gp-relative accesses on it
unless the user has requested gpopt_data or gpopt_all. */
switch (nios2_gpopt_option)
{
case gpopt_none:
/* Don't generate a gp-relative addressing mode if that's been
disabled. */
return false;
case gpopt_local:
/* Use GP-relative addressing for small data symbols that are
not external or weak or uninitialized common, plus any symbols
that have explicitly been placed in a small data section. */
if (decl && DECL_SECTION_NAME (decl))
return nios2_small_section_name_p (DECL_SECTION_NAME (decl));
return (SYMBOL_REF_SMALL_P (sym)
&& !SYMBOL_REF_EXTERNAL_P (sym)
&& !(decl && DECL_WEAK (decl))
&& !(decl && DECL_COMMON (decl)
&& (DECL_INITIAL (decl) == NULL
|| (!in_lto_p
&& DECL_INITIAL (decl) == error_mark_node))));
case gpopt_global:
/* Use GP-relative addressing for small data symbols, even if
they are external or weak. Note that SYMBOL_REF_SMALL_P
is also true of symbols that have explicitly been placed
in a small data section. */
return SYMBOL_REF_SMALL_P (sym);
case gpopt_data:
/* Use GP-relative addressing for all data symbols regardless
of the object size, but not for code symbols. This option
is equivalent to the user asserting that the entire data
section is accessible from the GP. */
return !SYMBOL_REF_FUNCTION_P (sym);
case gpopt_all:
/* Use GP-relative addressing for everything, including code.
Effectively, the user has asserted that the entire program
fits within the 64K range of the GP offset. */
return true;
default:
/* We shouldn't get here. */
return false;
}
}
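/* When nios2_symbol_ref_in_small_data_p returns true, the reference
is emitted as a single GP-relative access, e.g.
ldw  r2, %gprel(var)(gp)
rather than a %hiadj/%lo pair.  */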
/* Likewise for r0-relative addressing. */
static bool
nios2_symbol_ref_in_r0rel_data_p (rtx sym)
{
tree decl;
gcc_assert (GET_CODE (sym) == SYMBOL_REF);
decl = SYMBOL_REF_DECL (sym);
/* TLS variables are not accessed through r0. */
if (SYMBOL_REF_TLS_MODEL (sym) != 0)
return false;
/* On Nios II R2, there is no r0-relative relocation that can be
used with "io" instructions. So, if we are implicitly generating
those instructions, we cannot emit r0-relative accesses. */
if (TARGET_ARCH_R2
&& (TARGET_BYPASS_CACHE || TARGET_BYPASS_CACHE_VOLATILE))
return false;
/* If the user has explicitly placed the symbol in a r0rel section
via an attribute, generate r0-relative addressing. */
if (decl && DECL_SECTION_NAME (decl))
return nios2_r0rel_section_name_p (DECL_SECTION_NAME (decl));
return false;
}
/* Implement TARGET_SECTION_TYPE_FLAGS. */
static unsigned int
nios2_section_type_flags (tree decl, const char *name, int reloc)
{
unsigned int flags;
flags = default_section_type_flags (decl, name, reloc);
if (nios2_small_section_name_p (name))
flags |= SECTION_SMALL;
return flags;
}
/* Return true if SYMBOL_REF X binds locally. */
static bool
nios2_symbol_binds_local_p (const_rtx x)
{
return (SYMBOL_REF_DECL (x)
? targetm.binds_local_p (SYMBOL_REF_DECL (x))
: SYMBOL_REF_LOCAL_P (x));
}
/* Position independent code related. */
/* Emit code to load the PIC register. */
static void
nios2_load_pic_register (void)
{
rtx tmp = gen_rtx_REG (Pmode, TEMP_REG_NUM);
emit_insn (gen_load_got_register (pic_offset_table_rtx, tmp));
emit_insn (gen_add3_insn (pic_offset_table_rtx, pic_offset_table_rtx, tmp));
}
/* Generate a PIC address as a MEM rtx. */
static rtx
nios2_load_pic_address (rtx sym, int unspec, rtx tmp)
{
if (flag_pic == 2
&& GET_CODE (sym) == SYMBOL_REF
&& nios2_symbol_binds_local_p (sym))
/* Under -fPIC, generate a GOTOFF address for local symbols. */
{
rtx offset = nios2_unspec_offset (sym, UNSPEC_PIC_GOTOFF_SYM);
crtl->uses_pic_offset_table = 1;
return nios2_large_got_address (offset, tmp);
}
if (unspec == UNSPEC_PIC_CALL_SYM)
return gen_rtx_MEM (Pmode, nios2_got_address (sym, unspec));
else
return gen_const_mem (Pmode, nios2_got_address (sym, unspec));
}
/* Nonzero if the constant value X is a legitimate general operand
when generating PIC code. It is given that flag_pic is on and
that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
bool
nios2_legitimate_pic_operand_p (rtx x)
{
if (nios2_large_unspec_reloc_p (x))
return true;
return ! (GET_CODE (x) == SYMBOL_REF
|| GET_CODE (x) == LABEL_REF || GET_CODE (x) == CONST);
}
/* Return TRUE if X is a thread-local symbol. */
static bool
nios2_tls_symbol_p (rtx x)
{
return (targetm.have_tls && GET_CODE (x) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (x) != 0);
}
/* Legitimize addresses that are CONSTANT_P expressions. */
static rtx
nios2_legitimize_constant_address (rtx addr)
{
rtx base, offset;
split_const (addr, &base, &offset);
if (nios2_tls_symbol_p (base))
base = nios2_legitimize_tls_address (base);
else if (flag_pic)
base = nios2_load_pic_address (base, UNSPEC_PIC_SYM, NULL_RTX);
else if (!nios2_large_constant_allowed ()
&& nios2_symbolic_constant_p (addr))
return nios2_split_large_constant (addr, gen_reg_rtx (Pmode));
else if (CONST_INT_P (addr))
{
HOST_WIDE_INT val = INTVAL (addr);
if (SMALL_INT (val))
/* Use r0-relative addressing. */
return addr;
else if (!nios2_large_constant_allowed ())
/* Split into high/lo pair. */
return nios2_split_large_constant (addr, gen_reg_rtx (Pmode));
}
else
return addr;
if (offset != const0_rtx)
{
gcc_assert (can_create_pseudo_p ());
return gen_rtx_PLUS (Pmode, force_reg (Pmode, base),
(CONST_INT_P (offset)
? (SMALL_INT (INTVAL (offset))
? offset : force_reg (Pmode, offset))
: offset));
}
return base;
}
/* Implement TARGET_LEGITIMIZE_ADDRESS. */
static rtx
nios2_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
machine_mode mode ATTRIBUTE_UNUSED)
{
rtx op0, op1;
if (CONSTANT_P (x))
return nios2_legitimize_constant_address (x);
/* Remaining cases all involve something + a constant. */
if (GET_CODE (x) != PLUS)
return x;
op0 = XEXP (x, 0);
op1 = XEXP (x, 1);
/* Target-independent code turns (exp + constant) into plain
register indirect. Although subsequent optimization passes will
eventually sort that out, ivopts uses the unoptimized form for
computing its cost model, so we get better results by generating
the correct form from the start. */
if (nios2_valid_addr_offset_p (op1))
return gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), copy_rtx (op1));
/* We may need to split symbolic constants now. */
else if (nios2_symbolic_constant_p (op1))
{
if (nios2_large_constant_allowed ())
return gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), copy_rtx (op1));
else
return nios2_split_plus_large_constant (op0, op1);
}
/* For the TLS LE (Local Exec) model, the compiler may try to
combine constant offsets with unspec relocs, creating address RTXs
looking like this:
(plus:SI (reg:SI 23 r23)
(const:SI
(plus:SI
(unspec:SI [(symbol_ref:SI ("var"))] UNSPEC_ADD_TLS_LE)
(const_int 48 [0x30]))))
This usually happens when 'var' is a thread-local struct variable,
and accessing a field in 'var' introduces the constant addend.
We typically want this combining, so transform the above into this
form, which is allowed:
(plus:SI (reg:SI 23 r23)
(const:SI
(unspec:SI
[(const:SI
(plus:SI (symbol_ref:SI ("var"))
(const_int 48 [0x30])))] UNSPEC_ADD_TLS_LE)))
This is output as '%tls_le(var+48)(r23)' in assembly. */
else if (GET_CODE (op1) == CONST)
{
rtx unspec, offset;
split_const (op1, &unspec, &offset);
if (GET_CODE (unspec) == UNSPEC
&& !nios2_large_offset_p (XINT (unspec, 1))
&& offset != const0_rtx)
{
rtx reg = force_reg (Pmode, op0);
unspec = copy_rtx (unspec);
XVECEXP (unspec, 0, 0)
= plus_constant (Pmode, XVECEXP (unspec, 0, 0), INTVAL (offset));
return gen_rtx_PLUS (Pmode, reg, gen_rtx_CONST (Pmode, unspec));
}
}
return x;
}
static rtx
nios2_delegitimize_address (rtx x)
{
x = delegitimize_mem_from_attrs (x);
if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
{
switch (XINT (XEXP (x, 0), 1))
{
case UNSPEC_PIC_SYM:
case UNSPEC_PIC_CALL_SYM:
case UNSPEC_PIC_GOTOFF_SYM:
case UNSPEC_ADD_TLS_GD:
case UNSPEC_ADD_TLS_LDM:
case UNSPEC_LOAD_TLS_IE:
case UNSPEC_ADD_TLS_LE:
x = XVECEXP (XEXP (x, 0), 0, 0);
gcc_assert (CONSTANT_P (x));
break;
}
}
return x;
}
/* Main expander function for RTL moves. */
bool
nios2_emit_move_sequence (rtx *operands, machine_mode mode)
{
rtx to = operands[0];
rtx from = operands[1];
if (!register_operand (to, mode) && !reg_or_0_operand (from, mode))
{
gcc_assert (can_create_pseudo_p ());
from = copy_to_mode_reg (mode, from);
}
if (CONSTANT_P (from))
{
if (CONST_INT_P (from))
{
if (!SMALL_INT (INTVAL (from))
&& !SMALL_INT_UNSIGNED (INTVAL (from))
&& !UPPER16_INT (INTVAL (from)))
{
HOST_WIDE_INT high = (INTVAL (from) + 0x8000) & ~0xffff;
HOST_WIDE_INT low = INTVAL (from) & 0xffff;
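/* gen_int_mode sign-extends LOW as a 16-bit value, and the rounding
of HIGH above guarantees that HIGH + sext (LOW) reconstructs the
original constant exactly.  */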
emit_move_insn (to, gen_int_mode (high, SImode));
emit_insn (gen_add2_insn (to, gen_int_mode (low, HImode)));
set_unique_reg_note (get_last_insn (), REG_EQUAL,
copy_rtx (from));
return true;
}
}
else if (gprel_constant_p (from) || r0rel_constant_p (from))
/* Handled directly by movsi_internal as gp + offset
or r0 + offset. */
;
else if (nios2_large_constant_p (from))
/* This case covers either a regular symbol reference or an UNSPEC
representing a 32-bit offset. We split the former
only conditionally and the latter always. */
{
if (!nios2_large_constant_allowed ()
|| nios2_large_unspec_reloc_p (from))
{
rtx lo = nios2_split_large_constant (from, to);
emit_insn (gen_rtx_SET (to, lo));
set_unique_reg_note (get_last_insn (), REG_EQUAL,
copy_rtx (operands[1]));
return true;
}
}
else
/* This is a TLS or PIC symbol. */
{
from = nios2_legitimize_constant_address (from);
if (CONSTANT_P (from))
{
emit_insn (gen_rtx_SET (to,
gen_rtx_HIGH (Pmode, copy_rtx (from))));
emit_insn (gen_rtx_SET (to, gen_rtx_LO_SUM (Pmode, to, from)));
set_unique_reg_note (get_last_insn (), REG_EQUAL,
copy_rtx (operands[1]));
return true;
}
}
}
operands[0] = to;
operands[1] = from;
return false;
}
/* The function with address *CALL_OP is being called. If the address
needs to be loaded from the GOT, emit the instruction to do so and
update *CALL_OP to point to the rtx for the loaded value.
If REG != NULL_RTX, it is used as the target/scratch register in the
GOT address calculation. */
void
nios2_adjust_call_address (rtx *call_op, rtx reg)
{
if (MEM_P (*call_op))
call_op = &XEXP (*call_op, 0);
rtx addr = *call_op;
if (flag_pic && CONSTANT_P (addr))
{
rtx tmp = reg;
if (!reg)
reg = gen_reg_rtx (Pmode);
addr = nios2_load_pic_address (addr, UNSPEC_PIC_CALL_SYM, tmp);
emit_insn (gen_rtx_SET (reg, addr));
*call_op = reg;
}
}
/* Output assembly language related definitions. */
/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
static bool
nios2_print_operand_punct_valid_p (unsigned char code)
{
return (code == '.' || code == '!');
}
/* Print the operand OP to file stream FILE modified by LETTER.
LETTER can be one of:
i: print i/hi/ui suffixes (used for mov instruction variants),
when OP is the appropriate immediate operand.
u: like 'i', except without "ui" suffix case (used for cmpgeu/cmpltu)
o: print "io" if OP needs volatile access (due to TARGET_BYPASS_CACHE
or TARGET_BYPASS_CACHE_VOLATILE).
x: print i/hi/ci/chi suffixes for the and instruction,
when OP is the appropriate immediate operand.
z: prints the third register immediate operand in assembly
instructions. Outputs const0_rtx as the 'zero' register
instead of '0'.
y: same as 'z', but specifically for logical instructions,
where the processing of immediates is slightly different.
H: for %hiadj
L: for %lo
D: for the upper 32-bits of a 64-bit double value
R: prints reverse condition.
A: prints (reg) operand for ld[s]ex and st[s]ex.
.: print .n suffix for 16-bit instructions.
!: print r.n suffix for 16-bit instructions. Used for jmpr.n.
*/
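/* For instance, an insn template such as "add%i2\t%0, %1, %z2" (an
illustrative pattern, not necessarily verbatim from nios2.md) relies
on 'i' to choose between the "add" and "addi" mnemonics and on 'z' to
print const0_rtx as the "zero" register.  */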
static void
nios2_print_operand (FILE *file, rtx op, int letter)
{
/* First take care of the format letters that just insert a string
into the output stream. */
switch (letter)
{
case '.':
if (current_output_insn && get_attr_length (current_output_insn) == 2)
fprintf (file, ".n");
return;
case '!':
if (current_output_insn && get_attr_length (current_output_insn) == 2)
fprintf (file, "r.n");
return;
case 'x':
if (CONST_INT_P (op))
{
HOST_WIDE_INT val = INTVAL (op);
HOST_WIDE_INT low = val & 0xffff;
HOST_WIDE_INT high = (val >> 16) & 0xffff;
if (val != 0)
{
if (high != 0)
{
if (low != 0)
{
gcc_assert (TARGET_ARCH_R2);
if (high == 0xffff)
fprintf (file, "c");
else if (low == 0xffff)
fprintf (file, "ch");
else
gcc_unreachable ();
}
else
fprintf (file, "h");
}
fprintf (file, "i");
}
}
return;
case 'u':
case 'i':
if (CONST_INT_P (op))
{
HOST_WIDE_INT val = INTVAL (op);
HOST_WIDE_INT low = val & 0xffff;
HOST_WIDE_INT high = (val >> 16) & 0xffff;
if (val != 0)
{
if (low == 0 && high != 0)
fprintf (file, "h");
else if (high == 0 && (low & 0x8000) != 0 && letter != 'u')
fprintf (file, "u");
}
}
if (CONSTANT_P (op) && op != const0_rtx)
fprintf (file, "i");
return;
case 'o':
if (GET_CODE (op) == MEM
&& ((MEM_VOLATILE_P (op) && TARGET_BYPASS_CACHE_VOLATILE)
|| TARGET_BYPASS_CACHE))
{
gcc_assert (current_output_insn
&& get_attr_length (current_output_insn) == 4);
fprintf (file, "io");
}
return;
default:
break;
}
/* Handle comparison operator names. */
if (comparison_operator (op, VOIDmode))
{
enum rtx_code cond = GET_CODE (op);
if (letter == 0)
{
fprintf (file, "%s", GET_RTX_NAME (cond));
return;
}
if (letter == 'R')
{
fprintf (file, "%s", GET_RTX_NAME (reverse_condition (cond)));
return;
}
}
/* Now handle the cases where we actually need to format an operand. */
switch (GET_CODE (op))
{
case REG:
if (letter == 0 || letter == 'z' || letter == 'y')
{
fprintf (file, "%s", reg_names[REGNO (op)]);
return;
}
else if (letter == 'D')
{
fprintf (file, "%s", reg_names[REGNO (op)+1]);
return;
}
break;
case CONST_INT:
{
rtx int_rtx = op;
HOST_WIDE_INT val = INTVAL (int_rtx);
HOST_WIDE_INT low = val & 0xffff;
HOST_WIDE_INT high = (val >> 16) & 0xffff;
if (letter == 'y')
{
if (val == 0)
fprintf (file, "zero");
else
{
if (high != 0)
{
if (low != 0)
{
gcc_assert (TARGET_ARCH_R2);
if (high == 0xffff)
/* andci. */
int_rtx = gen_int_mode (low, SImode);
else if (low == 0xffff)
/* andchi. */
int_rtx = gen_int_mode (high, SImode);
else
gcc_unreachable ();
}
else
/* andhi. */
int_rtx = gen_int_mode (high, SImode);
}
else
/* andi. */
int_rtx = gen_int_mode (low, SImode);
output_addr_const (file, int_rtx);
}
return;
}
else if (letter == 'z')
{
if (val == 0)
fprintf (file, "zero");
else
{
if (low == 0 && high != 0)
int_rtx = gen_int_mode (high, SImode);
else if (low != 0)
{
gcc_assert (high == 0 || high == 0xffff);
int_rtx = gen_int_mode (low, high == 0 ? SImode : HImode);
}
else
gcc_unreachable ();
output_addr_const (file, int_rtx);
}
return;
}
}
/* Else, fall through. */
case CONST:
case LABEL_REF:
case SYMBOL_REF:
case CONST_DOUBLE:
if (letter == 0 || letter == 'z')
{
output_addr_const (file, op);
return;
}
else if (letter == 'H' || letter == 'L')
{
fprintf (file, "%%");
if (GET_CODE (op) == CONST
&& GET_CODE (XEXP (op, 0)) == UNSPEC)
{
rtx unspec = XEXP (op, 0);
int unspec_reloc = XINT (unspec, 1);
gcc_assert (nios2_large_offset_p (unspec_reloc));
fprintf (file, "%s_", nios2_unspec_reloc_name (unspec_reloc));
op = XVECEXP (unspec, 0, 0);
}
fprintf (file, letter == 'H' ? "hiadj(" : "lo(");
output_addr_const (file, op);
fprintf (file, ")");
return;
}
break;
case SUBREG:
case MEM:
if (letter == 'A')
{
/* Address of '(reg)' form, with no index. */
fprintf (file, "(%s)", reg_names[REGNO (XEXP (op, 0))]);
return;
}
if (letter == 0)
{
output_address (VOIDmode, op);
return;
}
break;
case CODE_LABEL:
if (letter == 0)
{
output_addr_const (file, op);
return;
}
break;
default:
break;
}
debug_rtx (op);
output_operand_lossage ("Unsupported operand for code '%c'", letter);
gcc_unreachable ();
}
/* Return true if this is a GP-relative accessible reference. */
bool
gprel_constant_p (rtx op)
{
if (GET_CODE (op) == SYMBOL_REF
&& nios2_symbol_ref_in_small_data_p (op))
return true;
else if (GET_CODE (op) == CONST
&& GET_CODE (XEXP (op, 0)) == PLUS)
return gprel_constant_p (XEXP (XEXP (op, 0), 0));
return false;
}
/* Likewise if this is a zero-relative accessible reference. */
bool
r0rel_constant_p (rtx op)
{
if (GET_CODE (op) == SYMBOL_REF