blob: 486fe4717ef5a6be256c1c694c421bdad7fbc0fa [file] [log] [blame]
/* Simulator for Analog Devices Blackfin processors.
Copyright (C) 2005-2021 Free Software Foundation, Inc.
Contributed by Analog Devices, Inc.
This file is part of simulators.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* This must come before any other includes. */
#include "defs.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include "ansidecl.h"
#include "opcode/bfin.h"
#include "sim-main.h"
#include "dv-bfin_cec.h"
#include "dv-bfin_mmu.h"
#define HOST_LONG_WORD_SIZE (sizeof (long) * 8)
#define SIGNEXTEND(v, n) \
(((bs32)(v) << (HOST_LONG_WORD_SIZE - (n))) >> (HOST_LONG_WORD_SIZE - (n)))
/* Raise an undefined-instruction exception; never returns.  The loop
   guards against cec_exception() ever returning.  */
static ATTRIBUTE_NORETURN void
illegal_instruction (SIM_CPU *cpu)
{
  TRACE_INSN (cpu, "ILLEGAL INSTRUCTION");
  while (1)
    cec_exception (cpu, VEC_UNDEF_I);
}
/* Raise an illegal-combination exception (invalid parallel instruction
   grouping); never returns.  */
static ATTRIBUTE_NORETURN void
illegal_instruction_combination (SIM_CPU *cpu)
{
  TRACE_INSN (cpu, "ILLEGAL INSTRUCTION COMBINATION");
  while (1)
    cec_exception (cpu, VEC_ILGAL_I);
}
/* Raise the appropriate exception depending on whether we are decoding a
   slot of a parallel instruction group; never returns.  */
static ATTRIBUTE_NORETURN void
illegal_instruction_or_combination (SIM_CPU *cpu)
{
  if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
    illegal_instruction_combination (cpu);
  else
    illegal_instruction (cpu);
}
/* Report an instruction the simulator does not model (INSN is the mnemonic
   for the message), then raise an undefined-instruction exception.  32-bit
   opcodes (top two bits of the first 16-bit word set) print all 32 bits.  */
static ATTRIBUTE_NORETURN void
unhandled_instruction (SIM_CPU *cpu, const char *insn)
{
  SIM_DESC sd = CPU_STATE (cpu);
  bu16 iw0, iw1;
  bu32 iw2;
  TRACE_EVENTS (cpu, "unhandled instruction");
  /* Re-fetch the raw opcode words at the current PC for the message.  */
  iw0 = IFETCH (PCREG);
  iw1 = IFETCH (PCREG + 2);
  iw2 = ((bu32)iw0 << 16) | iw1;
  sim_io_eprintf (sd, "Unhandled instruction at 0x%08x (%s opcode 0x", PCREG, insn);
  if ((iw0 & 0xc000) == 0xc000)
    sim_io_eprintf (sd, "%08x", iw2);
  else
    sim_io_eprintf (sd, "%04x", iw0);
  sim_io_eprintf (sd, ") ... aborting\n");
  illegal_instruction (cpu);
}
/* Printable names of the 32 ASTAT bits, indexed by bit position.
   "ASTAT_n" entries name the reserved/unnamed bits.  */
static const char * const astat_names[] =
{
  [ 0] = "AZ",
  [ 1] = "AN",
  [ 2] = "AC0_COPY",
  [ 3] = "V_COPY",
  [ 4] = "ASTAT_4",
  [ 5] = "CC",
  [ 6] = "AQ",
  [ 7] = "ASTAT_7",
  [ 8] = "RND_MOD",
  [ 9] = "ASTAT_9",
  [10] = "ASTAT_10",
  [11] = "ASTAT_11",
  [12] = "AC0",
  [13] = "AC1",
  [14] = "ASTAT_14",
  [15] = "ASTAT_15",
  [16] = "AV0",
  [17] = "AV0S",
  [18] = "AV1",
  [19] = "AV1S",
  [20] = "ASTAT_20",
  [21] = "ASTAT_21",
  [22] = "ASTAT_22",
  [23] = "ASTAT_23",
  [24] = "V",
  [25] = "VS",
  [26] = "ASTAT_26",
  [27] = "ASTAT_27",
  [28] = "ASTAT_28",
  [29] = "ASTAT_29",
  [30] = "ASTAT_30",
  [31] = "ASTAT_31",
};
/* One enumerator per constant-operand format of the ISA.  These index
   constant_formats[] below, so the order of the two must match.  */
typedef enum
{
  c_0, c_1, c_4, c_2, c_uimm2, c_uimm3, c_imm3, c_pcrel4,
  c_imm4, c_uimm4s4, c_uimm4s4d, c_uimm4, c_uimm4s2, c_negimm5s4, c_imm5,
  c_imm5d, c_uimm5, c_imm6, c_imm7, c_imm7d, c_imm8, c_uimm8, c_pcrel8,
  c_uimm8s4, c_pcrel8s4, c_lppcrel10, c_pcrel10, c_pcrel12, c_imm16s4,
  c_luimm16, c_imm16, c_imm16d, c_huimm16, c_rimm16, c_imm16s2, c_uimm16s4,
  c_uimm16s4d, c_uimm16, c_pcrel24, c_uimm32, c_imm32, c_huimm32, c_huimm32e,
} const_forms_t;
/* Decoding/printing attributes of each constant format.  Rows must stay
   in the same order as const_forms_t above.  */
static const struct
{
  const char *name;		/* Printable name of the format.  */
  const int nbits;		/* Width of the encoded field in bits.  */
  const char reloc;		/* Value is an address (abs or pc-relative).  */
  const char issigned;		/* Sign-extend the raw field.  */
  const char pcrel;		/* Add PC after scaling.  */
  const char scale;		/* Left shift applied when decoding.  */
  const char offset;		/* Offset added before scaling.  */
  const char negative;		/* Value carries an implied sign bit.  */
  const char positive;		/* NOTE(review): not read by the helpers
				   below -- presumably "always positive";
				   confirm against opcodes/bfin-dis.c.  */
  const char decimal;		/* Print in decimal instead of hex.  */
  const char leading;		/* printf field width for decimal output.  */
  const char exact;		/* Only honored by the (disabled) symbolic
				   address printing path.  */
} constant_formats[] =
{
  { "0", 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
  { "1", 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
  { "4", 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
  { "2", 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
  { "uimm2", 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
  { "uimm3", 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
  { "imm3", 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
  { "pcrel4", 4, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0},
  { "imm4", 4, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
  { "uimm4s4", 4, 0, 0, 0, 2, 0, 0, 1, 0, 0, 0},
  { "uimm4s4d", 4, 0, 0, 0, 2, 0, 0, 1, 1, 0, 0},
  { "uimm4", 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
  { "uimm4s2", 4, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0},
  { "negimm5s4", 5, 0, 1, 0, 2, 0, 1, 0, 0, 0, 0},
  { "imm5", 5, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
  { "imm5d", 5, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0},
  { "uimm5", 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
  { "imm6", 6, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
  { "imm7", 7, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
  { "imm7d", 7, 0, 1, 0, 0, 0, 0, 0, 1, 3, 0},
  { "imm8", 8, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
  { "uimm8", 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
  { "pcrel8", 8, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0},
  { "uimm8s4", 8, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0},
  { "pcrel8s4", 8, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0},
  { "lppcrel10", 10, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0},
  { "pcrel10", 10, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0},
  { "pcrel12", 12, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0},
  { "imm16s4", 16, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0},
  { "luimm16", 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
  { "imm16", 16, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
  { "imm16d", 16, 0, 1, 0, 0, 0, 0, 0, 1, 3, 0},
  { "huimm16", 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
  { "rimm16", 16, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0},
  { "imm16s2", 16, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0},
  { "uimm16s4", 16, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0},
  { "uimm16s4d", 16, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0},
  { "uimm16", 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
  { "pcrel24", 24, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0},
  { "uimm32", 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
  { "imm32", 32, 0, 1, 0, 0, 0, 0, 0, 1, 3, 0},
  { "huimm32", 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
  { "huimm32e", 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1},
};
/* Render the raw constant field X of format CF as a string for trace
   output; PC is used by pc-relative forms.  Returns a pointer to a
   static buffer -- not reentrant.  */
static const char *
fmtconst_str (const_forms_t cf, bs32 x, bu32 pc)
{
  static char buf[60];

  if (constant_formats[cf].reloc)
    {
      bu32 ea = (((constant_formats[cf].pcrel ? SIGNEXTEND (x, constant_formats[cf].nbits)
		   : x) + constant_formats[cf].offset) << constant_formats[cf].scale);
      if (constant_formats[cf].pcrel)
	ea += pc;
      /* The disassembler would symbolize EA here; the sim has no symbol
	 table hook, so it prints the raw field value instead.  */
      /*if (outf->symbol_at_address_func (ea, outf) || !constant_formats[cf].exact)
	{
	  outf->print_address_func (ea, outf);
	  return "";
	}
      else*/
	{
	  sprintf (buf, "%#x", x);
	  return buf;
	}
    }

  /* Negative constants have an implied sign bit.  */
  if (constant_formats[cf].negative)
    {
      int nb = constant_formats[cf].nbits + 1;
      x = x | (1 << constant_formats[cf].nbits);
      x = SIGNEXTEND (x, nb);
    }
  else
    x = constant_formats[cf].issigned ? SIGNEXTEND (x, constant_formats[cf].nbits) : x;

  if (constant_formats[cf].offset)
    x += constant_formats[cf].offset;
  if (constant_formats[cf].scale)
    x <<= constant_formats[cf].scale;

  if (constant_formats[cf].decimal)
    sprintf (buf, "%*i", constant_formats[cf].leading, x);
  else
    {
      if (constant_formats[cf].issigned && x < 0)
	/* Negate in unsigned arithmetic: abs (x) is undefined behavior
	   when x == INT_MIN (possible for 32-bit formats).  */
	sprintf (buf, "-0x%x", -(bu32) x);
      else
	sprintf (buf, "0x%x", x);
    }

  return buf;
}
/* Decode the raw constant field X of format CF into its numeric value.
   PC is only used by the (deliberately disabled) relocation path.  */
static bu32
fmtconst_val (const_forms_t cf, bu32 x, bu32 pc)
{
  /* Dead code kept for reference: the sim always wants the decoded field
     value, not a resolved address.  */
  if (0 && constant_formats[cf].reloc)
    {
      bu32 ea = (((constant_formats[cf].pcrel
		   ? (bu32)SIGNEXTEND (x, constant_formats[cf].nbits)
		   : x) + constant_formats[cf].offset)
		 << constant_formats[cf].scale);
      if (constant_formats[cf].pcrel)
	ea += pc;
      return ea;
    }
  /* Negative constants have an implied sign bit.  */
  if (constant_formats[cf].negative)
    {
      int nb = constant_formats[cf].nbits + 1;
      x = x | (1 << constant_formats[cf].nbits);
      x = SIGNEXTEND (x, nb);
    }
  else if (constant_formats[cf].issigned)
    x = SIGNEXTEND (x, constant_formats[cf].nbits);
  x += constant_formats[cf].offset;
  x <<= constant_formats[cf].scale;
  return x;
}
/* Convenience wrappers: <fmt>(x) decodes field X of constant format
   c_<fmt> (see fmtconst_val), and <fmt>_str(x) renders it for trace
   output.  The pc-relative forms reference a local `pc' variable at the
   point of use.  */
#define uimm16s4(x) fmtconst_val (c_uimm16s4, x, 0)
#define uimm16s4_str(x) fmtconst_str (c_uimm16s4, x, 0)
#define uimm16s4d(x) fmtconst_val (c_uimm16s4d, x, 0)
#define pcrel4(x) fmtconst_val (c_pcrel4, x, pc)
#define pcrel8(x) fmtconst_val (c_pcrel8, x, pc)
#define pcrel8s4(x) fmtconst_val (c_pcrel8s4, x, pc)
#define pcrel10(x) fmtconst_val (c_pcrel10, x, pc)
#define pcrel12(x) fmtconst_val (c_pcrel12, x, pc)
#define negimm5s4(x) fmtconst_val (c_negimm5s4, x, 0)
#define negimm5s4_str(x) fmtconst_str (c_negimm5s4, x, 0)
#define rimm16(x) fmtconst_val (c_rimm16, x, 0)
#define huimm16(x) fmtconst_val (c_huimm16, x, 0)
#define imm16(x) fmtconst_val (c_imm16, x, 0)
#define imm16_str(x) fmtconst_str (c_imm16, x, 0)
#define imm16d(x) fmtconst_val (c_imm16d, x, 0)
#define uimm2(x) fmtconst_val (c_uimm2, x, 0)
#define uimm3(x) fmtconst_val (c_uimm3, x, 0)
#define uimm3_str(x) fmtconst_str (c_uimm3, x, 0)
#define luimm16(x) fmtconst_val (c_luimm16, x, 0)
#define luimm16_str(x) fmtconst_str (c_luimm16, x, 0)
#define uimm4(x) fmtconst_val (c_uimm4, x, 0)
#define uimm4_str(x) fmtconst_str (c_uimm4, x, 0)
#define uimm5(x) fmtconst_val (c_uimm5, x, 0)
#define uimm5_str(x) fmtconst_str (c_uimm5, x, 0)
#define imm16s2(x) fmtconst_val (c_imm16s2, x, 0)
#define imm16s2_str(x) fmtconst_str (c_imm16s2, x, 0)
#define uimm8(x) fmtconst_val (c_uimm8, x, 0)
#define imm16s4(x) fmtconst_val (c_imm16s4, x, 0)
#define imm16s4_str(x) fmtconst_str (c_imm16s4, x, 0)
#define uimm4s2(x) fmtconst_val (c_uimm4s2, x, 0)
#define uimm4s2_str(x) fmtconst_str (c_uimm4s2, x, 0)
#define uimm4s4(x) fmtconst_val (c_uimm4s4, x, 0)
#define uimm4s4_str(x) fmtconst_str (c_uimm4s4, x, 0)
#define uimm4s4d(x) fmtconst_val (c_uimm4s4d, x, 0)
#define lppcrel10(x) fmtconst_val (c_lppcrel10, x, pc)
#define imm3(x) fmtconst_val (c_imm3, x, 0)
#define imm3_str(x) fmtconst_str (c_imm3, x, 0)
#define imm4(x) fmtconst_val (c_imm4, x, 0)
#define uimm8s4(x) fmtconst_val (c_uimm8s4, x, 0)
#define imm5(x) fmtconst_val (c_imm5, x, 0)
#define imm5d(x) fmtconst_val (c_imm5d, x, 0)
#define imm6(x) fmtconst_val (c_imm6, x, 0)
#define imm7(x) fmtconst_val (c_imm7, x, 0)
#define imm7_str(x) fmtconst_str (c_imm7, x, 0)
#define imm7d(x) fmtconst_val (c_imm7d, x, 0)
#define imm8(x) fmtconst_val (c_imm8, x, 0)
#define pcrel24(x) fmtconst_val (c_pcrel24, x, pc)
#define pcrel24_str(x) fmtconst_str (c_pcrel24, x, pc)
#define uimm16(x) fmtconst_val (c_uimm16, x, 0)
#define uimm32(x) fmtconst_val (c_uimm32, x, 0)
#define imm32(x) fmtconst_val (c_imm32, x, 0)
#define huimm32(x) fmtconst_val (c_huimm32, x, 0)
#define huimm32e(x) fmtconst_val (c_huimm32e, x, 0)
/* Table C-4. Core Register Encoding Map.  Indexed by (grp << 3) | reg;
   "<res>" marks reserved encodings.  */
const char * const greg_names[] =
{
  "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
  "P0", "P1", "P2", "P3", "P4", "P5", "SP", "FP",
  "I0", "I1", "I2", "I3", "M0", "M1", "M2", "M3",
  "B0", "B1", "B2", "B3", "L0", "L1", "L2", "L3",
  "A0.X", "A0.W", "A1.X", "A1.W", "<res>", "<res>", "ASTAT", "RETS",
  "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>",
  "LC0", "LT0", "LB0", "LC1", "LT1", "LB1", "CYCLES", "CYCLES2",
  "USP", "SEQSTAT", "SYSCFG", "RETI", "RETX", "RETN", "RETE", "EMUDAT",
};
/* Return the printable name of register (GRP, REG) from the core
   register encoding map.  */
static const char *
get_allreg_name (int grp, int reg)
{
  int idx = grp * 8 + reg;
  return greg_names[idx];
}
/* Return the printable name of pointer register REG (group 1).  */
static const char *
get_preg_name (int reg)
{
  return get_allreg_name (1, reg);
}
/* Return true for reserved encodings in the core register map: all of
   group 5, and slots 4/5 of group 4.  */
static bool
reg_is_reserved (int grp, int reg)
{
  if (grp == 5)
    return true;
  if (grp != 4)
    return false;
  return reg == 4 || reg == 5;
}
/* Return a pointer to the storage backing register (GRP, REG) in the core
   register encoding map.  Reserved encodings (36, 37, 40-47) fall through
   to an illegal-instruction exception.  NOTE: ASTAT (encoding 38) is not
   handled here either -- reg_read/reg_write special-case it first.  */
static bu32 *
get_allreg (SIM_CPU *cpu, int grp, int reg)
{
  int fullreg = (grp << 3) | reg;
  /* REG_R0, REG_R1, REG_R2, REG_R3, REG_R4, REG_R5, REG_R6, REG_R7,
     REG_P0, REG_P1, REG_P2, REG_P3, REG_P4, REG_P5, REG_SP, REG_FP,
     REG_I0, REG_I1, REG_I2, REG_I3, REG_M0, REG_M1, REG_M2, REG_M3,
     REG_B0, REG_B1, REG_B2, REG_B3, REG_L0, REG_L1, REG_L2, REG_L3,
     REG_A0x, REG_A0w, REG_A1x, REG_A1w, , , REG_ASTAT, REG_RETS,
     , , , , , , , ,
     REG_LC0, REG_LT0, REG_LB0, REG_LC1, REG_LT1, REG_LB1, REG_CYCLES,
     REG_CYCLES2,
     REG_USP, REG_SEQSTAT, REG_SYSCFG, REG_RETI, REG_RETX, REG_RETN, REG_RETE,
     REG_LASTREG */
  switch (fullreg >> 2)
    {
    case 0: case 1: return &DREG (reg);		/* R0 - R7.  */
    case 2: case 3: return &PREG (reg);		/* P0 - P5, SP, FP.  */
    case 4: return &IREG (reg & 3);
    case 5: return &MREG (reg & 3);
    case 6: return &BREG (reg & 3);
    case 7: return &LREG (reg & 3);
    default:
      switch (fullreg)
	{
	case 32: return &AXREG (0);
	case 33: return &AWREG (0);
	case 34: return &AXREG (1);
	case 35: return &AWREG (1);
	case 39: return &RETSREG;
	case 48: return &LCREG (0);
	case 49: return &LTREG (0);
	case 50: return &LBREG (0);
	case 51: return &LCREG (1);
	case 52: return &LTREG (1);
	case 53: return &LBREG (1);
	case 54: return &CYCLESREG;
	case 55: return &CYCLES2REG;
	case 56: return &USPREG;
	case 57: return &SEQSTATREG;
	case 58: return &SYSCFGREG;
	case 59: return &RETIREG;
	case 60: return &RETXREG;
	case 61: return &RETNREG;
	case 62: return &RETEREG;
	case 63: return &EMUDAT_INREG;
	}
      illegal_instruction (cpu);
    }
}
/* Return the printable option suffix for ALU ops selected by the S0/X0
   decode bits, or "" when out of range.  */
static const char *
amod0 (int s0, int x0)
{
  static const char * const mod0[] = {
    "", " (S)", " (CO)", " (SCO)",
  };
  unsigned int idx = s0 + (x0 << 1);

  return idx < ARRAY_SIZE (mod0) ? mod0[idx] : "";
}
/* Return the printable option suffix selected by the S0/X0/AOP0 decode
   bits, or "" when out of range (rows 4-7 are unused encodings).  */
static const char *
amod0amod2 (int s0, int x0, int aop0)
{
  static const char * const mod02[] = {
    "", " (S)", " (CO)", " (SCO)",
    "", "", "", "",
    " (ASR)", " (S, ASR)", " (CO, ASR)", " (SCO, ASR)",
    " (ASL)", " (S, ASL)", " (CO, ASL)", " (SCO, ASL)",
  };
  unsigned int idx = s0 + (x0 << 1) + (aop0 << 2);

  return idx < ARRAY_SIZE (mod02) ? mod02[idx] : "";
}
/* Return the printable (NS)/(S) suffix selected by the S0/X0 decode
   bits, or "" when out of range.  */
static const char *
amod1 (int s0, int x0)
{
  static const char * const mod1[] = {
    " (NS)", " (S)",
  };
  unsigned int idx = s0 + (x0 << 1);

  return idx < ARRAY_SIZE (mod1) ? mod1[idx] : "";
}
/* Return the printable option suffix for a MAC op given its mode MMOD
   and mixed-mode bit MM; "" for unknown/holes in the table.  */
static const char *
mac_optmode (int mmod, int MM)
{
  static const char * const omode[] = {
    [(M_S2RND << 1) + 0] = " (S2RND)",
    [(M_T << 1) + 0] = " (T)",
    [(M_W32 << 1) + 0] = " (W32)",
    [(M_FU << 1) + 0] = " (FU)",
    [(M_TFU << 1) + 0] = " (TFU)",
    [(M_IS << 1) + 0] = " (IS)",
    [(M_ISS2 << 1) + 0] = " (ISS2)",
    [(M_IH << 1) + 0] = " (IH)",
    [(M_IU << 1) + 0] = " (IU)",
    [(M_S2RND << 1) + 1] = " (M, S2RND)",
    [(M_T << 1) + 1] = " (M, T)",
    [(M_W32 << 1) + 1] = " (M, W32)",
    [(M_FU << 1) + 1] = " (M, FU)",
    [(M_TFU << 1) + 1] = " (M, TFU)",
    [(M_IS << 1) + 1] = " (M, IS)",
    [(M_ISS2 << 1) + 1] = " (M, ISS2)",
    [(M_IH << 1) + 1] = " (M, IH)",
    [(M_IU << 1) + 1] = " (M, IU)",
  };
  unsigned int idx = MM + (mmod << 1);

  if (idx >= ARRAY_SIZE (omode) || !omode[idx])
    return "";
  return omode[idx];
}
/* Map a pointer into the register file back to a printable name for
   trace output.  The first range check relies on the D/P/I/M/B/L/...
   registers lying contiguously in the cpu state struct, in greg_names[]
   order.  Aborts on an unknown pointer.  */
static const char *
get_store_name (SIM_CPU *cpu, bu32 *p)
{
  if (p >= &DREG (0) && p <= &CYCLESREG)
    return greg_names[p - &DREG (0)];
  else if (p == &AXREG (0))
    return greg_names[4 * 8 + 0];
  else if (p == &AWREG (0))
    return greg_names[4 * 8 + 1];
  else if (p == &AXREG (1))
    return greg_names[4 * 8 + 2];
  else if (p == &AWREG (1))
    return greg_names[4 * 8 + 3];
  else if (p == &ASTATREG (av0))
    return "ASTAT[av0]";
  else if (p == &ASTATREG (av0s))
    return "ASTAT[av0s]";
  else if (p == &ASTATREG (av1))
    return "ASTAT[av1]";
  else if (p == &ASTATREG (av1s))
    return "ASTAT[av1s]";
  else if (p == &ASTATREG (v))
    return "ASTAT[v]";
  else if (p == &ASTATREG (vs))
    return "ASTAT[vs]";
  else if (p == &ASTATREG (v_copy))
    return "ASTAT[v_copy]";
  else if (p == &ASTATREG (az))
    return "ASTAT[az]";
  else if (p == &ASTATREG (an))
    return "ASTAT[an]";
  /* The duplicate az check that used to sit here was unreachable.  */
  else if (p == &ASTATREG (ac0))
    return "ASTAT[ac0]";
  else if (p == &ASTATREG (ac0_copy))
    return "ASTAT[ac0_copy]";
  else
    {
      /* Worry about this when we start to STORE() it.  */
      sim_io_eprintf (CPU_STATE (cpu), "STORE(): unknown register\n");
      abort ();
    }
}
/* Append a deferred register write (*ADDR = VAL) to the per-insn store
   queue; the queue is committed after the insn finishes (see STORE).  */
static void
queue_store (SIM_CPU *cpu, bu32 *addr, bu32 val)
{
  struct store *s = &BFIN_CPU_STATE.stores[BFIN_CPU_STATE.n_stores];
  s->addr = addr;
  s->val = val;
  TRACE_REGISTER (cpu, "queuing write %s = %#x",
		  get_store_name (cpu, addr), val);
  ++BFIN_CPU_STATE.n_stores;
}
/* Queue the register write X = Y for end-of-insn commit.  Aborts when
   the queue is full -- the limit of 20 presumably matches the size of
   the stores[] array in the cpu state; confirm in the header.  */
#define STORE(X, Y) \
  do { \
    if (BFIN_CPU_STATE.n_stores == 20) abort (); \
    queue_store (cpu, &(X), (Y)); \
  } while (0)
/* Update the AZ (zero) and AN (negative) flags from a 32-bit result.  */
static void
setflags_nz (SIM_CPU *cpu, bu32 val)
{
  SET_ASTATREG (az, val == 0);
  SET_ASTATREG (an, val >> 31);
}
/* Update AZ/AN from a dual-16-bit result: AN if either half is negative,
   AZ if either half is zero.  */
static void
setflags_nz_2x16 (SIM_CPU *cpu, bu32 val)
{
  SET_ASTATREG (an, (bs16)val < 0 || (bs16)(val >> 16) < 0);
  SET_ASTATREG (az, (bs16)val == 0 || (bs16)(val >> 16) == 0);
}
/* Flag update for logical ops: AZ/AN from the result, AC0 and V cleared.  */
static void
setflags_logical (SIM_CPU *cpu, bu32 val)
{
  setflags_nz (cpu, val);
  SET_ASTATREG (ac0, 0);
  SET_ASTATREG (v, 0);
}
/* Add two 32-bit values as if their bit order were reversed: carries
   propagate from the MSB down towards the LSB.  Used for bit-reversed
   DAG addressing.  */
static bu32
add_brev (bu32 addend1, bu32 addend2)
{
  bu32 result = 0;
  int carry = 0;
  int pos;

  for (pos = 31; pos >= 0; --pos)
    {
      int sum = ((addend1 >> pos) & 1) + ((addend2 >> pos) & 1) + carry;

      carry = sum >> 1;
      result |= (bu32)(sum & 1) << pos;
    }

  return result;
}
/* This is a bit crazy, but we want to simulate the hardware behavior exactly
rather than worry about the circular buffers being used correctly. Which
isn't to say there isn't room for improvement here, just that we want to
be conservative. See also dagsub(). */
/* Post-modify DAG index register I(dagno) by modifier M, applying
   circular buffering against base B and length L exactly the way the
   hardware does.  Queues the write to I and returns the new value.  */
static bu32
dagadd (SIM_CPU *cpu, int dagno, bs32 M)
{
  bu64 i = IREG (dagno);	/* Index register.  */
  bu64 l = LREG (dagno);	/* Buffer length.  */
  bu64 b = BREG (dagno);	/* Buffer base.  */
  bu64 m = (bu32)M;		/* Modifier, zero-extended to 64 bits.  */
  bu64 LB, IM, IML;
  bu32 im32, iml32, lb32, res;
  bu64 msb, car;
  /* A naïve implementation that mostly works:
     res = i + m;
     if (l && res >= b + l)
     res -= l;
     STORE (IREG (dagno), res);
  */
  msb = (bu64)1 << 31;		/* Sign bit of a 32-bit value.  */
  car = (bu64)1 << 32;		/* Carry out of a 32-bit addition.  */
  IM = i + m;			/* Unwrapped new index.  */
  im32 = IM;
  LB = l + b;			/* One past the end of the buffer.  */
  lb32 = LB;
  if (M < 0)
    {
      /* Negative modifier: the wrapped candidate re-adds the length.  */
      IML = i + m + l;
      iml32 = IML;
      if ((i & msb) || (IM & car))
	res = (im32 < b) ? iml32 : im32;
      else
	res = (im32 < b) ? im32 : iml32;
    }
  else
    {
      /* Positive modifier: the wrapped candidate subtracts the length.  */
      IML = i + m - l;
      iml32 = IML;
      if ((IM & car) == (LB & car))
	res = (im32 < lb32) ? im32 : iml32;
      else
	res = (im32 < lb32) ? iml32 : im32;
    }
  STORE (IREG (dagno), res);
  return res;
}
/* See dagadd() notes above. */
/* Post-modify DAG index register I(dagno) by -M with circular buffering,
   mirroring the hardware like dagadd() does.  The subtraction is done as
   addition of MBAR, the two's complement of M.  */
static bu32
dagsub (SIM_CPU *cpu, int dagno, bs32 M)
{
  bu64 i = IREG (dagno);	/* Index register.  */
  bu64 l = LREG (dagno);	/* Buffer length.  */
  bu64 b = BREG (dagno);	/* Buffer base.  */
  bu64 m = (bu32)M;
  bu64 mbar = (bu32)(~m + 1);	/* -M in 32-bit two's complement.  */
  bu64 LB, IM, IML;
  bu32 b32, im32, iml32, lb32, res;
  bu64 msb, car;
  /* A naïve implementation that mostly works:
     res = i - m;
     if (l && newi < b)
     newi += l;
     STORE (IREG (dagno), newi);
  */
  msb = (bu64)1 << 31;		/* Sign bit of a 32-bit value.  */
  car = (bu64)1 << 32;		/* Carry out of a 32-bit addition.  */
  IM = i + mbar;		/* Unwrapped new index.  */
  im32 = IM;
  LB = l + b;			/* One past the end of the buffer.  */
  lb32 = LB;
  if (M < 0)
    {
      IML = i + mbar - l;
      iml32 = IML;
      if (!!((i & msb) && (IM & car)) == !!(LB & car))
	res = (im32 < lb32) ? im32 : iml32;
      else
	res = (im32 < lb32) ? iml32 : im32;
    }
  else
    {
      IML = i + mbar + l;
      iml32 = IML;
      b32 = b;
      if (M == 0 || IM & car)
	res = (im32 < b32) ? iml32 : im32;
      else
	res = (im32 < b32) ? im32 : iml32;
    }
  STORE (IREG (dagno), res);
  return res;
}
/* Arithmetic right shift of VAL by CNT within a SIZE-bit field (CNT is
   clamped to SIZE).  Updates AZ/AN; V is cleared unless SIZE is 40.
   NOTE(review): the shifts are split into <= 16-bit steps -- presumably
   to match hardware staging / avoid over-wide shifts; confirm.  */
static bu40
ashiftrt (SIM_CPU *cpu, bu40 val, int cnt, int size)
{
  int real_cnt = cnt > size ? size : cnt;
  /* All-ones when the sign bit of VAL is set, else zero.  */
  bu40 sgn = ~(((val & 0xFFFFFFFFFFull) >> (size - 1)) - 1);
  int sgncnt = size - real_cnt;
  if (sgncnt > 16)
    sgn <<= 16, sgncnt -= 16;
  sgn <<= sgncnt;
  if (real_cnt > 16)
    val >>= 16, real_cnt -= 16;
  val >>= real_cnt;
  val |= sgn;			/* Replicate the sign into the vacated bits.  */
  SET_ASTATREG (an, val >> (size - 1));
  SET_ASTATREG (az, val == 0);
  if (size != 40)
    SET_ASTATREG (v, 0);
  return val;
}
/* Logical right shift of VAL by CNT within SIZE bits (16/32/40; CNT is
   clamped to SIZE).  The result is masked to SIZE bits; AZ/AN updated,
   V cleared unless SIZE is 40.  Other sizes raise an exception.  */
static bu64
lshiftrt (SIM_CPU *cpu, bu64 val, int cnt, int size)
{
  int real_cnt = cnt > size ? size : cnt;
  if (real_cnt > 16)
    val >>= 16, real_cnt -= 16;
  val >>= real_cnt;
  switch (size)
    {
    case 16:
      val &= 0xFFFF;
      break;
    case 32:
      val &= 0xFFFFFFFF;
      break;
    case 40:
      val &= 0xFFFFFFFFFFull;
      break;
    default:
      illegal_instruction (cpu);
      break;
    }
  SET_ASTATREG (an, val >> (size - 1));
  SET_ASTATREG (az, val == 0);
  if (size != 40)
    SET_ASTATREG (v, 0);
  return val;
}
/* Left shift of VAL by CNT within SIZE bits (16/32/40; CNT clamped to
   SIZE).  With SATURATE, clamp to the extreme SIZE-bit signed values
   when sign information would be shifted out; with OVERFLOW, publish
   the overflow into V/VS (40-bit results never touch V/VS).  */
static bu64
lshift (SIM_CPU *cpu, bu64 val, int cnt, int size, bool saturate, bool overflow)
{
  int v_i, real_cnt = cnt > size ? size : cnt;
  /* All-ones when the sign bit of VAL is set, else zero.  */
  bu64 sgn = ~((val >> (size - 1)) - 1);
  int mask_cnt = size - 1;
  bu64 masked, new_val = val;
  bu64 mask = ~0;
  mask <<= mask_cnt;		/* Mask covering the sign bit and above.  */
  sgn <<= mask_cnt;
  masked = val & mask;
  if (real_cnt > 16)
    new_val <<= 16, real_cnt -= 16;
  new_val <<= real_cnt;
  masked = new_val & mask;
  /* If an operation would otherwise cause a positive value to overflow
     and become negative, instead, saturation limits the result to the
     maximum positive value for the size register being used.
     Conversely, if an operation would otherwise cause a negative value
     to overflow and become positive, saturation limits the result to the
     maximum negative value for the register size.
     However, it's a little more complex than looking at sign bits, we need
     to see if we are shifting the sign information away...  */
  if (((val << cnt) >> size) == 0
      || (((val << cnt) >> size) == ~(~0 << cnt)
	  && ((new_val >> (size - 1)) & 0x1)))
    v_i = 0;
  else
    v_i = 1;
  switch (size)
    {
    case 16:
      new_val &= 0xFFFF;
      if (saturate && (v_i || ((val >> (size - 1)) != (new_val >> (size - 1)))))
	{
	  new_val = (val >> (size - 1)) == 0 ? 0x7fff : 0x8000;
	  v_i = 1;
	}
      break;
    case 32:
      new_val &= 0xFFFFFFFF;
      masked &= 0xFFFFFFFF;
      sgn &= 0xFFFFFFFF;
      if (saturate
	  && (v_i
	      || (sgn != masked)
	      || (!sgn && new_val == 0 && val != 0)))
	{
	  new_val = sgn == 0 ? 0x7fffffff : 0x80000000;
	  v_i = 1;
	}
      break;
    case 40:
      new_val &= 0xFFFFFFFFFFull;
      masked &= 0xFFFFFFFFFFull;
      break;
    default:
      illegal_instruction (cpu);
      break;
    }
  SET_ASTATREG (an, new_val >> (size - 1));
  SET_ASTATREG (az, new_val == 0);
  if (size != 40)
    {
      SET_ASTATREG (v, overflow && v_i);
      if (overflow && v_i)
	SET_ASTATREG (vs, 1);
    }
  return new_val;
}
/* Extract 32 bits starting ALN bytes into the 64-bit value H:L.  The
   ALN == 0 case is special-cased because a 32-bit shift by 32 would be
   undefined.  */
static bu32
algn (bu32 l, bu32 h, bu32 aln)
{
  unsigned int bits;

  if (aln == 0)
    return l;
  bits = 8 * aln;
  return (l >> bits) | (h << (32 - bits));
}
/* Clamp VAL to the signed 16-bit range, setting *OVERFLOW (when
   non-NULL) if clamping occurred.  Returns the low 16 bits.  */
static bu32
saturate_s16 (bu64 val, bu32 *overflow)
{
  bs64 sval = (bs64) val;

  if (sval < -0x8000ll)
    {
      if (overflow)
	*overflow = 1;
      return 0x8000;
    }
  if (sval > 0x7fff)
    {
      if (overflow)
	*overflow = 1;
      return 0x7fff;
    }
  return val & 0xffff;
}
/* Rotate the 40-bit value VAL through CC by SHIFT bits (positive = left,
   negative = right; clamped to +/-40).  *CC supplies the bit rotated in
   and receives the bit rotated out.  The shift == nbits and shift == 1
   special cases avoid pulling bits above bit 39 of the 64-bit container
   into the result.  */
static bu40
rot40 (bu40 val, int shift, bu32 *cc)
{
  const int nbits = 40;
  bu40 ret;
  shift = CLAMP (shift, -nbits, nbits);
  if (shift == 0)
    return val;
  /* Reduce everything to rotate left.  */
  if (shift < 0)
    shift += nbits + 1;
  ret = shift == nbits ? 0 : val << shift;
  ret |= shift == 1 ? 0 : val >> ((nbits + 1) - shift);
  ret |= (bu40)*cc << (shift - 1);	/* Old CC enters the rotation.  */
  *cc = (val >> (nbits - shift)) & 1;	/* Last bit rotated out becomes CC.  */
  return ret;
}
/* Rotate the 32-bit value VAL through CC by SHIFT bits (positive = left,
   negative = right; clamped to +/-32).  *CC supplies the bit rotated in
   and receives the bit rotated out.  The shift == nbits and shift == 1
   special cases avoid 32-bit shifts by 32, which would be undefined.  */
static bu32
rot32 (bu32 val, int shift, bu32 *cc)
{
  const int nbits = 32;
  bu32 ret;
  shift = CLAMP (shift, -nbits, nbits);
  if (shift == 0)
    return val;
  /* Reduce everything to rotate left.  */
  if (shift < 0)
    shift += nbits + 1;
  ret = shift == nbits ? 0 : val << shift;
  ret |= shift == 1 ? 0 : val >> ((nbits + 1) - shift);
  ret |= (bu32)*cc << (shift - 1);	/* Old CC enters the rotation.  */
  *cc = (val >> (nbits - shift)) & 1;	/* Last bit rotated out becomes CC.  */
  return ret;
}
/* 32-bit add A + B with ASTAT updates (AN/V/VS/AZ, v_internal; AC0 only
   when CARRY).  With SAT, clamp to 0x7fffffff/0x80000000 on overflow.  */
static bu32
add32 (SIM_CPU *cpu, bu32 a, bu32 b, int carry, int sat)
{
  int flgs = (a >> 31) & 1;
  int flgo = (b >> 31) & 1;
  bu32 v = a + b;
  int flgn = (v >> 31) & 1;
  /* Signed overflow: operands agree in sign but the result differs.  */
  int overflow = (flgs ^ flgn) & (flgo ^ flgn);
  if (sat && overflow)
    {
      v = (bu32)1 << 31;
      /* Saturate to INT32_MAX when the (wrong) result went negative.  */
      if (flgn)
	v -= 1;
      flgn = (v >> 31) & 1;
    }
  SET_ASTATREG (an, flgn);
  if (overflow)
    SET_ASTATREG (vs, 1);
  SET_ASTATREG (v, overflow);
  ASTATREG (v_internal) |= overflow;
  SET_ASTATREG (az, v == 0);
  if (carry)
    /* Carry out of bit 31: a + b > 0xffffffff iff b > ~a.  */
    SET_ASTATREG (ac0, ~a < b);
  return v;
}
/* 32-bit subtract A - B with ASTAT updates.  With SAT, saturate on
   overflow.  With PARALLEL (dual/parallel issue), flags are only ever
   set, never cleared -- hence every update below is guarded.  */
static bu32
sub32 (SIM_CPU *cpu, bu32 a, bu32 b, int carry, int sat, int parallel)
{
  int flgs = (a >> 31) & 1;
  int flgo = (b >> 31) & 1;
  bu32 v = a - b;
  int flgn = (v >> 31) & 1;
  /* Signed overflow: operand signs differ and result sign differs from A.  */
  int overflow = (flgs ^ flgo) & (flgn ^ flgs);
  if (sat && overflow)
    {
      v = (bu32)1 << 31;
      /* Saturate to INT32_MAX when the (wrong) result went negative.  */
      if (flgn)
	v -= 1;
      flgn = (v >> 31) & 1;
    }
  if (!parallel || flgn)
    SET_ASTATREG (an, flgn);
  if (overflow)
    SET_ASTATREG (vs, 1);
  if (!parallel || overflow)
    SET_ASTATREG (v, overflow);
  if (!parallel || overflow)
    ASTATREG (v_internal) |= overflow;
  if (!parallel || v == 0)
    SET_ASTATREG (az, v == 0);
  if (carry && (!parallel || b <= a))
    SET_ASTATREG (ac0, b <= a);	/* No borrow iff b <= a.  */
  return v;
}
/* 16-bit add A + B with optional scaling: SCALE 0 = none, 2 = (ASR)
   halve, 3 = (ASL) double; other scales raise an exception.  With SAT,
   clamp to signed 16-bit.  The flag outputs CARRY/OVERFL/ZERO/NEG are
   optional and OR-accumulated so dual ops can merge both halves.
   Returns the low 16 bits of the result.  */
static bu32
add16 (SIM_CPU *cpu, bu16 a, bu16 b, bu32 *carry, bu32 *overfl,
       bu32 *zero, bu32 *neg, int sat, int scale)
{
  int flgs = (a >> 15) & 1;
  int flgo = (b >> 15) & 1;
  bs64 v = (bs16)a + (bs16)b;
  int flgn = (v >> 15) & 1;
  int overflow = (flgs ^ flgn) & (flgo ^ flgn);
  switch (scale)
    {
    case 0:
      break;
    case 2:
      /* (ASR) -- average the operands, preserving the sign bits and
	 rounding via the carry of the dropped LSBs.  */
      v = (a >> 1) + (a & 0x8000) + (b >> 1) + (b & 0x8000)
	  + (((a & 1) + (b & 1)) >> 1);
      v |= -(v & 0x8000);	/* Sign-extend from 16 bits.  */
      break;
    case 3:
      /* (ASL) */
      v = (v << 1);
      break;
    default:
      illegal_instruction (cpu);
    }
  /* Recompute flags after scaling.  */
  flgn = (v >> 15) & 1;
  overflow = (flgs ^ flgn) & (flgo ^ flgn);
  if (v > (bs64)0xffff)
    overflow = 1;
  if (sat)
    v = saturate_s16 (v, 0);
  if (neg)
    *neg |= (v >> 15) & 1;
  if (overfl)
    *overfl |= overflow;
  if (zero)
    *zero |= (v & 0xFFFF) == 0;
  if (carry)
    *carry |= ((bu16)~a < (bu16)b);
  return v & 0xffff;
}
/* 16-bit subtract A - B with optional scaling: SCALE 0 = none,
   2 = (ASR) halve, 3 = (ASL) double; other scales raise an exception.
   With SAT, clamp to signed 16-bit.  CARRY/OVERFL/ZERO/NEG are optional,
   OR-accumulated flag outputs (dual ops merge both halves).  Returns the
   (possibly sign-extended) result; callers take the low 16 bits.  */
static bu32
sub16 (SIM_CPU *cpu, bu16 a, bu16 b, bu32 *carry, bu32 *overfl,
       bu32 *zero, bu32 *neg, int sat, int scale)
{
  int flgs = (a >> 15) & 1;
  int flgo = (b >> 15) & 1;
  bs64 v = (bs16)a - (bs16)b;
  int flgn = (v >> 15) & 1;
  int overflow = (flgs ^ flgo) & (flgn ^ flgs);
  switch (scale)
    {
    case 0:
      break;
    case 2:
      /* (ASR) */
      if (sat)
	v = ((a >> 1) + (a & 0x8000)) - ( (b >> 1) + (b & 0x8000))
	    + (((a & 1)-(b & 1)));
      else
	{
	  v = ((v & 0xFFFF) >> 1);
	  /* The & of !-normalized flags below is a logical AND on 0/1
	     values: fix up the sign bit of the halved result.  */
	  if ((!flgs & !flgo & flgn)
	      || (flgs & !flgo & !flgn)
	      || (flgs & flgo & flgn)
	      || (flgs & !flgo & flgn))
	    v |= 0x8000;
	}
      v |= -(v & 0x8000);	/* Sign-extend from 16 bits.  */
      flgn = (v >> 15) & 1;
      overflow = (flgs ^ flgo) & (flgn ^ flgs);
      break;
    case 3:
      /* (ASL) */
      v <<= 1;
      if (v > (bs64)0x7fff || v < (bs64)-0xffff)
	overflow = 1;
      break;
    default:
      illegal_instruction (cpu);
    }
  if (sat)
    {
      v = saturate_s16 (v, 0);
    }
  if (neg)
    *neg |= (v >> 15) & 1;
  if (zero)
    *zero |= (v & 0xFFFF) == 0;
  if (overfl)
    *overfl |= overflow;
  if (carry)
    *carry |= (bu16)b <= (bu16)a;	/* No borrow iff b <= a.  */
  return v;
}
/* Signed 32-bit minimum of A and B; updates AZ/AN and clears V.  */
static bu32
min32 (SIM_CPU *cpu, bu32 a, bu32 b)
{
  bu32 result = ((bs32) a > (bs32) b) ? b : a;

  setflags_nz (cpu, result);
  SET_ASTATREG (v, 0);
  return result;
}
/* Signed 32-bit maximum of A and B; updates AZ/AN and clears V.  */
static bu32
max32 (SIM_CPU *cpu, bu32 a, bu32 b)
{
  bu32 result = ((bs32) a < (bs32) b) ? b : a;

  setflags_nz (cpu, result);
  SET_ASTATREG (v, 0);
  return result;
}
/* Dual 16-bit signed minimum: each half of the result is the smaller of
   the corresponding halves of A and B.  Updates AZ/AN per-half, clears V.  */
static bu32
min2x16 (SIM_CPU *cpu, bu32 a, bu32 b)
{
  bu32 result = a;

  /* Low halves.  */
  if ((bs16) a > (bs16) b)
    result = (result & 0xFFFF0000) | (b & 0xFFFF);
  /* High halves.  */
  if ((bs16) (a >> 16) > (bs16) (b >> 16))
    result = (result & 0xFFFF) | (b & 0xFFFF0000);

  setflags_nz_2x16 (cpu, result);
  SET_ASTATREG (v, 0);
  return result;
}
/* Dual 16-bit signed maximum: each half of the result is the larger of
   the corresponding halves of A and B.  Updates AZ/AN per-half, clears V.  */
static bu32
max2x16 (SIM_CPU *cpu, bu32 a, bu32 b)
{
  bu32 result = a;

  /* Low halves.  */
  if ((bs16) a < (bs16) b)
    result = (result & 0xFFFF0000) | (b & 0xFFFF);
  /* High halves.  */
  if ((bs16) (a >> 16) < (bs16) (b >> 16))
    result = (result & 0xFFFF) | (b & 0xFFFF0000);

  setflags_nz_2x16 (cpu, result);
  SET_ASTATREG (v, 0);
  return result;
}
/* Compute (A + B) << SHIFT, tracking overflow from both the addition and
   every shift step in ASTAT v_internal, then publish AZ/AN/V/VS.  */
static bu32
add_and_shift (SIM_CPU *cpu, bu32 a, bu32 b, int shift)
{
  int v;
  ASTATREG (v_internal) = 0;
  v = add32 (cpu, a, b, 0, 0);
  while (shift-- > 0)
    {
      /* A shift step overflows when bits 31:30 differ (01 or 10).  */
      int x = (v >> 30) & 0x3;
      if (x == 1 || x == 2)
	ASTATREG (v_internal) = 1;
      v <<= 1;
    }
  SET_ASTATREG (az, v == 0);
  SET_ASTATREG (an, v & 0x80000000);
  SET_ASTATREG (v, ASTATREG (v_internal));
  if (ASTATREG (v))
    SET_ASTATREG (vs, 1);
  return v;
}
/* XOR-reduce the low 40 bits of (ACC0 & ACC1) down to a single bit,
   i.e. the parity of the bitwise AND of the two accumulators.  */
static bu32
xor_reduce (bu64 acc0, bu64 acc1)
{
  bu32 parity = 0;
  int bit;

  for (bit = 0; bit < 40; ++bit)
    parity ^= (bu32) ((acc0 >> bit) & (acc1 >> bit) & 1);

  return parity;
}
/* DIVS ( Dreg, Dreg ) ;
Initialize for DIVQ. Set the AQ status bit based on the signs of
the 32-bit dividend and the 16-bit divisor. Left shift the dividend
one bit. Copy AQ into the dividend LSB. */
static bu32
divs (SIM_CPU *cpu, bu32 pquo, bu16 divisor)
{
  bu16 r = pquo >> 16;		/* High half holds the partial remainder.  */
  int aq;

  aq = (r ^ divisor) >> 15;	/* Extract msb's and compute quotient bit.  */

  SET_ASTATREG (aq, aq);	/* Update global quotient state.  */

  pquo <<= 1;
  pquo |= aq;
  /* Cast before shifting: R would otherwise promote to signed int, and
     R << 17 overflows int (undefined behavior) whenever bit 14 or 15 of
     R is set.  */
  pquo = (pquo & 0x1FFFF) | ((bu32) r << 17);
  return pquo;
}
/* DIVQ ( Dreg, Dreg ) ;
Based on AQ status bit, either add or subtract the divisor from
the dividend. Then set the AQ status bit based on the MSBs of the
32-bit dividend and the 16-bit divisor. Left shift the dividend one
bit. Copy the logical inverse of AQ into the dividend LSB. */
static bu32
divq (SIM_CPU *cpu, bu32 pquo, bu16 divisor)
{
  unsigned short af = pquo >> 16;	/* High half: partial remainder.  */
  unsigned short r;
  int aq;

  /* Add or subtract the divisor depending on the previous quotient bit.  */
  if (ASTATREG (aq))
    r = divisor + af;
  else
    r = af - divisor;

  aq = (r ^ divisor) >> 15;	/* Extract msb's and compute quotient bit.  */

  SET_ASTATREG (aq, aq);	/* Update global quotient state.  */

  pquo <<= 1;
  pquo |= !aq;
  /* Cast before shifting: R would otherwise promote to signed int, and
     R << 17 overflows int (undefined behavior) whenever bit 14 or 15 of
     R is set.  */
  pquo = (pquo & 0x1FFFF) | ((bu32) r << 17);
  return pquo;
}
/* ONES ( Dreg ) ;
Count the number of bits set to 1 in the 32bit value. */
static bu32
ones (bu32 val)
{
  /* Population count via Kernighan's method (clear the lowest set bit
     each iteration).  This also drops the original `1 << i' expression,
     which is undefined behavior at i == 31 on 32-bit int.  */
  bu32 cnt = 0;

  while (val)
    {
      val &= val - 1;
      ++cnt;
    }

  return cnt;
}
/* Group 7 registers (USP/SEQSTAT/SYSCFG/RETI/RETX/RETN/RETE/EMUDAT)
   require supervisor mode to access.  */
static void
reg_check_sup (SIM_CPU *cpu, int grp, int reg)
{
  if (grp == 7)
    cec_require_supervisor (cpu);
}
/* Write VALUE to register (GRP, REG), handling the special cases:
   ASTAT is split into individual flag bits, CYCLES2 writes go to its
   shadow, SEQSTAT is read-only, EMUDAT writes go to the output side,
   LT0/LT1 clear their LSB, and A0.X/A1.X keep only 8 bits.  */
static void
reg_write (SIM_CPU *cpu, int grp, int reg, bu32 value)
{
  bu32 *whichreg;
  /* ASTAT is special!  */
  if (grp == 4 && reg == 6)
    {
      SET_ASTAT (value);
      return;
    }
  /* Check supervisor after get_allreg() so exception order is correct.  */
  whichreg = get_allreg (cpu, grp, reg);
  reg_check_sup (cpu, grp, reg);
  if (whichreg == &CYCLES2REG)
    /* Writes to CYCLES2 goes to the shadow.  */
    whichreg = &CYCLES2SHDREG;
  else if (whichreg == &SEQSTATREG)
    /* Register is read only -- discard writes.  */
    return;
  else if (whichreg == &EMUDAT_INREG)
    /* Writes to EMUDAT goes to the output.  */
    whichreg = &EMUDAT_OUTREG;
  else if (whichreg == &LTREG (0) || whichreg == &LTREG (1))
    /* Writes to LT clears LSB automatically.  */
    value &= ~0x1;
  else if (whichreg == &AXREG (0) || whichreg == &AXREG (1))
    value &= 0xFF;
  TRACE_REGISTER (cpu, "wrote %s = %#x", get_allreg_name (grp, reg), value);
  *whichreg = value;
}
/* Read register (GRP, REG), handling the special cases: ASTAT is
   assembled from individual flag bits, a read of CYCLES reloads CYCLES2
   from its shadow, and A0.X/A1.X are sign-extended from 8 bits.  */
static bu32
reg_read (SIM_CPU *cpu, int grp, int reg)
{
  bu32 *whichreg;
  bu32 value;
  /* ASTAT is special!  */
  if (grp == 4 && reg == 6)
    return ASTAT;
  /* Check supervisor after get_allreg() so exception order is correct.  */
  whichreg = get_allreg (cpu, grp, reg);
  reg_check_sup (cpu, grp, reg);
  value = *whichreg;
  if (whichreg == &CYCLESREG)
    /* Reads of CYCLES reloads CYCLES2 from the shadow.  */
    SET_CYCLES2REG (CYCLES2SHDREG);
  else if ((whichreg == &AXREG (1) || whichreg == &AXREG (0)) && (value & 0x80))
    /* Sign extend if necessary.  */
    value |= 0xFFFFFF00;
  return value;
}
/* Return the 64-bit cycle counter: CYCLES2 shadow in the high word,
   CYCLES in the low word.  */
static bu64
get_extended_cycles (SIM_CPU *cpu)
{
  return ((bu64)CYCLES2SHDREG << 32) | CYCLESREG;
}
/* We can't re-use sim_events_time() because the CYCLES registers may be
written/cleared/reset/stopped/started at any time by software. */
/* Advance the cycle counter by INC, updating the CYCLES2 shadow when the
   count crosses a 32-bit boundary.  Counting is gated by SYSCFG.CCEN.  */
static void
cycles_inc (SIM_CPU *cpu, bu32 inc)
{
  bu64 cycles;
  bu32 cycles2;
  if (!(SYSCFGREG & SYSCFG_CCEN))
    return;
  cycles = get_extended_cycles (cpu) + inc;
  SET_CYCLESREG (cycles);
  cycles2 = cycles >> 32;
  if (CYCLES2SHDREG != cycles2)
    SET_CYCLES2SHDREG (cycles2);
}
/* Return accumulator WHICH as a raw 40-bit value (A.X in bits 39:32,
   A.W in bits 31:0; no sign extension).  */
static bu64
get_unextended_acc (SIM_CPU *cpu, int which)
{
  return ((bu64)(AXREG (which) & 0xff) << 32) | AWREG (which);
}
/* Return accumulator WHICH sign-extended from 40 bits to 64 bits.  */
static bu64
get_extended_acc (SIM_CPU *cpu, int which)
{
  bu64 acc = AXREG (which);
  /* Sign extend accumulator values before adding.  */
  if (acc & 0x80)
    acc |= -0x80;
  else
    acc &= 0xFF;
  acc <<= 32;
  acc |= AWREG (which);
  return acc;
}
/* Perform a multiplication of D registers SRC0 and SRC1, sign- or
zero-extending the result to 64 bit. H0 and H1 determine whether the
high part or the low part of the source registers is used. Store 1 in
*PSAT if saturation occurs, 0 otherwise. */
static bu64
decode_multfunc (SIM_CPU *cpu, int h0, int h1, int src0, int src1, int mmod,
		 int MM, bu32 *psat)
{
  bu32 s0 = DREG (src0), s1 = DREG (src1);
  bu32 sgn0, sgn1;
  bu32 val;
  bu64 val1;
  /* Select the requested 16-bit halves of each source.  */
  if (h0)
    s0 >>= 16;
  if (h1)
    s1 >>= 16;
  s0 &= 0xffff;
  s1 &= 0xffff;
  /* 0xFFFF0000 when the half is negative, else 0 -- OR-ing it in
     sign-extends the half to 32 bits.  */
  sgn0 = -(s0 & 0x8000);
  sgn1 = -(s1 & 0x8000);
  if (MM)
    /* Mixed mode: only operand 0 is treated as signed.  */
    s0 |= sgn0;
  else
    switch (mmod)
      {
      case 0:
      case M_S2RND:
      case M_T:
      case M_IS:
      case M_ISS2:
      case M_IH:
      case M_W32:
	/* Signed modes: sign-extend both operands.  */
	s0 |= sgn0;
	s1 |= sgn1;
	break;
      case M_FU:
      case M_IU:
      case M_TFU:
	/* Unsigned modes: leave both operands zero-extended.  */
	break;
      default:
	illegal_instruction (cpu);
      }
  val = s0 * s1;
  /* Perform shift correction if appropriate for the mode.  */
  *psat = 0;
  if (!MM && (mmod == 0 || mmod == M_T || mmod == M_S2RND || mmod == M_W32))
    {
      /* Fractional modes shift left by one; -1.0 * -1.0 (0x40000000)
	 would overflow, so saturate it instead.  */
      if (val == 0x40000000)
	{
	  if (mmod == M_W32)
	    val = 0x7fffffff;
	  else
	    val = 0x80000000;
	  *psat = 1;
	}
      else
	val <<= 1;
    }
  val1 = val;
  /* In signed modes, sign extend.  */
  if (is_macmod_signed (mmod) || MM)
    val1 |= -(val1 & 0x80000000);
  if (*psat)
    val1 &= 0xFFFFFFFFull;
  return val1;
}
/* Clamp VAL to the signed 40-bit range; *V is set to 1 when clamping
   occurred, 0 otherwise.  */
static bu40
saturate_s40_astat (bu64 val, bu32 *v)
{
  const bs64 min40 = -((bs64)1 << 39);
  const bs64 max40 = ((bs64)1 << 39) - 1;
  bs64 sval = (bs64) val;

  if (sval < min40)
    {
      *v = 1;
      return (bu40) min40;
    }
  if (sval > max40)
    {
      *v = 1;
      return (bu40) max40;
    }
  *v = 0;			/* No overflow.  */
  return val;
}
/* Clamp VAL to the signed 40-bit range, discarding the overflow flag.  */
static bu40
saturate_s40 (bu64 val)
{
  bu32 scratch;

  return saturate_s40_astat (val, &scratch);
}
/* Clamp VAL to the signed 32-bit range, setting *OVERFLOW (when
   non-NULL) if clamping occurred.  */
static bu32
saturate_s32 (bu64 val, bu32 *overflow)
{
  bs64 sval = (bs64) val;

  if (sval < -0x80000000ll)
    {
      if (overflow)
	*overflow = 1;
      return 0x80000000;
    }
  if (sval > 0x7fffffff)
    {
      if (overflow)
	*overflow = 1;
      return 0x7fffffff;
    }
  return val;
}
/* Clamp VAL to the unsigned 32-bit range, setting *OVERFLOW (when
   non-NULL) if clamping occurred.  */
static bu32
saturate_u32 (bu64 val, bu32 *overflow)
{
  if (val <= 0xffffffff)
    return val;
  if (overflow)
    *overflow = 1;
  return 0xffffffff;
}
/* Clamp VAL to the unsigned 16-bit range, setting *OVERFLOW (when
   non-NULL) if clamping occurred.  */
static bu32
saturate_u16 (bu64 val, bu32 *overflow)
{
  if (val <= 0xffff)
    return val;
  if (overflow)
    *overflow = 1;
  return 0xffff;
}
/* Round the low 16 bits out of VAL (round-to-nearest; a tie rounds up
   only when bit 16 is set), preserving the top 16 bits across the
   shift.  */
static bu64
rnd16 (bu64 val)
{
  bu64 frac = val & 0xffff;

  /* FIXME: Should honour rounding mode.  */
  if (frac > 0x8000 || (frac == 0x8000 && (val & 0x10000)))
    val += 0x8000;

  /* Keep the top 16 bits in place while shifting the rest down.  */
  return (val >> 16) | (val & 0xffff000000000000ull);
}
/* Truncate the low 16 bits out of VAL, preserving the top 16 bits
   across the shift.  */
static bu64
trunc16 (bu64 val)
{
  return (val >> 16) | (val & 0xffff000000000000ull);
}
/* Count the number of redundant sign bits in VAL below its sign bit at
   position SIZE-1 (i.e. how far VAL could be shifted left without
   losing information).  */
static int
signbits (bu64 val, int size)
{
  bu64 mask = (bu64)1 << (size - 1);
  bu64 sign = val & mask;
  int count = 0;

  for (mask >>= 1, sign >>= 1; mask != 0; mask >>= 1, sign >>= 1)
    {
      if ((val & mask) != sign)
	break;
      count++;
    }

  /* NOTE(review): 40 bit inputs report the count relative to a 32 bit
     value — presumably matching hardware SIGNBITS semantics; confirm
     against the programming reference.  */
  if (size == 40)
    count -= 8;

  return count;
}
/* Extract a 16 or 32 bit value from a 64 bit multiplication result.
   These 64 bits must be sign- or zero-extended properly from the source
   we want to extract, either a 32 bit multiply or a 40 bit accumulator.
   MMOD selects rounding/truncation/scaling; MM forces signed handling
   in the otherwise-unsigned modes.  *OVERFLOW is set when the extracted
   value had to be saturated.  */
static bu32
extract_mult (SIM_CPU *cpu, bu64 res, int mmod, int MM,
	      int fullword, bu32 *overflow)
{
  if (fullword)
    switch (mmod)
      {
      case 0:
      case M_IS:
	return saturate_s32 (res, overflow);
      case M_IU:
	/* Mixed-mode treats the result as signed.  */
	if (MM)
	  return saturate_s32 (res, overflow);
	return saturate_u32 (res, overflow);
      case M_FU:
	if (MM)
	  return saturate_s32 (res, overflow);
	return saturate_u32 (res, overflow);
      case M_S2RND:
      case M_ISS2:
	/* Scale up by one bit before saturating.  */
	return saturate_s32 (res << 1, overflow);
      default:
	illegal_instruction (cpu);
      }
  else
    switch (mmod)
      {
      case 0:
      case M_W32:
      case M_IH:
	/* Round away the low 16 bits.  */
	return saturate_s16 (rnd16 (res), overflow);
      case M_IS:
	return saturate_s16 (res, overflow);
      case M_FU:
	if (MM)
	  return saturate_s16 (rnd16 (res), overflow);
	return saturate_u16 (rnd16 (res), overflow);
      case M_IU:
	if (MM)
	  return saturate_s16 (res, overflow);
	return saturate_u16 (res, overflow);
      case M_T:
	/* Truncate away the low 16 bits.  */
	return saturate_s16 (trunc16 (res), overflow);
      case M_TFU:
	if (MM)
	  return saturate_s16 (trunc16 (res), overflow);
	return saturate_u16 (trunc16 (res), overflow);
      case M_S2RND:
	/* Scale up by one bit, then round.  */
	return saturate_s16 (rnd16 (res << 1), overflow);
      case M_ISS2:
	return saturate_s16 (res << 1, overflow);
      default:
	illegal_instruction (cpu);
      }
}
/* Execute one MAC function on accumulator WHICH.  OP selects what to do
   with the product from decode_multfunc (0: A = product, 1: A += product,
   2: A -= product, 3: no multiply, just extract from the accumulator).
   H0/H1/SRC0/SRC1 select the 16 bit multiplier operands; MMOD/MM select
   the MAC mode; FULLWORD extracts 32 instead of 16 bits.  The 40 bit
   accumulator and its ASTAT AV/AVS bits are updated as a side effect;
   *OVERFLOW and *NEG are updated for the caller.  */
static bu32
decode_macfunc (SIM_CPU *cpu, int which, int op, int h0, int h1, int src0,
		int src1, int mmod, int MM, int fullword, bu32 *overflow,
		bu32 *neg)
{
  bu64 acc;
  bu32 sat = 0, tsat, ret;

  /* Sign extend accumulator if necessary, otherwise unsigned.  */
  if (is_macmod_signed (mmod) || MM)
    acc = get_extended_acc (cpu, which);
  else
    acc = get_unextended_acc (cpu, which);

  if (op != 3)
    {
      bu8 sgn0 = (acc >> 31) & 1;
      bu8 sgn40 = (acc >> 39) & 1;
      bu40 nosat_acc;

      /* This can't saturate, so we don't keep track of the sat flag.  */
      bu64 res = decode_multfunc (cpu, h0, h1, src0, src1, mmod,
				  MM, &tsat);

      /* Perform accumulation.  */
      switch (op)
	{
	case 0:
	  acc = res;
	  sgn0 = (acc >> 31) & 1;
	  break;
	case 1:
	  acc = acc + res;
	  break;
	case 2:
	  acc = acc - res;
	  break;
	}

      /* Remember the unsaturated value for the overflow computation
	 below.  */
      nosat_acc = acc;

      /* Saturate.  */
      switch (mmod)
	{
	case 0:
	case M_T:
	case M_IS:
	case M_ISS2:
	case M_S2RND:
	  /* Signed modes clamp to the signed 40 bit range.  */
	  if ((bs64)acc < -((bs64)1 << 39))
	    acc = -((bu64)1 << 39), sat = 1;
	  else if ((bs64)acc > 0x7fffffffffll)
	    acc = 0x7fffffffffull, sat = 1;
	  break;
	case M_TFU:
	  if (MM)
	    {
	      /* Mixed-mode: signed 40 bit clamp.  */
	      if ((bs64)acc < -((bs64)1 << 39))
		acc = -((bu64)1 << 39), sat = 1;
	      if ((bs64)acc > 0x7FFFFFFFFFll)
		acc = 0x7FFFFFFFFFull, sat = 1;
	    }
	  else
	    {
	      /* Unsigned: clamp to [0, 2^40 - 1].  */
	      if ((bs64)acc < 0)
		acc = 0, sat = 1;
	      if ((bs64)acc > 0xFFFFFFFFFFull)
		acc = 0xFFFFFFFFFFull, sat = 1;
	    }
	  break;
	case M_IU:
	  if (!MM && acc & 0x8000000000000000ull)
	    acc = 0x0, sat = 1;
	  if (!MM && acc > 0xFFFFFFFFFFull)
	    acc = 0xFFFFFFFFFFull, sat = 1;
	  /* Mixed-mode wraps rather than saturates.  */
	  if (MM && acc > 0xFFFFFFFFFFull)
	    acc &= 0xFFFFFFFFFFull;
	  /* Re-extend the (possibly wrapped) 40 bit value.  */
	  if (acc & 0x8000000000ull)
	    acc |= 0xffffff0000000000ull;
	  break;
	case M_FU:
	  if (MM)
	    {
	      if ((bs64)acc < -((bs64)1 << 39))
		acc = -((bu64)1 << 39), sat = 1;
	      if ((bs64)acc > 0x7FFFFFFFFFll)
		acc = 0x7FFFFFFFFFull, sat = 1;
	      else if (acc & 0x8000000000ull)
		acc |= 0xffffff0000000000ull;
	    }
	  else
	    {
	      if ((bs64)acc < 0)
		acc = 0x0, sat = 1;
	      else if ((bs64)acc > (bs64)0xFFFFFFFFFFll)
		acc = 0xFFFFFFFFFFull, sat = 1;
	    }
	  break;
	case M_IH:
	  /* Clamp to the signed 32 bit range.  */
	  if ((bs64)acc < -0x80000000ll)
	    acc = -0x80000000ull, sat = 1;
	  else if ((bs64)acc > 0x7fffffffll)
	    acc = 0x7fffffffull, sat = 1;
	  break;
	case M_W32:
	  /* check max negative value */
	  if (sgn40 && ((acc >> 31) != 0x1ffffffff)
	      && ((acc >> 31) != 0x0))
	    acc = 0x80000000, sat = 1;
	  if (!sat && !sgn40 && ((acc >> 31) != 0x0)
	      && ((acc >> 31) != 0x1ffffffff))
	    acc = 0x7FFFFFFF, sat = 1;
	  acc &= 0xffffffff;
	  if (acc & 0x80000000)
	    acc |= 0xffffffff00000000ull;
	  /* Saturation in the multiplier also counts here.  */
	  if (tsat)
	    sat = 1;
	  break;
	default:
	  illegal_instruction (cpu);
	}

      if (acc & 0x8000000000ull)
	*neg = 1;

      /* Write back the 40 bit accumulator as A<which>.X : A<which>.W.  */
      STORE (AXREG (which), (acc >> 32) & 0xff);
      STORE (AWREG (which), acc & 0xffffffff);
      STORE (ASTATREG (av[which]), sat);
      if (sat)
	STORE (ASTATREG (avs[which]), sat);

      /* Figure out the overflow bit.  */
      if (sat)
	{
	  if (fullword)
	    *overflow = 1;
	  else
	    /* Extract from the unsaturated value so *overflow reflects
	       the pre-saturation result; ret is recomputed below from
	       the saturated accumulator.  */
	    ret = extract_mult (cpu, nosat_acc, mmod, MM, fullword, overflow);
	}
    }

  ret = extract_mult (cpu, acc, mmod, MM, fullword, overflow);

  if (!fullword)
    {
      if (ret & 0x8000)
	*neg = 1;
    }
  else
    {
      if (ret & 0x80000000)
	*neg = 1;
    }

  return ret;
}
/* Return the address of the instruction following PC (of length
   INSN_LEN), taking hardware loops into account: if PC sits at the
   bottom of an active loop, execution continues at the loop top.  */
bu32
hwloop_get_next_pc (SIM_CPU *cpu, bu32 pc, bu32 insn_len)
{
  int lp;

  if (insn_len == 0)
    return pc;

  /* If our PC has reached the bottom of a hardware loop,
     move back up to the top of the hardware loop.  Loop 1 is
     checked before loop 0.  */
  for (lp = 1; lp >= 0; --lp)
    {
      if (LCREG (lp) > 1 && pc == LBREG (lp))
	{
	  BFIN_TRACE_BRANCH (cpu, pc, LTREG (lp), lp, "Hardware loop %i", lp);
	  return LTREG (lp);
	}
    }

  return pc + insn_len;
}
/* Decode the ProgCtrl opcode group: NOP, the return instructions,
   IDLE/CSYNC/SSYNC, CLI/STI, register-indirect jumps and calls,
   RAISE/EXCPT, and TESTSET.  PRGFUNC selects the operation and POPRND
   is its operand (a register number or a 4 bit immediate).  */
static void
decode_ProgCtrl_0 (SIM_CPU *cpu, bu16 iw0, bu32 pc)
{
  /* ProgCtrl
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+
     | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |.prgfunc.......|.poprnd........|
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+  */
  int poprnd = ((iw0 >> ProgCtrl_poprnd_bits) & ProgCtrl_poprnd_mask);
  int prgfunc = ((iw0 >> ProgCtrl_prgfunc_bits) & ProgCtrl_prgfunc_mask);

  TRACE_EXTRACT (cpu, "%s: poprnd:%i prgfunc:%i", __func__, poprnd, prgfunc);

  if (prgfunc == 0 && poprnd == 0)
    {
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_nop);
      TRACE_INSN (cpu, "NOP;");
    }
  else if (prgfunc == 1 && poprnd == 0)
    {
      /* Return from subroutine: branch to RETS.  */
      bu32 newpc = RETSREG;
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_branch);
      TRACE_INSN (cpu, "RTS;");
      IFETCH_CHECK (newpc);
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      BFIN_TRACE_BRANCH (cpu, pc, newpc, -1, "RTS");
      SET_PCREG (newpc);
      BFIN_CPU_STATE.did_jump = true;
      CYCLE_DELAY = 5;
    }
  else if (prgfunc == 1 && poprnd == 1)
    {
      /* Return from interrupt; the CEC handles the branch.  */
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_branch);
      TRACE_INSN (cpu, "RTI;");
      /* Do not do IFETCH_CHECK here -- LSB has special meaning.  */
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      cec_return (cpu, -1);
      CYCLE_DELAY = 5;
    }
  else if (prgfunc == 1 && poprnd == 2)
    {
      /* Return from exception.  */
      bu32 newpc = RETXREG;
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_branch);
      TRACE_INSN (cpu, "RTX;");
      /* XXX: Not sure if this is what the hardware does.  */
      IFETCH_CHECK (newpc);
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      cec_return (cpu, IVG_EVX);
      CYCLE_DELAY = 5;
    }
  else if (prgfunc == 1 && poprnd == 3)
    {
      /* Return from NMI.  */
      bu32 newpc = RETNREG;
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_branch);
      TRACE_INSN (cpu, "RTN;");
      /* XXX: Not sure if this is what the hardware does.  */
      IFETCH_CHECK (newpc);
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      cec_return (cpu, IVG_NMI);
      CYCLE_DELAY = 5;
    }
  else if (prgfunc == 1 && poprnd == 4)
    {
      /* Return from emulation.  */
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_branch);
      TRACE_INSN (cpu, "RTE;");
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      cec_return (cpu, IVG_EMU);
      CYCLE_DELAY = 5;
    }
  else if (prgfunc == 2 && poprnd == 0)
    {
      SIM_DESC sd = CPU_STATE (cpu);
      sim_events *events = STATE_EVENTS (sd);

      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_sync);
      /* XXX: in supervisor mode, utilizes wake up sources
	 in user mode, it's a NOP ...  */
      TRACE_INSN (cpu, "IDLE;");

      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);

      /* Timewarp !  Fast forward to the next scheduled event.  */
      if (events->queue)
	CYCLE_DELAY = events->time_from_event;
      else
	abort (); /* XXX: Should this ever happen ?  */
    }
  else if (prgfunc == 2 && poprnd == 3)
    {
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_sync);
      /* Just NOP it.  */
      TRACE_INSN (cpu, "CSYNC;");
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      CYCLE_DELAY = 10;
    }
  else if (prgfunc == 2 && poprnd == 4)
    {
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_sync);
      /* Just NOP it.  */
      TRACE_INSN (cpu, "SSYNC;");
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);

      /* Really 10+, but no model info for this.  */
      CYCLE_DELAY = 10;
    }
  else if (prgfunc == 2 && poprnd == 5)
    {
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_cec);
      TRACE_INSN (cpu, "EMUEXCPT;");
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      cec_exception (cpu, VEC_SIM_TRAP);
    }
  else if (prgfunc == 3 && poprnd < 8)
    {
      /* Disable interrupts, saving the old mask in Dreg.  */
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_cec);
      TRACE_INSN (cpu, "CLI R%i;", poprnd);
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      SET_DREG (poprnd, cec_cli (cpu));
    }
  else if (prgfunc == 4 && poprnd < 8)
    {
      /* Restore the interrupt mask from Dreg.  */
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_cec);
      TRACE_INSN (cpu, "STI R%i;", poprnd);
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      cec_sti (cpu, DREG (poprnd));
      CYCLE_DELAY = 3;
    }
  else if (prgfunc == 5 && poprnd < 8)
    {
      /* Indirect jump through a Preg.  */
      bu32 newpc = PREG (poprnd);
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_branch);
      TRACE_INSN (cpu, "JUMP (%s);", get_preg_name (poprnd));
      IFETCH_CHECK (newpc);
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      BFIN_TRACE_BRANCH (cpu, pc, newpc, -1, "JUMP (Preg)");
      SET_PCREG (newpc);
      BFIN_CPU_STATE.did_jump = true;
      PROFILE_BRANCH_TAKEN (cpu);
      CYCLE_DELAY = 5;
    }
  else if (prgfunc == 6 && poprnd < 8)
    {
      /* Indirect call through a Preg.  */
      bu32 newpc = PREG (poprnd);
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_branch);
      TRACE_INSN (cpu, "CALL (%s);", get_preg_name (poprnd));
      IFETCH_CHECK (newpc);
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      BFIN_TRACE_BRANCH (cpu, pc, newpc, -1, "CALL (Preg)");
      /* If we're at the end of a hardware loop, RETS is going to be
	 the top of the loop rather than the next instruction.  */
      SET_RETSREG (hwloop_get_next_pc (cpu, pc, 2));
      SET_PCREG (newpc);
      BFIN_CPU_STATE.did_jump = true;
      PROFILE_BRANCH_TAKEN (cpu);
      CYCLE_DELAY = 5;
    }
  else if (prgfunc == 7 && poprnd < 8)
    {
      /* PC-relative indirect call.  */
      bu32 newpc = pc + PREG (poprnd);
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_branch);
      TRACE_INSN (cpu, "CALL (PC + %s);", get_preg_name (poprnd));
      IFETCH_CHECK (newpc);
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      BFIN_TRACE_BRANCH (cpu, pc, newpc, -1, "CALL (PC + Preg)");
      SET_RETSREG (hwloop_get_next_pc (cpu, pc, 2));
      SET_PCREG (newpc);
      BFIN_CPU_STATE.did_jump = true;
      PROFILE_BRANCH_TAKEN (cpu);
      CYCLE_DELAY = 5;
    }
  else if (prgfunc == 8 && poprnd < 8)
    {
      /* PC-relative indirect jump.  */
      bu32 newpc = pc + PREG (poprnd);
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_branch);
      TRACE_INSN (cpu, "JUMP (PC + %s);", get_preg_name (poprnd));
      IFETCH_CHECK (newpc);
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      BFIN_TRACE_BRANCH (cpu, pc, newpc, -1, "JUMP (PC + Preg)");
      SET_PCREG (newpc);
      BFIN_CPU_STATE.did_jump = true;
      PROFILE_BRANCH_TAKEN (cpu);
      CYCLE_DELAY = 5;
    }
  else if (prgfunc == 9)
    {
      /* Raise interrupt/exception number POPRND via the CEC.  */
      int raise = uimm4 (poprnd);
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_cec);
      TRACE_INSN (cpu, "RAISE %s;", uimm4_str (raise));
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      cec_require_supervisor (cpu);
      if (raise == IVG_IVHW)
	cec_hwerr (cpu, HWERR_RAISE_5);
      else
	cec_latch (cpu, raise);
      CYCLE_DELAY = 3; /* XXX: Only if IVG is unmasked.  */
    }
  else if (prgfunc == 10)
    {
      /* Force exception number POPRND.  */
      int excpt = uimm4 (poprnd);
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_cec);
      TRACE_INSN (cpu, "EXCPT %s;", uimm4_str (excpt));
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      cec_exception (cpu, excpt);
      CYCLE_DELAY = 3;
    }
  else if (prgfunc == 11 && poprnd < 6)
    {
      /* Atomic test-and-set: CC = (byte was zero); set its MSB.  */
      bu32 addr = PREG (poprnd);
      bu8 byte;
      PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ProgCtrl_atomic);
      TRACE_INSN (cpu, "TESTSET (%s);", get_preg_name (poprnd));
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      byte = GET_WORD (addr);
      SET_CCREG (byte == 0);
      PUT_BYTE (addr, byte | 0x80);
      /* Also includes memory stalls, but we don't model that.  */
      CYCLE_DELAY = 2;
    }
  else
    illegal_instruction_or_combination (cpu);
}
/* Decode the cache control opcode group: PREFETCH, FLUSHINV, FLUSH and
   IFLUSH, each operating on the address in a Preg, optionally with
   post-increment by one cache line.  */
static void
decode_CaCTRL_0 (SIM_CPU *cpu, bu16 iw0)
{
  /* CaCTRL
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+
     | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |.a.|.op....|.reg.......|
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+  */
  int a = ((iw0 >> CaCTRL_a_bits) & CaCTRL_a_mask);
  int op = ((iw0 >> CaCTRL_op_bits) & CaCTRL_op_mask);
  int reg = ((iw0 >> CaCTRL_reg_bits) & CaCTRL_reg_mask);
  bu32 preg = PREG (reg);
  const char * const sinsn[] = { "PREFETCH", "FLUSHINV", "FLUSH", "IFLUSH", };

  PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_CaCTRL);
  TRACE_EXTRACT (cpu, "%s: a:%i op:%i reg:%i", __func__, a, op, reg);
  TRACE_INSN (cpu, "%s [%s%s];", sinsn[op], get_preg_name (reg), a ? "++" : "");

  if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
    /* None of these can be part of a parallel instruction.  */
    illegal_instruction_combination (cpu);

  /* No cache simulation, so these are (mostly) all NOPs.
     XXX: The hardware takes care of masking to cache lines, but need
     to check behavior of the post increment.  Should we be aligning
     the value to the cache line before adding the cache line size, or
     do we just add the cache line size ?  */
  switch (op)
    {
    case 0: /* PREFETCH */
      mmu_check_cache_addr (cpu, preg, false, false);
      break;
    case 1: /* FLUSHINV */
    case 2: /* FLUSH */
      mmu_check_cache_addr (cpu, preg, true, false);
      break;
    case 3: /* IFLUSH */
      mmu_check_cache_addr (cpu, preg, false, true);
      break;
    }

  if (a)
    SET_PREG (reg, preg + BFIN_L1_CACHE_BYTES);
}
/* Decode a single-register push or pop.  W selects the direction
   (0: pop, 1: push); GRP/REG name the register.  The order of the
   memory access, register update, and SP update below is deliberate so
   a faulting access leaves the insn restartable.  */
static void
decode_PushPopReg_0 (SIM_CPU *cpu, bu16 iw0)
{
  /* PushPopReg
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+
     | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |.W.|.grp.......|.reg.......|
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+  */
  int W = ((iw0 >> PushPopReg_W_bits) & PushPopReg_W_mask);
  int grp = ((iw0 >> PushPopReg_grp_bits) & PushPopReg_grp_mask);
  int reg = ((iw0 >> PushPopReg_reg_bits) & PushPopReg_reg_mask);
  const char *reg_name = get_allreg_name (grp, reg);
  bu32 value;
  bu32 sp = SPREG;

  PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_PushPopReg);
  TRACE_EXTRACT (cpu, "%s: W:%i grp:%i reg:%i", __func__, W, grp, reg);
  TRACE_DECODE (cpu, "%s: reg:%s", __func__, reg_name);

  /* Can't push/pop reserved registers  */
  if (reg_is_reserved (grp, reg))
    illegal_instruction_or_combination (cpu);

  if (W == 0)
    {
      /* Dreg and Preg are not supported by this instruction.  */
      if (grp == 0 || grp == 1)
	illegal_instruction_or_combination (cpu);

      TRACE_INSN (cpu, "%s = [SP++];", reg_name);
      /* Can't pop USP while in userspace.  */
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE
	  || (grp == 7 && reg == 0 && cec_is_user_mode(cpu)))
	illegal_instruction_combination (cpu);
      /* XXX: The valid register check is in reg_write(), so we might
	 incorrectly do a GET_LONG() here ...  */
      value = GET_LONG (sp);
      reg_write (cpu, grp, reg, value);
      /* grp 7 / reg 3 is RETI; the CEC needs to know about it.  */
      if (grp == 7 && reg == 3)
	cec_pop_reti (cpu);

      sp += 4;
    }
  else
    {
      TRACE_INSN (cpu, "[--SP] = %s;", reg_name);
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);

      sp -= 4;
      value = reg_read (cpu, grp, reg);
      /* grp 7 / reg 3 is RETI; the CEC needs to know about it.  */
      if (grp == 7 && reg == 3)
	cec_push_reti (cpu);

      PUT_LONG (sp, value);
    }

  /* Note: SP update must be delayed until after all reads/writes; see
     comments in decode_PushPopMultiple_0() for more info.  */
  SET_SPREG (sp);
}
/* Decode a multiple-register push or pop: [--SP] = (R7:dr, P5:pr) and
   the matching pop form.  D/P say whether Dregs and/or Pregs take part;
   DR/PR give the lowest register number of each range; W selects the
   direction (1: push, 0: pop).  */
static void
decode_PushPopMultiple_0 (SIM_CPU *cpu, bu16 iw0)
{
  /* PushPopMultiple
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+
     | 0 | 0 | 0 | 0 | 0 | 1 | 0 |.d.|.p.|.W.|.dr........|.pr........|
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+  */
  int p = ((iw0 >> PushPopMultiple_p_bits) & PushPopMultiple_p_mask);
  int d = ((iw0 >> PushPopMultiple_d_bits) & PushPopMultiple_d_mask);
  int W = ((iw0 >> PushPopMultiple_W_bits) & PushPopMultiple_W_mask);
  int dr = ((iw0 >> PushPopMultiple_dr_bits) & PushPopMultiple_dr_mask);
  int pr = ((iw0 >> PushPopMultiple_pr_bits) & PushPopMultiple_pr_mask);
  int i;
  bu32 sp = SPREG;

  PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_PushPopMultiple);
  TRACE_EXTRACT (cpu, "%s: d:%i p:%i W:%i dr:%i pr:%i",
		 __func__, d, p, W, dr, pr);

  if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
    illegal_instruction_combination (cpu);

  /* Reject: neither register set selected, a Preg range beyond P5, or
     a range given for a register set that is not selected.  */
  if ((d == 0 && p == 0) || (p && imm5 (pr) > 5)
      || (d && !p && pr) || (p && !d && dr))
    illegal_instruction (cpu);

  if (W == 1)
    {
      /* Push: Dregs first (descending addresses), then Pregs.  */
      if (d && p)
	TRACE_INSN (cpu, "[--SP] = (R7:%i, P5:%i);", dr, pr);
      else if (d)
	TRACE_INSN (cpu, "[--SP] = (R7:%i);", dr);
      else
	TRACE_INSN (cpu, "[--SP] = (P5:%i);", pr);

      if (d)
	for (i = dr; i < 8; i++)
	  {
	    sp -= 4;
	    PUT_LONG (sp, DREG (i));
	  }
      if (p)
	for (i = pr; i < 6; i++)
	  {
	    sp -= 4;
	    PUT_LONG (sp, PREG (i));
	  }

      CYCLE_DELAY = 14;
    }
  else
    {
      /* Pop: reverse order of the push above.  */
      if (d && p)
	TRACE_INSN (cpu, "(R7:%i, P5:%i) = [SP++];", dr, pr);
      else if (d)
	TRACE_INSN (cpu, "(R7:%i) = [SP++];", dr);
      else
	TRACE_INSN (cpu, "(P5:%i) = [SP++];", pr);

      if (p)
	for (i = 5; i >= pr; i--)
	  {
	    SET_PREG (i, GET_LONG (sp));
	    sp += 4;
	  }
      if (d)
	for (i = 7; i >= dr; i--)
	  {
	    SET_DREG (i, GET_LONG (sp));
	    sp += 4;
	  }

      CYCLE_DELAY = 11;
    }

  /* Note: SP update must be delayed until after all reads/writes so that
     if an exception does occur, the insn may be re-executed as the
     SP has not yet changed.  */
  SET_SPREG (sp);
}
/* Conditional register move: IF CC dst = src (or IF !CC, selected by
   T).  D/S give the register groups (Dreg/Preg) of DST and SRC.  */
static void
decode_ccMV_0 (SIM_CPU *cpu, bu16 iw0)
{
  /* ccMV
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+
     | 0 | 0 | 0 | 0 | 0 | 1 | 1 |.T.|.d.|.s.|.dst.......|.src.......|
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+  */
  int s = ((iw0 >> CCmv_s_bits) & CCmv_s_mask);
  int d = ((iw0 >> CCmv_d_bits) & CCmv_d_mask);
  int T = ((iw0 >> CCmv_T_bits) & CCmv_T_mask);
  int src = ((iw0 >> CCmv_src_bits) & CCmv_src_mask);
  int dst = ((iw0 >> CCmv_dst_bits) & CCmv_dst_mask);
  int take = T ? CCREG : ! CCREG;

  PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ccMV);
  TRACE_EXTRACT (cpu, "%s: T:%i d:%i s:%i dst:%i src:%i",
		 __func__, T, d, s, dst, src);
  TRACE_INSN (cpu, "IF %sCC %s = %s;", T ? "" : "! ",
	      get_allreg_name (d, dst),
	      get_allreg_name (s, src));

  if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
    illegal_instruction_combination (cpu);

  if (!take)
    return;

  reg_write (cpu, d, dst, reg_read (cpu, s, src));
}
/* Decode the compare-to-CC group.  OPC 0-4 compare a Dreg/Preg (G) with
   a register or 3 bit immediate (I), signed or unsigned; OPC 5-7
   compare the two accumulators.  CC is always set; the AZ/AN/AC0 ASTAT
   bits are also updated except for pointer compares.  */
static void
decode_CCflag_0 (SIM_CPU *cpu, bu16 iw0)
{
  /* CCflag
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+
     | 0 | 0 | 0 | 0 | 1 |.I.|.opc.......|.G.|.y.........|.x.........|
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+  */
  int x = ((iw0 >> CCflag_x_bits) & CCflag_x_mask);
  int y = ((iw0 >> CCflag_y_bits) & CCflag_y_mask);
  int I = ((iw0 >> CCflag_I_bits) & CCflag_I_mask);
  int G = ((iw0 >> CCflag_G_bits) & CCflag_G_mask);
  int opc = ((iw0 >> CCflag_opc_bits) & CCflag_opc_mask);

  PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_CCflag);
  TRACE_EXTRACT (cpu, "%s: I:%i opc:%i G:%i y:%i x:%i",
		 __func__, I, opc, G, y, x);

  if (opc > 4)
    {
      /* Accumulator compares: A0 vs A1.  */
      bs64 acc0 = get_extended_acc (cpu, 0);
      bs64 acc1 = get_extended_acc (cpu, 1);
      bs64 diff = acc0 - acc1;

      /* These insns encode no operand fields.  */
      if (x != 0 || y != 0)
	illegal_instruction_or_combination (cpu);

      if (opc == 5 && I == 0 && G == 0)
	{
	  TRACE_INSN (cpu, "CC = A0 == A1;");
	  if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	    illegal_instruction_combination (cpu);
	  SET_CCREG (acc0 == acc1);
	}
      else if (opc == 6 && I == 0 && G == 0)
	{
	  TRACE_INSN (cpu, "CC = A0 < A1");
	  if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	    illegal_instruction_combination (cpu);
	  SET_CCREG (acc0 < acc1);
	}
      else if (opc == 7 && I == 0 && G == 0)
	{
	  TRACE_INSN (cpu, "CC = A0 <= A1");
	  if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	    illegal_instruction_combination (cpu);
	  SET_CCREG (acc0 <= acc1);
	}
      else
	illegal_instruction_or_combination (cpu);

      SET_ASTATREG (az, diff == 0);
      SET_ASTATREG (an, diff < 0);
      /* AC0 is the carry of the 40 bit subtraction.  */
      SET_ASTATREG (ac0, (bu40)acc1 <= (bu40)acc0);
    }
  else
    {
      /* Register/immediate compares; opc 0-2 signed, 3-4 unsigned.  */
      int issigned = opc < 3;
      const char *sign = issigned ? "" : " (IU)";
      bu32 srcop = G ? PREG (x) : DREG (x);
      char s = G ? 'P' : 'R';
      bu32 dstop = I ? (issigned ? imm3 (y) : uimm3 (y)) : G ? PREG (y) : DREG (y);
      const char *op;
      char d = G ? 'P' : 'R';
      int flgs = srcop >> 31;
      int flgo = dstop >> 31;
      /* Flags are derived from srcop - dstop as in a SUB insn.  */
      bu32 result = srcop - dstop;
      int cc;
      int flgn = result >> 31;
      /* Signed overflow of the subtraction.  */
      int overflow = (flgs ^ flgo) & (flgn ^ flgs);
      int az = result == 0;
      int ac0 = dstop <= srcop;
      int an;

      if (issigned)
	an = (flgn && !overflow) || (!flgn && overflow);
      else
	an = dstop > srcop;

      switch (opc)
	{
	default: /* Shutup useless gcc warnings.  */
	case 0: /* signed */
	  op = "==";
	  cc = az;
	  break;
	case 1: /* signed */
	  op = "<";
	  cc = an;
	  break;
	case 2: /* signed */
	  op = "<=";
	  cc = an || az;
	  break;
	case 3: /* unsigned */
	  op = "<";
	  cc = !ac0;
	  break;
	case 4: /* unsigned */
	  op = "<=";
	  cc = !ac0 || az;
	  break;
	}

      if (I)
	TRACE_INSN (cpu, "CC = %c%i %s %s%s;", s, x, op,
		    issigned ? imm3_str (y) : uimm3_str (y), sign);
      else
	{
	  TRACE_DECODE (cpu, "%s %c%i:%x %c%i:%x", __func__,
			s, x, srcop, d, y, dstop);
	  TRACE_INSN (cpu, "CC = %c%i %s %c%i%s;", s, x, op, d, y, sign);
	}

      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);

      SET_CCREG (cc);
      /* Pointer compares only touch CC.  */
      if (!G)
	{
	  SET_ASTATREG (az, az);
	  SET_ASTATREG (an, an);
	  SET_ASTATREG (ac0, ac0);
	}
    }
}
/* Moves between CC and a Dreg: R = CC, CC = R, and CC = !CC.  */
static void
decode_CC2dreg_0 (SIM_CPU *cpu, bu16 iw0)
{
  /* CC2dreg
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+
     | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |.op....|.reg.......|
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+  */
  int op = ((iw0 >> CC2dreg_op_bits) & CC2dreg_op_mask);
  int reg = ((iw0 >> CC2dreg_reg_bits) & CC2dreg_reg_mask);

  PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_CC2dreg);
  TRACE_EXTRACT (cpu, "%s: op:%i reg:%i", __func__, op, reg);

  switch (op)
    {
    case 0:
      TRACE_INSN (cpu, "R%i = CC;", reg);
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      SET_DREG (reg, CCREG);
      break;
    case 1:
      TRACE_INSN (cpu, "CC = R%i;", reg);
      if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	illegal_instruction_combination (cpu);
      SET_CCREG (DREG (reg) != 0);
      break;
    case 3:
      if (reg == 0)
	{
	  TRACE_INSN (cpu, "CC = !CC;");
	  if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
	    illegal_instruction_combination (cpu);
	  SET_CCREG (!CCREG);
	  break;
	}
      /* fallthrough */
    default:
      illegal_instruction_or_combination (cpu);
    }
}
/* Moves between CC and a single ASTAT bit, with optional OR/AND/XOR
   combining.  D selects the direction (0: into CC, 1: into ASTAT).  */
static void
decode_CC2stat_0 (SIM_CPU *cpu, bu16 iw0)
{
  /* CC2stat
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+
     | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |.D.|.op....|.cbit..............|
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+  */
  int D = ((iw0 >> CC2stat_D_bits) & CC2stat_D_mask);
  int op = ((iw0 >> CC2stat_op_bits) & CC2stat_op_mask);
  int cbit = ((iw0 >> CC2stat_cbit_bits) & CC2stat_cbit_mask);
  bu32 astat_bit;
  const char * const op_names[] = { "", "|", "&", "^" } ;

  PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_CC2stat);
  TRACE_EXTRACT (cpu, "%s: D:%i op:%i cbit:%i", __func__, D, op, cbit);
  TRACE_INSN (cpu, "%s %s= %s;", D ? astat_names[cbit] : "CC",
	      op_names[op], D ? "CC" : astat_names[cbit]);

  if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
    illegal_instruction_combination (cpu);

  /* CC = CC; is invalid.  */
  if (cbit == 5)
    illegal_instruction (cpu);

  astat_bit = !!(ASTAT & (1 << cbit));

  if (D == 0)
    {
      /* CC op= ASTAT[cbit].  */
      switch (op)
	{
	case 0: SET_CCREG (astat_bit); break;
	case 1: SET_CCREG (CCREG | astat_bit); break;
	case 2: SET_CCREG (CCREG & astat_bit); break;
	case 3: SET_CCREG (CCREG ^ astat_bit); break;
	}
    }
  else
    {
      /* ASTAT[cbit] op= CC.  */
      switch (op)
	{
	case 0: astat_bit = CCREG; break;
	case 1: astat_bit |= CCREG; break;
	case 2: astat_bit &= CCREG; break;
	case 3: astat_bit ^= CCREG; break;
	}
      TRACE_REGISTER (cpu, "wrote ASTAT[%s] = %i", astat_names[cbit], astat_bit);
      SET_ASTAT ((ASTAT & ~(1 << cbit)) | (astat_bit << cbit));
    }
}
/* Conditional PC-relative branch (IF [!]CC JUMP), with an optional
   branch-prediction hint B that only affects the cycle cost.  */
static void
decode_BRCC_0 (SIM_CPU *cpu, bu16 iw0, bu32 pc)
{
  /* BRCC
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+
     | 0 | 0 | 0 | 1 |.T.|.B.|.offset................................|
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+  */
  int B = ((iw0 >> BRCC_B_bits) & BRCC_B_mask);
  int T = ((iw0 >> BRCC_T_bits) & BRCC_T_mask);
  int offset = ((iw0 >> BRCC_offset_bits) & BRCC_offset_mask);
  int taken = T ? CCREG : ! CCREG;
  int pcrel = pcrel10 (offset);

  PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_BRCC);
  TRACE_EXTRACT (cpu, "%s: T:%i B:%i offset:%#x", __func__, T, B, offset);
  TRACE_DECODE (cpu, "%s: pcrel10:%#x", __func__, pcrel);
  TRACE_INSN (cpu, "IF %sCC JUMP %#x%s;", T ? "" : "! ",
	      pcrel, B ? " (bp)" : "");

  if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
    illegal_instruction_combination (cpu);

  if (!taken)
    {
      PROFILE_BRANCH_UNTAKEN (cpu);
      /* Mispredicted (hinted-taken) fall-through costs more.  */
      CYCLE_DELAY = B ? 9 : 1;
      return;
    }

  {
    bu32 newpc = pc + pcrel;
    BFIN_TRACE_BRANCH (cpu, pc, newpc, -1, "Conditional JUMP");
    SET_PCREG (newpc);
    BFIN_CPU_STATE.did_jump = true;
    PROFILE_BRANCH_TAKEN (cpu);
    CYCLE_DELAY = B ? 5 : 9;
  }
}
/* Unconditional short PC-relative jump (JUMP.S) with a 12 bit offset.  */
static void
decode_UJUMP_0 (SIM_CPU *cpu, bu16 iw0, bu32 pc)
{
  /* UJUMP
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+
     | 0 | 0 | 1 | 0 |.offset........................................|
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+  */
  int offset = ((iw0 >> UJump_offset_bits) & UJump_offset_mask);
  int rel = pcrel12 (offset);
  bu32 target = pc + rel;

  PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_UJUMP);
  TRACE_EXTRACT (cpu, "%s: offset:%#x", __func__, offset);
  TRACE_DECODE (cpu, "%s: pcrel12:%#x", __func__, rel);
  TRACE_INSN (cpu, "JUMP.S %#x;", rel);

  if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
    illegal_instruction_combination (cpu);

  BFIN_TRACE_BRANCH (cpu, pc, target, -1, "JUMP.S");

  SET_PCREG (target);
  BFIN_CPU_STATE.did_jump = true;
  PROFILE_BRANCH_TAKEN (cpu);
  CYCLE_DELAY = 5;
}
/* Decode a general register-to-register move.  GS/SRC and GD/DST name
   the source and destination by group and index.  Not every register
   pair is encodable; the chain of checks below whitelists the valid
   combinations and rejects everything else.  */
static void
decode_REGMV_0 (SIM_CPU *cpu, bu16 iw0)
{
  /* REGMV
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+
     | 0 | 0 | 1 | 1 |.gd........|.gs........|.dst.......|.src.......|
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+  */
  int gs = ((iw0 >> RegMv_gs_bits) & RegMv_gs_mask);
  int gd = ((iw0 >> RegMv_gd_bits) & RegMv_gd_mask);
  int src = ((iw0 >> RegMv_src_bits) & RegMv_src_mask);
  int dst = ((iw0 >> RegMv_dst_bits) & RegMv_dst_mask);
  const char *srcreg_name = get_allreg_name (gs, src);
  const char *dstreg_name = get_allreg_name (gd, dst);

  PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_REGMV);
  TRACE_EXTRACT (cpu, "%s: gd:%i gs:%i dst:%i src:%i",
		 __func__, gd, gs, dst, src);
  TRACE_DECODE (cpu, "%s: dst:%s src:%s", __func__, dstreg_name, srcreg_name);
  TRACE_INSN (cpu, "%s = %s;", dstreg_name, srcreg_name);

  if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
    illegal_instruction_combination (cpu);

  /* Reserved slots cannot be a src/dst.  */
  if (reg_is_reserved (gs, src) || reg_is_reserved (gd, dst))
    goto invalid_move;

  /* Standard register moves  */
  if ((gs < 2)                               /* Dregs/Pregs as source  */
      || (gd < 2)                            /* Dregs/Pregs as dest    */
      || (gs == 4 && src < 4)                /* Accumulators as source */
      || (gd == 4 && dst < 4 && (gs < 4))    /* Accumulators as dest   */
      || (gs == 7 && src == 7 && !(gd == 4 && dst < 4)) /* EMUDAT as source */
      || (gd == 7 && dst == 7))              /* EMUDAT as dest         */
    goto valid_move;

  /* dareg = dareg (IMBL)  */
  if (gs < 4 && gd < 4)
    goto valid_move;

  /* USP can be src to sysregs, but not dagregs.  */
  if ((gs == 7 && src == 0) && (gd >= 4))
    goto valid_move;

  /* USP can move between genregs (only check Accumulators).  */
  if (((gs == 7 && src == 0) && (gd == 4 && dst < 4))
      || ((gd == 7 && dst == 0) && (gs == 4 && src < 4)))
    goto valid_move;

  /* Still here ?  Invalid reg pair.  */
 invalid_move:
  illegal_instruction (cpu);

 valid_move:
  reg_write (cpu, gd, dst, reg_read (cpu, gs, src));
}
/* Two-operand ALU group: register shifts, multiply, shift-add, the
   DIVQ/DIVS division primitives, sub-word extensions, negate, and
   one's complement.  OPC selects the operation; SRC/DST are Dregs and
   DST is also the first operand.  */
static void
decode_ALU2op_0 (SIM_CPU *cpu, bu16 iw0)
{
  /* ALU2op
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+
     | 0 | 1 | 0 | 0 | 0 | 0 |.opc...........|.src.......|.dst.......|
     +---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+  */
  int src = ((iw0 >> ALU2op_src_bits) & ALU2op_src_mask);
  int opc = ((iw0 >> ALU2op_opc_bits) & ALU2op_opc_mask);
  int dst = ((iw0 >> ALU2op_dst_bits) & ALU2op_dst_mask);

  PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_ALU2op);
  TRACE_EXTRACT (cpu, "%s: opc:%i src:%i dst:%i", __func__, opc, src, dst);

  if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
    illegal_instruction_combination (cpu);

  switch (opc)
    {
    case 0: /* Arithmetic shift right.  */
      TRACE_INSN (cpu, "R%i >>>= R%i;", dst, src);
      SET_DREG (dst, ashiftrt (cpu, DREG (dst), DREG (src), 32));
      break;
    case 1: /* Logical shift right.  */
      {
	bu32 val;
	TRACE_INSN (cpu, "R%i >>= R%i;", dst, src);
	/* Shift amounts beyond the register width yield zero.  */
	if (DREG (src) <= 0x1F)
	  val = lshiftrt (cpu, DREG (dst), DREG (src), 32);
	else
	  val = 0;
	SET_DREG (dst, val);
      }
      break;
    case 2: /* Logical shift left.  */
      TRACE_INSN (cpu, "R%i <<= R%i;", dst, src);
      SET_DREG (dst, lshift (cpu, DREG (dst), DREG (src), 32, 0, 0));
      break;
    case 3: /* 32 bit multiply, low half kept.  */
      TRACE_INSN (cpu, "R%i *= R%i;", dst, src);
      SET_DREG (dst, DREG (dst) * DREG (src));
      CYCLE_DELAY = 3;
      break;
    case 4: /* Add then shift left by 1.  */
      TRACE_INSN (cpu, "R%i = (R%i + R%i) << 1;", dst, dst, src);
      SET_DREG (dst, add_and_shift (cpu, DREG (dst), DREG (src), 1));
      break;
    case 5: /* Add then shift left by 2.  */
      TRACE_INSN (cpu, "R%i = (R%i + R%i) << 2;", dst, dst, src);
      SET_DREG (dst, add_and_shift (cpu, DREG (dst), DREG (src), 2));
      break;
    case 8: /* Divide primitive: quotient step.  */
      TRACE_INSN (cpu, "DIVQ ( R%i, R%i );", dst, src);
      SET_DREG (dst, divq (cpu, DREG (dst), (bu16)DREG (src)));
      break;
    case 9: /* Divide primitive: sign step.  */
      TRACE_INSN (cpu, "DIVS ( R%i, R%i );", dst, src);
      SET_DREG (dst, divs (cpu, DREG (dst), (bu16)DREG (src)));
      break;
    case 10: /* Sign extend the low half word.  */
      TRACE_INSN (cpu, "R%i = R%i.L (X);", dst, src);
      SET_DREG (dst, (bs32) (bs16) DREG (src));
      setflags_logical (cpu, DREG (dst));
      break;
    case 11: /* Zero extend the low half word.  */
      TRACE_INSN (cpu, "R%i = R%i.L (Z);", dst, src);
      SET_DREG (dst, (bu32) (bu16) DREG (src));
      setflags_logical (cpu, DREG (dst));
      break;
    case 12: /* Sign extend the low byte.  */
      TRACE_INSN (cpu, "R%i = R%i.B (X);", dst, src);
      SET_DREG (dst, (bs32) (bs8) DREG (src));
      setflags_logical (cpu, DREG (dst));
      break;
    case 13: /* Zero extend the low byte.  */
      TRACE_INSN (cpu, "R%i = R%i.B (Z);", dst, src);
      SET_DREG (dst, (bu32) (bu8) DREG (src));
      setflags_logical (cpu, DREG (dst));
      break;
    case 14: /* Two's complement negate.  */
      {
	bu32 val = DREG (src);
	TRACE_INSN (cpu, "R%i = - R%i;", dst, src);
	SET_DREG (dst, -val);
	setflags_nz (cpu, DREG (dst));
	/* Negating the most negative value overflows.  */
	SET_ASTATREG (v, val == 0x80000000);
	if (ASTATREG (v))
	  SET_ASTATREG (vs, 1);
	SET_ASTATREG (ac0, val == 0x0);
	/* XXX: Documentation isn't entirely clear about av0 and av1.  */
      }
      break;
    case 15: /* One's complement.  */
      TRACE_INSN (cpu, "R%i = ~ R%i;", dst, src);
      SET_DREG (dst, ~DREG (src));
      setflags_logical (cpu, DREG (dst));
      break;
    default:
      illegal_instruction (cpu);
    }
}
static void
decode_PTR2op_0 (SIM_CPU *cpu, bu16 iw0)
{
/* PTR2op
+---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+
| 0 | 1 | 0 | 0 | 0 | 1 | 0 |.opc.......|.src.......|.dst.......|
+---+---+---+---|---+---+---+---|---+---+---+---|---+---+---+---+ */
int src = ((iw0 >> PTR2op_src_bits) & PTR2op_dst_mask);
int opc = ((iw0 >> PTR2op_opc_bits) & PTR2op_opc_mask);
int dst = ((iw0 >> PTR2op_dst_bits) & PTR2op_dst_mask);
const char *src_name = get_preg_name (src);
const char *dst_name = get_preg_name (dst);
PROFILE_COUNT_INSN (cpu, pc, BFIN_INSN_PTR2op);
TRACE_EXTRACT (cpu, "%s: opc:%i src:%i dst:%i", __func__, opc, src, dst);
if (PARALLEL_GROUP != BFIN_PARALLEL_NONE)
illegal_instruction_combination (cpu);
if (opc == 0)
{
TRACE_INSN (cpu, "%s -= %s", dst_name, src_name);
SET_PREG (dst, PREG (dst) - PREG (src));
}
else if (opc == 1)
{
TRACE_INSN (cpu, "%s = %s << 2", dst_name, src_name);
SET_PREG (dst, PREG (src) << 2);
}
else if (opc == 3)