| /* tc-arm.c -- Assemble for the ARM |
| Copyright (C) 1994-2024 Free Software Foundation, Inc. |
| Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org) |
| Modified by David Taylor (dtaylor@armltd.co.uk) |
| Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com) |
| Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com) |
| Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com) |
| |
| This file is part of GAS, the GNU Assembler. |
| |
| GAS is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GAS is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GAS; see the file COPYING. If not, write to the Free |
| Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA |
| 02110-1301, USA. */ |
| |
| #include "as.h" |
| #include <limits.h> |
| #include <stdarg.h> |
| #define NO_RELOC 0 |
| #include "safe-ctype.h" |
| #include "subsegs.h" |
| #include "obstack.h" |
| #include "libiberty.h" |
| #include "opcode/arm.h" |
| #include "cpu-arm.h" |
| |
| #ifdef OBJ_ELF |
| #include "elf/arm.h" |
| #include "dw2gencfi.h" |
| #endif |
| |
| #include "dwarf2dbg.h" |
| |
| #ifdef OBJ_ELF |
| /* Must be at least the size of the largest unwind opcode (currently two). */ |
| #define ARM_OPCODE_CHUNK_SIZE 8 |
| |
| /* This structure holds the unwinding state. */ |
| |
static struct
{
  /* Symbol marking the start of the function being described.  */
  symbolS * proc_start;
  /* Symbol for this function's entry in the unwind table, if one has been
     created.  */
  symbolS * table_entry;
  /* Personality routine symbol and index (per the ARM exception-handling
     ABI; see the unwinding directive handlers elsewhere in this file).  */
  symbolS * personality_routine;
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Bytes used and bytes allocated in OPCODES.  */
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
| |
| /* Whether --fdpic was given. */ |
| static int arm_fdpic; |
| |
| #endif /* OBJ_ELF */ |
| |
| /* Results from operand parsing worker functions. */ |
| |
typedef enum
{
  /* Operand parsed successfully.  */
  PARSE_OPERAND_SUCCESS,
  /* Parse failed; other interpretations may still be attempted.  */
  PARSE_OPERAND_FAIL,
  /* Parse failed and no alternative interpretation should be tried.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
| |
| enum arm_float_abi |
| { |
| ARM_FLOAT_ABI_HARD, |
| ARM_FLOAT_ABI_SOFTFP, |
| ARM_FLOAT_ABI_SOFT |
| }; |
| |
| /* Types of processor to assemble for. */ |
| #ifndef CPU_DEFAULT |
| /* The code that was here used to select a default CPU depending on compiler |
| pre-defines which were only present when doing native builds, thus |
| changing gas' default behaviour depending upon the build host. |
| |
   If you have a target that requires a default CPU option then you
   should define CPU_DEFAULT here.  */
| #endif |
| |
/* Perform range checks on positive and negative overflows by checking if the
   VALUE given fits within the range of a BITS-sized immediate.  */
| static bool out_of_range_p (offsetT value, offsetT bits) |
| { |
| gas_assert (bits < (offsetT)(sizeof (value) * 8)); |
| return (value & ~((1 << bits)-1)) |
| && ((value & ~((1 << bits)-1)) != ~((1 << bits)-1)); |
| } |
| |
| #ifndef FPU_DEFAULT |
| # ifdef TE_LINUX |
| # define FPU_DEFAULT FPU_NONE |
| # elif defined (TE_NetBSD) |
| # ifdef OBJ_ELF |
| # define FPU_DEFAULT FPU_ARCH_SOFTVFP /* Soft-float, but VFP order. */ |
| # else |
| /* Legacy a.out format. */ |
| # define FPU_DEFAULT FPU_NONE /* Soft-float, no FPU. */ |
| # endif |
| # elif defined (TE_VXWORKS) |
| # define FPU_DEFAULT FPU_ARCH_SOFTVFP /* Soft-float, VFP order. */ |
| # else |
| /* For backwards compatibility, default to no-fpu so that we don't |
| get silent code changes of FP literal data. */ |
| # define FPU_DEFAULT FPU_NONE |
| # endif |
| #endif /* ifndef FPU_DEFAULT */ |
| |
| #define streq(a, b) (strcmp (a, b) == 0) |
| |
| /* Current set of feature bits available (CPU+FPU). Different from |
| selected_cpu + selected_fpu in case of autodetection since the CPU |
| feature bits are then all set. */ |
| static arm_feature_set cpu_variant; |
| /* Feature bits used in each execution state. Used to set build attribute |
| (in particular Tag_*_ISA_use) in CPU autodetection mode. */ |
| static arm_feature_set arm_arch_used; |
| static arm_feature_set thumb_arch_used; |
| |
| /* Flags stored in private area of BFD structure. */ |
| static int uses_apcs_26 = false; |
| static int atpcs = false; |
| static int support_interwork = false; |
| static int uses_apcs_float = false; |
| static int pic_code = false; |
| static int fix_v4bx = false; |
| /* Warn on using deprecated features. */ |
| static int warn_on_deprecated = true; |
| static int warn_on_restrict_it = false; |
| |
| /* Understand CodeComposer Studio assembly syntax. */ |
| bool codecomposer_syntax = false; |
| |
| /* Variables that we set while parsing command-line options. Once all |
| options have been read we re-process these values to set the real |
| assembly flags. */ |
| |
| /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1 |
| instead of -mcpu=arm1). */ |
| static const arm_feature_set *legacy_cpu = NULL; |
| static const arm_feature_set *legacy_fpu = NULL; |
| |
| /* CPU, extension and FPU feature bits selected by -mcpu. */ |
| static const arm_feature_set *mcpu_cpu_opt = NULL; |
| static arm_feature_set *mcpu_ext_opt = NULL; |
| static const arm_feature_set *mcpu_fpu_opt = NULL; |
| |
| /* CPU, extension and FPU feature bits selected by -march. */ |
| static const arm_feature_set *march_cpu_opt = NULL; |
| static arm_feature_set *march_ext_opt = NULL; |
| static const arm_feature_set *march_fpu_opt = NULL; |
| |
| /* Feature bits selected by -mfpu. */ |
| static const arm_feature_set *mfpu_opt = NULL; |
| |
| /* Constants for known architecture features. */ |
| static const arm_feature_set fpu_default = FPU_DEFAULT; |
| static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1; |
| static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2; |
| static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3; |
| static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1; |
| static const arm_feature_set fpu_any_hard = FPU_ANY_HARD; |
| static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE; |
| |
| #ifdef CPU_DEFAULT |
| static const arm_feature_set cpu_default = CPU_DEFAULT; |
| #endif |
| |
| static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1); |
| static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2); |
| static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S); |
| static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3); |
| static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M); |
| static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4); |
| static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T); |
| static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5); |
| static const arm_feature_set arm_ext_v4t_5 = |
| ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5); |
| static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T); |
| static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E); |
| static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP); |
| static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J); |
| static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6); |
| static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K); |
| static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2); |
/* Only for compatibility of hint instructions.  */
| static const arm_feature_set arm_ext_v6k_v6t2 = |
| ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2); |
| static const arm_feature_set arm_ext_v6_notm = |
| ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM); |
| static const arm_feature_set arm_ext_v6_dsp = |
| ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP); |
| static const arm_feature_set arm_ext_barrier = |
| ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER); |
| static const arm_feature_set arm_ext_msr = |
| ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR); |
| static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV); |
| static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7); |
| static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A); |
| static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R); |
| static const arm_feature_set arm_ext_v8r = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8R); |
| #ifdef OBJ_ELF |
| static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M); |
| #endif |
| static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8); |
| static const arm_feature_set arm_ext_m = |
| ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M, |
| ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN); |
| static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP); |
| static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC); |
| static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS); |
| static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV); |
| static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT); |
| static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN); |
| static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M); |
| static const arm_feature_set arm_ext_v8m_main = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN); |
| static const arm_feature_set arm_ext_v8_1m_main = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN); |
| /* Instructions in ARMv8-M only found in M profile architectures. */ |
| static const arm_feature_set arm_ext_v8m_m_only = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN); |
| static const arm_feature_set arm_ext_v6t2_v8m = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M); |
| /* Instructions shared between ARMv8-A and ARMv8-M. */ |
| static const arm_feature_set arm_ext_atomics = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS); |
| #ifdef OBJ_ELF |
| /* DSP instructions Tag_DSP_extension refers to. */ |
| static const arm_feature_set arm_ext_dsp = |
| ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP); |
| #endif |
| static const arm_feature_set arm_ext_ras = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS); |
| /* FP16 instructions. */ |
| static const arm_feature_set arm_ext_fp16 = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST); |
| static const arm_feature_set arm_ext_fp16_fml = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML); |
| static const arm_feature_set arm_ext_v8_2 = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A); |
| static const arm_feature_set arm_ext_v8_3 = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A); |
| static const arm_feature_set arm_ext_sb = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB); |
| static const arm_feature_set arm_ext_predres = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES); |
| static const arm_feature_set arm_ext_bf16 = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16); |
| static const arm_feature_set arm_ext_i8mm = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM); |
| static const arm_feature_set arm_ext_crc = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC); |
| static const arm_feature_set arm_ext_cde = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE); |
| static const arm_feature_set arm_ext_cde0 = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE0); |
| static const arm_feature_set arm_ext_cde1 = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE1); |
| static const arm_feature_set arm_ext_cde2 = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE2); |
| static const arm_feature_set arm_ext_cde3 = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE3); |
| static const arm_feature_set arm_ext_cde4 = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE4); |
| static const arm_feature_set arm_ext_cde5 = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE5); |
| static const arm_feature_set arm_ext_cde6 = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE6); |
| static const arm_feature_set arm_ext_cde7 = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE7); |
| |
| static const arm_feature_set arm_arch_any = ARM_ANY; |
| static const arm_feature_set fpu_any = FPU_ANY; |
| static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1); |
| static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2; |
| static const arm_feature_set arm_arch_none = ARM_ARCH_NONE; |
| |
| static const arm_feature_set arm_cext_iwmmxt2 = |
| ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2); |
| static const arm_feature_set arm_cext_iwmmxt = |
| ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT); |
| static const arm_feature_set arm_cext_xscale = |
| ARM_FEATURE_COPROC (ARM_CEXT_XSCALE); |
| static const arm_feature_set fpu_vfp_ext_v1xd = |
| ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD); |
| static const arm_feature_set fpu_vfp_ext_v1 = |
| ARM_FEATURE_COPROC (FPU_VFP_EXT_V1); |
| static const arm_feature_set fpu_vfp_ext_v2 = |
| ARM_FEATURE_COPROC (FPU_VFP_EXT_V2); |
| static const arm_feature_set fpu_vfp_ext_v3xd = |
| ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD); |
| static const arm_feature_set fpu_vfp_ext_v3 = |
| ARM_FEATURE_COPROC (FPU_VFP_EXT_V3); |
| static const arm_feature_set fpu_vfp_ext_d32 = |
| ARM_FEATURE_COPROC (FPU_VFP_EXT_D32); |
| static const arm_feature_set fpu_neon_ext_v1 = |
| ARM_FEATURE_COPROC (FPU_NEON_EXT_V1); |
| static const arm_feature_set fpu_vfp_v3_or_neon_ext = |
| ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3); |
| static const arm_feature_set mve_ext = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE); |
| static const arm_feature_set mve_fp_ext = |
| ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP); |
| /* Note: This has more than one bit set, which means using it with |
| mark_feature_used (which returns if *any* of the bits are set in the current |
| cpu variant) can give surprising results. */ |
| static const arm_feature_set armv8m_fp = |
| ARM_FEATURE_COPROC (FPU_VFP_V5_SP_D16); |
| #ifdef OBJ_ELF |
| static const arm_feature_set fpu_vfp_fp16 = |
| ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16); |
| static const arm_feature_set fpu_neon_ext_fma = |
| ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA); |
| #endif |
| static const arm_feature_set fpu_vfp_ext_fma = |
| ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA); |
| static const arm_feature_set fpu_vfp_ext_armv8 = |
| ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8); |
| static const arm_feature_set fpu_vfp_ext_armv8xd = |
| ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD); |
| static const arm_feature_set fpu_neon_ext_armv8 = |
| ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8); |
| static const arm_feature_set fpu_crypto_ext_armv8 = |
| ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8); |
| static const arm_feature_set fpu_neon_ext_v8_1 = |
| ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA); |
| static const arm_feature_set fpu_neon_ext_dotprod = |
| ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD); |
| static const arm_feature_set pacbti_ext = |
| ARM_FEATURE_CORE_HIGH_HIGH (ARM_EXT3_PACBTI); |
| |
| static int mfloat_abi_opt = -1; |
| /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch |
| directive. */ |
| static arm_feature_set selected_arch = ARM_ARCH_NONE; |
| /* Extension feature bits selected by the last -mcpu/-march or .arch_extension |
| directive. */ |
| static arm_feature_set selected_ext = ARM_ARCH_NONE; |
| /* Feature bits selected by the last -mcpu/-march or by the combination of the |
| last .cpu/.arch directive .arch_extension directives since that |
| directive. */ |
| static arm_feature_set selected_cpu = ARM_ARCH_NONE; |
| /* FPU feature bits selected by the last -mfpu or .fpu directive. */ |
| static arm_feature_set selected_fpu = FPU_NONE; |
| /* Feature bits selected by the last .object_arch directive. */ |
| static arm_feature_set selected_object_arch = ARM_ARCH_NONE; |
| /* Must be long enough to hold any of the names in arm_cpus. */ |
| static const struct arm_ext_table * selected_ctx_ext_table = NULL; |
| static char selected_cpu_name[20]; |
| |
| extern FLONUM_TYPE generic_floating_point_number; |
| |
| /* Return if no cpu was selected on command-line. */ |
| static bool |
| no_cpu_selected (void) |
| { |
| return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none); |
| } |
| |
| #ifdef OBJ_ELF |
| # ifdef EABI_DEFAULT |
| static int meabi_flags = EABI_DEFAULT; |
| # else |
| static int meabi_flags = EF_ARM_EABI_UNKNOWN; |
| # endif |
| |
| static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES]; |
| |
| bool |
| arm_is_eabi (void) |
| { |
| return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4); |
| } |
| #endif |
| |
| #ifdef OBJ_ELF |
| /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */ |
| symbolS * GOT_symbol; |
| #endif |
| |
| /* 0: assemble for ARM, |
| 1: assemble for Thumb, |
| 2: assemble for Thumb even though target CPU does not support thumb |
| instructions. */ |
| static int thumb_mode = 0; |
| /* A value distinct from the possible values for thumb_mode that we |
| can use to record whether thumb_mode has been copied into the |
| tc_frag_data field of a frag. */ |
| #define MODE_RECORDED (1 << 4) |
| |
| /* Specifies the intrinsic IT insn behavior mode. */ |
| enum implicit_it_mode |
| { |
| IMPLICIT_IT_MODE_NEVER = 0x00, |
| IMPLICIT_IT_MODE_ARM = 0x01, |
| IMPLICIT_IT_MODE_THUMB = 0x02, |
| IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB) |
| }; |
| static int implicit_it_mode = IMPLICIT_IT_MODE_ARM; |
| |
| /* If unified_syntax is true, we are processing the new unified |
| ARM/Thumb syntax. Important differences from the old ARM mode: |
| |
| - Immediate operands do not require a # prefix. |
| - Conditional affixes always appear at the end of the |
| instruction. (For backward compatibility, those instructions |
| that formerly had them in the middle, continue to accept them |
| there.) |
| - The IT instruction may appear, and if it does is validated |
| against subsequent conditional affixes. It does not generate |
| machine code. |
| |
| Important differences from the old Thumb mode: |
| |
| - Immediate operands do not require a # prefix. |
| - Most of the V6T2 instructions are only available in unified mode. |
| - The .N and .W suffixes are recognized and honored (it is an error |
| if they cannot be honored). |
| - All instructions set the flags if and only if they have an 's' affix. |
| - Conditional affixes may be used. They are validated against |
| preceding IT instructions. Unlike ARM mode, you cannot use a |
| conditional affix except in the scope of an IT instruction. */ |
| |
| static bool unified_syntax = false; |
| |
| /* An immediate operand can start with #, and ld*, st*, pld operands |
| can contain [ and ]. We need to tell APP not to elide whitespace |
| before a [, which can appear as the first operand for pld. |
| Likewise, a { can appear as the first operand for push, pop, vld*, etc. */ |
| const char arm_symbol_chars[] = "#[]{}"; |
| |
/* Element classes for Neon/MVE-style type suffixes on mnemonics
   (e.g. ".s32", ".f16" -- see the neon-style suffix handling below).  */
enum neon_el_type
{
  NT_invtype,
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_bfloat,
  NT_unsigned
};

/* One parsed type suffix: the element class plus its size in bits.  */
struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};

/* Maximum number of type suffixes accepted on a single mnemonic.  */
#define NEON_MAX_TYPE_ELS 5

/* The full list of type suffixes parsed from one mnemonic.  */
struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  /* Number of valid entries in EL.  */
  unsigned elems;
};
| |
/* Classification of an instruction relative to IT and VPT predication
   blocks.  */
enum pred_instruction_type
{
  OUTSIDE_PRED_INSN,
  /* Instruction inside a VPT block.  */
  INSIDE_VPT_INSN,
  /* Instruction inside an IT block, other than the last one.  */
  INSIDE_IT_INSN,
  /* Instruction that must be the last one in its IT block.  */
  INSIDE_IT_LAST_INSN,
  IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
			     if inside, should be the last one.  */
  NEUTRAL_IT_INSN,	  /* This could be either inside or outside,
			     i.e. BKPT and NOP.  */
  IT_INSN,		  /* The IT insn has been parsed.  */
  VPT_INSN,		  /* The VPT/VPST insn has been parsed.  */
  MVE_OUTSIDE_PRED_INSN , /* Instruction to indicate a MVE instruction without
			     a predication code.  */
  MVE_UNPREDICABLE_INSN,  /* MVE instruction that is non-predicable.  */
};
| |
| /* The maximum number of operands we need. */ |
| #define ARM_IT_MAX_OPERANDS 6 |
| #define ARM_IT_MAX_RELOCS 3 |
| |
/* Per-instruction working state, filled in by the parser and consumed by
   the encoders; the single global instance is "inst" below.  */
struct arm_it
{
  /* Diagnostic to report for this instruction, or NULL if none.  */
  const char * error;
  /* The binary encoding of the instruction.  */
  unsigned long instruction;
  /* Encoded size of the instruction in bytes.  */
  unsigned int size;
  /* Size explicitly requested by the source, or 0 if none.
     NOTE(review): presumably set by the .N/.W suffixes described in the
     unified-syntax commentary above -- confirm against the encoders.  */
  unsigned int size_req;
  /* Condition code for the instruction.  */
  unsigned int cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1u if nothing is
     appropriate.  */
  unsigned int uncond_value;
  /* Neon-style type suffixes parsed from the mnemonic.  */
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocations required by the instruction's operands.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } relocs[ARM_IT_MAX_RELOCS];

  /* How this instruction relates to IT/VPT predication blocks.  */
  enum pred_instruction_type pred_insn_type;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 2;  /* .imm field is a second register.
				 0: imm, 1: gpr, 2: MVE Q-register.  */
    unsigned isscalar   : 2;  /* Operand is a (SIMD) scalar:
				 0) not scalar,
				 1) Neon scalar,
				 2) MVE scalar.  */
    unsigned immisalign	: 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat	: 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is SIMD quad register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned iszr	: 1;  /* Operand is ZR register.  */
    unsigned hasreloc   : 1;  /* Operand has relocation suffix.  */
    unsigned writeback  : 1;  /* Operand has trailing !  */
    unsigned preind     : 1;  /* Preindexed address.  */
    unsigned postind    : 1;  /* Postindexed address.  */
    unsigned negative   : 1;  /* Index register was negated.  */
    unsigned shifted    : 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};
| |
| static struct arm_it inst; |
| |
/* Number of entries in fp_const, excluding the NULL terminator.  */
#define NUM_FLOAT_VALS 8

/* Floating-point constants recognized as immediates, terminated by 0.
   NOTE(review): ordering presumably matches their encoding values --
   confirm against the code that searches this table.  */
const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Littlenum (gas internal floating-point) form of each fp_const entry.  */
LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
| |
| #define FAIL (-1) |
| #define SUCCESS (0) |
| |
| #define SUFF_S 1 |
| #define SUFF_D 2 |
| #define SUFF_E 3 |
| #define SUFF_P 4 |
| |
| #define CP_T_X 0x00008000 |
| #define CP_T_Y 0x00400000 |
| |
| #define CONDS_BIT 0x00100000 |
| #define LOAD_BIT 0x00100000 |
| |
| #define DOUBLE_LOAD_FLAG 0x00000001 |
| |
/* Entry in the table of condition-code names: the spelling and the value
   it encodes.  */
struct asm_cond
{
  const char * template_name;
  unsigned long value;
};

/* Encoding of the "always" condition.  */
#define COND_ALWAYS 0xE

/* Entry in the table of PSR names: the spelling and the field bits it
   selects.  */
struct asm_psr
{
  const char * template_name;
  unsigned long field;
};

/* Entry in the table of barrier option names, together with the
   architecture feature set that provides the option.  */
struct asm_barrier_opt
{
  const char * template_name;
  unsigned long value;
  const arm_feature_set arch;
};
| |
| /* The bit that distinguishes CPSR and SPSR. */ |
| #define SPSR_BIT (1 << 22) |
| |
| /* The individual PSR flag bits. */ |
| #define PSR_c (1 << 16) |
| #define PSR_x (1 << 17) |
| #define PSR_s (1 << 18) |
| #define PSR_f (1 << 19) |
| |
/* Maps a relocation-name suffix to its BFD relocation code.  */
struct reloc_entry
{
  const char * name;
  bfd_reloc_code_real_type reloc;
};

/* Which VFP register field of an instruction is being encoded
   (single-precision Sd/Sm/Sn or double-precision Dd/Dm/Dn).  */
enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};

/* Addressing-mode variants of the VFP load/store-multiple instructions.  */
enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};

/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE 1
#define NTA_HASINDEX 2

/* Type and/or index information attached to a register alias (see the
   reg_entry comment regarding .dn/.qn aliases).  */
struct neon_typed_alias
{
  /* Mask of NTA_* bits saying which of the fields below are valid.  */
  unsigned char defined;
  /* Scalar index, valid when NTA_HASINDEX is set.  */
  unsigned char index;
  /* Element type, valid when NTA_HASTYPE is set.  */
  struct neon_type_el eltype;
};
| |
| /* ARM register categories. This includes coprocessor numbers and various |
| architecture extensions' registers. Each entry should have an error message |
| in reg_expected_msgs below. */ |
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  Each entry should have an error message
   in reg_expected_msgs below.  NOTE(review): REG_TYPE_FN, the REG_TYPE_MV*
   entries and REG_TYPE_DSPSC currently have no message there, leaving
   reg_expected_msgs[type] NULL for them -- confirm those categories can
   never reach a diagnostic path.  */
enum arm_reg_type
{
  REG_TYPE_RN,
  REG_TYPE_CP,
  REG_TYPE_CN,
  REG_TYPE_FN,
  REG_TYPE_VFS,
  REG_TYPE_VFD,
  REG_TYPE_NQ,
  REG_TYPE_VFSD,
  REG_TYPE_NDQ,
  REG_TYPE_NSD,
  REG_TYPE_NSDQ,
  REG_TYPE_VFC,
  REG_TYPE_MVF,
  REG_TYPE_MVD,
  REG_TYPE_MVFX,
  REG_TYPE_MVDX,
  REG_TYPE_MVAX,
  REG_TYPE_MQ,
  REG_TYPE_DSPSC,
  REG_TYPE_MMXWR,
  REG_TYPE_MMXWC,
  REG_TYPE_MMXWCG,
  REG_TYPE_XSCALE,
  REG_TYPE_RNB,
  REG_TYPE_ZR,
  REG_TYPE_PSEUDO
};
| |
| /* Structure for a hash table entry for a register. |
| If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra |
| information which states whether a vector type or index is specified (for a |
| register alias created with .dn or .qn). Otherwise NEON should be NULL. */ |
struct reg_entry
{
  /* Register name as written in assembly source.  */
  const char * name;
  /* Register number / encoding value.  */
  unsigned int number;
  /* An enum arm_reg_type value, stored narrow.  */
  unsigned char type;
  /* NOTE(review): presumably nonzero for built-in registers as opposed to
     user-created aliases -- confirm at the points that set it.  */
  unsigned char builtin;
  /* Extra type/index information for .dn/.qn aliases, or NULL.  */
  struct neon_typed_alias * neon;
};
| |
| /* Diagnostics used when we don't get a register of the expected type. */ |
/* Diagnostics used when we don't get a register of the expected type.
   Indexed by enum arm_reg_type; the designated initializers leave any
   category not mentioned here (e.g. REG_TYPE_FN, REG_TYPE_MV*) as NULL.  */
const char * const reg_expected_msgs[] =
{
  [REG_TYPE_RN]	    = N_("ARM register expected"),
  [REG_TYPE_CP]	    = N_("bad or missing co-processor number"),
  [REG_TYPE_CN]	    = N_("co-processor register expected"),
  [REG_TYPE_VFS]    = N_("VFP single precision register expected"),
  [REG_TYPE_VFD]    = N_("VFP/Neon double precision register expected"),
  [REG_TYPE_NQ]	    = N_("Neon quad precision register expected"),
  [REG_TYPE_VFSD]   = N_("VFP single or double precision register expected"),
  [REG_TYPE_NDQ]    = N_("Neon double or quad precision register expected"),
  [REG_TYPE_NSD]    = N_("Neon single or double precision register expected"),
  [REG_TYPE_NSDQ]   = N_("VFP single, double or Neon quad precision register"
			 " expected"),
  [REG_TYPE_VFC]    = N_("VFP system register expected"),
  [REG_TYPE_MMXWR]  = N_("iWMMXt data register expected"),
  [REG_TYPE_MMXWC]  = N_("iWMMXt control register expected"),
  [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
  [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
  [REG_TYPE_MQ]	    = N_("MVE vector register expected"),
  [REG_TYPE_RNB]    = "",
  [REG_TYPE_ZR]     = N_("ZR register expected"),
  [REG_TYPE_PSEUDO] = N_("Pseudo register expected"),
};
| |
| /* Some well known registers that we refer to directly elsewhere. */ |
| #define REG_R12 12 |
| #define REG_SP 13 |
| #define REG_LR 14 |
| #define REG_PC 15 |
| #define REG_RA_AUTH_CODE 143 |
| |
| /* ARM instructions take 4bytes in the object file, Thumb instructions |
| take 2: */ |
| #define INSN_SIZE 4 |
| |
/* An entry in the opcode table, describing how to match one mnemonic and
   how to encode it in both ARM and Thumb state.  */
struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.  */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);

  /* Indicates whether this instruction may be vector predicated.  */
  unsigned int mayBeVecPred : 1;
};
| |
| /* Defines for various bits that we will want to toggle. */ |
| #define INST_IMMEDIATE 0x02000000 |
| #define OFFSET_REG 0x02000000 |
| #define HWOFFSET_IMM 0x00400000 |
| #define SHIFT_BY_REG 0x00000010 |
| #define PRE_INDEX 0x01000000 |
| #define INDEX_UP 0x00800000 |
| #define WRITE_BACK 0x00200000 |
| #define LDM_TYPE_2_OR_3 0x00400000 |
| #define CPSI_MMOD 0x00020000 |
| |
| #define LITERAL_MASK 0xf000f000 |
| #define OPCODE_MASK 0xfe1fffff |
| #define V4_STR_BIT 0x00000020 |
| #define VLDR_VMOV_SAME 0x0040f000 |
| |
| #define T2_SUBS_PC_LR 0xf3de8f00 |
| |
| #define DATA_OP_SHIFT 21 |
| #define SBIT_SHIFT 20 |
| |
| #define T2_OPCODE_MASK 0xfe1fffff |
| #define T2_DATA_OP_SHIFT 21 |
| #define T2_SBIT_SHIFT 20 |
| |
| #define A_COND_MASK 0xf0000000 |
| #define A_PUSH_POP_OP_MASK 0x0fff0000 |
| |
/* Opcodes for pushing/popping registers to/from the stack.  */
| #define A1_OPCODE_PUSH 0x092d0000 |
| #define A2_OPCODE_PUSH 0x052d0004 |
| #define A2_OPCODE_POP 0x049d0004 |
| |
| /* Codes to distinguish the arithmetic instructions. */ |
| #define OPCODE_AND 0 |
| #define OPCODE_EOR 1 |
| #define OPCODE_SUB 2 |
| #define OPCODE_RSB 3 |
| #define OPCODE_ADD 4 |
| #define OPCODE_ADC 5 |
| #define OPCODE_SBC 6 |
| #define OPCODE_RSC 7 |
| #define OPCODE_TST 8 |
/* ARM data-processing opcode field values (the table continues from
   the lines above this point in the file).  */
#define OPCODE_TEQ 9
#define OPCODE_CMP 10
#define OPCODE_CMN 11
#define OPCODE_ORR 12
#define OPCODE_MOV 13
#define OPCODE_BIC 14
#define OPCODE_MVN 15

/* Thumb-2 data-processing opcode field values.  */
#define T2_OPCODE_AND 0
#define T2_OPCODE_BIC 1
#define T2_OPCODE_ORR 2
#define T2_OPCODE_ORN 3
#define T2_OPCODE_EOR 4
#define T2_OPCODE_ADD 8
#define T2_OPCODE_ADC 10
#define T2_OPCODE_SBC 11
#define T2_OPCODE_SUB 13
#define T2_OPCODE_RSB 14

/* Complete 16-bit Thumb instruction patterns; register/immediate
   fields are OR-ed in by the encoding routines.  */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3 0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

/* Thumb shift/rotate patterns: _R = by register, _I = by immediate.  */
#define T_OPCODE_ASR_R 0x4100
#define T_OPCODE_LSL_R 0x4080
#define T_OPCODE_LSR_R 0x40c0
#define T_OPCODE_ROR_R 0x41c0
#define T_OPCODE_ASR_I 0x1000
#define T_OPCODE_LSL_I 0x0000
#define T_OPCODE_LSR_I 0x0800

#define T_OPCODE_MOV_I8 0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

/* Thumb load/store patterns: I = immediate offset, R = register
   offset; W/H/B = word/halfword/byte.  */
#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH 0xb400
#define T_OPCODE_POP 0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE 2 /* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
| |
/* Canned diagnostic strings used throughout the parser/encoder; _()
   marks them for gettext translation.  */
#define BAD_SYNTAX _("syntax error")
#define BAD_ARGS _("bad arguments to instruction")
#define BAD_SP _("r13 not allowed here")
#define BAD_PC _("r15 not allowed here")
#define BAD_ODD _("Odd register not allowed here")
#define BAD_EVEN _("Even register not allowed here")
#define BAD_COND _("instruction cannot be conditional")
#define BAD_OVERLAP _("registers may not be the same")
#define BAD_HIREG _("lo register required")
#define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
#define BAD_BRANCH _("branch must be last instruction in IT block")
#define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
#define BAD_NO_VPT _("instruction not allowed in VPT block")
#define BAD_NOT_IT _("instruction not allowed in IT block")
#define BAD_NOT_VPT _("instruction missing MVE vector predication code")
#define BAD_FPU _("selected FPU does not support instruction")
#define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
#define BAD_OUT_VPT \
	_("vector predicated instruction should be in VPT/VPST block")
#define BAD_IT_COND _("incorrect condition in IT block")
#define BAD_VPT_COND _("incorrect condition in VPT/VPST block")
#define BAD_IT_IT _("IT falling in the range of a previous IT block")
#define MISSING_FNSTART _("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE _("branch out of range")
#define BAD_FP16 _("selected processor does not support fp16 instruction")
#define BAD_BF16 _("selected processor does not support bf16 instruction")
#define BAD_CDE _("selected processor does not support cde instruction")
#define BAD_CDE_COPROC _("coprocessor for insn is not enabled for cde")
#define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
#define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
		     "block")
#define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \
		      "block")
#define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \
		     " operand")
#define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \
		     " operand")
#define BAD_SIMD_TYPE _("bad type in SIMD instruction")
#define BAD_MVE_AUTO \
  _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
    " use a valid -march or -mcpu option.")
#define BAD_MVE_SRCDEST _("Warning: 32-bit element size and same destination "\
			  "and source operands makes instruction UNPREDICTABLE")
#define BAD_EL_TYPE _("bad element type for instruction")
#define MVE_BAD_QREG _("MVE vector register Q[0..7] expected")
#define BAD_PACBTI _("selected processor does not support PACBTI extention")
| |
/* Hash tables mapping names to parser table entries.  NOTE(review):
   presumably populated during target initialization (md_begin) --
   confirm; only the lookups are visible in this part of the file.  */
static htab_t arm_ops_hsh;		/* Instruction mnemonics.  */
static htab_t arm_cond_hsh;		/* Condition codes.  */
static htab_t arm_vcond_hsh;		/* Vector (VPT) condition codes.  */
static htab_t arm_shift_hsh;		/* Shift names (lsl, asr, ...).  */
static htab_t arm_psr_hsh;		/* PSR field names.  */
static htab_t arm_v7m_psr_hsh;		/* v7-M special register names.  */
static htab_t arm_reg_hsh;		/* Register names (see arm_reg_parse_multi).  */
static htab_t arm_reloc_hsh;		/* Relocation operator names.  */
static htab_t arm_barrier_opt_hsh;	/* Barrier option names.  */

/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	     <insn>  */

symbolS *  last_label_seen;
/* Nonzero while a label naming a Thumb function is pending.  */
static int label_is_thumb_function_name = false;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  expressionS	 literals [MAX_LITERAL_POOL_SIZE];   /* Pending pool entries.  */
  unsigned int	 next_free_entry;   /* Number of slots used in LITERALS.  */
  unsigned int	 id;		    /* Used to name the pool's symbol.  */
  symbolS *	 symbol;	    /* Label placed at the pool.  */
  segT		 section;	    /* Section owning this pool.  */
  subsegT	 sub_section;	    /* Subsection owning this pool.  */
#ifdef OBJ_ELF
  /* Source locations of the entries, for debug line info.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  struct literal_pool * next;	    /* Next pool in list_of_pools.  */
  unsigned int	 alignment;	    /* Required pool alignment (log2?  TODO confirm).  */
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* State of .asmfunc/.endasmfunc directive processing.  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,		/* No .asmfunc seen.  */
  WAITING_ASMFUNC_NAME,		/* .asmfunc seen, name expected next.  */
  WAITING_ENDASMFUNC		/* Inside an asmfunc, awaiting .endasmfunc.  */
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;

/* Current IT/VPT predication state; kept per-segment for ELF so that
   section switches do not mix up IT blocks.  */
#ifdef OBJ_ELF
#  define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
#else
static struct current_pred now_pred;
#endif
| |
| static inline int |
| now_pred_compatible (int cond) |
| { |
| return (cond & ~1) == (now_pred.cc & ~1); |
| } |
| |
| static inline int |
| conditional_insn (void) |
| { |
| return inst.cond != COND_ALWAYS; |
| } |
| |
/* IT/VPT predication state machine; these are defined later in the
   file.  */
static int in_pred_block (void);

static int handle_pred_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);

/* Record the predication type of the instruction being assembled and
   run the predication state machine; on FAIL, return from the
   enclosing (void) function.  */
#define set_pred_insn_type(type)			\
  do						\
    {						\
      inst.pred_insn_type = type;		\
      if (handle_pred_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_pred_insn_type, for use in functions that must return
   FAILRET on failure.  */
#define set_pred_insn_type_nonvoid(type, failret) \
  do						\
    {						\
      inst.pred_insn_type = type;		\
      if (handle_pred_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)

/* Mark the current instruction as (only permitted as) the last one in
   an IT block, choosing the variant that matches whether it is
   conditional.  */
#define set_pred_insn_type_last()		\
  do						\
    {						\
      if (inst.cond == COND_ALWAYS)		\
	set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); \
      else					\
	set_pred_insn_type (INSIDE_IT_LAST_INSN); \
    }						\
  while (0)
| |
/* Toggle bit POS of VALUE, yielding the new value.  Both arguments are
   parenthesized so that expressions such as TOGGLE_BIT (x | y, n)
   expand correctly ('^' binds tighter than '|').  */
#define TOGGLE_BIT(value, pos) ((value) ^ (1 << (pos)))
| |
/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  NOTE(review):
   deliberately non-const -- presumably modified elsewhere depending on
   command-line options; confirm before constifying.  */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

/* Statement separator; non-const for the same reason as
   arm_comment_chars above.  */
char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpPHh";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* NOTE(review): intentionally skips at most ONE space -- the operand
   strings handed to the parsers appear to be pre-tokenized; confirm
   before "fixing" this to a loop.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)

/* Which 16-bit float encoding(s) are acceptable.  */
enum fp_16bit_format
{
  ARM_FP16_FORMAT_IEEE = 0x1,
  ARM_FP16_FORMAT_ALTERNATIVE = 0x2,
  ARM_FP16_FORMAT_DEFAULT = 0x3		/* Either of the above.  */
};

static enum fp_16bit_format fp16_format = ARM_FP16_FORMAT_DEFAULT;
| |
| |
| static inline int |
| skip_past_char (char ** str, char c) |
| { |
| /* PR gas/14987: Allow for whitespace before the expected character. */ |
| skip_whitespace (*str); |
| |
| if (**str == c) |
| { |
| (*str)++; |
| return SUCCESS; |
| } |
| else |
| return FAIL; |
| } |
| |
| #define skip_past_comma(str) skip_past_char (str, ',') |
| |
| /* Arithmetic expressions (possibly involving symbols). */ |
| |
| /* Return TRUE if anything in the expression is a bignum. */ |
| |
| static bool |
| walk_no_bignums (symbolS * sp) |
| { |
| if (symbol_get_value_expression (sp)->X_op == O_big) |
| return true; |
| |
| if (symbol_get_value_expression (sp)->X_add_symbol) |
| { |
| return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol) |
| || (symbol_get_value_expression (sp)->X_op_symbol |
| && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol))); |
| } |
| |
| return false; |
| } |
| |
/* True while my_get_expression is running; md_operand uses this to
   flag bad operands as O_illegal instead of handling them.  */
static bool in_my_get_expression = false;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0		/* No '#'/'$' prefix allowed.  */
#define GE_IMM_PREFIX 1		/* Prefix required.  */
#define GE_OPT_PREFIX 2		/* Prefix optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
| |
/* Parse the text at *STR into EP, honouring an optional/required
   '#'/'$' immediate prefix according to PREFIX_MODE (a GE_* value).
   On success, advance *STR past the parsed text and return SUCCESS.
   On failure, set inst.error if not already set, leave *STR at the
   point of failure, and return nonzero.  NOTE(review): the failure
   paths mix `return FAIL' and `return 1'; callers appear to test only
   for nonzero -- confirm before relying on the exact value.  */

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser over *STR; in_my_get_expression
     tells md_operand to mark unparseable operands O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = true;
  expression (ep);
  in_my_get_expression = false;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
| |
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represent in the normal way on the ARM.
   In big endian mode, things are as expected.	However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */

const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;			/* Size in 16-bit LITTLENUMs.  */
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    /* 16-bit: IEEE half and bfloat16.  */
    case 'H':
    case 'h':
    /* bfloat16, despite not being part of the IEEE specification, can also
       be handled by atof_ieee().  */
    case 'b':
      prec = 1;
      break;

    /* 32-bit single precision.  */
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    /* 64-bit double precision.  */
    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    /* 80-bit extended precision.  */
    case 'x':
    case 'X':
      prec = 5;
      break;

    /* NOTE(review): packed decimal is handled identically to extended
       precision here -- confirm this is intended.  */
    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  /* 16-bit values have no word-swapping issue; emit most-significant
     littlenum first, as in big-endian mode.  */
  if (target_big_endian || prec == 1)
    for (i = 0; i < prec; i++)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
    /* Pure little-endian FP data: emit littlenums in reverse order.  */
    for (i = prec - 1; i >= 0; i--)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  else
    /* For a 4 byte float the order of elements in `words' is 1 0.
       For an 8 byte float the order is 1 0 3 2.  */
    for (i = 0; i < prec; i += 2)
      {
	md_number_to_chars (litP, (valueT) words[i + 1],
			    sizeof (LITTLENUM_TYPE));
	md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
			    (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += 2 * sizeof (LITTLENUM_TYPE);
      }

  return NULL;
}
| |
| /* We handle all bad expressions here, so that we can report the faulty |
| instruction in the error message. */ |
| |
| void |
| md_operand (expressionS * exp) |
| { |
| if (in_my_get_expression) |
| exp->X_op = O_illegal; |
| } |
| |
/* Immediate values.  */

#ifdef OBJ_ELF
/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  Returns SUCCESS, or FAIL after emitting a
   diagnostic and discarding the rest of the line.  */

static int
immediate_for_directive (int *val)
{
  expressionS exp;

  /* Without the '#'/'$' prefix there is nothing to parse; emit the
     same diagnostic as for a non-constant expression.  */
  if (!is_immediate_prefix (*input_line_pointer))
    {
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
      return FAIL;
    }

  input_line_pointer++;
  expression (&exp);

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
      return FAIL;
    }

  *val = exp.X_add_number;
  return SUCCESS;
}
#endif
| |
| /* Register parsing. */ |
| |
| /* Generic register parser. CCP points to what should be the |
| beginning of a register name. If it is indeed a valid register |
| name, advance CCP over it and return the reg_entry structure; |
| otherwise return NULL. Does not issue diagnostics. */ |
| |
| static struct reg_entry * |
| arm_reg_parse_multi (char **ccp) |
| { |
| char *start = *ccp; |
| char *p; |
| struct reg_entry *reg; |
| |
| skip_whitespace (start); |
| |
| #ifdef REGISTER_PREFIX |
| if (*start != REGISTER_PREFIX) |
| return NULL; |
| start++; |
| #endif |
| #ifdef OPTIONAL_REGISTER_PREFIX |
| if (*start == OPTIONAL_REGISTER_PREFIX) |
| start++; |
| #endif |
| |
| p = start; |
| if (!ISALPHA (*p) || !is_name_beginner (*p)) |
| return NULL; |
| |
| do |
| p++; |
| while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_'); |
| |
| reg = (struct reg_entry *) str_hash_find_n (arm_reg_hsh, start, p - start); |
| |
| if (!reg) |
| return NULL; |
| |
| *ccp = p; |
| return reg; |
| } |
| |
| static int |
| arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg, |
| enum arm_reg_type type) |
| { |
| /* Alternative syntaxes are accepted for a few register classes. */ |
| switch (type) |
| { |
| case REG_TYPE_MVF: |
| case REG_TYPE_MVD: |
| case REG_TYPE_MVFX: |
| case REG_TYPE_MVDX: |
| /* Generic coprocessor register names are allowed for these. */ |
| if (reg && reg->type == REG_TYPE_CN) |
| return reg->number; |
| break; |
| |
| case REG_TYPE_CP: |
| /* For backward compatibility, a bare number is valid here. */ |
| { |
| unsigned long processor = strtoul (start, ccp, 10); |
| if (*ccp != start && processor <= 15) |
| return processor; |
| } |
| /* Fall through. */ |
| |
| case REG_TYPE_MMXWC: |
| /* WC includes WCG. ??? I'm not sure this is true for all |
| instructions that take WC registers. */ |
| if (reg && reg->type == REG_TYPE_MMXWCG) |
| return reg->number; |
| break; |
| |
| default: |
| break; |
| } |
| |
| return FAIL; |
| } |
| |
| /* As arm_reg_parse_multi, but the register must be of type TYPE, and the |
| return value is the register number or FAIL. */ |
| |
| static int |
| arm_reg_parse (char **ccp, enum arm_reg_type type) |
| { |
| char *start = *ccp; |
| struct reg_entry *reg = arm_reg_parse_multi (ccp); |
| int ret; |
| |
| /* Do not allow a scalar (reg+index) to parse as a register. */ |
| if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX)) |
| return FAIL; |
| |
| if (reg && reg->type == type) |
| return reg->number; |
| |
| if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL) |
| return ret; |
| |
| *ccp = start; |
| return FAIL; |
| } |
| |
| /* Parse a Neon type specifier. *STR should point at the leading '.' |
| character. Does no verification at this stage that the type fits the opcode |
| properly. E.g., |
| |
| .i32.i32.s16 |
| .s32.f32 |
| .u16 |
| |
| Can all be legally parsed by this function. |
| |
| Fills in neon_type struct pointer with parsed information, and updates STR |
| to point after the parsed type specifier. Returns SUCCESS if this was a legal |
| type, FAIL if not. */ |
| |
| static int |
| parse_neon_type (struct neon_type *type, char **str) |
| { |
| char *ptr = *str; |
| |
| if (type) |
| type->elems = 0; |
| |
| while (type->elems < NEON_MAX_TYPE_ELS) |
| { |
| enum neon_el_type thistype = NT_untyped; |
| unsigned thissize = -1u; |
| |
| if (*ptr != '.') |
| break; |
| |
| ptr++; |
| |
| /* Just a size without an explicit type. */ |
| if (ISDIGIT (*ptr)) |
| goto parsesize; |
| |
| switch (TOLOWER (*ptr)) |
| { |
| case 'i': thistype = NT_integer; break; |
| case 'f': thistype = NT_float; break; |
| case 'p': thistype = NT_poly; break; |
| case 's': thistype = NT_signed; break; |
| case 'u': thistype = NT_unsigned; break; |
| case 'd': |
| thistype = NT_float; |
| thissize = 64; |
| ptr++; |
| goto done; |
| case 'b': |
| thistype = NT_bfloat; |
| switch (TOLOWER (*(++ptr))) |
| { |
| case 'f': |
| ptr += 1; |
| thissize = strtoul (ptr, &ptr, 10); |
| if (thissize != 16) |
| { |
| as_bad (_("bad size %d in type specifier"), thissize); |
| return FAIL; |
| } |
| goto done; |
| case '0': case '1': case '2': case '3': case '4': |
| case '5': case '6': case '7': case '8': case '9': |
| case ' ': case '.': |
| as_bad (_("unexpected type character `b' -- did you mean `bf'?")); |
| return FAIL; |
| default: |
| break; |
| } |
| break; |
| default: |
| as_bad (_("unexpected character `%c' in type specifier"), *ptr); |
| return FAIL; |
| } |
| |
| ptr++; |
| |
| /* .f is an abbreviation for .f32. */ |
| if (thistype == NT_float && !ISDIGIT (*ptr)) |
| thissize = 32; |
| else |
| { |
| parsesize: |
| thissize = strtoul (ptr, &ptr, 10); |
| |
| if (thissize != 8 && thissize != 16 && thissize != 32 |
| && thissize != 64) |
| { |
| as_bad (_("bad size %d in type specifier"), thissize); |
| return FAIL; |
| } |
| } |
| |
| done: |
| if (type) |
| { |
| type->el[type->elems].type = thistype; |
| type->el[type->elems].size = thissize; |
| type->elems++; |
| } |
| } |
| |
| /* Empty/missing type is not a successful parse. */ |
| if (type->elems == 0) |
| return FAIL; |
| |
| *str = ptr; |
| |
| return SUCCESS; |
| } |
| |
| /* Errors may be set multiple times during parsing or bit encoding |
| (particularly in the Neon bits), but usually the earliest error which is set |
| will be the most meaningful. Avoid overwriting it with later (cascading) |
| errors by calling this function. */ |
| |
| static void |
| first_error (const char *err) |
| { |
| if (!inst.error) |
| inst.error = err; |
| } |
| |
| /* Parse a single type, e.g. ".s32", leading period included. */ |
| static int |
| parse_neon_operand_type (struct neon_type_el *vectype, char **ccp) |
| { |
| char *str = *ccp; |
| struct neon_type optype; |
| |
| if (*str == '.') |
| { |
| if (parse_neon_type (&optype, &str) == SUCCESS) |
| { |
| if (optype.elems == 1) |
| *vectype = optype.el[0]; |
| else |
| { |
| first_error (_("only one type should be specified for operand")); |
| return FAIL; |
| } |
| } |
| else |
| { |
| first_error (_("vector type expected")); |
| return FAIL; |
| } |
| } |
| else |
| return FAIL; |
| |
| *ccp = str; |
| |
| return SUCCESS; |
| } |
| |
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES 15	/* "reg[]" -- operate on every lane.  */
#define NEON_INTERLEAVE_LANES 14

/* Record a use of the given feature in the per-mode "architecture
   used" feature set, so the object file can advertise it.  */
static void
record_feature_use (const arm_feature_set *feature)
{
  if (thumb_mode)
    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
  else
    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
}
| |
| /* If the given feature available in the selected CPU, mark it as used. |
| Returns TRUE iff feature is available. */ |
| static bool |
| mark_feature_used (const arm_feature_set *feature) |
| { |
| |
| /* Do not support the use of MVE only instructions when in auto-detection or |
| -march=all. */ |
| if (((feature == &mve_ext) || (feature == &mve_fp_ext)) |
| && ARM_CPU_IS_ANY (cpu_variant)) |
| { |
| first_error (BAD_MVE_AUTO); |
| return false; |
| } |
| /* Ensure the option is valid on the current architecture. */ |
| if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature)) |
| return false; |
| |
| /* Add the appropriate architecture feature for the barrier option used. |
| */ |
| record_feature_use (feature); |
| |
| return true; |
| } |
| |
/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  Returns FAIL on error; *CCP is
   only advanced on success.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Default: no type, no index.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  /* MVE "MQ" registers are Q registers restricted to q0-q7.  */
  if (type == REG_TYPE_MQ)
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	return FAIL;

      /* NOTE(review): REG is already known non-NULL here (checked
	 above), so the !reg test is redundant.  */
      if (!reg || reg->type != REG_TYPE_NQ)
	return FAIL;

      if (reg->number > 14 && !mark_feature_used (&fpu_vfp_ext_d32))
	{
	  first_error (_("expected MVE register [q0..q7]"));
	  return FAIL;
	}
      type = REG_TYPE_NQ;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (type == REG_TYPE_NQ))
    return FAIL;


  if (type != reg->type)
    return FAIL;

  if (reg->neon)
    atype = *reg->neon;

  /* Optional ".type" suffix, e.g. "d0.s32".  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" or "[]" (all lanes) scalar suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2))
	  && !(type == REG_TYPE_NQ
	       && ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
	{
	  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	    first_error (_("only D and Q registers may be indexed"));
	  else
	    first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
| |
| /* Like arm_reg_parse, but also allow the following extra features: |
| - If RTYPE is non-zero, return the (possibly restricted) type of the |
| register (e.g. Neon double or quad reg when either has been requested). |
| - If this is a Neon vector type with additional type information, fill |
| in the struct pointed to by VECTYPE (if non-NULL). |
| This function will fault on encountering a scalar. */ |
| |
| static int |
| arm_typed_reg_parse (char **ccp, enum arm_reg_type type, |
| enum arm_reg_type *rtype, struct neon_type_el *vectype) |
| { |
| struct neon_typed_alias atype; |
| char *str = *ccp; |
| int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype); |
| |
| if (reg == FAIL) |
| return FAIL; |
| |
| /* Do not allow regname(... to parse as a register. */ |
| if (*str == '(') |
| return FAIL; |
| |
| /* Do not allow a scalar (reg+index) to parse as a register. */ |
| if ((atype.defined & NTA_HASINDEX) != 0) |
| { |
| first_error (_("register operand expected, but got scalar")); |
| return FAIL; |
| } |
| |
| if (vectype) |
| *vectype = atype.eltype; |
| |
| *ccp = str; |
| |
| return reg; |
| } |
| |
/* A scalar is encoded as (register << 4) | index; these macros split
   the encoding apart again.  */
#define NEON_SCALAR_REG(X) ((X) >> 4)
#define NEON_SCALAR_INDEX(X) ((X) & 15)

/* Parse a Neon scalar.  Most of the time when we're parsing a scalar, we don't
   have enough information to be able to do a good job bounds-checking.  So, we
   just do easy checks here, and do further checks later.  */

/* Parse "reg[index]" at *CCP for register class REG_TYPE with ELSIZE-bit
   elements.  On success store the element type in *TYPE (if non-NULL),
   advance *CCP, and return reg*16+index (see NEON_SCALAR_* above);
   otherwise return FAIL.  */
static int
parse_scalar (char **ccp, int elsize, struct neon_type_el *type, enum
	      arm_reg_type reg_type)
{
  int reg;
  char *str = *ccp;
  struct neon_typed_alias atype;
  unsigned reg_size;		/* Register width in bits.  */

  reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);

  switch (reg_type)
    {
    case REG_TYPE_VFS:
      reg_size = 32;
      break;
    case REG_TYPE_VFD:
      reg_size = 64;
      break;
    case REG_TYPE_MQ:
      reg_size = 128;
      break;
    default:
      gas_assert (0);
      return FAIL;
    }

  /* Must have parsed a register WITH an index.  */
  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
    return FAIL;

  /* Only MVE (MQ) scalars may use the "[]" all-lanes form here.  */
  if (reg_type != REG_TYPE_MQ && atype.index == NEON_ALL_LANES)
    {
      first_error (_("scalar must have an index"));
      return FAIL;
    }
  else if (atype.index >= reg_size / elsize)
    {
      first_error (_("scalar index out of range"));
      return FAIL;
    }

  if (type)
    *type = atype.eltype;

  *ccp = str;

  return reg * 16 + atype.index;
}
| |
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_RN,		/* Core registers.  */
  REGLIST_PSEUDO,	/* Pseudo registers (REG_TYPE_PSEUDO).  */
  REGLIST_CLRM,		/* CLRM operand: r0-r12, lr or APSR.  */
  REGLIST_VFP_S,	/* Single-precision VFP registers.  */
  REGLIST_VFP_S_VPR,	/* Single-precision VFP registers plus VPR.  */
  REGLIST_VFP_D,	/* Double-precision VFP registers.  */
  REGLIST_VFP_D_VPR,	/* Double-precision VFP registers plus VPR.  */
  REGLIST_NEON_D	/* Neon D (or paired-as-Q) registers.  */
};
| |
| /* Parse an ARM register list. Returns the bitmask, or FAIL. */ |
| |
| static long |
| parse_reg_list (char ** strp, enum reg_list_els etype) |
| { |
| char *str = *strp; |
| long range = 0; |
| int another_range; |
| |
| gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM |
| || etype == REGLIST_PSEUDO); |
| |
| /* We come back here if we get ranges concatenated by '+' or '|'. */ |
| do |
| { |
| skip_whitespace (str); |
| |
| another_range = 0; |
| |
| if (*str == '{') |
| { |
| int in_range = 0; |
| int cur_reg = -1; |
| |
| str++; |
| do |
| { |
| int reg; |
| const char apsr_str[] = "apsr"; |
| int apsr_str_len = strlen (apsr_str); |
| enum arm_reg_type rt; |
| |
| if (etype == REGLIST_RN || etype == REGLIST_CLRM) |
| rt = REG_TYPE_RN; |
| else |
| rt = REG_TYPE_PSEUDO; |
| |
| reg = arm_reg_parse (&str, rt); |
| |
| if (etype == REGLIST_CLRM) |
| { |
| if (reg == REG_SP || reg == REG_PC) |
| reg = FAIL; |
| else if (reg == FAIL |
| && !strncasecmp (str, apsr_str, apsr_str_len) |
| && !ISALPHA (*(str + apsr_str_len))) |
| { |
| reg = 15; |
| str += apsr_str_len; |
| } |
| |
| if (reg == FAIL) |
| { |
| first_error (_("r0-r12, lr or APSR expected")); |
| return FAIL; |
| } |
| } |
| else if (etype == REGLIST_PSEUDO) |
| { |
| if (reg == FAIL) |
| { |
| first_error (_(reg_expected_msgs[REG_TYPE_PSEUDO])); |
| return FAIL; |
| } |
| } |
| else /* etype == REGLIST_RN. */ |
| { |
| if (reg == FAIL) |
| { |
| first_error (_(reg_expected_msgs[REGLIST_RN])); |
| return FAIL; |
| } |
| } |
| |
| if (in_range) |
| { |
| int i; |
| |
| if (reg <= cur_reg) |
| { |
| first_error (_("bad range in register list")); |
| return FAIL; |
| } |
| |
| for (i = cur_reg + 1; i < reg; i++) |
| { |
| if (range & (1 << i)) |
| as_tsktsk |
| (_("Warning: duplicated register (r%d) in register list"), |
| i); |
| else |
| range |= 1 << i; |
| } |
| in_range = 0; |
| } |
| |
| if (range & (1 << reg)) |
| as_tsktsk (_("Warning: duplicated register (r%d) in register list"), |
| reg); |
| else if (reg <= cur_reg) |
| as_tsktsk (_("Warning: register range not in ascending order")); |
| |
| range |= 1 << reg; |
| cur_reg = reg; |
| } |
| while (skip_past_comma (&str) != FAIL |
| || (in_range = 1, *str++ == '-')); |
| str--; |
| |
| if (skip_past_char (&str, '}') == FAIL) |
| { |
| first_error (_("missing `}'")); |
| return FAIL; |
| } |
| } |
| else if (etype == REGLIST_RN) |
| { |
| expressionS exp; |
| |
| if (my_get_expression (&exp, &str, GE_NO_PREFIX)) |
| return FAIL; |
| |
| if (exp.X_op == O_constant) |
| { |
| if (exp.X_add_number |
| != (exp.X_add_number & 0x0000ffff)) |
| { |
| inst.error = _("invalid register mask"); |
| return FAIL; |
| } |
| |
| if ((range & exp.X_add_number) != 0) |
| { |
| int regno = range & exp.X_add_number; |
| |
| regno &= -regno; |
| regno = (1 << regno) - 1; |
| as_tsktsk |
| (_("Warning: duplicated register (r%d) in register list"), |
| regno); |
| } |
| |
| range |= exp.X_add_number; |
| } |
| else |
| { |
| if (inst.relocs[0].type != 0) |
| { |
| inst.error = _("expression too complex"); |
| return FAIL; |
| } |
| |
| memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS)); |
| inst.relocs[0].type = BFD_RELOC_ARM_MULTI; |
| inst.relocs[0].pc_rel = 0; |
| } |
| } |
| |
| if (*str == '|' || *str == '+') |
| { |
| str++; |
| another_range = 1; |
| } |
| } |
| while (another_range); |
| |
| *strp = str; |
| return range; |
| } |
| |
| /* Parse a VFP register list. If the string is invalid return FAIL. |
| Otherwise return the number of registers, and set PBASE to the first |
| register. Parses registers of type ETYPE. |
| If REGLIST_NEON_D is used, several syntax enhancements are enabled: |
| - Q registers can be used to specify pairs of D registers |
| - { } can be omitted from around a singleton register list |
| FIXME: This is not implemented, as it would require backtracking in |
| some cases, e.g.: |
| vtbl.8 d3,d4,d5 |
| This could be done (the meaning isn't really ambiguous), but doesn't |
| fit in well with the current parsing framework. |
| - 32 D registers may be used (also true for VFPv3). |
| FIXME: Types are ignored in these register lists, which is probably a |
| bug. */ |
| |
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
		    bool *partial_match)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;	/* Bitmask of the registers seen so far.  */
  int i;
  bool vpr_seen = false;
  bool expect_vpr =
    (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);

  /* A VFP register list always starts with an opening brace.  */
  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register type and (for S registers) the highest legal
     register number for this kind of list.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
    case REGLIST_VFP_S_VPR:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
    case REGLIST_VFP_D_VPR:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;

    default:
      gas_assert (0);
    }

  if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  base_reg = max_regs;
  *partial_match = false;

  do
    {
      unsigned int setmask = 1, addregs = 1;
      const char vpr_str[] = "vpr";
      size_t vpr_str_len = strlen (vpr_str);

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (expect_vpr)
	{
	  /* "vpr" may appear at most once, and only as the final element.  */
	  if (new_base == FAIL
	      && !strncasecmp (str, vpr_str, vpr_str_len)
	      && !ISALPHA (*(str + vpr_str_len))
	      && !vpr_seen)
	    {
	      vpr_seen = true;
	      str += vpr_str_len;
	      if (count == 0)
		base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs.  */
	    }
	  else if (vpr_seen)
	    {
	      first_error (_("VPR expected last"));
	      return FAIL;
	    }
	  else if (new_base == FAIL)
	    {
	      if (regtype == REG_TYPE_VFS)
		first_error (_("VFP single precision register or VPR "
			       "expected"));
	      else /* regtype == REG_TYPE_VFD.  */
		first_error (_("VFP/Neon double precision register or VPR "
			       "expected"));
	      return FAIL;
	    }
	}
      else if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      *partial_match = true;
      if (vpr_seen)
	continue;

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      /* Reject a register that appears more than once in the list.  */
      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark every register covered by the range.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  str++;	/* Step past the closing '}' (not re-checked here).  */

  /* Sanity check -- should have raised a parse error above.  */
  if ((!vpr_seen && count == 0) || count > max_regs)
    abort ();

  *pbase = base_reg;

  if (expect_vpr && !vpr_seen)
    {
      first_error (_("VPR expected last"));
      return FAIL;
    }

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
| |
| /* True if two alias types are the same. */ |
| |
| static bool |
| neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b) |
| { |
| if (!a && !b) |
| return true; |
| |
| if (!a || !b) |
| return false; |
| |
| if (a->defined != b->defined) |
| return false; |
| |
| if ((a->defined & NTA_HASTYPE) != 0 |
| && (a->eltype.type != b->eltype.type |
| || a->eltype.size != b->eltype.size)) |
| return false; |
| |
| if ((a->defined & NTA_HASINDEX) != 0 |
| && (a->index != b->index)) |
| return false; |
| |
| return true; |
| } |
| |
| /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions. |
| The base register is put in *PBASE. |
| The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of |
| the return value. |
| The register stride (minus one) is put in bit 4 of the return value. |
| Bits [6:5] encode the list length (minus one). |
| The type of the list elements is put in *ELTYPE, if non-NULL. */ |
| |
| #define NEON_LANE(X) ((X) & 0xf) |
| #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1) |
| #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1) |
| |
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   int mve,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	/* First register in the list.  */
  int reg_incr = -1;	/* Register stride; -1 until established.  */
  int count = 0;	/* Number of D registers accumulated so far.  */
  int lane = -1;	/* Lane index, or one of the NEON_*_LANES values.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = mve ? _("register stride must be 1") :
    _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  /* Braces are optional around a single-register list.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      if (mve)
	rtype = REG_TYPE_MQ;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First element fixes the base register and the element type.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second element fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent elements must continue the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Register numbers are in D units (2 * n for Q<n>), so this adds
	     the count of D registers covered by the range.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* Every indexed element must name the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  /* Mixing indexed and non-indexed elements is an error.  */
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || (!mve && count > 4)
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length per the encoding documented above.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
| |
| /* Parse an explicit relocation suffix on an expression. This is |
| either nothing, or a word in parentheses. Note that if !OBJ_ELF, |
| arm_reloc_hsh contains no entries, so this function can only |
| succeed if there is no () after the word. Returns -1 on error, |
| BFD_RELOC_UNUSED if there wasn't any suffix. */ |
| |
| static int |
| parse_reloc (char **str) |
| { |
| struct reloc_entry *r; |
| char *p, *q; |
| |
| if (**str != '(') |
| return BFD_RELOC_UNUSED; |
| |
| p = *str + 1; |
| q = p; |
| |
| while (*q && *q != ')' && *q != ',') |
| q++; |
| if (*q != ')') |
| return -1; |
| |
| if ((r = (struct reloc_entry *) |
| str_hash_find_n (arm_reloc_hsh, p, q - p)) == NULL) |
| return -1; |
| |
| *str = q + 1; |
| return r->reloc; |
| } |
| |
| /* Directives: register aliases. */ |
| |
| static struct reg_entry * |
| insert_reg_alias (char *str, unsigned number, int type) |
| { |
| struct reg_entry *new_reg; |
| const char *name; |
| |
| if ((new_reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, str)) != 0) |
| { |
| if (new_reg->builtin) |
| as_warn (_("ignoring attempt to redefine built-in register '%s'"), str); |
| |
| /* Only warn about a redefinition if it's not defined as the |
| same register. */ |
| else if (new_reg->number != number || new_reg->type != type) |
| as_warn (_("ignoring redefinition of register alias '%s'"), str); |
| |
| return NULL; |
| } |
| |
| name = xstrdup (str); |
| new_reg = XNEW (struct reg_entry); |
| |
| new_reg->name = name; |
| new_reg->number = number; |
| new_reg->type = type; |
| new_reg->builtin = false; |
| new_reg->neon = NULL; |
| |
| str_hash_insert (arm_reg_hsh, name, new_reg, 0); |
| |
| return new_reg; |
| } |
| |
| static void |
| insert_neon_reg_alias (char *str, int number, int type, |
| struct neon_typed_alias *atype) |
| { |
| struct reg_entry *reg = insert_reg_alias (str, number, type); |
| |
| if (!reg) |
| { |
| first_error (_("attempt to redefine typed alias")); |
| return; |
| } |
| |
| if (atype) |
| { |
| reg->neon = XNEW (struct neon_typed_alias); |
| *reg->neon = *atype; |
| } |
| } |
| |
| /* Look for the .req directive. This is of the form: |
| |
| new_register_name .req existing_register_name |
| |
| If we find one, or if it looks sufficiently like one that we want to |
| handle any error here, return TRUE. Otherwise return FALSE. */ |
| |
static bool
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  /* The alias target must already be a known register or alias.  */
  old = (struct reg_entry *) str_hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the case-folded variants if they differ from the
	 alias as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
| |
| /* Create a Neon typed/indexed register alias using directives, e.g.: |
| X .dn d5.s32[1] |
| Y .qn 6.s16 |
| Z .dn d7 |
| T .dn Z[0] |
| These typed registers can be used instead of the types specified after the |
| Neon mnemonic, so long as all operands given have types. Types can also be |
| specified directly, e.g.: |
| vadd d0.s32, d1.s32, d2.s32 */ |
| |
static bool
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* Decide between .dn (D register) and .qn (Q register) aliases.  */
  if (startswith (p, " .dn "))
    basetype = REG_TYPE_VFD;
  else if (startswith (p, " .qn "))
    basetype = REG_TYPE_NQ;
  else
    return false;

  p += 5;

  if (*p == '\0')
    return false;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return false;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return false;
	}
      /* Build a temporary reg_entry from the bare number; Q register
	 numbers are stored in D units (2 * n).  */
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Aliasing an already-typed alias inherits its type information.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return false;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return false;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return false;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return false;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return false;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return true;
}
| |
| /* Should never be called, as .req goes between the alias and the |
| register name, not at the beginning of the line. */ |
| |
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* .req is handled in create_register_alias; reaching here means the
     directive appeared without a preceding alias name.  */
  as_bad (_("invalid syntax for .req directive"));
}
| |
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* .dn is handled in create_neon_reg_alias; reaching here means the
     directive appeared without a preceding alias name.  */
  as_bad (_("invalid syntax for .dn directive"));
}
| |
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* .qn is handled in create_neon_reg_alias; reaching here means the
     directive appeared without a preceding alias name.  */
  as_bad (_("invalid syntax for .qn directive"));
}
| |
| /* The .unreq directive deletes an alias which was previously defined |
| by .req. For example: |
| |
| my_alias .req r11 |
| .unreq my_alias */ |
| |
| static void |
| s_unreq (int a ATTRIBUTE_UNUSED) |
| { |
| char * name; |
| char saved_char; |
| |
| name = input_line_pointer; |
| input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri); |
| saved_char = *input_line_pointer; |
| *input_line_pointer = 0; |
| |
| if (!*name) |
| as_bad (_("invalid syntax for .unreq directive")); |
| else |
| { |
| struct reg_entry *reg |
| = (struct reg_entry *) str_hash_find (arm_reg_hsh, name); |
| |
| if (!reg) |
| as_bad (_("unknown register alias '%s'"), name); |
| else if (reg->builtin) |
| as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"), |
| name); |
| else |
| { |
| char * p; |
| char * nbuf; |
| |
| str_hash_delete (arm_reg_hsh, name); |
| free ((char *) reg->name); |
| free (reg->neon); |
| free (reg); |
| |
| /* Also locate the all upper case and all lower case versions. |
| Do not complain if we cannot find one or the other as it |
| was probably deleted above. */ |
| |
| nbuf = strdup (name); |
| for (p = nbuf; *p; p++) |
| *p = TOUPPER (*p); |
| reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, nbuf); |
| if (reg) |
| { |
| str_hash_delete (arm_reg_hsh, nbuf); |
| free ((char *) reg->name); |
| free (reg->neon); |
| free (reg); |
| } |
| |
| for (p = nbuf; *p; p++) |
| *p = TOLOWER (*p); |
| reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, nbuf); |
| if (reg) |
| { |
| str_hash_delete (arm_reg_hsh, nbuf); |
| free ((char *) reg->name); |
| free (reg->neon); |
| free (reg); |
| } |
| |
| free (nbuf); |
| } |
| } |
| |
| *input_line_pointer = saved_char; |
| demand_empty_rest_of_line (); |
| } |
| |
| /* Directives: Instruction set selection. */ |
| |
| #ifdef OBJ_ELF |
| /* This code is to handle mapping symbols as defined in the ARM ELF spec. |
| (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0). |
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
| and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */ |
| |
| /* Create a new mapping symbol for the transition to STATE. */ |
| |
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping-symbol name for the new state.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark code mapping symbols with the appropriate ARM/Thumb and
     interworking attributes.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
| |
| /* We must sometimes convert a region marked as code to data during |
| code alignment, if an odd number of bytes have to be padded. The |
| code mapping symbol is pushed to an aligned address. */ |
| |
| static void |
| insert_data_mapping_symbol (enum mstate state, |
| valueT value, fragS *frag, offsetT bytes) |
| { |
| /* If there was already a mapping symbol, remove it. */ |
| if (frag->tc_frag_data.last_map != NULL |
| && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value) |
| { |
| symbolS *symp = frag->tc_frag_data.last_map; |
| |
| if (value == 0) |
| { |
| know (frag->tc_frag_data.first_map == symp); |
| frag->tc_frag_data.first_map = NULL; |
| } |
| frag->tc_frag_data.last_map = NULL; |
| symbol_remove (symp, &symbol_rootP, &symbol_lastP); |
| } |
| |
| make_mapping_symbol (MAP_DATA, value, frag); |
| make_mapping_symbol (state, value + bytes, frag); |
| } |
| |
| static void mapping_state_2 (enum mstate state, int max_chars); |
| |
| /* Set the mapping state to STATE. Only call this when about to |
| emit some STATE bytes to the file. */ |
| |
| #define TRANSITION(from, to) (mapstate == (from) && state == (to)) |
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* Emit the mapping symbol at the current position.  */
  mapping_state_2 (state, 0);
}
| |
| /* Same as mapping_state, but MAX_CHARS bytes have already been |
| allocated. Put the mapping symbol that far back. */ |
| |
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only emit mapping symbols into ordinary (normal) sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* If this is the first code emitted into the section and it does not
     start at the very beginning, mark the preceding bytes as data.  */
  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
| #undef TRANSITION |
| #else |
| #define mapping_state(x) ((void)0) |
| #define mapping_state_2(x, y) ((void)0) |
| #endif |
| |
| /* Find the real, Thumb encoded start of a Thumb function. */ |
| |
| #ifdef OBJ_COFF |
| static symbolS * |
| find_real_start (symbolS * symbolP) |
| { |
| char * real_start; |
| const char * name = S_GET_NAME (symbolP); |
| symbolS * new_target; |
| |
| /* This definition must agree with the one in gcc/config/arm/thumb.c. */ |
| #define STUB_NAME ".real_start_of" |
| |
| if (name == NULL) |
| abort (); |
| |
| /* The compiler may generate BL instructions to local labels because |
| it needs to perform a branch to a far away location. These labels |
| do not have a corresponding ".real_start_of" label. We check |
| both for S_IS_LOCAL and for a leading dot, to give a way to bypass |
| the ".real_start_of" convention for nonlocal branches. */ |
| if (S_IS_LOCAL (symbolP) || name[0] == '.') |
| return symbolP; |
| |
| real_start = concat (STUB_NAME, name, NULL); |
| new_target = symbol_find (real_start); |
| free (real_start); |
| |
| if (new_target == NULL) |
| { |
| as_warn (_("Failed to find real start of function: %s\n"), name); |
| new_target = symbolP; |
| } |
| |
| return new_target; |
| } |
| #endif |
| |
| static void |
| opcode_select (int width) |
| { |
| switch (width) |
| { |
| case 16: |
| if (! thumb_mode) |
| { |
| if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t)) |
| as_bad (_("selected processor does not support THUMB opcodes")); |
| |
| thumb_mode = 1; |
| /* No need to force the alignment, since we will have been |
| coming from ARM mode, which is word-aligned. */ |
| record_alignment (now_seg, 1); |
| } |
| break; |
| |
| case 32: |
| if (thumb_mode) |
| { |
| if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)) |
| as_bad (_("selected processor does not support ARM opcodes")); |
| |
| thumb_mode = 0; |
| |
| if (!need_pass_2) |
| frag_align (2, 0, 0); |
| |
| record_alignment (now_seg, 1); |
| } |
| break; |
| |
| default: |
| as_bad (_("invalid instruction size selected (%d)"), width); |
| } |
| } |
| |
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle the .arm directive: switch to 32-bit ARM encoding.  */
  opcode_select (32);
  demand_empty_rest_of_line ();
}
| |
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle the .thumb directive: switch to 16-bit Thumb encoding.  */
  opcode_select (16);
  demand_empty_rest_of_line ();
}
| |
| static void |
| s_code (int unused ATTRIBUTE_UNUSED) |
| { |
| int temp; |
| |
| temp = get_absolute_expression (); |
| switch (temp) |
| { |
| case 16: |
| case 32: |
| opcode_select (temp); |
| break; |
| |
| default: |
| as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp); |
| } |
| demand_empty_rest_of_line (); |
| } |
| |
| static void |
| s_force_thumb (int ignore ATTRIBUTE_UNUSED) |
| { |
| /* If we are not already in thumb mode go into it, EVEN if |
| the target processor does not support thumb instructions. |
| This is used by gcc/config/arm/lib1funcs.asm for example |
| to compile interworking support functions even if the |
| target processor should not support interworking. */ |
| if (! thumb_mode) |
| { |
| thumb_mode = 2; |
| record_alignment (now_seg, 1); |
| } |
| |
| demand_empty_rest_of_line (); |
| } |
| |
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle .thumb_func: switch to Thumb mode and flag the next label.  */
  s_thumb (0);	/* Will check for end-of-line.  */

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = true;
}
| |
| /* Perform a .set directive, but also mark the alias as |
| being a thumb function. */ |
| |
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.
     EQUIV is presumably nonzero when the directive should behave like
     .equiv (error if the symbol is already defined) -- set by the
     pseudo-op table; verify against its registration.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Re-terminate the name just for the error message.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Create the symbol if it does not exist yet.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, dummy_frag, 0);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, &zero_address_frag, 0);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    } /* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
| |
| /* Directives: Mode selection. */ |
| |
| /* .syntax [unified|divided] - choose the new unified syntax |
| (same for Arm and Thumb encoding, modulo slight differences in what |
| can be represented) or the old divergent syntax for each mode. */ |
| static void |
| s_syntax (int unused ATTRIBUTE_UNUSED) |
| { |
| char *name, delim; |
| |
| delim = get_symbol_name (& name); |
| |
| if (!strcasecmp (name, "unified")) |
| unified_syntax = true; |
| else if (!strcasecmp (name, "divided")) |
| unified_syntax = false; |
| else |
| { |
| as_bad (_("unrecognized syntax mode \"%s\""), name); |
| return; |
| } |
| (void) restore_line_pointer (delim); |
| demand_empty_rest_of_line (); |
| } |
| |
| /* Directives: alignment. */ |
| |
/* Handle the .even directive: pad the current section to a 2-byte
   (2**1) boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Record the 2**1 alignment requirement on the output section.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
| |
| /* Directives: CodeComposer Studio. */ |
| |
| /* .ref (for CodeComposer Studio syntax only). */ |
| static void |
| s_ccs_ref (int unused ATTRIBUTE_UNUSED) |
| { |
| if (codecomposer_syntax) |
| ignore_rest_of_line (); |
| else |
| as_bad (_(".ref pseudo-op only available with -mccs flag.")); |
| } |
| |
| /* If name is not NULL, then it is used for marking the beginning of a |
| function, whereas if it is NULL then it means the function end. */ |
| static void |
| asmfunc_debug (const char * name) |
| { |
| static const char * last_name = NULL; |
| |
| if (name != NULL) |
| { |
| gas_assert (last_name == NULL); |
| last_name = name; |
| |
| if (debug_type == DEBUG_STABS) |
| stabs_generate_asm_func (name, name); |
| } |
| else |
| { |
| gas_assert (last_name != NULL); |
| |
| if (debug_type == DEBUG_STABS) |
| stabs_generate_asm_endfunc (last_name, last_name); |
| |
| last_name = NULL; |
| } |
| } |
| |
| static void |
| s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED) |
| { |
| if (codecomposer_syntax) |
| { |
| switch (asmfunc_state) |
| { |
| case OUTSIDE_ASMFUNC: |
| asmfunc_state = WAITING_ASMFUNC_NAME; |
| break; |
| |
| case WAITING_ASMFUNC_NAME: |
| as_bad (_(".asmfunc repeated.")); |
| break; |
| |
| case WAITING_ENDASMFUNC: |
| as_bad (_(".asmfunc without function.")); |
| break; |
| } |
| demand_empty_rest_of_line (); |
| } |
| else |
| as_bad (_(".asmfunc pseudo-op only available with -mccs flag.")); |
| } |
| |
| static void |
| s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED) |
| { |
| if (codecomposer_syntax) |
| { |
| switch (asmfunc_state) |
| { |
| case OUTSIDE_ASMFUNC: |
| as_bad (_(".endasmfunc without a .asmfunc.")); |
| break; |
| |
| case WAITING_ASMFUNC_NAME: |
| as_bad (_(".endasmfunc without function.")); |
| break; |
| |
| case WAITING_ENDASMFUNC: |
| asmfunc_state = OUTSIDE_ASMFUNC; |
| asmfunc_debug (NULL); |
| break; |
| } |
| demand_empty_rest_of_line (); |
| } |
| else |
| as_bad (_(".endasmfunc pseudo-op only available with -mccs flag.")); |
| } |
| |
| static void |
| s_ccs_def (int name) |
| { |
| if (codecomposer_syntax) |
| s_globl (name); |
| else |
| as_bad (_(".def pseudo-op only available with -mccs flag.")); |
| } |
| |
| /* Directives: Literal pools. */ |
| |
| static literal_pool * |
| find_literal_pool (void) |
| { |
| literal_pool * pool; |
| |
| for (pool = list_of_pools; pool != NULL; pool = pool->next) |
| { |
| if (pool->section == now_seg |
| && pool->sub_section == now_subseg) |
| break; |
| } |
| |
| return pool; |
| } |
| |
| static literal_pool * |
| find_or_make_literal_pool (void) |
| { |
| /* Next literal pool ID number. */ |
| static unsigned int latest_pool_num = 1; |
| literal_pool * pool; |
| |
| pool = find_literal_pool (); |
| |
| if (pool == NULL) |
| { |
| /* Create a new pool. */ |
| pool = XNEW (literal_pool); |
| if (! pool) |
| return NULL; |
| |
| pool->next_free_entry = 0; |
| pool->section = now_seg; |
| pool->sub_section = now_subseg; |
| pool->next = list_of_pools; |
| pool->symbol = NULL; |
| pool->alignment = 2; |
| |
| /* Add it to the list. */ |
| list_of_pools = pool; |
| } |
| |
| /* New pools, and emptied pools, will have a NULL symbol. */ |
| if (pool->symbol == NULL) |
| { |
| pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section, |
| &zero_address_frag, 0); |
| pool->id = latest_pool_num ++; |
| } |
| |
| /* Done. */ |
| return pool; |
| } |
| |
| /* Add the literal in the global 'inst' |
| structure to the relevant literal pool. */ |
| |
static int
add_to_lit_pool (unsigned int nbytes)
{
  /* The X_md field of a pool entry encodes the entry size in its low
     byte and flags in its high byte; PADDING_SLOT marks a 4-byte
     alignment filler that may later be reused for a 4-byte literal.  */
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bool padding_slot_p = false;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* An 8-byte literal is handled as two 32-bit words: imm1 is the
	 word stored first, imm2 the word stored second (swapped for
	 big-endian targets).  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.relocs[0].exp.X_unsigned ? 0
	       : (int64_t) inst.operands[1].imm >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  As a side
     effect, pool_size accumulates the byte offset (4 bytes per scanned
     entry) of whatever entry we stop at — matched, reused or new.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Reuse an identical 4-byte constant...  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* ... or an identical symbolic reference.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal matches a pair of consecutive 4-byte entries
	 holding its two words, provided the pair sits on an 8-byte
	 boundary within the pool.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	break;

      /* A previously inserted alignment filler can be recycled to hold
	 a new 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Insert a 4-byte padding slot to reach 8-byte alignment.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 32-bit words as separate O_constant entries.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Overwrite the recycled padding slot with this literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's relocation to reference the pool symbol
     at the entry's byte offset within the pool.  */
  inst.relocs[0].exp.X_op	      = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
| |
| bool |
| tc_start_label_without_colon (void) |
| { |
| bool ret = true; |
| |
| if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME) |
| { |
| const char *label = input_line_pointer; |
| |
| while (!is_end_of_line[(int) label[-1]]) |
| --label; |
| |
| if (*label == '.') |
| { |
| as_bad (_("Invalid label '%s'"), label); |
| ret = false; |
| } |
| |
| asmfunc_debug (label); |
| |
| asmfunc_state = WAITING_ENDASMFUNC; |
| } |
| |
| return ret; |
| } |
| |
| /* Can't use symbol_new here, so have to create a symbol and then at |
| a later date assign it a value. That's what these functions do. */ |
| |
| static void |
| symbol_locate (symbolS * symbolP, |
| const char * name, /* It is copied, the caller can modify. */ |
| segT segment, /* Segment identifier (SEG_<something>). */ |
| valueT valu, /* Symbol value. */ |
| fragS * frag) /* Associated fragment. */ |
| { |
| size_t name_length; |
| char * preserved_copy_of_name; |
| |
| name_length = strlen (name) + 1; /* +1 for \0. */ |
| obstack_grow (¬es, name, name_length); |
| preserved_copy_of_name = (char *) obstack_finish (¬es); |
| |
| #ifdef tc_canonicalize_symbol_name |
| preserved_copy_of_name = |
| tc_canonicalize_symbol_name (preserved_copy_of_name); |
| #endif |
| |
| S_SET_NAME (symbolP, preserved_copy_of_name); |
| |
| S_SET_SEGMENT (symbolP, segment); |
| S_SET_VALUE (symbolP, valu); |
| symbol_clear_list_pointers (symbolP); |
| |
| symbol_set_frag (symbolP, frag); |
| |
| /* Link to end of symbol chain. */ |
| { |
| extern int symbol_table_frozen; |
| |
| if (symbol_table_frozen) |
| abort (); |
| } |
| |
| symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP); |
| |
| obj_symbol_new_hook (symbolP); |
| |
| #ifdef tc_symbol_new_hook |
| tc_symbol_new_hook (symbolP); |
| #endif |
| |
| #ifdef DEBUG_SYMS |
| verify_symbol_chain (symbol_rootP, symbol_lastP); |
| #endif /* DEBUG_SYMS */ |
| } |
| |
| static void |
| s_ltorg (int ignored ATTRIBUTE_UNUSED) |
| { |
| unsigned int entry; |
| literal_pool * pool; |
| char sym_name[20]; |
| |
| demand_empty_rest_of_line (); |
| pool = find_literal_pool (); |
| if (pool == NULL |
| || pool->symbol == NULL |
| || pool->next_free_entry == 0) |
| return; |
| |
| /* Align pool as you have word accesses. |
| Only make a frag if we have to. */ |
| if (!need_pass_2) |
| frag_align (pool->alignment, 0, 0); |
| |
| record_alignment (now_seg, 2); |
| |
| #ifdef OBJ_ELF |
| seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA; |
| make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now); |
| #endif |
| sprintf (sym_name, "$$lit_\002%x", pool->id); |
| |
| symbol_locate (pool->symbol, sym_name, now_seg, |
| (valueT) frag_now_fix (), frag_now); |
| symbol_table_insert (pool->symbol); |
| |
| ARM_SET_THUMB (pool->symbol, thumb_mode); |
| |
| #if defined OBJ_COFF || defined OBJ_ELF |
| ARM_SET_INTERWORK (pool->symbol, support_interwork); |
| #endif |
| |
| for (entry = 0; entry < pool->next_free_entry; entry ++) |
| { |
| #ifdef OBJ_ELF |
| if (debug_type == DEBUG_DWARF2) |
| dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry); |
| #endif |
| /* First output the expression in the instruction to the pool. */ |
| emit_expr (&(pool->literals[entry]), |
| pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK); |
| } |
| |
| /* Mark the pool as empty. */ |
| pool->next_free_entry = 0; |
| pool->symbol = NULL; |
| } |
| |
| #ifdef OBJ_ELF |
| /* Forward declarations for functions below, in the MD interface |
| section. */ |
| static void fix_new_arm (fragS *, int, short, expressionS *, int, int); |
| static valueT create_unwind_entry (int); |
| static void start_unwind_section (const segT, int); |
| static void add_unwind_opcode (valueT, int); |
| static void flush_pending_unwind (void); |
| |
| /* Directives: Data. */ |
| |
/* Handle .word etc. for ELF: like cons, but also accepts a relocation
   suffix (e.g. "(got)") after a symbolic operand.  */
static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* This emits data, so update the mapping symbol state.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic operand may carry a relocation suffix; try to
	     parse one.  */
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present: emit the expression normally.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the consumed text, shift the pre-suffix part of
		     the line over the suffix, re-parse the whole operand,
		     then restore the buffer to its original contents.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Emit NBYTES of zeros and attach the fixup to the
		     last SIZE bytes of the field.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
| |
| /* Emit an expression containing a 32-bit thumb instruction. |
| Implementation based on put_thumb32_insn. */ |
| |
| static void |
| emit_thumb32_expr (expressionS * exp) |
| { |
| expressionS exp_high = *exp; |
| |
| exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16; |
| emit_expr (& exp_high, (unsigned int) THUMB_SIZE); |
| exp->X_add_number &= 0xffff; |
| emit_expr (exp, (unsigned int) THUMB_SIZE); |
| } |
| |
| /* Guess the instruction size based on the opcode. */ |
| |
/* Guess the instruction size based on the opcode: 2 for a 16-bit
   encoding, 4 for a 32-bit encoding, 0 when it cannot be deduced.  */
static int
thumb_insn_size (int opcode)
{
  unsigned int op = (unsigned int) opcode;

  if (op < 0xe800u)
    return 2;
  if (op >= 0xe8000000u)
    return 4;
  return 0;
}
| |
/* Emit EXP as one instruction of NBYTES (2, 4, or 0 meaning "deduce
   from the opcode").  Returns true if data was emitted, false after
   reporting an error.  */
static bool
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width suffix: guess it from the opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  /* A 16-bit encoding cannot hold more than 16 bits.  */
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT/predication state machine consistent
		 across hand-emitted instructions.  */
	      if (now_pred.state == AUTOMATIC_PRED_BLOCK)
		set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN, 0);
	      else
		set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* Little-endian 32-bit Thumb is emitted as two ordered
		 halfwords; everything else goes out as one unit.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
| |
| /* Like s_arm_elf_cons but do not use md_cons_align and |
| set the mapping state to MAP_ARM/MAP_THUMB. */ |
| |
| static void |
| s_arm_elf_inst (int nbytes) |
| { |
| if (is_it_end_of_statement ()) |
| { |
| demand_empty_rest_of_line (); |
| return; |
| } |
| |
| /* Calling mapping_state () here will not change ARM/THUMB, |
| but will ensure not to be in DATA state. */ |
| |
| if (thumb_mode) |
| mapping_state (MAP_THUMB); |
| else |
| { |
| if (nbytes != 0) |
| { |
| as_bad (_("width suffixes are invalid in ARM mode")); |
| ignore_rest_of_line (); |
| return; |
| } |
| |
| nbytes = 4; |
| |
| mapping_state (MAP_ARM); |
| } |
| |
| dwarf2_emit_insn (0); |
| |
| do |
| { |
| expressionS exp; |
| |
| expression (& exp); |
| |
| if (! emit_insn (& exp, nbytes)) |
| { |
| ignore_rest_of_line (); |
| return; |
| } |
| } |
| while (*input_line_pointer++ == ','); |
| |
| /* Put terminator back into stream. */ |
| input_line_pointer --; |
| demand_empty_rest_of_line (); |
| } |
| |
| /* Parse a .rel31 directive. */ |
| |
| static void |
| s_arm_rel31 (int ignored ATTRIBUTE_UNUSED) |
| { |
| expressionS exp; |
| char *p; |
| valueT highbit; |
| |
| highbit = 0; |
| if (*input_line_pointer == '1') |
| highbit = 0x80000000; |
| else if (*input_line_pointer != '0') |
| as_bad (_("expected 0 or 1")); |
| |
| input_line_pointer++; |
| if (*input_line_pointer != ',') |
| as_bad (_("missing comma")); |
| input_line_pointer++; |
| |
| #ifdef md_flush_pending_output |
| md_flush_pending_output (); |
| #endif |
| |
| #ifdef md_cons_align |
| md_cons_align (4); |
| #endif |
| |
| mapping_state (MAP_DATA); |
| |
| expression (&exp); |
| |
| p = frag_more (4); |
| md_number_to_chars (p, highbit, 4); |
| fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1, |
| BFD_RELOC_ARM_PREL31); |
| |
| demand_empty_rest_of_line (); |
| } |
| |
| /* Directives: AEABI stack-unwind tables. */ |
| |
| /* Parse an unwind_fnstart directive. Simply records the current location. */ |
| |
| static void |
| s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED) |
| { |
| demand_empty_rest_of_line (); |
| if (unwind.proc_start) |
| { |
| as_bad (_("duplicate .fnstart directive")); |
| return; |
| } |
| |
| /* Mark the start of the function. */ |
| unwind.proc_start = expr_build_dot (); |
| |
| /* Reset the rest of the unwind info. */ |
| unwind.opcode_count = 0; |
| unwind.table_entry = NULL; |
| unwind.personality_routine = NULL; |
| unwind.personality_index = -1; |
| unwind.frame_size = 0; |
| unwind.fp_offset = 0; |
| unwind.fp_reg = REG_SP; |
| unwind.fp_used = 0; |
| unwind.sp_restored = 0; |
| } |
| |
| /* Parse a handlerdata directive. Creates the exception handling table entry |
| for the function. */ |
| |
| static void |
| s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED) |
| { |
| demand_empty_rest_of_line (); |
| if (!unwind.proc_start) |
| as_bad (MISSING_FNSTART); |
| |
| if (unwind.table_entry) |
| as_bad (_("duplicate .handlerdata directive")); |
| |
| create_unwind_entry (1); |
| } |
| |
| /* Parse an unwind_fnend directive. Generates the index table entry. */ |
| |
static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  If .handlerdata already created one,
     table_entry is set and val stays 0, meaning the index entry will
     reference the table entry instead of holding inline data.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fixup: emits no data but makes the
	 linker pull in the personality routine.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  /* Allow a subsequent .fnstart.  */
  unwind.proc_start = NULL;
}
| |
| |
| /* Parse an unwind_cantunwind directive. */ |
| |
| static void |
| s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED) |
| { |
| demand_empty_rest_of_line (); |
| if (!unwind.proc_start) |
| as_bad (MISSING_FNSTART); |
| |
| if (unwind.personality_routine || unwind.personality_index != -1) |
| as_bad (_("personality routine specified for cantunwind frame")); |
| |
| unwind.personality_index = -2; |
| } |
| |
| |
| /* Parse a personalityindex directive. */ |
| |
| static void |
| s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED) |
| { |
| expressionS exp; |
| |
| if (!unwind.proc_start) |
| as_bad (MISSING_FNSTART); |
| |
| if (unwind.personality_routine || unwind.personality_index != -1) |
| as_bad (_("duplicate .personalityindex directive")); |
| |
| expression (&exp); |
| |
| if (exp.X_op != O_constant |
| || exp.X_add_number < 0 || exp.X_add_number > 15) |
| { |
| as_bad (_("bad personality routine number")); |
| ignore_rest_of_line (); |
| return; |
| } |
| |
| unwind.personality_index = exp.X_add_number; |
| |
| demand_empty_rest_of_line (); |
| } |
| |
| |
| /* Parse a personality directive. */ |
| |
| static void |
| s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED) |
| { |
| char *name, *p, c; |
| |
| if (!unwind.proc_start) |
| as_bad (MISSING_FNSTART); |
| |
| if (unwind.personality_routine || unwind.personality_index != -1) |
| as_bad (_("duplicate .personality directive")); |
| |
| c = get_symbol_name (& name); |
| p = input_line_pointer; |
| if (c == '"') |
| ++ input_line_pointer; |
| unwind.personality_routine = symbol_find_or_make (name); |
| *p = c; |
| demand_empty_rest_of_line (); |
| } |
| |
| /* Parse a directive saving pseudo registers. */ |
| |
| static void |
| s_arm_unwind_save_pseudo (int regno) |
| { |
| valueT op; |
| |
| switch (regno) |
| { |
| case REG_RA_AUTH_CODE: |
| /* Opcode for restoring RA_AUTH_CODE. */ |
| op = 0xb4; |
| add_unwind_opcode (op, 1); |
| break; |
| default: |
| as_bad (_("Unknown register no. encountered: %d\n"), regno); |
| } |
| } |
| |
| |
| /* Parse a directive saving core registers. */ |
| |
/* RANGE is a bitmask of core registers to record as saved
   (bit N set means rN).  Emits the pop opcodes and updates the
   running frame size.  */
static void
s_arm_unwind_save_core (long range)
{
  valueT op;
  int n;

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode and substitute sp (bit 13) for ip (bit 12)
	 in the mask.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form: 0x8000 plus a 12-bit mask of r4..r15.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form: a contiguous block of n registers
	     starting at r4, with or without r14.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed: 4 per saved register.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
| |
| /* Implement correct handling of .save lists enabling the split into |
| sublists where necessary, while preserving correct sublist ordering. */ |
| |
/* STR_P points into the .save register-list text (after any opening
   brace); PREV_REG is the highest register handled by the preceding
   sublist — presumably a sentinel below r0 on the initial call; TODO
   confirm against the caller.  */
static void
parse_dot_save (char **str_p, int prev_reg)
{
  long core_regs = 0;
  int reg;
  int in_range = 0;

  /* Skip a separating comma; a closing brace ends the recursion.  */
  if (**str_p == ',')
    *str_p += 1;
  if (**str_p == '}')
    {
      *str_p += 1;
      return;
    }

  /* Accumulate a sublist of core registers (singles and rX-rY ranges)
     into the CORE_REGS bitmask.  */
  while ((reg = arm_reg_parse (str_p, REG_TYPE_RN)) != FAIL)
    {
      if (!in_range)
	{
	  if (core_regs & (1 << reg))
	    as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
		       reg);
	  else if (reg <= prev_reg)
	    as_tsktsk (_("Warning: register list not in ascending order"));

	  core_regs |= (1 << reg);
	  prev_reg = reg;
	  if (skip_past_char(str_p, '-') != FAIL)
	    in_range = 1;
	  else if (skip_past_comma(str_p) == FAIL)
	    first_error (_("bad register list"));
	}
      else
	{
	  /* Second endpoint of an rX-rY range: mark every register in
	     between.  */
	  int i;
	  if (reg <= prev_reg)
	    first_error (_("bad range in register list"));
	  for (i = prev_reg + 1; i <= reg; i++)
	    {
	      if (core_regs & (1 << i))
		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
			   i);
	      else
		core_regs |= 1 << i;
	    }
	  in_range = 0;
	}
    }
  if (core_regs)
    {
      /* Higher register numbers go in higher memory addresses.  When splitting a list,
	 right-most sublist should therefore be .saved first.  Use recursion for this.  */
      parse_dot_save (str_p, reg);
      /* We're back from recursion, so emit .save insn for sublist.  */
      s_arm_unwind_save_core (core_regs);
      return;
    }
  /* Handle pseudo-regs, under assumption these are emitted singly.  */
  else if ((reg = arm_reg_parse (str_p, REG_TYPE_PSEUDO)) != FAIL)
    {
      /* recurse for remainder of input.  Note: No assumption is made regarding which
	 register in core register set holds pseudo-register.  It's not considered in
	 ordering check beyond ensuring it's not sandwiched between 2 consecutive
	 registers.  */
      parse_dot_save (str_p, prev_reg + 1);
      s_arm_unwind_save_pseudo (reg);
      return;
    }
  else
    as_bad (BAD_SYNTAX);
}
| |
| /* Parse a directive saving VFP registers for ARMv6 and above. */ |
| |
static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;
  bool partial_match;

  /* Parse a list of consecutive D registers starting at START.  */
  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* 0xc8xx: pop d16+start_offset .. d16+start_offset+count-1.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      /* 0xc9xx: pop dstart .. dstart+count-1 (all below d16).  */
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes of stack.  */
  unwind.frame_size += count * 8;
}
| |
| |
| /* Parse a directive saving VFP registers for pre-ARMv6. */ |
| |
| static void |
| s_arm_unwind_save_vfp (void) |
| { |
| int count; |
| unsigned int reg; |
| valueT op; |
| bool partial_match; |
| |
| count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D, |
| &partial_match); |
| if (count == FAIL) |
| { |
| as_bad (_("expected register list")); |
| ignore_rest_of_line (); |
| return; |
| } |
| |
| demand_empty_rest_of_line (); |
| |
| if (reg == 8) |
| { |
| /* Short form. */ |
| op = 0xb8 | (count - 1); |
| add_unwind_opcode (op, 1); |
| } |
| else |
| { |
| /* Long form. */ |
| op = 0xb300 | (reg << 4) | (count - 1); |
| add_unwind_opcode (op, 2); |
| } |
| unwind.frame_size += count * 8 + 4; |
| } |
| |
| |
| /* Parse a directive saving iWMMXt data registers. */ |
| |
static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  /* Parse the register list (singles and wrX-wrY ranges) into a
     16-bit mask, one bit per wR register.  */
  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  /* Each saved wR register occupies 8 bytes of stack.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.	 We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.	 */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Previous opcode was the short form covering
		 wr10..wr(10+i).  If this list is exactly {wr9}, drop
		 that opcode and fold its registers into our mask.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous opcode was the two-byte long form; recover
		 its start register and count from the preceding byte.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      /* Merge when this list's topmost register is exactly
		 reg-1, i.e. it abuts the previous block from below.  */
	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  reg == -1 acts as a sentinel that
	 flushes the final block.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.	*/
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form: block is wr10..hi_reg.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form: block is wr(reg+1)..hi_reg.  */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
 error:
  ignore_rest_of_line ();
}
| |
/* Parse the operand of a .save directive listing iWMMXt control (wcgr)
   registers, in the same list/range syntax as the wr variant.  Emits a
   single two-byte 0xc700|mask unwind opcode for the whole list.  */
static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;	/* Bit N set => wcgr(N) is in the list (rebased).  */
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  skip_whitespace (input_line_pointer);

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      /* wCG registers are numbered from 8 in the register table;
	 rebase to 0 for the opcode mask.  */
      reg -= 8;
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  /* Register range.  Note HI_REG is not rebased; the loop below
	     compares the rebased REG against it, matching the wr
	     variant's structure.  */
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each saved control register occupies 4 bytes of stack.  */
  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
 error:
  ignore_rest_of_line ();
}
| |
| /* Parse an unwind_save directive. |
| If the argument is non-zero, this is a .vsave directive. */ |
| |
| static void |
| s_arm_unwind_save (int arch_v6) |
| { |
| char *peek; |
| struct reg_entry *reg; |
| bool had_brace = false; |
| |
| if (!unwind.proc_start) |
| as_bad (MISSING_FNSTART); |
| |
| /* Figure out what sort of save we have. */ |
| peek = input_line_pointer; |
| |
| if (*peek == '{') |
| { |
| had_brace = true; |
| peek++; |
| } |
| |
| reg = arm_reg_parse_multi (&peek); |
| |
| if (!reg) |
| { |
| as_bad (_("register expected")); |
| ignore_rest_of_line (); |
| return; |
| } |
| |
| switch (reg->type) |
| { |
| case REG_TYPE_PSEUDO: |
| case REG_TYPE_RN: |
| { |
| if (had_brace) |
| input_line_pointer++; |
| parse_dot_save (&input_line_pointer, -1); |
| demand_empty_rest_of_line (); |
| return; |
| } |
| |
| case REG_TYPE_VFD: |
| if (arch_v6) |
| s_arm_unwind_save_vfp_armv6 (); |
| else |
| s_arm_unwind_save_vfp (); |
| return; |
| |
| case REG_TYPE_MMXWR: |
| s_arm_unwind_save_mmxwr (); |
| return; |
| |
| case REG_TYPE_MMXWCG: |
| s_arm_unwind_save_mmxwcg (); |
| return; |
| |
| default: |
| as_bad (_(".unwind_save does not support this kind of register")); |
| ignore_rest_of_line (); |
| } |
| } |
| |
| |
| /* Parse an unwind_movsp directive. */ |
| |
/* Parse a .movsp directive: ".movsp <reg> [, #offset]".  Emits the
   EHABI opcode that sets the unwinder's stack pointer from <reg> and
   records <reg> as the new frame pointer.  */
static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* A .movsp is only meaningful while SP is still the frame base.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  Opcode 0x90|reg: set the
     unwinder's SP from register REG (EHABI).  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
| |
| /* Parse an unwind_pad directive. */ |
| |
| static void |
| s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED) |
| { |
| int offset; |
| |
| if (!unwind.proc_start) |
| as_bad (MISSING_FNSTART); |
| |
| if (immediate_for_directive (&offset) == FAIL) |
| return; |
| |
| if (offset & 3) |
| { |
| as_bad (_("stack increment must be multiple of 4")); |
| ignore_rest_of_line (); |
| return; |
| } |
| |
| /* Don't generate any opcodes, just record the details for later. */ |
| unwind.frame_size += offset; |
| unwind.pending_offset += offset; |
| |
| demand_empty_rest_of_line (); |
| } |
| |
| /* Parse an unwind_pacspval directive. */ |
| |
| static void |
| s_arm_unwind_pacspval (int ignored ATTRIBUTE_UNUSED) |
| { |
| valueT op; |
| |
| if (!unwind.proc_start) |
| as_bad (MISSING_FNSTART); |
| |
| demand_empty_rest_of_line (); |
| |
| op = 0xb5; |
| add_unwind_opcode (op, 1); |
| } |
| |
| /* Parse an unwind_setfp directive. */ |
| |
| static void |
| s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED) |
| { |
| int sp_reg; |
| int fp_reg; |
| int offset; |
| |
| if (!unwind.proc_start) |
| as_bad (MISSING_FNSTART); |
| |
| fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN); |
| if (skip_past_comma (&input_line_pointer) == FAIL) |
| sp_reg = FAIL; |
| else |
| sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN); |
| |
| if (fp_reg == FAIL || sp_reg == FAIL) |
| { |
| as_bad (_("expected <reg>, <reg>")); |
| ignore_rest_of_line (); |
| return; |
| } |
| |
| /* Optional constant. */ |
| if (skip_past_comma (&input_line_pointer) != FAIL) |
| { |
| if (immediate_for_directive (&offset) == FAIL) |
| return; |
| } |
| else |
| offset = 0; |
| |
| demand_empty_rest_of_line (); |
| |
| if (sp_reg != REG_SP && sp_reg != unwind.fp_reg) |
| { |
| as_bad (_("register must be either sp or set by a previous" |
| "unwind_movsp directive")); |
| return; |
| } |
| |
| /* Don't generate any opcodes, just record the information for later. */ |
| unwind.fp_reg = fp_reg; |
| unwind.fp_used = 1; |
| if (sp_reg == REG_SP) |
| unwind.fp_offset = unwind.frame_size - offset; |
| else |
| unwind.fp_offset -= offset; |
| } |
| |
| /* Parse an unwind_raw directive. */ |
| |
| static void |
| s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED) |
| { |
| expressionS exp; |
| /* This is an arbitrary limit. */ |
| unsigned char op[16]; |
| int count; |
| |
| if (!unwind.proc_start) |
| as_bad (MISSING_FNSTART); |
| |
| expression (&exp); |
| if (exp.X_op == O_constant |
| && skip_past_comma (&input_line_pointer) != FAIL) |
| { |
| unwind.frame_size += exp.X_add_number; |
| expression (&exp); |
| } |
| else |
| exp.X_op = O_illegal; |
| |
| if (exp.X_op != O_constant) |
| { |
| as_bad (_("expected <offset>, <opcode>")); |
| ignore_rest_of_line (); |
| return; |
| } |
| |
| count = 0; |
| |
| /* Parse the opcode. */ |
| for (;;) |
| { |
| if (count >= 16) |
| { |
| as_bad (_("unwind opcode too long")); |
| ignore_rest_of_line (); |
| } |
| if (exp.X_op != O_constant || exp.X_add_number & ~0xff) |
| { |
| as_bad (_("invalid unwind opcode")); |
| ignore_rest_of_line (); |
| return; |
| } |
| op[count++] = exp.X_add_number; |
| |
| /* Parse the next byte. */ |
| if (skip_past_comma (&input_line_pointer) == FAIL) |
| break; |
| |
| expression (&exp); |
| } |
| |
| /* Add the opcode bytes in reverse order. */ |
| while (count--) |
| add_unwind_opcode (op[count], 1); |
| |
| demand_empty_rest_of_line (); |
| } |
| |
| |
| /* Parse a .eabi_attribute directive. */ |
| |
| static void |
| s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED) |
| { |
| int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC); |
| |
| if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES) |
| attributes_set_explicitly[tag] = 1; |
| } |
| |
| /* Emit a tls fix for the symbol. */ |
| |
/* Parse a .tlsdescseq directive: attach a TLS descriptor-sequence
   relocation (ARM or Thumb variant, depending on the current mode) to
   the current output position for the symbol expression that follows.  */
static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* The fix applies at the next free byte of the current frag.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
| #endif /* OBJ_ELF */ |
| |
| static void s_arm_arch (int); |
| static void s_arm_object_arch (int); |
| static void s_arm_cpu (int); |
| static void s_arm_fpu (int); |
| static void s_arm_arch_extension (int); |
| |
| #ifdef TE_PE |
| |
| static void |
| pe_directive_secrel (int dummy ATTRIBUTE_UNUSED) |
| { |
| expressionS exp; |
| |
| do |
| { |
| expression (&exp); |
| if (exp.X_op == O_symbol) |
| exp.X_op = O_secrel; |
| |
| emit_expr (&exp, 4); |
| } |
| while (*input_line_pointer++ == ','); |
| |
| input_line_pointer--; |
| demand_empty_rest_of_line (); |
| } |
| #endif /* TE_PE */ |
| |
| int |
| arm_is_largest_exponent_ok (int precision) |
| { |
| /* precision == 1 ensures that this will only return |
| true for 16 bit floats. */ |
| return (precision == 1) && (fp16_format == ARM_FP16_FORMAT_ALTERNATIVE); |
| } |
| |
/* Handle the .float16_format directive: select between the "ieee" and
   "alternative" 16-bit float encodings.  The format may be set at most
   once per assembly.  */
static void
set_fp16_format (int dummy ATTRIBUTE_UNUSED)
{
  char saved_char;
  char* name;
  enum fp_16bit_format new_format;

  new_format = ARM_FP16_FORMAT_DEFAULT;

  /* Isolate the argument by temporarily NUL-terminating it in place.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;

  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  if (strcasecmp (name, "ieee") == 0)
    new_format = ARM_FP16_FORMAT_IEEE;
  else if (strcasecmp (name, "alternative") == 0)
    new_format = ARM_FP16_FORMAT_ALTERNATIVE;
  else
    {
      as_bad (_("unrecognised float16 format \"%s\""), name);
      goto cleanup;
    }

  /* Only set fp16_format if it is still the default (aka not already
     been set yet).  */
  if (fp16_format == ARM_FP16_FORMAT_DEFAULT)
    fp16_format = new_format;
  else
    {
      /* Re-stating the same format is silently accepted.  */
      if (new_format != fp16_format)
	as_warn (_("float16 format cannot be set more than once, ignoring."));
    }

 cleanup:
  /* Restore the character we overwrote before consuming the line.  */
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
| |
| static void s_arm_float_cons (int float_type) |
| { |
| /* We still parse the directive on error, so that any syntactic issues |
| are picked up. */ |
| if (ARM_FEATURE_ZERO (selected_fpu)) |
| as_bad (_("the floating-point format has not been set (or has been disabled)")); |
| float_cons (float_type); |
| } |
| /* This table describes all the machine specific pseudo-ops the assembler |
| has to support. The fields are: |
| pseudo-op name without dot |
| function to call to execute this pseudo-op |
| Integer arg to pass to the function. */ |
| |
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",         s_req,         0 },
  /* Following two are likewise never called.  */
  { "dn",          s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",       s_unreq,       0 },
  { "align",       s_align_ptwo,  2 },
  { "arm",         s_arm,         0 },
  { "thumb",       s_thumb,       0 },
  { "code",        s_code,        0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,   0 },
  { "even",        s_even,        0 },
  { "ltorg",       s_ltorg,       0 },
  { "pool",        s_ltorg,       0 },
  { "syntax",      s_syntax,      0 },
  { "cpu",         s_arm_cpu,     0 },
  { "arch",        s_arm_arch,    0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu",         s_arm_fpu,     0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",        s_arm_elf_cons, 4 },
  { "long",        s_arm_elf_cons, 4 },
  { "inst.n",      s_arm_elf_inst, 2 },
  { "inst.w",      s_arm_elf_inst, 4 },
  { "inst",        s_arm_elf_inst, 0 },
  { "rel31",       s_arm_rel31,   0 },
  /* ARM EHABI frame-unwinding directives.  */
  { "fnstart",     s_arm_unwind_fnstart, 0 },
  { "fnend",       s_arm_unwind_fnend,   0 },
  { "cantunwind",  s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save",        s_arm_unwind_save,    0 },
  /* .vsave passes arch_v6 = 1 to s_arm_unwind_save.  */
  { "vsave",       s_arm_unwind_save,    1 },
  { "movsp",       s_arm_unwind_movsp,   0 },
  { "pad",         s_arm_unwind_pad,     0 },
  { "pacspval",    s_arm_unwind_pacspval, 0 },
  { "setfp",       s_arm_unwind_setfp,   0 },
  { "unwind_raw",  s_arm_unwind_raw,     0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq",  s_arm_tls_descseq,    0 },
#else
  { "word",        cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  /* Override the default float_cons handling so that we can validate
     the FPU setting.  */
  { "float",   s_arm_float_cons, 'f' },
  { "single",  s_arm_float_cons, 'f' },
  { "double",  s_arm_float_cons, 'd' },
  { "dc.s",    s_arm_float_cons, 'f' },
  { "dc.d",    s_arm_float_cons, 'd' },
  { "extend",  s_arm_float_cons, 'x' },
  { "ldouble", s_arm_float_cons, 'x' },
  { "packed",  s_arm_float_cons, 'p' },
  { "bfloat16", s_arm_float_cons, 'b' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  {"float16", s_arm_float_cons, 'h' },
  {"float16_format", set_fp16_format, 0 },

  { 0, 0, 0 }
};
| |
| /* Parser functions used exclusively in instruction operands. */ |
| |
| /* Generic immediate-value read function for use in insn parsing. |
| STR points to the beginning of the immediate (the leading #); |
| VAL receives the value; if the value is outside [MIN, MAX] |
| issue an error. PREFIX_OPT is true if the immediate prefix is |
| optional. */ |
| |
| static int |
| parse_immediate (char **str, int *val, int min, int max, |
| bool prefix_opt) |
| { |
| expressionS exp; |
| |
| my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX); |
| if (exp.X_op != O_constant) |
| { |
| inst.error = _("constant expression required"); |
| return FAIL; |
| } |
| |
| if (exp.X_add_number < min || exp.X_add_number > max) |
| { |
| inst.error = _("immediate value out of range"); |
| return FAIL; |
| } |
| |
| *val = exp.X_add_number; |
| return SUCCESS; |
| } |
| |
| /* Less-generic immediate-value read function with the possibility of loading a |
| big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate |
| instructions. Puts the result directly in inst.operands[i]. */ |
| |
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bool allow_symbol_p)
{
  expressionS exp;
  /* Parse into the caller's expression when one is supplied, otherwise
     into a local scratch expression.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits go in .imm ...  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  /* ... and the high 32 bits in .reg, flagged by .regisimm.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the number of littlenums used.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= ((unsigned) generic_bignum[idx]
				 << (LITTLENUM_NUMBER_OF_BITS * j));
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= ((unsigned) generic_bignum[idx]
				 << (LITTLENUM_NUMBER_OF_BITS * j));
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  /* Commit the consumed input only on success.  */
  *str = ptr;

  return SUCCESS;
}
| |
| /* Returns 1 if a number has "quarter-precision" float format |
| 0baBbbbbbc defgh000 00000000 00000000. */ |
| |
static int
is_quarter_float (unsigned imm)
{
  /* The low 19 bits (the truncated mantissa) must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 25-30 must match the pattern implied by bit 29: either
     0b011111 (bit 29 set) or 0b100000 (bit 29 clear).  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  return (imm & 0x7e000000) == expected;
}
| |
| |
| /* Detect the presence of a floating point or integer zero constant, |
| i.e. #0.0 or #0. */ |
| |
static bool
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return false;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (startswith (*in, "0x"))
    {
      int val;
      /* Only the exact value 0 is accepted (min == max == 0).  */
      if (parse_immediate (in, &val, 0, 0, true) == FAIL)
	return false;
      return true;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* NOTE(review): low > leader appears to mean atof_generic produced no
     significant littlenums, i.e. the parsed value is +0.0 -- confirm
     against atof_generic's contract.  Negative zero is rejected by the
     sign check.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return true;

  return false;
}
| |
| /* Parse an 8-bit "quarter-precision" floating point number of the form: |
| 0baBbbbbbc defgh000 00000000 00000000. |
| The zero and minus-zero cases need special handling, since they can't be |
| encoded in the "quarter-precision" float format, but can nonetheless be |
| loaded as integer constants. */ |
| |
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    return FAIL;
  else
    {
      /* Scan up to the end of the token for a character that marks a
	 floating-point literal.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept encodable quarter-precision values, plus +0.0/-0.0 which
	 cannot be encoded but can be loaded as integer constants.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      /* Consume the parsed text only on success.  */
      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
| |
| /* Shift operands. */ |
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX, SHIFT_UXTW
};

/* Maps a shift mnemonic, as written in source, to its kind; looked up
   via the arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
  SHIFT_UXTW_IMMEDIATE		/* Shift must be UXTW immediate.  */
};
| |
| /* Parse a <shift> specifier on an ARM data processing instruction. |
| This has three forms: |
| |
| (LSL|LSR|ASL|ASR|ROR) Rs |
| (LSL|LSR|ASL|ASR|ROR) #imm |
| RRX |
| |
| Note that ASL is assimilated to LSL in the instruction encoding, and |
| RRX to ROR #0 (which cannot be written as such). */ |
| |
static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  /* Scan past the alphabetic shift mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  /* Look the mnemonic up in the shift-name hash table.  */
  shift_name
    = (const struct asm_shift_name *) str_hash_find_n (arm_shift_hsh, *str,
						       p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce the restriction MODE places on which shifts are legal.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:
      if (shift == SHIFT_UXTW)
	{
	  inst.error = _("'UXTW' not allowed here");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;
    case SHIFT_UXTW_IMMEDIATE:
      if (shift != SHIFT_UXTW)
	{
	  inst.error = _("'UXTW' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  /* RRX takes no operand; everything else takes a register (only when
     unrestricted) or an immediate shift amount.  */
  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.  */
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      /* The immediate amount is left in inst.relocs[0].exp for the
	 encoder to validate.  */
      else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}
| |
| /* Parse a <shifter_operand> for an ARM data processing instruction: |
| |
| #<immediate> |
| #<immediate>, <rotate> |
| <Rm> |
| <Rm>, <shift> |
| |
| where <shift> is defined by parse_shift above, and <rotate> is a |
| multiple of 2 between 0 and 30. Validation of immediate operands |
| is deferred to md_apply_fix. */ |
| |
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      /* Register form: <Rm> or <Rm>, <shift>.  */
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Immediate form: parse the constant into the reloc expression.  */
  if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  VALUE is even, so VALUE << 7 places
	 VALUE/2 in bits 8-11, above the 8-bit constant.  */
      inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* No explicit rotation: defer immediate validation to md_apply_fix.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 0;
  return SUCCESS;
}
| |
| /* Group relocation information. Each entry in the table contains the |
| textual name of the relocation as may appear in assembler source |
| and must end with a colon. |
| Along with this textual name are the relocation codes to be used if |
| the corresponding instruction is an ALU instruction (ADD or SUB only), |
| an LDR, an LDRS, or an LDC. */ |
| |
struct group_reloc_table_entry
{
  const char *name;	/* Relocation name as written in source (no colon).  */
  int alu_code;		/* BFD reloc for ALU (ADD/SUB) instructions.  */
  int ldr_code;		/* BFD reloc for LDR instructions.  */
  int ldrs_code;	/* BFD reloc for LDRS instructions.  */
  int ldc_code;		/* BFD reloc for LDC instructions.  */
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC,
  GROUP_MVE
} group_reloc_type;
| |
/* A code of 0 means the group relocation is not available for that
   instruction class.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
| |
| /* Given the address of a pointer pointing to the textual name of a group |
| relocation as may appear in assembler source, attempt to find its details |
| in group_reloc_table. The pointer will be updated to the character after |
| the trailing colon. On failure, FAIL will be returned; SUCCESS |
| otherwise. On success, *entry will be updated to point at the relevant |
| group_reloc_table entry. */ |
| |
| static int |
| find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out) |
| { |
| unsigned int i; |
| for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++) |
| { |
| int length = strlen (group_reloc_table[i].name); |
| |
| if (strncasecmp (group_reloc_table[i].name, *str, length) == 0 |
| && (*str)[length] == ':') |
| { |
| *out = &group_reloc_table[i]; |
| *str += (length + 1); |
| return SUCCESS; |
| } |
| } |
| |
| return FAIL; |
| } |
| |
| /* Parse a <shifter_operand> for an ARM data processing instruction |
| (as for parse_shifter_operand) where group relocations are allowed: |
| |
| #<immediate> |
| #<immediate>, <rotate> |
| #:<group_reloc>:<expression> |
| <Rm> |
| <Rm>, <shift> |
| |
| where <group_reloc> is one of the strings defined in group_reloc_table. |
| The hashes are optional. |
| |
| Everything else is as for parse_shifter_operand. */ |
| |
| static parse_operand_result |
| parse_shifter_operand_group_reloc (char **str, int i) |
| { |
| /* Determine if we have the sequence of characters #: or just : |
| coming next. If we do, then we check for a group relocation. |
| If we don't, punt the whole lot to parse_shifter_operand. */ |
| |
| if (((*str)[0] == '#' && (*str)[1] == ':') |
| || (*str)[0] == ':') |
| { |
| struct group_reloc_table_entry *entry; |
| |
| if ((*str)[0] == '#') |
| (*str) += 2; |
| else |
| (*str)++; |
| |
| /* Try to parse a group relocation. Anything else is an error. */ |
| if (find_group_reloc_table_entry (str, &entry) == FAIL) |
| { |
| inst.error = _("unknown group relocation"); |
| return PARSE_OPERAND_FAIL_NO_BACKTRACK; |
| } |
| |
| /* We now have the group relocation table entry corresponding to |
| the name in the assembler source. Next, we parse the expression. */ |
| if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX)) |
| return PARSE_OPERAND_FAIL_NO_BACKTRACK; |
| |
| /* Record the relocation type (always the ALU variant here). */ |
| inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code; |
| gas_assert (inst.relocs[0].type != 0); |
| |
| return PARSE_OPERAND_SUCCESS; |
| } |
| else |
| return parse_shifter_operand (str, i) == SUCCESS |
| ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL; |
| |
| /* Never reached. */ |
| } |
| |
| /* Parse a Neon alignment expression. Information is written to |
| inst.operands[i]. We assume the initial ':' has been skipped. |
| |
| align .imm = align << 8, .immisalign=1, .preind=0 */ |
| static parse_operand_result |
| parse_neon_alignment (char **str, int i) |
| { |
| char *p = *str; |
| expressionS exp; |
| |
| my_get_expression (&exp, &p, GE_NO_PREFIX); |
| |
| if (exp.X_op != O_constant) |
| { |
| inst.error = _("alignment must be constant"); |
| return PARSE_OPERAND_FAIL; |
| } |
| |
| inst.operands[i].imm = exp.X_add_number << 8; |
| inst.operands[i].immisalign = 1; |
| /* Alignments are not pre-indexes. */ |
| inst.operands[i].preind = 0; |
| |
| *str = p; |
| return PARSE_OPERAND_SUCCESS; |
| } |
| |
| /* Parse all forms of an ARM address expression. Information is written |
| to inst.operands[i] and/or inst.relocs[0]. |
| |
| Preindexed addressing (.preind=1): |
| |
| [Rn, #offset] .reg=Rn .relocs[0].exp=offset |
| [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1 |
| [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1 |
| .shift_kind=shift .relocs[0].exp=shift_imm |
| |
| These three may have a trailing ! which causes .writeback to be set also. |
| |
| Postindexed addressing (.postind=1, .writeback=1): |
| |
| [Rn], #offset .reg=Rn .relocs[0].exp=offset |
| [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1 |
| [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1 |
| .shift_kind=shift .relocs[0].exp=shift_imm |
| |
| Unindexed addressing (.preind=0, .postind=0): |
| |
| [Rn], {option} .reg=Rn .imm=option .immisreg=0 |
| |
| Other: |
| |
| [Rn]{!} shorthand for [Rn,#0]{!} |
| =immediate .isreg=0 .relocs[0].exp=immediate |
| label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label |
| |
| It is the caller's responsibility to check for addressing modes not |
| supported by the instruction, and to set inst.relocs[0].type. */ |
| |
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No opening '[': the operand is either '=immediate', a bare
     expression (treated as PC-relative), or - for MVE - a syntax
     error.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (group_type == GROUP_MVE
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [r0-r15] expected as argument but receiving r0-r15 without
	     [] brackets.  */
	  inst.error = BAD_SYNTAX;
	  return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.relocs[0].pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
				    /*allow_symbol_p=*/true))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* Parse the base: for MVE it may be an MVE vector register,
     otherwise it must be a core register.  */
  if (group_type == GROUP_MVE)
    {
      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      if ((reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  inst.operands[i].isquad = 1;
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
	{
	  inst.error = BAD_ADDR_MODE;
	  return PARSE_OPERAND_FAIL;
	}
    }
  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      if (group_type == GROUP_MVE)
	inst.error = BAD_ADDR_MODE;
      else
	inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  /* Record the base register.  */
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* A comma after the base introduces an index register, an immediate
     offset, a Neon alignment, or a group relocation.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      if (group_type == GROUP_MVE
	  && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  /* immisreg == 2 marks an MVE vector index register.  */
	  inst.operands[i].immisreg = 2;
	  inst.operands[i].imm = reg;

	  if (skip_past_comma (&p) == SUCCESS)
	    {
	      if (parse_shift (&p, i, SHIFT_UXTW_IMMEDIATE) == SUCCESS)
		{
		  inst.operands[i].imm |= inst.relocs[0].exp.X_add_number << 5;
		  inst.relocs[0].exp.X_add_number = 0;
		}
	      else
		return PARSE_OPERAND_FAIL;
	    }
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* The '-' really belongs to the following immediate
		 expression; back up so it is re-parsed there.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero entry in the table means this group relocation has
		 no encoding for this class of instruction.  */
	      if (inst.relocs[0].type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  /* The pre-indexed part (if any) must be closed with ']'.  */
  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, true) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  enum arm_reg_type rtype = REG_TYPE_MQ;
	  struct neon_type_el et;
	  if (group_type == GROUP_MVE
	      && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	    {
	      inst.operands[i].immisreg = 2;
	      inst.operands[i].imm = reg;
	    }
	  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
| |
| static int |
| parse_address (char **str, int i) |
| { |
| return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS |
| ? SUCCESS : FAIL; |
| } |
| |
| static parse_operand_result |
| parse_address_group_reloc (char **str, int i, group_reloc_type type) |
| { |
| return parse_address_main (str, i, 1, type); |
| } |
| |
| /* Parse an operand for a MOVW or MOVT instruction. */ |
| static int |
| parse_half (char **str) |
| { |
| char * p; |
| |
| p = *str; |
| skip_past_char (&p, '#'); |
| if (strncasecmp (p, ":lower16:", 9) == 0) |
| inst.relocs[0].type = BFD_RELOC_ARM_MOVW; |
| else if (strncasecmp (p, ":upper16:", 9) == 0) |
| inst.relocs[0].type = BFD_RELOC_ARM_MOVT; |
| |
| if (inst.relocs[0].type != BFD_RELOC_UNUSED) |
| { |
| p += 9; |
| skip_whitespace (p); |
| } |
| |
| if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX)) |
| return FAIL; |
| |
| if (inst.relocs[0].type == BFD_RELOC_UNUSED) |
| { |
| if (inst.relocs[0].exp.X_op != O_constant) |
| { |
| inst.error = _("constant expression expected"); |
| return FAIL; |
| } |
| if (inst.relocs[0].exp.X_add_number < 0 |
| || inst.relocs[0].exp.X_add_number > 0xffff) |
| { |
| inst.error = _("immediate value out of range"); |
| return FAIL; |
| } |
| } |
| *str = p; |
| return SUCCESS; |
| } |
| |
| /* Miscellaneous. */ |
| |
| /* Parse a PSR flag operand. The value returned is FAIL on syntax error, |
| or a bitmask suitable to be or-ed into the ARM msr instruction. */ |
static int
parse_psr (char **str, bool lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bool is_apsr = false;
  bool m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = false;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = true;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: look the name up in the V7-M special register table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* Names like IAPSR/EAPSR/XPSR/PSR: the table key ends at the 'r',
	 so trim anything after it (e.g. a following suffix).  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) str_hash_find_n (arm_v7m_psr_hsh, start,
						      p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = true;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the four-character CPSR/SPSR/APSR name just matched.  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* A repeated flag letter sets the 0x20 (or, for 'g', 0x2)
	     sentinel so it is diagnosed after the loop.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All of n, z, c, v and q given => whole flags field.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject repeated bits (0x20 / 0x2 sentinels) and partial
	     NZCVQ sets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  psr = (const struct asm_psr *) str_hash_find_n (arm_psr_hsh, start,
							  p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;	/* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
| |
| static int |
| parse_sys_vldr_vstr (char **str) |
| { |
| unsigned i; |
| int val = FAIL; |
| struct { |
| const char *name; |
| int regl; |
| int regh; |
| } sysregs[] = { |
| {"FPSCR", 0x1, 0x0}, |
| {"FPSCR_nzcvqc", 0x2, 0x0}, |
| {"VPR", 0x4, 0x1}, |
| {"P0", 0x5, 0x1}, |
| {"FPCXTNS", 0x6, 0x1}, |
| {"FPCXT_NS", 0x6, 0x1}, |
| {"fpcxtns", 0x6, 0x1}, |
| {"fpcxt_ns", 0x6, 0x1}, |
| {"FPCXTS", 0x7, 0x1}, |
| {"FPCXT_S", 0x7, 0x1}, |
| {"fpcxts", 0x7, 0x1}, |
| {"fpcxt_s", 0x7, 0x1} |
| }; |
| char *op_end = strchr (*str, ','); |
| size_t op_strlen = op_end - *str; |
| |
| for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++) |
| { |
| if (!strncmp (*str, sysregs[i].name, op_strlen)) |
| { |
| val = sysregs[i].regl | (sysregs[i].regh << 3); |
| *str = op_end; |
| break; |
| } |
| } |
| |
| return val; |
| } |
| |
| /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a |
| value suitable for splatting into the AIF field of the instruction. */ |
| |
| static int |
| parse_cps_flags (char **str) |
| { |
| int val = 0; |
| int saw_a_flag = 0; |
| char *s = *str; |
| |
| for (;;) |
| switch (*s++) |
| { |
| case '\0': case ',': |
| goto done; |
| |
| case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break; |
| case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break; |
| case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break; |
| |
| default: |
| inst.error = _("unrecognized CPS flag"); |
| return FAIL; |
| } |
| |
| done: |
| if (saw_a_flag == 0) |
| { |
| inst.error = _("missing CPS flags"); |
| return FAIL; |
| } |
| |
| *str = s - 1; |
| return val; |
| } |
| |
| /* Parse an endian specifier ("BE" or "LE", case insensitive); |
| returns 0 for big-endian, 1 for little-endian, FAIL for an error. */ |
| |
| static int |
| parse_endian_specifier (char **str) |
| { |
| int little_endian; |
| char *s = *str; |
| |
| if (strncasecmp (s, "BE", 2)) |
| little_endian = 0; |
| else if (strncasecmp (s, "LE", 2)) |
| little_endian = 1; |
| else |
| { |
| inst.error = _("valid endian specifiers are be or le"); |
| return FAIL; |
| } |
| |
| if (ISALNUM (s[2]) || s[2] == '_') |
| { |
| inst.error = _("valid endian specifiers are be or le"); |
| return FAIL; |
| } |
| |
| *str = s + 2; |
| return little_endian; |
| } |
| |
| /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a |
| value suitable for poking into the rotate field of an sxt or sxta |
| instruction, or FAIL on error. */ |
| |
| static int |
| parse_ror (char **str) |
| { |
| int rot; |
| char *s = *str; |
| |
| if (strncasecmp (s, "ROR", 3) == 0) |
| s += 3; |
| else |
| { |
| inst.error = _("missing rotation field after comma"); |
| return FAIL; |
| } |
| |
| if (parse_immediate (&s, &rot, 0, 24, false) == FAIL) |
| return FAIL; |
| |
| switch (rot) |
| { |
| case 0: *str = s; return 0x0; |
| case 8: *str = s; return 0x1; |
| case 16: *str = s; return 0x2; |
| case 24: *str = s; return 0x3; |
| |
| default: |
| inst.error = _("rotation can only be 0, 8, 16, or 24"); |
| return FAIL; |
| } |
| } |
| |
| /* Parse a conditional code (from conds[] below). The value returned is in the |
| range 0 .. 14, or FAIL. */ |
| static int |
| parse_cond (char **str) |
| { |
| char *q; |
| const struct asm_cond *c; |
| int n; |
| /* Condition codes are always 2 characters, so matching up to |
| 3 characters is sufficient. */ |
| char cond[3]; |
| |
| q = *str; |
| n = 0; |
| while (ISALPHA (*q) && n < 3) |
| { |
| cond[n] = TOLOWER (*q); |
| q++; |
| n++; |
| } |
| |
| c = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, cond, n); |
| if (!c) |
| { |
| inst.error = _("condition required"); |
| return FAIL; |
| } |
| |
| *str = q; |
| return c->value; |
| } |
| |
| /* Parse an option for a barrier instruction. Returns the encoding for the |
| option, or FAIL. */ |
| static int |
| parse_barrier (char **str) |
| { |
| char *p, *q; |
| const struct asm_barrier_opt *o; |
| |
| p = q = *str; |
| while (ISALPHA (*q)) |
| q++; |
| |
| o = (const struct asm_barrier_opt *) str_hash_find_n (arm_barrier_opt_hsh, p, |
| q - p); |
| if (!o) |
| return FAIL; |
| |
| if (!mark_feature_used (&o->arch)) |
| return FAIL; |
| |
| *str = q; |
| return o->value; |
| } |
| |
| /* Parse the operands of a table branch instruction. Similar to a memory |
| operand. */ |
| static int |
| parse_tb (char **str) |
| { |
| char * p = *str; |
| int reg; |
| |
| if (skip_past_char (&p, '[') == FAIL) |
| { |
| inst.error = _("'[' expected"); |
| return FAIL; |
| } |
| |
| if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL) |
| { |
| inst.error = _(reg_expected_msgs[REG_TYPE_RN]); |
| return FAIL; |
| } |
| inst.operands[0].reg = reg; |
| |
| if (skip_past_comma (&p) == FAIL) |
| { |
| inst.error = _("',' expected"); |
| return FAIL; |
| } |
| |
| if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL) |
| { |
| inst.error = _(reg_expected_msgs[REG_TYPE_RN]); |
| return FAIL; |
| } |
| inst.operands[0].imm = reg; |
| |
| if (skip_past_comma (&p) == SUCCESS) |
| { |
| if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL) |
| return FAIL; |
| if (inst.relocs[0].exp.X_add_number != 1) |
| { |
| inst.error = _("invalid shift"); |
| return FAIL; |
| } |
| inst.operands[0].shifted = 1; |
| } |
| |
| if (skip_past_char (&p, ']') == FAIL) |
| { |
| inst.error = _("']' expected"); |
| return FAIL; |
| } |
| *str = p; |
| return SUCCESS; |
| } |
| |
| /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more |
| information on the types the operands can take and how they are encoded. |
| Up to four operands may be read; this function handles setting the |
| ".present" field for each read operand itself. |
| Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS, |
| else returns FAIL. */ |
| |
static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* First operand is an MVE vector scalar (Qd[idx]).  */
  if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
    {
      /* Cases 17 or 19.  */
      inst.operands[i].reg = val;
      inst.operands[i].isvec = 1;
      inst.operands[i].isscalar = 2;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 17: VMOV<c>.<dt> <Qd[idx]>, <Rt>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;
	}
      else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
	{
	  /* Case 19: VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].isscalar = 2;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	    goto wanted_arm;

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	    goto wanted_arm;

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;
	}
      else
	{
	  first_error (_("expected ARM or MVE vector register"));
	  return FAIL;
	}
    }
  /* First operand is a Neon D-register scalar (Dn[x]).  */
  else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  /* First operand is a whole S/D/Q or MVE vector register.  */
  else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	    != FAIL)
	   || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype, &optype))
	       != FAIL))
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5 takes a second core register.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					    &optype)) != FAIL)
	       || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype,
					       &optype)) != FAIL))
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/false)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  /* First operand is a core register.  */
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7, 16, 18.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
	{
	  /* Case 18: VMOV<c>.<dt> <Rt>, <Qn[idx]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 2;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Two core registers so far: cases 7, 14 and 16.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      != FAIL)
	    {
	      /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;

	      if (rtype == REG_TYPE_VFS)
		{
		  /* Case 14.  */
		  i++;
		  if (skip_past_comma (&ptr) == FAIL)
		    goto wanted_comma;
		  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
						  &optype)) == FAIL)
		    {
		      first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		      return FAIL;
		    }
		  inst.operands[i].reg = val;
		  inst.operands[i].isreg = 1;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].issingle = 1;
		  inst.operands[i].vectype = optype;
		  inst.operands[i].present = 1;
		}
	    }
	  else
	    {
	      if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
		  != FAIL)
		{
		  /* Case 16: VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]>  */
		  inst.operands[i].reg = val;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].isscalar = 2;
		  inst.operands[i].vectype = optype;
		  inst.operands[i++].present = 1;

		  if (skip_past_comma (&ptr) == FAIL)
		    goto wanted_comma;

		  if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
		      == FAIL)
		    {
		      first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
		      return FAIL;
		    }
		  inst.operands[i].reg = val;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].isscalar = 2;
		  inst.operands[i].vectype = optype;
		  inst.operands[i].present = 1;
		}
	      else
		{
		  first_error (_("VFP single, double or MVE vector register"
			       " expected"));
		  return FAIL;
		}
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
| |
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM constraint occupies the low
   16 bits of the packed value and the Thumb constraint the high 16.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
| |
| /* Matcher codes for parse_operands. */ |
/* Matcher codes describing the syntactic form of a single instruction
   operand, as consumed by parse_operands below.  Codes prefixed OP_o
   denote optional operands; the ordering of enumerators is significant
   because parse_operands compares codes against OP_FIRST_OPTIONAL to
   decide whether backtracking is permitted.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNDMQ,     /* Neon double precision (0..31) or MVE vector register.  */
  OP_RNDMQR,    /* Neon double precision (0..31), MVE vector or ARM register.
		 */
  OP_RNSDMQR,    /* Neon single or double precision, MVE vector or ARM register.
		 */
  OP_RNQ,	/* Neon quad precision register */
  OP_RNQMQ,	/* Neon quad or MVE vector register.  */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RVSD_COND,	/* VFP single, double precision register or condition code.  */
  OP_RVSDMQ,	/* VFP single, double precision or MVE vector register.  */
  OP_RNSD,      /* Neon single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNDQMQ,     /* Neon double, quad or MVE vector register.  */
  OP_RNDQMQR,   /* Neon double, quad, MVE vector or ARM register.  */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_RNSDMQ,	/* Neon single, double or MVE vector register */
  OP_RNSDQMQ,	/* Neon single, double or quad register or MVE vector register
		 */
  OP_RNSDQMQR,	/* Neon single, double or quad register, MVE vector register or
		   GPR (no SP/PC) */
  OP_RMQ,	/* MVE vector register.  */
  OP_RMQRZ,	/* MVE vector or ARM register including ZR.  */
  OP_RMQRR,     /* MVE vector or ARM register.  */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_SP,	/* ARM SP register */
  OP_R12,
  OP_RRe,	/* ARM register, only even numbered.  */
  OP_RRo,	/* ARM register, only odd numbered, not r13 or r15.  */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */
  OP_RR_ZR,	/* ARM register or ZR but no PC */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */
  OP_VRSDVLST,  /* VFP single or double-precision register list and VPR */
  OP_MSTRLST2,	/* MVE vector list with two elements.  */
  OP_MSTRLST4,	/* MVE vector list with four elements.  */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RSVDMQ_FI0, /* VFP S, D, MVE vector register or floating point immediate
		    zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
		     */
  OP_RNSDQ_RNSC_MQ_RR, /* Vector S, D or Q reg, or MVE vector reg , or Neon
			  scalar, or ARM register.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, or ARM register.  */
  OP_RNDQMQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, MVE vector or ARM
			register.  */
  OP_RNDQMQ_RNSC, /* Neon D, Q or MVE vector reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  /* Neon D, Q or MVE vector register, or big immediate for logic and VMVN.  */
  OP_RNDQMQ_Ibig,
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RNDQMQ_I63b_RR, /* Neon D or Q reg, immediate for shift, MVE vector or
			ARM register.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I48_I64,	/*		   48 or 64 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I127,	/*		   0 .. 127 */
  OP_I255,	/*		   0 .. 255 */
  OP_I511,	/*		   0 .. 511 */
  OP_I4095,	/*		   0 .. 4095 */
  OP_I8191,	/*		   0 .. 8191 */
  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRMVE,	/* Memory address expression for MVE's VSTR/VLDR.  */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oI32z,	 /*				0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oLR,	 /* ARM LR register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQMQ,     /* Optional Neon double, quad or MVE vector register.  */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oRNSDQMQ,	 /* Optional single, double or quad register or MVE vector
		    register.  */
  OP_oRNSDMQ,	 /* Optional single, double register or MVE vector
		    register.  */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  OP_oRMQRZ,	/* optional MVE vector or ARM register including ZR.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
| |
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.

   STR is the operand text, PATTERN an OP_stop-terminated array of
   operand_parse_code values (possibly MIX_ARM_THUMB_OPERANDS-packed),
   and THUMB selects which half of a packed code applies.  Optional
   operands are handled by recording a backtrack position and retrying
   with the operand marked absent if a later operand fails to parse.  */
static int
parse_operands (char *str, const unsigned int *pattern, bool thumb)
{
  unsigned const int *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val = 0, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;
  unsigned int op_parse_code;
  bool partial_match;

/* Helper macros, used only within this function.  Each parses one
   syntactic element, stashes results in inst.operands[i], and jumps to
   the shared failure/backtrack code (or a caller-supplied label) when
   the element is not present.  */

#define po_char_or_fail(chr)			\
  do						\
    {						\
      if (skip_past_char (&str, chr) == FAIL)	\
	goto bad_args;				\
    }						\
  while (0)

#define po_reg_or_fail(regtype)					\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	{							\
	  first_error (_(reg_expected_msgs[regtype]));		\
	  goto failure;						\
	}							\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
      inst.operands[i].iszr = (rtype == REG_TYPE_ZR);		\
    }								\
  while (0)

#define po_reg_or_goto(regtype, label)				\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	goto label;						\
								\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
      inst.operands[i].iszr = (rtype == REG_TYPE_ZR);		\
    }								\
  while (0)

#define po_imm_or_fail(min, max, popt)				\
  do								\
    {								\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
	goto failure;						\
      inst.operands[i].imm = val;				\
    }								\
  while (0)

#define po_imm1_or_imm2_or_fail(imm1, imm2, popt)		\
  do								\
    {								\
      expressionS exp;						\
      my_get_expression (&exp, &str, popt);			\
      if (exp.X_op != O_constant)				\
	{							\
	  inst.error = _("constant expression required");	\
	  goto failure;						\
	}							\
      if (exp.X_add_number != imm1 && exp.X_add_number != imm2) \
	{							\
	  inst.error = _("immediate value 48 or 64 expected");	\
	  goto failure;						\
	}							\
      inst.operands[i].imm = exp.X_add_number;			\
    }								\
  while (0)

#define po_scalar_or_goto(elsz, label, reg_type)			\
  do									\
    {									\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype,	\
			  reg_type);					\
      if (val == FAIL)							\
	goto label;							\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\
    }									\
  while (0)

#define po_misc_or_fail(expr)			\
  do						\
    {						\
      if (expr)					\
	goto failure;				\
    }						\
  while (0)

#define po_misc_or_fail_no_backtrack(expr)		\
  do							\
    {							\
      result = expr;					\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\
	goto failure;					\
    }							\
  while (0)

#define po_barrier_or_imm(str)				   \
  do							   \
    {							   \
      val = parse_barrier (&str);			   \
      if (val == FAIL && ! ISALPHA (*str))		   \
	goto immediate;					   \
      if (val == FAIL					   \
	  /* ISB can only take SY as an option.  */	   \
	  || ((inst.instruction & 0xf0) == 0x60		   \
	       && val != 0xf))				   \
	{						   \
	   inst.error = _("invalid barrier type");	   \
	   backtrack_pos = 0;				   \
	   goto failure;				   \
	}						   \
    }							   \
  while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      op_parse_code = upat[i];
      /* A MIX_ARM_THUMB_OPERANDS code packs the ARM code in the low 16
	 bits and the Thumb code in the high 16 bits; select the half
	 for the current instruction mode.  */
      if (op_parse_code >= 1<<16)
	op_parse_code = thumb ? (op_parse_code >> 16)
				: (op_parse_code & ((1<<16)-1));

      if (op_parse_code >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      /* Operands after the first are comma-separated; the first operand
	 may itself have been an absent optional one.  */
      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (op_parse_code)
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_oRRnpcsp:
	case OP_RRnpc:
	case OP_RRnpcsp:
	case OP_oRR:
	case OP_RRe:
	case OP_RRo:
	case OP_LR:
	case OP_oLR:
	case OP_SP:
	case OP_R12:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RNSDMQR:
	  po_reg_or_goto (REG_TYPE_VFS, try_rndmqr);
	  break;
	try_rndmqr:
	case OP_RNDMQR:
	  po_reg_or_goto (REG_TYPE_RN, try_rndmq);
	  break;
	try_rndmq:
	case OP_RNDMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rnd);
	  break;
	try_rnd:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	coproc_reg:
	  po_reg_or_goto (REG_TYPE_CN, vpr_po);
	  break;
	  /* Also accept P0 or p0 for VPR.P0.  Since P0 is already an
	     existing register with a value of 0, this seems like the
	     best way to parse P0.  */
	vpr_po:
	  if (strncasecmp (str, "P0", 2) == 0)
	    {
	      str += 2;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].reg = 13;
	    }
	  else
	    goto failure;
	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_nq);
	  break;
	try_nq:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
	case OP_RNSD:  po_reg_or_fail (REG_TYPE_NSD);     break;
	case OP_RNDQMQR:
	  po_reg_or_goto (REG_TYPE_RN, try_rndqmq);
	  break;
	try_rndqmq:
	case OP_oRNDQMQ:
	case OP_RNDQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rndq);
	  break;
	try_rndq:
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
	case OP_RVSDMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rvsd);
	  break;
	try_rvsd:
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
	case OP_RVSD_COND:
	  po_reg_or_goto (REG_TYPE_VFSD, try_cond);
	  break;
	case OP_oRNSDMQ:
	case OP_RNSDMQ:
	  po_reg_or_goto (REG_TYPE_NSD, try_mq2);
	  break;
	try_mq2:
	  po_reg_or_fail (REG_TYPE_MQ);
	  break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
	case OP_RNSDQMQR:
	  po_reg_or_goto (REG_TYPE_RN, try_mq);
	  break;
	try_mq:
	case OP_oRNSDQMQ:
	case OP_RNSDQMQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_nsdq2);
	  break;
	try_nsdq2:
	  po_reg_or_fail (REG_TYPE_NSDQ);
	  inst.error = 0;
	  break;
	case OP_RMQRR:
	  po_reg_or_goto (REG_TYPE_RN, try_rmq);
	  break;
	try_rmq:
	case OP_RMQ:
	  po_reg_or_fail (REG_TYPE_MQ);
	  break;
	/* Neon scalar. Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure, REG_TYPE_VFD);    break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, true);
	  }
	  break;

	case OP_RVSD_I0:
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RSVDMQ_FI0:
	  po_reg_or_goto (REG_TYPE_MQ, try_rsvd_fi0);
	  break;
	try_rsvd_fi0:
	case OP_RSVD_FI0:
	  {
	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
	    break;
	    try_ifimm0:
	    if (parse_ifimm_zero (&str))
	      inst.operands[i].imm = 0;
	    else
	      {
		inst.error
		  = _("only floating point zero is allowed as immediate value");
		goto failure;
	      }
	  }
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr, REG_TYPE_VFD);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC_MQ_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_rnsdq_rnsc_mq);
	  break;
	try_rnsdq_rnsc_mq:
	case OP_RNSDQ_RNSC_MQ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rnsdq_rnsc);
	  break;
	try_rnsdq_rnsc:
	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq, REG_TYPE_VFD);
	    inst.error = 0;
	    break;
	    try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	    inst.error = 0;
	  }
	  break;

	case OP_RNSD_RNSC:
	  {
	    po_scalar_or_goto (8, try_s_scalar, REG_TYPE_VFD);
	    break;
	    try_s_scalar:
	    po_scalar_or_goto (4, try_nsd, REG_TYPE_VFS);
	    break;
	    try_nsd:
	    po_reg_or_fail (REG_TYPE_NSD);
	  }
	  break;

	case OP_RNDQMQ_RNSC_RR:
	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc_rr);
	  break;
	try_rndq_rnsc_rr:
	case OP_RNDQ_RNSC_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_rndq_rnsc);
	  break;
	case OP_RNDQMQ_RNSC:
	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc);
	  break;
	try_rndq_rnsc:
	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq, REG_TYPE_VFD);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd, REG_TYPE_VFD);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQMQ_Ibig:
	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_ibig);
	  break;
	try_rndq_ibig:
	case OP_RNDQ_Ibig:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
	    break;
	    try_immbig:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/false)
		== FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQMQ_I63b_RR:
	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_i63b_rr);
	  break;
	try_rndq_i63b_rr:
	  po_reg_or_goto (REG_TYPE_RN, try_rndq_i63b);
	  break;
	try_rndq_i63b:
	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, true);
	  }
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRnpctw:
	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, false);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, false);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, false);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, false);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, false);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, false);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, false);	break;
	case OP_I48_I64: po_imm1_or_imm2_or_fail (48, 64, false); break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, false);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, false);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, false);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, false);	break;
	case OP_I127:	 po_imm_or_fail (  0,	 127, false);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, false);	break;
	case OP_I511:	 po_imm_or_fail (  0,	 511, false);	break;
	case OP_I4095:	 po_imm_or_fail (  0,	4095, false);	break;
	case OP_I8191:   po_imm_or_fail (  0,	8191, false);	break;
	case OP_I4b:	 po_imm_or_fail (  1,	   4, true);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, true);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, true);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, true);	break;
	case OP_oI32b:   po_imm_or_fail (  1,     32, true);    break;
	case OP_oI32z:   po_imm_or_fail (  0,     32, true);    break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, true);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, true);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, true);
	    /* If the expression parser consumed up to the zapped '!',
	       step past it so the '!' counts as consumed.  */
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  if (inst.relocs[0].exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	case OP_EXPs:
	  po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
					      GE_NO_PREFIX));
	  if (inst.relocs[i].exp.X_op == O_symbol)
	    {
	      inst.operands[i].hasreloc = 1;
	    }
	  else if (inst.relocs[i].exp.X_op == O_constant)
	    {
	      inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
	      inst.operands[i].hasreloc = 0;
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, false);	      break;

	case OP_RRnpcsp_I32: po_reg_or_goto (REG_TYPE_RN, I32); break;
	I32:		     po_imm_or_fail (1, 32, false);	break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, false);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	try_cond:
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER_I15:
	  po_barrier_or_imm (str); break;
	  immediate:
	  if (parse_immediate (&str, &val, 0, 15, true) == FAIL)
	    goto failure;
	  break;

	case OP_wPSR:
	case OP_rPSR:
	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
	    {
	      inst.error = _("Banked registers are not available with this "
			     "architecture.");
	      goto failure;
	    }
	  break;
	  try_psr:
	  val = parse_psr (&str, op_parse_code == OP_wPSR);
	  break;

	case OP_VLDR:
	  po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
	  break;
	try_sysreg:
	  val = parse_sys_vldr_vstr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      unsigned found = 0;
	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str, REGLIST_RN);
	  if (*str == '^')
	    {
	      inst.operands[i].writeback = 1;
	      str++;
	    }
	  break;

	case OP_CLRMLST:
	  val = parse_reg_list (&str, REGLIST_CLRM);
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
				    &partial_match);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
				    &partial_match);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_VRSDVLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_VFP_D_VPR, &partial_match);
	  if (val == FAIL && !partial_match)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S_VPR, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  break;

	case OP_MSTRLST4:
	case OP_MSTRLST2:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   1, &inst.operands[i].vectype);
	  if (val != (((op_parse_code == OP_MSTRLST2) ? 3 : 7) << 5 | 0xe))
	    goto failure;
	  break;
	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   0, &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDRMVE:
	  po_misc_or_fail (parse_address_group_reloc (&str, i, GROUP_MVE));
	  break;

	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	case OP_RMQRZ:
	case OP_oRMQRZ:
	  po_reg_or_goto (REG_TYPE_MQ, try_rr_zr);
	  break;

	case OP_RR_ZR:
	try_rr_zr:
	  po_reg_or_goto (REG_TYPE_RN, ZR);
	  break;
	ZR:
	  po_reg_or_fail (REG_TYPE_ZR);
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), op_parse_code);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.	 */
      switch (op_parse_code)
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_oRRnpcsp:
	case OP_RRnpcsp:
	case OP_RRnpcsp_I32:
	  if (inst.operands[i].isreg)
	    {
	      if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	      else if (inst.operands[i].reg == REG_SP
		       /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
			  relaxed since ARMv8-A.  */
		       && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		{
		  gas_assert (thumb);
		  inst.error = BAD_SP;
		}
	    }
	  break;

	case OP_RRnpctw:
	  if (inst.operands[i].isreg
	      && inst.operands[i].reg == REG_PC
	      && (inst.operands[i].writeback || thumb))
	    inst.error = BAD_PC;
	  break;

	case OP_RVSD_COND:
	case OP_VLDR:
	  if (inst.operands[i].isreg)
	    break;
	/* fall through.  */

	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_wPSR:
	case OP_rPSR:
	case OP_COND:
	case OP_oBARRIER_I15:
	case OP_REGLST:
	case OP_CLRMLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_VRSDVLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	case OP_MSTRLST2:
	case OP_MSTRLST4:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	case OP_LR:
	case OP_oLR:
	  if (inst.operands[i].reg != REG_LR)
	    inst.error = _("operand must be LR register");
	  break;

	case OP_SP:
	  if (inst.operands[i].reg != REG_SP)
	    inst.error = _("operand must be SP register");
	  break;

	case OP_R12:
	  if (inst.operands[i].reg != REG_R12)
	    inst.error = _("operand must be r12");
	  break;

	case OP_RMQRZ:
	case OP_oRMQRZ:
	case OP_RR_ZR:
	  if (!inst.operands[i].iszr && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_RRe:
	  if (inst.operands[i].isreg
	      && (inst.operands[i].reg & 0x00000001) != 0)
	    inst.error = BAD_ODD;
	  break;

	case OP_RRo:
	  if (inst.operands[i].isreg)
	    {
	      if ((inst.operands[i].reg & 0x00000001) != 1)
		inst.error = BAD_EVEN;
	      else if (inst.operands[i].reg == REG_SP)
		as_tsktsk (MVE_BAD_SP);
	      else if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	    }
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = BAD_SYNTAX;
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = BAD_SYNTAX;
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}
| |
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
/* NOTE(review): po_scalar_or_fail is never defined above (the macro is
   po_scalar_or_goto), so this #undef is a no-op; po_imm1_or_imm2_or_fail,
   po_scalar_or_goto, po_misc_or_fail and po_misc_or_fail_no_backtrack are
   left defined -- confirm whether later code relies on them before
   tidying this list.  */
#undef po_scalar_or_fail
#undef po_barrier_or_imm
| |
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and return from the
   *enclosing* function -- so this is only usable inside functions
   returning void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
| |
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   Like constraint above, this returns from the enclosing (void)
   function when the register is rejected.  */
#define reject_bad_reg(reg)					\
  do								\
   if (reg == REG_PC)						\
     {								\
       inst.error = BAD_PC;					\
       return;							\
     }								\
   else if (reg == REG_SP					\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))	\
     {								\
       inst.error = BAD_SP;					\
       return;							\
     }								\
  while (0)
| |
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Only a warning (as_tsktsk), never an error, and only
   when the user asked for deprecation warnings.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
| |
| /* Functions for operand encoding. ARM, then Thumb. */ |
| |
| #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31)) |
| |
/* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.

   The only binary encoding difference is the Coprocessor number.  Coprocessor
   9 is used for half-precision calculations or conversions.  The format of the
   instruction is the same as the equivalent Coprocessor 10 instruction that
   exists for Single-Precision operation.  */

static void
do_scalar_fp16_v82_encode (void)
{
  /* Conditional scalar fp16 is UNPREDICTABLE; warn rather than error so
     that such (broken) input still assembles.  */
  if (inst.cond < COND_ALWAYS)
    as_warn (_("scalar fp16 instruction cannot be conditional,"
	       " the behaviour is UNPREDICTABLE"));
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
	      _(BAD_FP16));

  /* Replace the coprocessor number field (bits 11:8) with 9.  */
  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
  mark_feature_used (&arm_ext_fp16);
}
| |
| /* If VAL can be encoded in the immediate field of an ARM instruction, |
| return the encoded form. Otherwise, return FAIL. */ |
| |
| static unsigned int |
| encode_arm_immediate (unsigned int val) |
| { |
| unsigned int a, i; |
| |
| if (val <= 0xff) |
| return val; |
| |
| for (i = 2; i < 32; i += 2) |
| if ((a = rotate_left (val, i)) <= 0xff) |
| return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */ |
| |
| return FAIL; |
| } |
| |
| /* If VAL can be encoded in the immediate field of a Thumb32 instruction, |
| return the encoded form. Otherwise, return FAIL. */ |
| static unsigned int |
| encode_thumb32_immediate (unsigned int val) |
| { |
| unsigned int a, i; |
| |
| if (val <= 0xff) |
| return val; |
| |
| for (i = 1; i <= 24; i++) |
| { |
| a = val >> i; |
| if ((val & ~(0xffU << i)) == 0) |
| return ((val >> i) & 0x7f) | ((32 - i) << 7); |
| } |
| |
| a = val & 0xff; |
| if (val == ((a << 16) | a)) |
| return 0x100 | a; |
| if (val == ((a << 24) | (a << 16) | (a << 8) | a)) |
| return 0x300 | a; |
| |
| a = val & 0xff00; |
| if (val == ((a << 16) | a)) |
| return 0x200 | (a >> 8); |
| |
| return FAIL; |
| } |
| /* Encode a VFP SP or DP register number into inst.instruction. */ |
| |
| static void |
| encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos) |
| { |
| if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm) |
| && reg > 15) |
| { |
| if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32)) |
| { |
| if (thumb_mode) |
| ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, |
| fpu_vfp_ext_d32); |
| else |
| ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, |
| fpu_vfp_ext_d32); |
| } |
| else |
| { |
| first_error (_("D register out of range for selected VFP version")); |
| return; |
| } |
| } |
| |
| switch (pos) |
| { |
| case VFP_REG_Sd: |
| inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22); |
| break; |
| |
| case VFP_REG_Sn: |
| inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7); |
| break; |
| |
| case VFP_REG_Sm: |
| inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5); |
| break; |
| |
| case VFP_REG_Dd: |
| inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22); |
| break; |
| |
| case VFP_REG_Dn: |
| inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7); |
| break; |
| |
| case VFP_REG_Dm: |
| inst.instruction |= (reg & 15) | ((reg >> 4) << 5); |
| break; |
| |
| default: |
| abort (); |
| } |
| } |
| |
| /* Encode a <shift> in an ARM-format instruction. The immediate, |
| if any, is handled by md_apply_fix. */ |
| static void |
| encode_arm_shift (int i) |
| { |
| /* register-shifted register. */ |
| if (inst.operands[i].immisreg) |
| { |
| int op_index; |
| for (op_index = 0; op_index <= i; ++op_index) |
| { |
| /* Check the operand only when it's presented. In pre-UAL syntax, |
| if the destination register is the same as the first operand, two |
| register form of the instruction can be used. */ |
| if (inst.operands[op_index].present && inst.operands[op_index].isreg |
| && inst.operands[op_index].reg == REG_PC) |
| as_warn (UNPRED_REG ("r15")); |
| } |
| |
| if (inst.operands[i].imm == REG_PC) |
| as_warn (UNPRED_REG ("r15")); |
| } |
| |
| if (inst.operands[i].shift_kind == SHIFT_RRX) |
| inst.instruction |= SHIFT_ROR << 5; |
| else |
| { |
| inst.instruction |= inst.operands[i].shift_kind << 5; |
| if (inst.operands[i].immisreg) |
| { |
| inst.instruction |= SHIFT_BY_REG; |
| inst.instruction |= inst.operands[i].imm << 8; |
| } |
| else |
| inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM; |
| } |
| } |
| |
| static void |
| encode_arm_shifter_operand (int i) |
| { |
| if (inst.operands[i].isreg) |
| { |
| inst.instruction |= inst.operands[i].reg; |
| encode_arm_shift (i); |
| } |
| else |
| { |
| inst.instruction |= INST_IMMEDIATE; |
| if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE) |
| inst.instruction |= inst.operands[i].imm; |
| } |
| } |
| |
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register (Rn, bits 19:16) and the P (pre-index)
   and W (write-back) bits for operand I.  IS_T selects the user-mode
   LDRT/STRT-style forms, which are post-indexed only.  */
static void
encode_arm_addr_mode_common (int i, bool is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* For the T variants the bit at the W position is set (these
	 forms are post-indexed only).  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* With write-back (or post-indexing), Rn (bits 19:16) equal to
     Rd (bits 15:12) is suspicious: warn.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
| |
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bool is_t)
{
  const bool is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally scaled by an immediate shift.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      /* Yes, this is backwards: the I bit *set* selects a register
	 offset in this encoding.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bool is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
| |
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bool is_t)
{
  /* Mode 3 has no shifter, so a scaled register index is malformed.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset in bits 3:0.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
| |
| /* Write immediate bits [7:0] to the following locations: |
| |
| |28/24|23 19|18 16|15 4|3 0| |
| | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h| |
| |
| This function is used by VMOV/VMVN/VORR/VBIC. */ |
| |
| static void |
| neon_write_immbits (unsigned immbits) |
| { |
| inst.instruction |= immbits & 0xf; |
| inst.instruction |= ((immbits >> 4) & 0x7) << 16; |
| inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24); |
| } |
| |
/* Invert low-order SIZE bits of XHI:XLO.  A NULL half is treated as
   zero and is not written back.  SIZE must be 8, 16, 32 or 64.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo_mask, hi_mask;

  switch (size)
    {
    case 8:
      lo_mask = 0xff;
      hi_mask = 0;
      break;

    case 16:
      lo_mask = 0xffff;
      hi_mask = 0;
      break;

    case 32:
      lo_mask = 0xffffffff;
      hi_mask = 0;
      break;

    case 64:
      lo_mask = 0xffffffff;
      hi_mask = 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = (~*xlo) & lo_mask;

  /* The high word only participates for SIZE == 64.  */
  if (xhi && hi_mask != 0)
    *xhi = (~*xhi) & hi_mask;
}
| |
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D, i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int pos;

  for (pos = 0; pos < 32; pos += 8)
    {
      unsigned byte = (imm >> pos) & 0xff;

      if (byte != 0 && byte != 0xff)
	return 0;
    }

  return 1;
}
| |
/* For immediate of above form, return 0bABCD: bit 0 of each byte of
   IMM, gathered into the low four bits (byte 3 -> bit 3, ... byte 0
   -> bit 0).  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
| |
/* Compress quarter-float representation to 0b...000 abcdefgh: the
   sign bit (bit 31) lands in bit 7, and bits 25:19 land in bits 6:0.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned sign = (imm >> 24) & 0x80;
  unsigned expmant = (imm >> 19) & 0x7f;

  return sign | expmant;
}
| |
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL when no cmode matches.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* cmode 0xf: quarter-precision float squashed to eight bits.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* cmode 0xe/op 1: 64-bit mask, each byte all-zeros or all-ones.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* A 64-bit immediate is otherwise only encodable when both
	 halves match; fall through and treat it as 32-bit.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* cmodes 0x0/0x2/0x4/0x6: a single byte in any byte position of
	 the word, remaining bytes zero.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* cmodes 0xc/0xd: a single byte with the bytes below it all-ones.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry as a 16-bit element if the two halfwords agree.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* cmodes 0x8/0xa: a single byte in either half of a halfword.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry as an 8-bit element if both bytes agree.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  /* cmode 0xe/op 0: plain 8-bit immediate.  */
  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
| |
/* Returns TRUE if double precision value V may be cast
   to single precision without loss of accuracy.  */

static bool
is_double_a_single (uint64_t v)
{
  int exp = (v >> 52) & 0x7FF;
  uint64_t mantissa = v & 0xFFFFFFFFFFFFFULL;

  /* The conversion drops the low 29 mantissa bits; they must already
     be zero.  */
  if ((mantissa & 0x1FFFFFFFL) != 0)
    return false;

  /* Zero/denormal and infinity/NaN exponents always convert.  */
  if (exp == 0 || exp == 0x7FF)
    return true;

  /* Otherwise the unbiased exponent must fit single precision.  */
  return exp >= 1023 - 126 && exp <= 1023 + 127;
}
| |
/* Returns a double precision value casted to single precision
   (ignoring the least significant bits in exponent and mantissa).  */

static int
double_to_single (uint64_t v)
{
  unsigned int sign = (v >> 63) & 1;
  int exp = (v >> 52) & 0x7FF;
  uint64_t mantissa = v & 0xFFFFFFFFFFFFFULL;

  if (exp == 0x7FF)
    /* Infinity or NaN: single precision keeps the all-ones exponent;
       the mantissa truncation below preserves the top NaN payload bits.  */
    exp = 0xFF;
  else
    {
      /* Rebias the exponent from double (1023) to single (127).  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Too large for single precision: encode infinity (all-ones
	     exponent, zero mantissa).  This formerly wrote exponent
	     0x7F, which encodes a finite value near 1.0, contradicting
	     the intent stated here.  */
	  exp = 0xFF;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers: flush to zero.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Keep the top 23 of the 52 mantissa bits.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
| |
/* Kind of "=N" literal being loaded; selects which instruction set's
   move-immediate encodings move_or_literal_pool may substitute.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb LDR rd, =N.  */
  CONST_ARM,	/* ARM LDR rd, =N.  */
  CONST_VEC	/* VLDR-style load; may become VMOV/fconsts/fconstd.  */
};
| |
| static void do_vfp_nsyn_opcode (const char *); |
| |
/* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return true; if it can't, convert inst.instruction to a literal-pool
   load and return false.  If this is not a valid thing to do in the
   current context, set inst.error and return true.

   inst.operands[i] describes the destination register.  */

static bool
move_or_literal_pool (int i, enum lit_type t, bool mode_3)
{
  unsigned long tbit;
  bool thumb_p = (t == CONST_THUMB);
  bool arm_p = (t == CONST_ARM);

  /* Locate the load bit for the instruction set in use; only load
     instructions accept the =N pseudo operand.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return true;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return true;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
      uint64_t v;
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  LITTLENUM_TYPE *l;

	  if (inst.relocs[0].exp.X_add_number <= 0) /* FP value.  */
	    {
	      /* FIXME: The code that was here previously could not
		 work.  Firstly, it tried to convert a floating point
		 number into an extended precision format, but only
		 provided a buffer of 5 littlenums, which was too
		 small.  Secondly, it then didn't deal with the value
		 converted correctly, just reading out the first 4
		 littlenum fields and assuming that could be used
		 directly.

		 I think the code was intended to handle expressions
		 such as:

		   LDR r0, =1.0
		   VLDR d0, =55.3

		 but the parsers currently don't permit floating-point
		 literal values to be written this way, so this code
		 is probably unreachable.  To be safe, we simply
		 return an error here.  */

	      inst.error = _("constant expression not supported");
	      return true;
	    }
	  else
	    l = generic_bignum;

	  /* Pack the four least-significant 16-bit littlenums into a
	     64-bit value; l[0] is the least significant.  */
	  v = l[3] & LITTLENUM_MASK;
	  v <<= LITTLENUM_NUMBER_OF_BITS;
	  v |= l[2] & LITTLENUM_MASK;
	  v <<= LITTLENUM_NUMBER_OF_BITS;
	  v |= l[1] & LITTLENUM_MASK;
	  v <<= LITTLENUM_NUMBER_OF_BITS;
	  v |= l[0] & LITTLENUM_MASK;
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* This substitution must not lead to a flag-setting
		 instruction being chosen, so we do not check whether
		 MOVS could be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		   || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bool isNegated = false;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm == (unsigned int) FAIL)
		    {
		      /* Try the bitwise complement: loadable via MVN.  */
		      newimm = encode_thumb32_immediate (~v);
		      isNegated = true;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000 /* MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return true;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      /* In case this replacement is being done on Armv8-M
			 Baseline we need to make sure to disable the
			 instruction size check, as otherwise GAS will reject
			 the use of this T32 instruction.  */
		      inst.size_req = 0;
		      return true;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return true;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return true;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      /* For a signed constant the high word replicates the sign
		 bit of the low word (the arithmetic right shift of the
		 sign-extended value).  */
	      unsigned immhi = inst.operands[1].regisimm
			       ? inst.operands[1].reg
			       : inst.relocs[0].exp.X_unsigned
				 ? 0
				 : (int64_t) (int) immlo >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, false, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  /* No VMOV form; try the inverted constant for VMVN.  */
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, false, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
				      | (1 << 23)
				      | (cmode << 8)
				      | (op << 5)
				      | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return true;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return true;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return true;
		}
	    }
	}
    }

  /* No move substitution possible: fall back to a literal-pool load.
     Vector (non-single) literals take eight bytes in the pool.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return true;

  /* Rewrite the operand as a PC-relative, pre-indexed reference.  */
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
			 ? BFD_RELOC_ARM_THUMB_OFFSET
			 : (mode_3
			    ? BFD_RELOC_ARM_HWLITERAL
			    : BFD_RELOC_ARM_LITERAL));
  return false;
}
| |
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* "=N" pseudo operand: try a vmov substitution or a pool load.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/false))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: imm goes in bits 7:0, U bit is always set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Group relocations (ALU_PC_G0_NC .. LDC_SB_G2) and LDR_PC_G0 are
     preserved; anything else becomes the default CP offset reloc.  */
  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
| |
/* Functions for instruction encoding, sorted by sub-architecture.
   First some generics; their names are taken from the conventional
   bit positions for register arguments in ARM format instructions.  */

/* Encoder for instructions with no operands: the base opcode from
   insns[] is already complete.  */
static void
do_noargs (void)
{
}
| |
/* Encode operand 0 into the Rd field (bits 15:12).  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
| |
/* Encode operand 0 into the Rn field (bits 19:16).  */
static void
do_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
}
| |
/* Encode operand 0 as Rd (bits 15:12) and operand 1 as Rm (bits 3:0).  */
static void
do_rd_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}
| |
/* Encode operand 0 as Rm (bits 3:0) and operand 1 as Rn (bits 19:16).  */
static void
do_rm_rn (void)
{
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 16;
}
| |
/* Encode operand 0 as Rd (bits 15:12) and operand 1 as Rn (bits 19:16).  */
static void
do_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
| |
/* Encode operand 0 as Rn (bits 19:16) and operand 1 as Rd (bits 15:12).  */
static void
do_rn_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}
| |
/* Encode operand 0 into bits 11:8 and operand 1 as Rn (bits 19:16).
   (Used by the TT* mnemonics, going by the name — confirm against the
   insns[] table.)  */
static void
do_tt (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
}
| |
| static bool |
| check_obsolete (const arm_feature_set *feature, const char *msg) |
| { |
| if (ARM_CPU_IS_ANY (cpu_variant)) |
| { |
| as_tsktsk ("%s", msg); |
| return true; |
| } |
| else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature)) |
| { |
| as_bad ("%s", msg); |
| return true; |
| } |
| |
| return false; |
| } |
| |
/* Encode operand 0 as Rd (15:12), operand 1 as Rm (3:0) and operand 2
   as Rn (19:16).  Also enforces the SWP/SWPB restrictions.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  The mask ignores the
     condition field and bit 22 (the byte/word selector).  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and
	 ARMv7.  */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
| |
/* Encode operand 0 as Rd (15:12), operand 1 as Rn (19:16) and
   operand 2 as Rm (3:0).  */
static void
do_rd_rn_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}
| |
/* Encode operand 0 as Rm (3:0), operand 1 as Rd (15:12) and operand 2
   as Rn (19:16).  The address operand must be a bare [Rn]: any offset
   expression other than a literal zero is rejected.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  constraint (((inst.relocs[0].exp.X_op != O_constant
		&& inst.relocs[0].exp.X_op != O_illegal)
	       || inst.relocs[0].exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
| |
/* Encode a bare immediate operand into the low bits of the opcode.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
| |
| /* ARM instructions, in alphabetical order by function name (except |
| that wrapper functions appear immediately after the function they |
| wrap). */ |
| |
/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* -8 compensates for the PC reading as . + 8 in ARM state.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* Keep the Thumb bit when taking the address of a Thumb function
     under interworking.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
| |
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* Two ADD instructions are emitted.  */
  inst.size = INSN_SIZE * 2;
  /* -8 compensates for the PC reading as . + 8 in ARM state.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* Keep the Thumb bit when taking the address of a Thumb function
     under interworking.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
| |
/* Encode an ARM data-processing arithmetic instruction: Rd, Rn and a
   shifter operand.  In the two-operand form, Rd doubles as Rn.  */
static void
do_arit (void)
{
  /* Thumb-1 group relocations cannot be used here.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rn */
  encode_arm_shifter_operand (2);
}
| |
| static void |
| do_barrier (void) |
| { |
| if (inst.operands[0].present) |
| inst.instruction |= inst.operands[0].imm; |
| else |
| inst.instruction |= 0xf; |
| } |
| |
| static void |
| do_bfc (void) |
| { |
| unsigned int msb = inst.operands[1].imm + inst.operands[2].imm; |
| constraint (msb > 32, _("bit-field extends past end of register")); |
| /* The instruction encoding stores the LSB and MSB, |
| not the LSB and width. */ |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].imm << 7; |
| inst.instruction |= (msb - 1) << 16; |
| } |
| |
| static void |
| do_bfi (void) |
| { |
| unsigned int msb; |
| |
| /* #0 in second position is alternative syntax for bfc, which is |
| the same instruction but with REG_PC in the Rm field. */ |
| if (!inst.operands[1].isreg) |
| inst.operands[1].reg = REG_PC; |
| |
| msb = inst.operands[2].imm + inst.operands[3].imm; |
| constraint (msb > 32, _("bit-field extends past end of register")); |
| /* The instruction encoding stores the LSB and MSB, |
| not the LSB and width. */ |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg; |
| inst.instruction |= inst.operands[2].imm << 7; |
| inst.instruction |= (msb - 1) << 16; |
| } |
| |
| static void |
| do_bfx (void) |
| { |
| constraint (inst.operands[2].imm + inst.operands[3].imm > 32, |
| _("bit-field extends past end of register")); |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg; |
| inst.instruction |= inst.operands[2].imm << 7; |
| inst.instruction |= (inst.operands[3].imm - 1) << 16; |
| } |
| |
| /* ARM V5 breakpoint instruction (argument parse) |
| BKPT <16 bit unsigned immediate> |
| Instruction is not conditional. |
| The bit pattern given in insns[] has the COND_ALWAYS condition, |
| and it is an error if the caller tried to override that. */ |
| |
| static void |
| do_bkpt (void) |
| { |
| /* Top 12 of 16 bits to bits 19:8. */ |
| inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4; |
| |
| /* Bottom 4 of 16 bits to bits 3:0. */ |
| inst.instruction |= inst.operands[0].imm & 0xf; |
| } |
| |
/* Set up inst.relocs[0] for a branch-style instruction.  DEFAULT_RELOC
   is used unless the operand carried an explicit '(plt)' or '(tlscall)'
   relocation suffix.  The reloc is always pc-relative.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      /* '(tlscall)' selects the Thumb or ARM TLS-call reloc depending on
	 the current instruction-set state.  */
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
| |
/* B{cond} <target>.  EABI v4 and later ELF objects use the _JUMP
   reloc; everything else uses the plain pc-relative branch reloc.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
| |
/* BL{cond} <target>.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* An unconditional BL gets the _CALL reloc (which presumably the
	 linker may rewrite to BLX); a conditional BL cannot be rewritten
	 and gets the _JUMP reloc instead.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
| |
| /* ARM V5 branch-link-exchange instruction (argument parse) |
| BLX <target_addr> ie BLX(1) |
| BLX{<condition>} <Rm> ie BLX(2) |
| Unfortunately, there are two different opcodes for this mnemonic. |
| So, the insns[].value is not used, and the code here zaps values |
| into inst.instruction. |
| Also, the <target_addr> can be 25 bits, hence has its own reloc. */ |
| |
| static void |
| do_blx (void) |
| { |
| if (inst.operands[0].isreg) |
| { |
| /* Arg is a register; the opcode provided by insns[] is correct. |
| It is not illegal to do "blx pc", just useless. */ |
| if (inst.operands[0].reg == REG_PC) |
| as_tsktsk (_("use of r15 in blx in ARM mode is not really useful")); |
| |
| inst.instruction |= inst.operands[0].reg; |
| } |
| else |
| { |
| /* Arg is an address; this instruction cannot be executed |
| conditionally, and the opcode must be adjusted. |
| We retain the BFD_RELOC_ARM_PCREL_BLX till the very end |
| where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */ |
| constraint (inst.cond != COND_ALWAYS, BAD_COND); |
| inst.instruction = 0xfa000000; |
| encode_branch (BFD_RELOC_ARM_PCREL_BLX); |
| } |
| } |
| |
static void
do_bx (void)
{
  bool want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  /* A non-empty selected_object_arch overrides the CPU selection for
     this decision.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
    want_reloc = true;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    /* Pre-EABI-v4 objects (and, for non-ELF, everything) suppress it.  */
    want_reloc = false;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
| |
| |
| /* ARM v5TEJ. Jump to Jazelle code. */ |
| |
| static void |
| do_bxj (void) |
| { |
| if (inst.operands[0].reg == REG_PC) |
| as_tsktsk (_("use of r15 in bxj is not really useful")); |
| |
| inst.instruction |= inst.operands[0].reg; |
| } |
| |
| /* Co-processor data operation: |
| CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} |
| CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */ |
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;	/* coproc.  */
  inst.instruction |= inst.operands[1].imm << 20;	/* opcode_1.  */
  inst.instruction |= inst.operands[2].reg << 12;	/* CRd.  */
  inst.instruction |= inst.operands[3].reg << 16;	/* CRn.  */
  inst.instruction |= inst.operands[4].reg;		/* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;	/* opcode_2.  */
}
| |
/* Comparison-style instruction: first operand register into bits 19:16,
   operand 1 is the shifter operand.  */
static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
| |
| /* Transfer between coprocessor and ARM registers. |
| MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>} |
| MRC2 |
| MCR{cond} |
| MCR2 |
| |
| No special properties. */ |
| |
/* Description of one coprocessor register whose access is deprecated
   or obsoleted.  The first five fields identify the MRC/MCR encoding;
   the feature sets give the architectures for which the corresponding
   diagnostic applies.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Deprecated for these features.  */
  arm_feature_set obsoleted;	/* Obsoleted for these features.  */
  const char *dep_msg;		/* Deprecation diagnostic.  */
  const char *obs_msg;		/* Obsoletion diagnostic.  */
};
| |
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  All entries here are
   deprecated from ARMv8; none is currently obsoleted, hence the
   ARM_ARCH_NONE / NULL obsoletion fields.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
| |
/* Encode MRC/MRC2/MCR/MCR2, checking register legality for the current
   instruction-set state and warning about deprecated coprocessor
   register accesses.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn about accesses to deprecated coprocessor registers.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;	/* coproc.  */
  inst.instruction |= inst.operands[1].imm << 21;	/* opcode_1.  */
  inst.instruction |= Rd << 12;				/* Rd.  */
  inst.instruction |= inst.operands[3].reg << 16;	/* CRn.  */
  inst.instruction |= inst.operands[4].reg;		/* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;	/* opcode_2.  */
}
| |
| /* Transfer between coprocessor register and pair of ARM registers. |
| MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>. |
| MCRR2 |
| MRRC{cond} |
| MRRC2 |
| |
| Two XScale instructions are special cases of these: |
| |
| MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0 |
| MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0 |
| |
| Result unpredictable if Rd or Rn is R15. */ |
| |
/* Encode MCRR/MCRR2/MRRC/MRRC2.  See the format comment above for the
   operand order.  */
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
       constraint (Rd == Rn, BAD_OVERLAP);
    }

  inst.instruction |= inst.operands[0].reg << 8;	/* coproc.  */
  inst.instruction |= inst.operands[1].imm << 4;	/* opcode.  */
  inst.instruction |= Rd << 12;				/* Rd.  */
  inst.instruction |= Rn << 16;				/* Rn.  */
  inst.instruction |= inst.operands[4].reg;		/* CRm.  */
}
| |
| static void |
| do_cpsi (void) |
| { |
| inst.instruction |= inst.operands[0].imm << 6; |
| if (inst.operands[1].present) |
| { |
| inst.instruction |= CPSI_MMOD; |
| inst.instruction |= inst.operands[1].imm; |
| } |
| } |
| |
/* DBG #<option>: the option value occupies the low bits of the
   instruction.  */
static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
| |
| static void |
| do_div (void) |
| { |
| unsigned Rd, Rn, Rm; |
| |
| Rd = inst.operands[0].reg; |
| Rn = (inst.operands[1].present |
| ? inst.operands[1].reg : Rd); |
| Rm = inst.operands[2].reg; |
| |
| constraint ((Rd == REG_PC), BAD_PC); |
| constraint ((Rn == REG_PC), BAD_PC); |
| constraint ((Rm == REG_PC), BAD_PC); |
| |
| inst.instruction |= Rd << 16; |
| inst.instruction |= Rn << 0; |
| inst.instruction |= Rm << 8; |
| } |
| |
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emit no bytes for this "instruction".  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_pred_insn_type (IT_INSN);
      /* Record the IT mask and condition so that the following
	 instructions can be validated against the IT block.  */
      now_pred.mask = (inst.instruction & 0xf) | 0x10;
      now_pred.cc = inst.operands[0].imm;
    }
}
| |
| /* If there is only one register in the register list, |
| then return its register number. Otherwise return -1. */ |
static int
only_one_reg_in_list (int range)
{
  /* ffs returns one plus the index of the least significant set bit,
     or zero when RANGE is empty.  */
  int i = ffs (range) - 1;

  /* Reject an empty list (i == -1) before shifting: the previous code
     evaluated 1 << -1 in that case, which is undefined behaviour.
     Also reject bits above the 16 core registers.  */
  if (i < 0 || i > 15)
    return -1;

  /* RANGE must contain exactly that single bit.  */
  return range == (1 << i) ? i : -1;
}
| |
/* Encode an LDM/STM-family instruction: base register in bits 19:16,
   the register list in the low 16 bits.  FROM_PUSH_POP_MNEM is non-zero
   when the source used the PUSH/POP mnemonic, which enables the
   single-register A2 encoding below.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' on the register list selects the user-bank/exception-return
     forms.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      /* Keep only the condition field, then substitute the
	 single-word-transfer opcode.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
| |
| static void |
| do_ldmstm (void) |
| { |
| encode_ldmstm (/*from_push_pop_mnem=*/false); |
| } |
| |
| /* ARMv5TE load-consecutive (argument parse) |
| Mode is like LDRH. |
| |
| LDRccD R, mode |
| STRccD R, mode. */ |
| |
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register defaults to first + 1 when omitted.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/false);
}
| |
/* LDREX Rd, [Rn].  Only a plain base-register address (zero offset,
   no writeback, no index) is accepted.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): this PC check appears to duplicate the one folded
     into the addressing-mode constraint above; it reports BAD_PC
     rather than BAD_ADDR_MODE, but is otherwise unreachable.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
| |
/* LDREXD Rt, {Rt2,} [Rn]: Rt must be even, Rt2 (if given) must be
   Rt + 1; the base register is operand 2.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
| |
| /* In both ARM and thumb state 'ldr pc, #imm' with an immediate |
| which is not a multiple of four is UNPREDICTABLE. */ |
static void
check_ldr_r15_aligned (void)
{
  /* Only the immediate-offset, pc-relative form can be checked here;
     register offsets are unknown at assembly time.  */
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
	      && inst.operands[1].reg == REG_PC
	      && (inst.relocs[0].exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
| |
/* LDR/STR word/byte forms (addressing mode 2).  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* A bare expression operand may be turned into a MOV or a
     literal-pool load by move_or_literal_pool.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/false))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/false);
  check_ldr_r15_aligned ();
}
| |
/* LDRT/STRT (user-mode translation) word/byte forms.  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A pre-indexed zero offset is tolerated and rewritten as a
	 post-indexed form; anything else is rejected.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/true);
}
| |
| /* Halfword and signed-byte load/store operations. */ |
| |
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  /* A bare expression operand may become a MOV or a literal-pool
     load; note the mode-3 (halfword/signed-byte) form is requested.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/true))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/false);
}
| |
/* User-mode (translation) variants of the halfword/signed-byte
   load/stores; mirrors do_ldstt but uses addressing mode 3.  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/true);
}
| |
| /* Co-processor register load/store. |
| Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */ |
static void
do_lstc (void)
{
  inst.instruction |= inst.operands[0].reg << 8;	/* CP#.  */
  inst.instruction |= inst.operands[1].reg << 12;	/* CRd.  */
  encode_arm_cp_address (2, true, true, 0);
}
| |
/* MLA/MLAS/MLS encoding.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))	/* Bit 22 set selects MLS.  */
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
| |
/* MOV Rd, <shifter_operand>.  */
static void
do_mov (void)
{
  /* The Thumb-1 ALU-immediate group relocations are not valid on the
     ARM encoding.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
| |
| /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */ |
| static void |
| do_mov16 (void) |
| { |
| bfd_vma imm; |
| bool top; |
| |
| top = (inst.instruction & 0x00400000) != 0; |
| constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW, |
| _(":lower16: not allowed in this instruction")); |
| constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT, |
| _(":upper16: not allowed in this instruction")); |
| inst.instruction |= inst.operands[0].reg << 12; |
| if (inst.relocs[0].type == BFD_RELOC_UNUSED) |
| { |
| imm = inst.relocs[0].exp.X_add_number; |
| /* The value is in two pieces: 0:11, 16:19. */ |
| inst.instruction |= (imm & 0x00000fff); |
| inst.instruction |= (imm & 0x0000f000) << 4; |
| } |
| } |
| |
/* Handle an MRS written against VFP system registers in non-unified
   syntax.  Returns SUCCESS if the instruction was encoded as a VFP
   instruction, FAIL if it is an ordinary MRS.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* FMSTAT takes no operands; clear them before re-encoding.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
| |
| static int |
| do_vfp_nsyn_msr (void) |
| { |
| if (inst.operands[0].isvec) |
| do_vfp_nsyn_opcode ("fmxr"); |
| else |
| return FAIL; |
| |
| return SUCCESS; |
| } |
| |
| static void |
| do_vmrs (void) |
| { |
| unsigned Rt = inst.operands[0].reg; |
| |
| if (thumb_mode && Rt == REG_SP) |
| { |
| inst.error = BAD_SP; |
| return; |
| } |
| |
| switch (inst.operands[1].reg) |
| { |
| /* MVFR2 is only valid for Armv8-A. */ |
| case 5: |
| constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8), |
| _(BAD_FPU)); |
| break; |
| |
| /* Check for new Armv8.1-M Mainline changes to <spec_reg>. */ |
| case 1: /* fpscr. */ |
| constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext) |
| || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)), |
| _(BAD_FPU)); |
| break; |
| |
| case 14: /* fpcxt_ns, fpcxtns, FPCXT_NS, FPCXTNS. */ |
| case 15: /* fpcxt_s, fpcxts, FPCXT_S, FPCXTS. */ |
| constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main), |
| _("selected processor does not support instruction")); |
| break; |
| |
| case 2: /* fpscr_nzcvqc. */ |
| case 12: /* vpr. */ |
| case 13: /* p0. */ |
| constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main) |
| || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext) |
| && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)), |
| _("selected processor does not support instruction")); |
| if (inst.operands[0].reg != 2 |
| && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) |
| as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE")); |
| break; |
| |
| default: |
| break; |
| } |
| |
| /* APSR_ sets isvec. All other refs to PC are illegal. */ |
| if (!inst.operands[0].isvec && Rt == REG_PC) |
| { |
| inst.error = BAD_PC; |
| return; |
| } |
| |
| /* If we get through parsing the register name, we just insert the number |
| generated into the instruction without further validation. */ |
| inst.instruction |= (inst.operands[1].reg << 16); |
| inst.instruction |= (Rt << 12); |
| } |
| |
/* VMSR <spec_reg>, Rt.  Operand 0 is the system register number,
   operand 1 the core register Rt — the reverse of do_vmrs.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  switch (inst.operands[0].reg)
    {
    /* MVFR2 is only valid for Armv8-A.  */
    case 5:
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));
      break;

    /* Check for new Armv8.1-M Mainline changes to <spec_reg>.  */
    case 1: /* fpscr.  */
      constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		    || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _(BAD_FPU));
      break;

    case 14: /* fpcxt_ns.  */
    case 15: /* fpcxt_s.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
		  _("selected processor does not support instruction"));
      break;

    case  2: /* fpscr_nzcvqc.  */
    case 12: /* vpr.  */
    case 13: /* p0.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
		  || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _("selected processor does not support instruction"));
      /* VPR and P0 (but not spec_reg 2) are MVE registers.  */
      if (inst.operands[0].reg != 2
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
      break;

    default:
      break;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
| |
/* MRS Rd, <psr>, including the banked-register form.  */
static void
do_mrs (void)
{
  unsigned br;

  /* Hand off VFP system-register forms to the VFP encoder.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked register: presumably pre-encoded by the operand parser —
	 the mask checks bit 9 and bits 19:16.  TODO(review): confirm
	 against the parser.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
| |
| /* Two possible forms: |
| "{C|S}PSR_<field>, Rm", |
| "{C|S}PSR_f, #expression". */ |
| |
static void
do_msr (void)
{
  /* VFP system-register destinations are encoded as VFP instructions.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Operand 0 carries the pre-encoded PSR/field-mask bits.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: the value is resolved by the standard ARM
	 immediate fixup.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
| |
/* MUL Rd, Rm {, Rs}.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  /* Two-operand form: the omitted Rs defaults to Rd.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Before v6 Rd and Rm should differ; warn (not error) if not.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
| |
| /* Long Multiply Parser |
| UMULL RdLo, RdHi, Rm, Rs |
| SMULL RdLo, RdHi, Rm, Rs |
| UMLAL RdLo, RdHi, Rm, Rs |
| SMLAL RdLo, RdHi, Rm, Rs. */ |
| |
static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  inst.instruction |= inst.operands[3].reg << 8;	/* Rs.  */

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
| |
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Preserve only the condition.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;	/* Hint number.  */
    }
}
| |
| /* ARM V6 Pack Halfword Bottom Top instruction (argument parse). |
| PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>} |
| Condition defaults to COND_ALWAYS. |
| Error if Rd, Rn or Rm are R15. */ |
| |
| static void |
| do_pkhbt (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg << 16; |
| inst.instruction |= inst.operands[2].reg; |
| if (inst.operands[3].present) |
| encode_arm_shift (3); |
| } |
| |
| /* ARM V6 PKHTB (Argument Parse). */ |
| |
| static void |
| do_pkhtb (void) |
| { |
| if (!inst.operands[3].present) |
| { |
| /* If the shift specifier is omitted, turn the instruction |
| into pkhbt rd, rm, rn. */ |
| inst.instruction &= 0xfff00010; |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg; |
| inst.instruction |= inst.operands[2].reg << 16; |
| } |
| else |
| { |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg << 16; |
| inst.instruction |= inst.operands[2].reg; |
| encode_arm_shift (3); |
| } |
| } |
| |
| /* ARMv5TE: Preload-Cache |
| MP Extensions: Preload for write |
| |
| PLD(W) <addr_mode> |
| |
| Syntactically, like LDR with B=1, W=0, L=1. */ |
| |
static void
do_pld (void)
{
  /* PLD accepts only a pre-indexed, non-writeback address.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/false);
}
| |
| /* ARMv7: PLI <addr_mode> */ |
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/false);
  /* PLI clears the P bit that the mode-2 address encoder set.  */
  inst.instruction &= ~PRE_INDEX;
}
| |
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));

  /* Rewrite the operands into the equivalent LDM/STM form: SP with
     writeback as the base register (operand 0), the original register
     list as operand 1.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/true);
}
| |
| /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the |
| word at the specified address and the following word |
| respectively. |
| Unconditionally executed. |
| Error if Rn is R15. */ |
| |
| static void |
| do_rfe (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 16; |
| if (inst.operands[0].writeback) |
| inst.instruction |= WRITE_BACK; |
| } |
| |
| /* ARM V6 ssat (argument parse). */ |
| |
static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* SSAT encodes the saturation bound as (imm - 1).  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
| |
| /* ARM V6 usat (argument parse). */ |
| |
static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Unlike SSAT, USAT encodes the bound directly.  */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
| |
| /* ARM V6 ssat16 (argument parse). */ |
| |
static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* As with SSAT, the bound is encoded as (imm - 1).  */
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}
| |
/* ARM V6 usat16 (argument parse); the bound is encoded directly.  */
static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
| |
| /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while |
| preserving the other bits. |
| |
| setend <endian_specifier>, where <endian_specifier> is either |
| BE or LE. */ |
| |
static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Bit 9 is the E bit: set for BE, clear for LE.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
| |
/* Shift-mnemonic forms (LSL/LSR/ASR/ROR as MOV with a shifter).  */
static void
do_shift (void)
{
  /* Rm defaults to Rd for the two-operand form.  */
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount: resolved by a fixup.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
| |
/* SMC (secure monitor call): the 4-bit immediate is placed by a
   dedicated fixup.  */
static void
do_smc (void)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));

  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}
| |
/* HVC (hypervisor call): the immediate is placed by the HVC fixup.  */
static void
do_hvc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
  inst.relocs[0].pc_rel = 0;
}
| |
/* SWI/SVC: the comment field is placed by the SWI fixup.  */
static void
do_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
  inst.relocs[0].pc_rel = 0;
}
| |
/* SETPAN, ARM encoding: the PAN value goes in bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
| |
| static void |
| do_t_setpan (void) |
| { |
| constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan), |
| _("selected processor does not support SETPAN instruction")); |
| |
| inst.instruction |= (inst.operands[0].imm << 3); |
| } |
| |
| /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse) |
| SMLAxy{cond} Rd,Rm,Rs,Rn |
| SMLAWy{cond} Rd,Rm,Rs,Rn |
| Error if any register is R15. */ |
| |
| static void |
| do_smla (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 16; |
| inst.instruction |= inst.operands[1].reg; |
| inst.instruction |= inst.operands[2].reg << 8; |
| inst.instruction |= inst.operands[3].reg << 12; |
| } |
| |
| /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse) |
| SMLALxy{cond} Rdlo,Rdhi,Rm,Rs |
| Error if any register is R15. |
| Warning if Rdlo == Rdhi. */ |
| |
| static void |
| do_smlal (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg << 16; |
| inst.instruction |= inst.operands[2].reg; |
| inst.instruction |= inst.operands[3].reg << 8; |
| |
| if (inst.operands[0].reg == inst.operands[1].reg) |
| as_tsktsk (_("rdhi and rdlo must be different")); |
| } |
| |
| /* ARM V5E (El Segundo) signed-multiply (argument parse) |
| SMULxy{cond} Rd,Rm,Rs |
| Error if any register is R15. */ |
| |
| static void |
| do_smul (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 16; |
| inst.instruction |= inst.operands[1].reg; |
| inst.instruction |= inst.operands[2].reg << 8; |
| } |
| |
| /* ARM V6 srs (argument parse). The variable fields in the encoding are |
| the same for both ARM and Thumb-2. */ |
| |
| static void |
| do_srs (void) |
| { |
| int reg; |
| |
| if (inst.operands[0].present) |
| { |
| reg = inst.operands[0].reg; |
| constraint (reg != REG_SP, _("SRS base register must be r13")); |
| } |
| else |
| reg = REG_SP; |
| |
| inst.instruction |= reg << 16; |
| inst.instruction |= inst.operands[1].imm; |
| if (inst.operands[0].writeback || inst.operands[1].writeback) |
| inst.instruction |= WRITE_BACK; |
| } |
| |
| /* ARM V6 strex (argument parse). */ |
| |
| static void |
| do_strex (void) |
| { |
| constraint (!inst.operands[2].isreg || !inst.operands[2].preind |
| || inst.operands[2].postind || inst.operands[2].writeback |
| || inst.operands[2].immisreg || inst.operands[2].shifted |
| || inst.operands[2].negative |
| /* See comment in do_ldrex(). */ |
| || (inst.operands[2].reg == REG_PC), |
| BAD_ADDR_MODE); |
| |
| constraint (inst.operands[0].reg == inst.operands[1].reg |
| || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP); |
| |
| constraint (inst.relocs[0].exp.X_op != O_constant |
| || inst.relocs[0].exp.X_add_number != 0, |
| _("offset must be zero in ARM encoding")); |
| |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg; |
| inst.instruction |= inst.operands[2].reg << 16; |
| inst.relocs[0].type = BFD_RELOC_UNUSED; |
| } |
| |
| static void |
| do_t_strexbh (void) |
| { |
| constraint (!inst.operands[2].isreg || !inst.operands[2].preind |
| || inst.operands[2].postind || inst.operands[2].writeback |
| || inst.operands[2].immisreg || inst.operands[2].shifted |
| || inst.operands[2].negative, |
| BAD_ADDR_MODE); |
| |
| constraint (inst.operands[0].reg == inst.operands[1].reg |
| || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP); |
| |
| do_rm_rd_rn (); |
| } |
| |
| static void |
| do_strexd (void) |
| { |
| constraint (inst.operands[1].reg % 2 != 0, |
| _("even register required")); |
| constraint (inst.operands[2].present |
| && inst.operands[2].reg != inst.operands[1].reg + 1, |
| _("can only store two consecutive registers")); |
| /* If op 2 were present and equal to PC, this function wouldn't |
| have been called in the first place. */ |
| constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here")); |
| |
| constraint (inst.operands[0].reg == inst.operands[1].reg |
| || inst.operands[0].reg == inst.operands[1].reg + 1 |
| || inst.operands[0].reg == inst.operands[3].reg, |
| BAD_OVERLAP); |
| |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg; |
| inst.instruction |= inst.operands[3].reg << 16; |
| } |
| |
| /* ARM V8 STRL. */ |
| static void |
| do_stlex (void) |
| { |
| constraint (inst.operands[0].reg == inst.operands[1].reg |
| || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP); |
| |
| do_rd_rm_rn (); |
| } |
| |
| static void |
| do_t_stlex (void) |
| { |
| constraint (inst.operands[0].reg == inst.operands[1].reg |
| || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP); |
| |
| do_rm_rd_rn (); |
| } |
| |
| /* ARM V6 SXTAH extracts a 16-bit value from a register, sign |
| extends it to 32-bits, and adds the result to a value in another |
| register. You can specify a rotation by 0, 8, 16, or 24 bits |
| before extracting the 16-bit value. |
| SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>} |
| Condition defaults to COND_ALWAYS. |
| Error if any register uses R15. */ |
| |
| static void |
| do_sxtah (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg << 16; |
| inst.instruction |= inst.operands[2].reg; |
| inst.instruction |= inst.operands[3].imm << 10; |
| } |
| |
| /* ARM V6 SXTH. |
| |
| SXTH {<cond>} <Rd>, <Rm>{, <rotation>} |
| Condition defaults to COND_ALWAYS. |
| Error if any register uses R15. */ |
| |
| static void |
| do_sxth (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg; |
| inst.instruction |= inst.operands[2].imm << 10; |
| } |
| |
| /* VFP instructions. In a logical order: SP variant first, monad |
| before dyad, arithmetic then move then load/store. */ |
| |
| static void |
| do_vfp_sp_monadic (void) |
| { |
| constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd) |
| && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), |
| _(BAD_FPU)); |
| |
| encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd); |
| encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm); |
| } |
| |
| static void |
| do_vfp_sp_dyadic (void) |
| { |
| encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd); |
| encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn); |
| encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm); |
| } |
| |
/* Compare-with-zero form: only the Sd field is encoded; there is no
   second register operand.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
| |
| static void |
| do_vfp_dp_sp_cvt (void) |
| { |
| encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd); |
| encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm); |
| } |
| |
| static void |
| do_vfp_sp_dp_cvt (void) |
| { |
| encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd); |
| encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm); |
| } |
| |
| static void |
| do_vfp_reg_from_sp (void) |
| { |
| constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd) |
| && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), |
| _(BAD_FPU)); |
| |
| inst.instruction |= inst.operands[0].reg << 12; |
| encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn); |
| } |
| |
| static void |
| do_vfp_reg2_from_sp2 (void) |
| { |
| constraint (inst.operands[2].imm != 2, |
| _("only two consecutive VFP SP registers allowed here")); |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg << 16; |
| encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm); |
| } |
| |
| static void |
| do_vfp_sp_from_reg (void) |
| { |
| constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd) |
| && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), |
| _(BAD_FPU)); |
| |
| encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn); |
| inst.instruction |= inst.operands[1].reg << 12; |
| } |
| |
| static void |
| do_vfp_sp2_from_reg2 (void) |
| { |
| constraint (inst.operands[0].imm != 2, |
| _("only two consecutive VFP SP registers allowed here")); |
| encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm); |
| inst.instruction |= inst.operands[1].reg << 12; |
| inst.instruction |= inst.operands[2].reg << 16; |
| } |
| |
/* Single-precision load/store: encode Sd, then the coprocessor-style
   address held in operand 1.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, false, true, 0);
}
| |
/* Double-precision load/store: encode Dd, then the coprocessor-style
   address held in operand 1.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, false, true, 0);
}
| |
| |
| static void |
| vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type) |
| { |
| if (inst.operands[0].writeback) |
| inst.instruction |= WRITE_BACK; |
| else |
| constraint (ldstm_type != VFP_LDSTMIA, |
| _("this addressing mode requires base-register writeback")); |
| inst.instruction |= inst.operands[0].reg << 16; |
| encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd); |
| inst.instruction |= inst.operands[1].imm; |
| } |
| |
| static void |
| vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type) |
| { |
| int count; |
| |
| if (inst.operands[0].writeback) |
| inst.instruction |= WRITE_BACK; |
| else |
| constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX, |
| _("this addressing mode requires base-register writeback")); |
| |
| inst.instruction |= inst.operands[0].reg << 16; |
| encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd); |
| |
| count = inst.operands[1].imm << 1; |
| if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX) |
| count += 1; |
| |
| inst.instruction |= count; |
| } |
| |
/* Wrappers binding a specific transfer mode to the common VFP
   load/store-multiple encoders above.  IA = increment after,
   DB = decrement before; the X variants make the transfer count odd
   (see vfp_dp_ldstm).  */

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
| |
| static void |
| do_vfp_dp_rd_rm (void) |
| { |
| constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1) |
| && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), |
| _(BAD_FPU)); |
| |
| encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd); |
| encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm); |
| } |
| |
| static void |
| do_vfp_dp_rn_rd (void) |
| { |
| encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn); |
| encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd); |
| } |
| |
| static void |
| do_vfp_dp_rd_rn (void) |
| { |
| encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd); |
| encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn); |
| } |
| |
| static void |
| do_vfp_dp_rd_rn_rm (void) |
| { |
| constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2) |
| && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), |
| _(BAD_FPU)); |
| |
| encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd); |
| encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn); |
| encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm); |
| } |
| |
/* Only the Dd destination field is encoded.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
| |
| static void |
| do_vfp_dp_rm_rd_rn (void) |
| { |
| constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2) |
| && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), |
| _(BAD_FPU)); |
| |
| encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm); |
| encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd); |
| encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn); |
| } |
| |
| /* VFPv3 instructions. */ |
| static void |
| do_vfp_sp_const (void) |
| { |
| encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd); |
| inst.instruction |= (inst.operands[1].imm & 0xf0) << 12; |
| inst.instruction |= (inst.operands[1].imm & 0x0f); |
| } |
| |
| static void |
| do_vfp_dp_const (void) |
| { |
| encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd); |
| inst.instruction |= (inst.operands[1].imm & 0xf0) << 12; |
| inst.instruction |= (inst.operands[1].imm & 0x0f); |
| } |
| |
| static void |
| vfp_conv (int srcsize) |
| { |
| int immbits = srcsize - inst.operands[1].imm; |
| |
| if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize)) |
| { |
| /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16. |
| i.e. immbits must be in range 0 - 16. */ |
| inst.error = _("immediate value out of range, expected range [0, 16]"); |
| return; |
| } |
| else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize)) |
| { |
| /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32. |
| i.e. immbits must be in range 0 - 31. */ |
| inst.error = _("immediate value out of range, expected range [1, 32]"); |
| return; |
| } |
| |
| inst.instruction |= (immbits & 1) << 5; |
| inst.instruction |= (immbits >> 1); |
| } |
| |
/* Fixed-point conversion helpers: encode the destination register,
   then let vfp_conv range-check and encode the immediate for the
   given source size.  */

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
| |
| /* iWMMXt instructions: strictly in alphabetical order. */ |
| |
/* The destination of these insns must be r15.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
| |
| static void |
| do_iwmmxt_textrc (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].imm; |
| } |
| |
| static void |
| do_iwmmxt_textrm (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg << 16; |
| inst.instruction |= inst.operands[2].imm; |
| } |
| |
| static void |
| do_iwmmxt_tinsr (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 16; |
| inst.instruction |= inst.operands[1].reg << 12; |
| inst.instruction |= inst.operands[2].imm; |
| } |
| |
| static void |
| do_iwmmxt_tmia (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 5; |
| inst.instruction |= inst.operands[1].reg; |
| inst.instruction |= inst.operands[2].reg << 12; |
| } |
| |
| static void |
| do_iwmmxt_waligni (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg << 16; |
| inst.instruction |= inst.operands[2].reg; |
| inst.instruction |= inst.operands[3].imm << 20; |
| } |
| |
| static void |
| do_iwmmxt_wmerge (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg << 16; |
| inst.instruction |= inst.operands[2].reg; |
| inst.instruction |= inst.operands[3].imm << 21; |
| } |
| |
| static void |
| do_iwmmxt_wmov (void) |
| { |
| /* WMOV rD, rN is an alias for WOR rD, rN, rN. */ |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg << 16; |
| inst.instruction |= inst.operands[1].reg; |
| } |
| |
| static void |
| do_iwmmxt_wldstbh (void) |
| { |
| int reloc; |
| inst.instruction |= inst.operands[0].reg << 12; |
| if (thumb_mode) |
| reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2; |
| else |
| reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2; |
| encode_arm_cp_address (1, true, false, reloc); |
| } |
| |
| static void |
| do_iwmmxt_wldstw (void) |
| { |
| /* RIWR_RIWC clears .isreg for a control register. */ |
| if (!inst.operands[0].isreg) |
| { |
| constraint (inst.cond != COND_ALWAYS, BAD_COND); |
| inst.instruction |= 0xf0000000; |
| } |
| |
| inst.instruction |= inst.operands[0].reg << 12; |
| encode_arm_cp_address (1, true, true, 0); |
| } |
| |
/* iWMMXt doubleword load/store.  On iWMMXt2 a register-offset address
   gets a special unconditional encoding; otherwise fall back to the
   generic coprocessor address encoding.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the fields reused below, then rebuild the instruction
	 as the 0xf-prefixed (unconditional) register-offset form.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      /* Constant part of the offset goes to bits 4+, the register
	 index to the low bits.  */
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, true, false, 0);
}
| |
| static void |
| do_iwmmxt_wshufh (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg << 16; |
| inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16); |
| inst.instruction |= (inst.operands[2].imm & 0x0f); |
| } |
| |
| static void |
| do_iwmmxt_wzero (void) |
| { |
| /* WZERO reg is an alias for WANDN reg, reg, reg. */ |
| inst.instruction |= inst.operands[0].reg; |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[0].reg << 16; |
| } |
| |
/* Operand 2 is either a wR register (plain three-register encoding)
   or, on iWMMXt2 only, a 5-bit immediate.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* A count of zero is not directly encodable; rewrite it as an
	   equivalent full-width rotate (or, for the doubleword forms,
	   a register copy).  Bits 20-23 identify the operation.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Unconditional encoding; immediate split across bit 8 and bits 0-3.  */
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
| |
| |
| /* XScale instructions. Also sorted arithmetic before move. */ |
| |
| /* Xscale multiply-accumulate (argument parse) |
| MIAcc acc0,Rm,Rs |
| MIAPHcc acc0,Rm,Rs |
| MIAxycc acc0,Rm,Rs. */ |
| |
| static void |
| do_xsc_mia (void) |
| { |
| inst.instruction |= inst.operands[1].reg; |
| inst.instruction |= inst.operands[2].reg << 12; |
| } |
| |
| /* Xscale move-accumulator-register (argument parse) |
| |
| MARcc acc0,RdLo,RdHi. */ |
| |
| static void |
| do_xsc_mar (void) |
| { |
| inst.instruction |= inst.operands[1].reg << 12; |
| inst.instruction |= inst.operands[2].reg << 16; |
| } |
| |
| /* Xscale move-register-accumulator (argument parse) |
| |
| MRAcc RdLo,RdHi,acc0. */ |
| |
| static void |
| do_xsc_mra (void) |
| { |
| constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP); |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg << 16; |
| } |
| |
| /* Encoding functions relevant only to Thumb. */ |
| |
| /* inst.operands[i] is a shifted-register operand; encode |
| it into inst.instruction in the format used by Thumb32. */ |
| |
| static void |
| encode_thumb32_shifted_operand (int i) |
| { |
| unsigned int value = inst.relocs[0].exp.X_add_number; |
| unsigned int shift = inst.operands[i].shift_kind; |
| |
| constraint (inst.operands[i].immisreg, |
| _("shift by register not allowed in thumb mode")); |
| inst.instruction |= inst.operands[i].reg; |
| if (shift == SHIFT_RRX) |
| inst.instruction |= SHIFT_ROR << 4; |
| else |
| { |
| constraint (inst.relocs[0].exp.X_op != O_constant, |
| _("expression too complex")); |
| |
| constraint (value > 32 |
| || (value == 32 && (shift == SHIFT_LSL |
| || shift == SHIFT_ROR)), |
| _("shift expression is too large")); |
| |
| if (value == 0) |
| shift = SHIFT_LSL; |
| else if (value == 32) |
| value = 0; |
| |
| inst.instruction |= shift << 4; |
| inst.instruction |= (value & 0x1c) << 10; |
| inst.instruction |= (value & 0x03) << 6; |
| } |
| } |
| |
| |
| /* inst.operands[i] was set up by parse_address. Encode it into a |
| Thumb32 format load or store instruction. Reject forms that cannot |
| be used with such instructions. If is_t is true, reject forms that |
| cannot be used with a T instruction; if is_d is true, reject forms |
| that cannot be used with a D instruction. If it is a store insn, |
| reject PC in Rn. */ |
| |
static void
encode_thumb32_addr_mode (int i, bool is_t, bool is_d)
{
  /* PC as base register needs extra checks below.  */
  const bool is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  /* Only shift amounts 0-3 are encodable (bits 4-5).  */
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed form, with or without writeback.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* D-form: set bit 24; writeback sets bit 21.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  /* Non-D form: set bits 10-11; writeback sets bit 8.  */
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
| |
| /* Table of Thumb instructions which exist in 16- and/or 32-bit |
| encodings (the latter only in post-V6T2 cores). The index is the |
| value used in the insns table below. When there is more than one |
| possible 16-bit encoding for the instruction, this table always |
| holds variant (1). |
| Also contains several pseudo-instructions used during relaxation. */ |
/* Each entry is X(mnemonic suffix, 16-bit opcode, 32-bit opcode).  */
#define T16_32_TAB \
X(_adc, 4140, eb400000), \
X(_adcs, 4140, eb500000), \
X(_add, 1c00, eb000000), \
X(_adds, 1c00, eb100000), \
X(_addi, 0000, f1000000), \
X(_addis, 0000, f1100000), \
X(_add_pc,000f, f20f0000), \
X(_add_sp,000d, f10d0000), \
X(_adr, 000f, f20f0000), \
X(_and, 4000, ea000000), \
X(_ands, 4000, ea100000), \
X(_asr, 1000, fa40f000), \
X(_asrs, 1000, fa50f000), \
X(_aut, 0000, f3af802d), \
X(_autg, 0000, fb500f00), \
X(_b, e000, f000b000), \
X(_bcond, d000, f0008000), \
X(_bf, 0000, f040e001), \
X(_bfcsel,0000, f000e001), \
X(_bfx, 0000, f060e001), \
X(_bfl, 0000, f000c001), \
X(_bflx, 0000, f070e001), \
X(_bic, 4380, ea200000), \
X(_bics, 4380, ea300000), \
X(_bxaut, 0000, fb500f10), \
X(_cinc, 0000, ea509000), \
X(_cinv, 0000, ea50a000), \
X(_cmn, 42c0, eb100f00), \
X(_cmp, 2800, ebb00f00), \
X(_cneg, 0000, ea50b000), \
X(_cpsie, b660, f3af8400), \
X(_cpsid, b670, f3af8600), \
X(_cpy, 4600, ea4f0000), \
X(_csel, 0000, ea508000), \
X(_cset, 0000, ea5f900f), \
X(_csetm, 0000, ea5fa00f), \
X(_csinc, 0000, ea509000), \
X(_csinv, 0000, ea50a000), \
X(_csneg, 0000, ea50b000), \
X(_dec_sp,80dd, f1ad0d00), \
X(_dls, 0000, f040e001), \
X(_dlstp, 0000, f000e001), \
X(_eor, 4040, ea800000), \
X(_eors, 4040, ea900000), \
X(_inc_sp,00dd, f10d0d00), \
X(_lctp, 0000, f00fe001), \
X(_ldmia, c800, e8900000), \
X(_ldr, 6800, f8500000), \
X(_ldrb, 7800, f8100000), \
X(_ldrh, 8800, f8300000), \
X(_ldrsb, 5600, f9100000), \
X(_ldrsh, 5e00, f9300000), \
X(_ldr_pc,4800, f85f0000), \
X(_ldr_pc2,4800, f85f0000), \
X(_ldr_sp,9800, f85d0000), \
X(_le, 0000, f00fc001), \
X(_letp, 0000, f01fc001), \
X(_lsl, 0000, fa00f000), \
X(_lsls, 0000, fa10f000), \
X(_lsr, 0800, fa20f000), \
X(_lsrs, 0800, fa30f000), \
X(_mov, 2000, ea4f0000), \
X(_movs, 2000, ea5f0000), \
X(_mul, 4340, fb00f000), \
X(_muls, 4340, ffffffff), /* no 32b muls */ \
X(_mvn, 43c0, ea6f0000), \
X(_mvns, 43c0, ea7f0000), \
X(_neg, 4240, f1c00000), /* rsb #0 */ \
X(_negs, 4240, f1d00000), /* rsbs #0 */ \
X(_orr, 4300, ea400000), \
X(_orrs, 4300, ea500000), \
X(_pac, 0000, f3af801d), \
X(_pacbti, 0000, f3af800d), \
X(_pacg, 0000, fb60f000), \
X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
X(_push, b400, e92d0000), /* stmdb sp!,... */ \
X(_rev, ba00, fa90f080), \
X(_rev16, ba40, fa90f090), \
X(_revsh, bac0, fa90f0b0), \
X(_ror, 41c0, fa60f000), \
X(_rors, 41c0, fa70f000), \
X(_sbc, 4180, eb600000), \
X(_sbcs, 4180, eb700000), \
X(_stmia, c000, e8800000), \
X(_str, 6000, f8400000), \
X(_strb, 7000, f8000000), \
X(_strh, 8000, f8200000), \
X(_str_sp,9000, f84d0000), \
X(_sub, 1e00, eba00000), \
X(_subs, 1e00, ebb00000), \
X(_subi, 8000, f1a00000), \
X(_subis, 8000, f1b00000), \
X(_sxtb, b240, fa4ff080), \
X(_sxth, b200, fa0ff080), \
X(_tst, 4200, ea100f00), \
X(_uxtb, b2c0, fa5ff080), \
X(_uxth, b280, fa1ff080), \
X(_nop, bf00, f3af8000), \
X(_yield, bf10, f3af8001), \
X(_wfe, bf20, f3af8002), \
X(_wfi, bf30, f3af8003), \
X(_wls, 0000, f040c001), \
X(_wlstp, 0000, f000c001), \
X(_sev, bf40, f3af8004), \
X(_sevl, bf50, f3af8005), \
X(_udf, de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: the mnemonic column becomes the T_MNEM_* enum.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the middle column becomes the 16-bit opcode table,
   indexed by T_MNEM code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the last column becomes the 32-bit opcode table.
   Bit 20 of the 32-bit opcode is the S (flag-setting) bit.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
| |
| /* Thumb instruction encoders, in alphabetical order. */ |
| |
| /* ADDW or SUBW. */ |
| |
| static void |
| do_t_add_sub_w (void) |
| { |
| int Rd, Rn; |
| |
| Rd = inst.operands[0].reg; |
| Rn = inst.operands[1].reg; |
| |
| /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this |
| is the SP-{plus,minus}-immediate form of the instruction. */ |
| if (Rn == REG_SP) |
| constraint (Rd == REG_PC, BAD_PC); |
| else |
| reject_bad_reg (Rd); |
| |
| inst.instruction |= (Rn << 16) | (Rd << 8); |
| inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12; |
| } |
| |
| /* Parse an add or subtract instruction. We get here with inst.instruction |
| equaling any of THUMB_OPCODE_add, adds, sub, or subs. */ |
| |
| static void |
| do_t_add_sub (void) |
| { |
| int Rd, Rs, Rn; |
| |
| Rd = inst.operands[0].reg; |
| Rs = (inst.operands[1].present |
| ? inst.operands[1].reg /* Rd, Rs, foo */ |
| : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */ |
| |
| if (Rd == REG_PC) |
| set_pred_insn_type_last (); |
| |
| if (unified_syntax) |
| { |
| bool flags; |
| bool narrow; |
| int opcode; |
| |
| flags = (inst.instruction == T_MNEM_adds |
| || inst.instruction == T_MNEM_subs); |
| if (flags) |
| narrow = !in_pred_block (); |
| else |
| narrow = in_pred_block (); |
| if (!inst.operands[2].isreg) |
| { |
| int add; |
| |
| if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) |
| constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP); |
| |
| add = (inst.instruction == T_MNEM_add |
| || inst.instruction == T_MNEM_adds); |
| opcode = 0; |
| if (inst.size_req != 4) |
| { |
| /* Attempt to use a narrow opcode, with relaxation if |
| appropriate. */ |
| if (Rd == REG_SP && Rs == REG_SP && !flags) |
| opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp; |
| else if (Rd <= 7 && Rs == REG_SP && add && !flags) |
| opcode = T_MNEM_add_sp; |
| else if (Rd <= 7 && Rs == REG_PC && add && !flags) |
| opcode = T_MNEM_add_pc; |
| else if (Rd <= 7 && Rs <= 7 && narrow) |
| { |
| if (flags) |
| opcode = add ? T_MNEM_addis : T_MNEM_subis; |
| else |
| opcode = add ? T_MNEM_addi : T_MNEM_subi; |
| } |
| if (opcode) |
| { |
| inst.instruction = THUMB_OP16(opcode); |
| inst.instruction |= (Rd << 4) | Rs; |
| if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC |
| || (inst.relocs[0].type |
| > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)) |
| { |
| if (inst.size_req == 2) |
| inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD; |
| else |
| inst.relax = opcode; |
| } |
| } |
| else |
| constraint (inst.size_req == 2, _("cannot honor width suffix")); |
| } |
| if (inst.size_req == 4 |
| || (inst.size_req != 2 && !opcode)) |
| { |
| constraint ((inst.relocs[0].type |
| >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC) |
| && (inst.relocs[0].type |
| <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) , |
| THUMB1_RELOC_ONLY); |
| if (Rd == REG_PC) |
| { |
| constraint (add, BAD_PC); |
| constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs, |
| _("only SUBS PC, LR, #const allowed")); |
| constraint (inst.relocs[0].exp.X_op != O_constant, |
| _("expression too complex")); |
| constraint (inst.relocs[0].exp.X_add_number < 0 |
| || inst.relocs[0].exp.X_add_number > 0xff, |
| _("immediate value out of range")); |
| inst.instruction = T2_SUBS_PC_LR |
| | inst.relocs[0].exp.X_add_number; |
| inst.relocs[0].type = BFD_RELOC_UNUSED; |
| return; |
| } |
| else if (Rs == REG_PC) |
| { |
| /* Always use addw/subw. */ |
| inst.instruction = add ? 0xf20f0000 : 0xf2af0000; |
| inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12; |
| } |
| else |
| { |
| inst.instruction = THUMB_OP32 (inst.instruction); |
| inst.instruction = (inst.instruction & 0xe1ffffff) |
| | 0x10000000; |
| if (flags) |
| inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE; |
| else |
| inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM; |
| } |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rs << 16; |
| } |
| } |
| else |
| { |
| unsigned int value = inst.relocs[0].exp.X_add_number; |
| unsigned int shift = inst.operands[2].shift_kind; |
| |
| Rn = inst.operands[2].reg; |
| /* See if we can do this with a 16-bit instruction. */ |
| if (!inst.operands[2].shifted && inst.size_req != 4) |
| { |
| if (Rd > 7 || Rs > 7 || Rn > 7) |
| narrow = false; |
| |
| if (narrow) |
| { |
| inst.instruction = ((inst.instruction == T_MNEM_adds |
| || inst.instruction == T_MNEM_add) |
| ? T_OPCODE_ADD_R3 |
| : T_OPCODE_SUB_R3); |
| inst.instruction |= Rd | (Rs << 3) | (Rn << 6); |
| return; |
| } |
| |
| if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn)) |
| { |
| /* Thumb-1 cores (except v6-M) require at least one high |
| register in a narrow non flag setting add. */ |
| if (Rd > 7 || Rn > 7 |
| || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2) |
| || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr)) |
| { |
| if (Rd == Rn) |
| { |
| Rn = Rs; |
| Rs = Rd; |
| } |
| inst.instruction = T_OPCODE_ADD_HI; |
| inst.instruction |= (Rd & 8) << 4; |
| inst.instruction |= (Rd & 7); |
| inst.instruction |= Rn << 3; |
| return; |
| } |
| } |
| } |
| |
| constraint (Rd == REG_PC, BAD_PC); |
| if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) |
| constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP); |
| constraint (Rs == REG_PC, BAD_PC); |
| reject_bad_reg (Rn); |
| |
| /* If we get here, it can't be done in 16 bits. */ |
| constraint (inst.operands[2].shifted && inst.operands[2].immisreg, |
| _("shift must be constant")); |
| inst.instruction = THUMB_OP32 (inst.instruction); |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rs << 16; |
| constraint (Rd == REG_SP && Rs == REG_SP && value > 3, |
| _("shift value over 3 not allowed in thumb mode")); |
| constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL, |
| _("only LSL shift allowed in thumb mode")); |
| encode_thumb32_shifted_operand (2); |
| } |
| } |
| else |
| { |
| constraint (inst.instruction == T_MNEM_adds |
| || inst.instruction == T_MNEM_subs, |
| BAD_THUMB32); |
| |
| if (!inst.operands[2].isreg) /* Rd, Rs, #imm */ |
| { |
| constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP)) |
| || (Rs > 7 && Rs != REG_SP && Rs != REG_PC), |
| BAD_HIREG); |
| |
| inst.instruction = (inst.instruction == T_MNEM_add |
| ? 0x0000 : 0x8000); |
| inst.instruction |= (Rd << 4) | Rs; |
| inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD; |
| return; |
| } |
| |
| Rn = inst.operands[2].reg; |
| constraint (inst.operands[2].shifted, _("unshifted register required")); |
| |
| /* We now have Rd, Rs, and Rn set to registers. */ |
| if (Rd > 7 || Rs > 7 || Rn > 7) |
| { |
| /* Can't do this for SUB. */ |
| constraint (inst.instruction == T_MNEM_sub, BAD_HIREG); |
| inst.instruction = T_OPCODE_ADD_HI; |
| inst.instruction |= (Rd & 8) << 4; |
| inst.instruction |= (Rd & 7); |
| if (Rs == Rd) |
| inst.instruction |= Rn << 3; |
| else if (Rn == Rd) |
| inst.instruction |= Rs << 3; |
| else |
| constraint (1, _("dest must overlap one source register")); |
| } |
| else |
| { |
| inst.instruction = (inst.instruction == T_MNEM_add |
| ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3); |
| inst.instruction |= Rd | (Rs << 3) | (Rn << 6); |
| } |
| } |
| } |
| |
| static void |
| do_t_adr (void) |
| { |
| unsigned Rd; |
| |
| Rd = inst.operands[0].reg; |
| reject_bad_reg (Rd); |
| |
| if (unified_syntax && inst.size_req == 0 && Rd <= 7) |
| { |
| /* Defer to section relaxation. */ |
| inst.relax = inst.instruction; |
| inst.instruction = THUMB_OP16 (inst.instruction); |
| inst.instruction |= Rd << 4; |
| } |
| else if (unified_syntax && inst.size_req != 2) |
| { |
| /* Generate a 32-bit opcode. */ |
| inst.instruction = THUMB_OP32 (inst.instruction); |
| inst.instruction |= Rd << 8; |
| inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12; |
| inst.relocs[0].pc_rel = 1; |
| } |
| else |
| { |
| /* Generate a 16-bit opcode. */ |
| inst.instruction = THUMB_OP16 (inst.instruction); |
| inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD; |
| inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust. */ |
| inst.relocs[0].pc_rel = 1; |
| inst.instruction |= Rd << 4; |
| } |
| |
| if (inst.relocs[0].exp.X_op == O_symbol |
| && inst.relocs[0].exp.X_add_symbol != NULL |
| && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol) |
| && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol)) |
| inst.relocs[0].exp.X_add_number += 1; |
| } |
| |
| /* Arithmetic instructions for which there is just one 16-bit |
| instruction encoding, and it allows only two low registers. |
| For maximal compatibility with ARM syntax, we allow three register |
| operands even when Thumb-32 instructions are not available, as long |
| as the first two are identical. For instance, both "sbc r0,r1" and |
| "sbc r0,r0,r1" are allowed. */ |
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  /* Destination, first source (defaults to Rd when only two operands
     were supplied), second source.  */
  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg /* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  /* SP and PC are never legal here.  */
  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Select the modified-immediate form of the T32 opcode.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bool narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     16-bit encoding sets the flags iff it executes outside an
	     IT block, so it is only usable when that agrees with the
	     mnemonic.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = false;
	  if (inst.operands[2].shifted)
	    narrow = false;
	  if (inst.size_req == 4)
	    narrow = false;

	  /* The 16-bit form is two-operand, so it also needs Rd == Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
| |
| /* Similarly, but for instructions where the arithmetic operation is |
| commutative, so we can allow either of them to be different from |
| the destination operand in a 16-bit instruction. For instance, all |
| three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are |
| accepted. */ |
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  /* Destination, first source (defaults to Rd when only two operands
     were supplied), second source.  */
  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg /* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  /* SP and PC are never legal here.  */
  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Select the modified-immediate form of the T32 opcode.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bool narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     16-bit encoding sets the flags iff it executes outside an
	     IT block, so it is only usable when that agrees with the
	     mnemonic.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = false;
	  if (inst.operands[2].shifted)
	    narrow = false;
	  if (inst.size_req == 4)
	    narrow = false;

	  if (narrow)
	    {
	      /* The 16-bit form is two-operand; since the operation is
		 commutative it can be used when either source matches
		 the destination.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
| |
| static void |
| do_t_bfc (void) |
| { |
| unsigned Rd; |
| unsigned int msb = inst.operands[1].imm + inst.operands[2].imm; |
| constraint (msb > 32, _("bit-field extends past end of register")); |
| /* The instruction encoding stores the LSB and MSB, |
| not the LSB and width. */ |
| Rd = inst.operands[0].reg; |
| reject_bad_reg (Rd); |
| inst.instruction |= Rd << 8; |
| inst.instruction |= (inst.operands[1].imm & 0x1c) << 10; |
| inst.instruction |= (inst.operands[1].imm & 0x03) << 6; |
| inst.instruction |= msb - 1; |
| } |
| |
| static void |
| do_t_bfi (void) |
| { |
| int Rd, Rn; |
| unsigned int msb; |
| |
| Rd = inst.operands[0].reg; |
| reject_bad_reg (Rd); |
| |
| /* #0 in second position is alternative syntax for bfc, which is |
| the same instruction but with REG_PC in the Rm field. */ |
| if (!inst.operands[1].isreg) |
| Rn = REG_PC; |
| else |
| { |
| Rn = inst.operands[1].reg; |
| reject_bad_reg (Rn); |
| } |
| |
| msb = inst.operands[2].imm + inst.operands[3].imm; |
| constraint (msb > 32, _("bit-field extends past end of register")); |
| /* The instruction encoding stores the LSB and MSB, |
| not the LSB and width. */ |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rn << 16; |
| inst.instruction |= (inst.operands[2].imm & 0x1c) << 10; |
| inst.instruction |= (inst.operands[2].imm & 0x03) << 6; |
| inst.instruction |= msb - 1; |
| } |
| |
| static void |
| do_t_bfx (void) |
| { |
| unsigned Rd, Rn; |
| |
| Rd = inst.operands[0].reg; |
| Rn = inst.operands[1].reg; |
| |
| reject_bad_reg (Rd); |
| reject_bad_reg (Rn); |
| |
| constraint (inst.operands[2].imm + inst.operands[3].imm > 32, |
| _("bit-field extends past end of register")); |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rn << 16; |
| inst.instruction |= (inst.operands[2].imm & 0x1c) << 10; |
| inst.instruction |= (inst.operands[2].imm & 0x03) << 6; |
| inst.instruction |= inst.operands[3].imm - 1; |
| } |
| |
| /* ARM V5 Thumb BLX (argument parse) |
| BLX <target_addr> which is BLX(1) |
| BLX <Rm> which is BLX(2) |
| Unfortunately, there are two different opcodes for this mnemonic. |
| So, the insns[].value is not used, and the code here zaps values |
| into inst.instruction. |
| |
| ??? How to take advantage of the additional two bits of displacement |
| available in Thumb32 mode? Need new relocation? */ |
| |
| static void |
| do_t_blx (void) |
| { |
| set_pred_insn_type_last (); |
| |
| if (inst.operands[0].isreg) |
| { |
| constraint (inst.operands[0].reg == REG_PC, BAD_PC); |
| /* We have a register, so this is BLX(2). */ |
| inst.instruction |= inst.operands[0].reg << 3; |
| } |
| else |
| { |
| /* No register. This must be BLX(1). */ |
| inst.instruction = 0xf000e800; |
| encode_branch (BFD_RELOC_THUMB_PCREL_BLX); |
| } |
| } |
| |
| static void |
| do_t_branch (void) |
| { |
| int opcode; |
| int cond; |
| bfd_reloc_code_real_type reloc; |
| |
| cond = inst.cond; |
| set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); |
| |
| if (in_pred_block ()) |
| { |
| /* Conditional branches inside IT blocks are encoded as unconditional |
| branches. */ |
| cond = COND_ALWAYS; |
| } |
| else |
| cond = inst.cond; |
| |
| if (cond != COND_ALWAYS) |
| opcode = T_MNEM_bcond; |
| else |
| opcode = inst.instruction; |
| |
| if (unified_syntax |
| && (inst.size_req == 4 |
| || (inst.size_req != 2 |
| && (inst.operands[0].hasreloc |
| || inst.relocs[0].exp.X_op == O_constant)))) |
| { |
| inst.instruction = THUMB_OP32(opcode); |
| if (cond == COND_ALWAYS) |
| reloc = BFD_RELOC_THUMB_PCREL_BRANCH25; |
| else |
| { |
| constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2), |
| _("selected architecture does not support " |
| "wide conditional branch instruction")); |
| |
| gas_assert (cond != 0xF); |
| inst.instruction |= cond << 22; |
| reloc = BFD_RELOC_THUMB_PCREL_BRANCH20; |
| } |
| } |
| else |
| { |
| inst.instruction = THUMB_OP16(opcode); |
| if (cond == COND_ALWAYS) |
| reloc = BFD_RELOC_THUMB_PCREL_BRANCH12; |
| else |
| { |
| inst.instruction |= cond << 8; |
| reloc = BFD_RELOC_THUMB_PCREL_BRANCH9; |
| } |
| /* Allow section relaxation. */ |
| if (unified_syntax && inst.size_req != 2) |
| inst.relax = opcode; |
| } |
| inst.relocs[0].type = reloc; |
| inst.relocs[0].pc_rel = 1; |
| } |
| |
| /* Actually do the work for Thumb state bkpt and hlt. The only difference |
| between the two is the maximum immediate allowed - which is passed in |
| RANGE. */ |
static void
do_t_bkpt_hlt1 (int range)
{
  /* These instructions execute regardless of any condition, so a
     condition suffix is rejected rather than silently ignored.  */
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  /* The immediate operand is optional; without one the opcode is
     emitted unmodified (immediate zero).  */
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > range,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }

  set_pred_insn_type (NEUTRAL_IT_INSN);
}
| |
static void
do_t_hlt (void)
{
  /* HLT takes a 6-bit immediate (0-63).  */
  do_t_bkpt_hlt1 (63);
}
| |
static void
do_t_bkpt (void)
{
  /* BKPT takes an 8-bit immediate (0-255).  */
  do_t_bkpt_hlt1 (255);
}
| |
/* Thumb BL (argument parse): a long branch with a 23-bit-class
   PC-relative relocation.  */
static void
do_t_branch23 (void)
{
  set_pred_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
    inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	 inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_symbol
      = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
| |
/* Thumb BX: branch (and exchange state) to the address in a register.  */
static void
do_t_bx (void)
{
  set_pred_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
| |
| static void |
| do_t_bxj (void) |
| { |
| int Rm; |
| |
| set_pred_insn_type_last (); |
| Rm = inst.operands[0].reg; |
| reject_bad_reg (Rm); |
| inst.instruction |= Rm << 16; |
| } |
| |
| static void |
| do_t_clz (void) |
| { |
| unsigned Rd; |
| unsigned Rm; |
| |
| Rd = inst.operands[0].reg; |
| Rm = inst.operands[1].reg; |
| |
| reject_bad_reg (Rd); |
| reject_bad_reg (Rm); |
| |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rm << 16; |
| inst.instruction |= Rm; |
| } |
| |
/* For the Armv8.1-M conditional instructions (CSEL/CSINC/CSINV/CSNEG
   and their aliases).  */
static void
do_t_cond (void)
{
  unsigned Rd, Rn, Rm;
  signed int cond;

  constraint (inst.cond != COND_ALWAYS, BAD_COND);

  Rd = inst.operands[0].reg;
  switch (inst.instruction)
    {
      /* Four-operand base forms: Rd, Rn, Rm, cond.  */
    case T_MNEM_csinc:
    case T_MNEM_csinv:
    case T_MNEM_csneg:
    case T_MNEM_csel:
      Rn = inst.operands[1].reg;
      Rm = inst.operands[2].reg;
      cond = inst.operands[3].imm;
      constraint (Rn == REG_SP, BAD_SP);
      constraint (Rm == REG_SP, BAD_SP);
      break;

      /* Three-operand aliases: Rd, Rn, cond — encoded with Rm = Rn
	 and the inverted condition.  */
    case T_MNEM_cinc:
    case T_MNEM_cinv:
    case T_MNEM_cneg:
      Rn = inst.operands[1].reg;
      cond = inst.operands[2].imm;
      /* Invert the last bit to invert the cond.  */
      cond = TOGGLE_BIT (cond, 0);
      constraint (Rn == REG_SP, BAD_SP);
      Rm = Rn;
      break;

      /* Two-operand aliases: Rd, cond — encoded with Rn = Rm = PC
	 and the inverted condition.  */
    case T_MNEM_csetm:
    case T_MNEM_cset:
      cond = inst.operands[1].imm;
      /* Invert the last bit to invert the cond.  */
      cond = TOGGLE_BIT (cond, 0);
      Rn = REG_PC;
      Rm = REG_PC;
      break;

    default: abort ();
    }

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= cond << 4;
}
| |
static void
do_t_csdb (void)
{
  /* No operands and no encoding work needed; just record that this
     barrier must not appear inside an IT block.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);
}
| |
static void
do_t_cps (void)
{
  /* The immediate operand is OR'd directly into the opcode.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= inst.operands[0].imm;
}
| |
/* Thumb CPS with interrupt-flag arguments (argument parse).  Selects
   between the 32-bit encoding (required for the two-argument form)
   and the 16-bit encoding.  */
static void
do_t_cpsi (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit form: carry the imod field over from the 16-bit
	 template, then add the flags and optional mode.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
| |
| /* THUMB CPY instruction (argument parse). */ |
| |
| static void |
| do_t_cpy (void) |
| { |
| if (inst.size_req == 4) |
| { |
| inst.instruction = THUMB_OP32 (T_MNEM_mov); |
| inst.instruction |= inst.operands[0].reg << 8; |
| inst.instruction |= inst.operands[1].reg; |
| } |
| else |
| { |
| inst.instruction |= (inst.operands[0].reg & 0x8) << 4; |
| inst.instruction |= (inst.operands[0].reg & 0x7); |
| inst.instruction |= inst.operands[1].reg << 3; |
| } |
| } |
| |
static void
do_t_cbz (void)
{
  /* CBZ/CBNZ take a low register and a short forward branch target;
     they are not permitted inside an IT block.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}
| |
static void
do_t_dbg (void)
{
  /* DBG hint: the option immediate is OR'd into the opcode.  */
  inst.instruction |= inst.operands[0].imm;
}
| |
| static void |
| do_t_div (void) |
| { |
| unsigned Rd, Rn, Rm; |
| |
| Rd = inst.operands[0].reg; |
| Rn = (inst.operands[1].present |
| ? inst.operands[1].reg : Rd); |
| Rm = inst.operands[2].reg; |
| |
| reject_bad_reg (Rd); |
| reject_bad_reg (Rn); |
| reject_bad_reg (Rm); |
| |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rn << 16; |
| inst.instruction |= Rm; |
| } |
| |
| static void |
| do_t_hint (void) |
| { |
| if (unified_syntax && inst.size_req == 4) |
| inst.instruction = THUMB_OP32 (inst.instruction); |
| else |
| inst.instruction = THUMB_OP16 (inst.instruction); |
| } |
| |
/* Thumb IT (if-then) instruction.  Records the predication state for
   the following block and adjusts the stored mask when the base
   condition is the inverted member of its condition pair.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_pred_insn_type (IT_INSN);
  now_pred.mask = (inst.instruction & 0xf) | 0x10;
  now_pred.cc = cond;
  now_pred.warn_deprecated = false;
  now_pred.type = SCALAR_PRED;

  /* If the condition is a negative condition, invert the mask.
     The low set bit of the mask marks the block length; the bits above
     it are the then/else selectors, which are flipped here.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_pred.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  /* Flip the single selector bit.  */
	  mask ^= 0x8;
	  now_pred.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  /* Flip both selector bits.  */
	  mask ^= 0xC;
	  now_pred.block_length = 3;
	}
      else
	{
	  /* Flip all three selector bits.  */
	  mask ^= 0xE;
	  now_pred.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
| |
/* Helper function used for both push/pop and ldm/stm.  DO_IO is true
   for real load/store multiples; when it is false only MASK is encoded
   and the caller passes BASE == -1.  WRITEBACK selects base-register
   writeback.  Errors are reported by setting inst.error.  */
static void
encode_thumb2_multi (bool do_io, int base, unsigned mask,
		     bool writeback)
{
  bool load, store;

  gas_assert (base != -1 || !do_io);
  /* Bit 20 of the T2 encoding distinguishes load from store.  */
  load = do_io && ((inst.instruction & (1 << 20)) != 0);
  store = do_io && !load;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if (do_io && (mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch; it must end any IT block.  */
	    set_pred_insn_type_last ();
	}
    }
  else if (store)
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if (do_io && ((mask & (mask - 1)) == 0))
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Move the single register into the Rt field (bits 12-15).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  if (do_io)
    inst.instruction |= base << 16;
}
| |
/* Thumb LDM/STM (argument parse).  In unified syntax, prefer a 16-bit
   encoding where one exists (including degrading single-register
   transfers to ldr/str and SP forms to push/pop), falling back to the
   32-bit T2 form via encode_thumb2_multi.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bool narrow;
      unsigned mask;

      narrow = false;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  /* Low base register: the narrow LDMIA/STMIA forms may apply.  */
	  if (inst.operands[0].reg <= 7)
	    {
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = true;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1.  Instruction is stmia, but without writeback.
		     2.  lmdia without writeback, but with Rn not in
			 reglist.
		     3.  ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = true;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP base: convert to push/pop, or to the SP-relative
		 str/ldr forms for a single register.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = true;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = true;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_multi (true /* do_io */, inst.operands[0].reg,
			       inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
| |
static void
do_t_ldrex (void)
{
  /* Only the plain immediate-offset form [Rn {, #imm}] is legal.  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
| |
static void
do_t_ldrexd (void)
{
  /* If Rt2 was omitted it defaults to Rt + 1, which would wrap for LR.  */
  if (!inst.operands[1].present)
    {
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed as first register "
		    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}
| |
| static void |
| do_t_ldst (void) |
| { |
| unsigned long opcode; |
| int Rn; |
| |
| if (inst.operands[0].isreg |
| && !inst.operands[0].preind |
| && inst.operands[0].reg == REG_PC) |
| set_pred_insn_type_last (); |
| |
| opcode = inst.instruction; |
| if (unified_syntax) |
| { |
| if (!inst.operands[1].isreg) |
| { |
| if (opcode <= 0xffff) |
| inst.instruction = THUMB_OP32 (opcode); |
| if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/false)) |
| return; |
| } |
| if (inst.operands[1].isreg |
| && !inst.operands[1].writeback |
| && !inst.operands[1].shifted && !inst.operands[1].postind |
| && !inst.operands[1].negative && inst.operands[0].reg <= 7 |
| && opcode <= 0xffff |
| && inst.size_req != 4) |
| { |
| /* Insn may have a 16-bit form. */ |
| Rn = inst.operands[1].reg; |
| if (inst.operands[1].immisreg) |
| { |
| inst.instruction = THUMB_OP16 (opcode); |
| /* [Rn, Rik] */ |
| if (Rn <= 7 && inst.operands[1].imm <= 7) |
| goto op16; |
| else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str) |
| reject_bad_reg (inst.operands[1].imm); |
| } |
| else if ((Rn <= 7 && opcode != T_MNEM_ldrsh |
| && opcode != T_MNEM_ldrsb) |
| || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr) |
| || (Rn == REG_SP && opcode == T_MNEM_str)) |
| { |
| /* [Rn, #const] */ |
| if (Rn > 7) |
| { |
| if (Rn == REG_PC) |
| { |
| if (inst.relocs[0].pc_rel) |
| opcode = T_MNEM_ldr_pc2; |
| else |
| opcode = T_MNEM_ldr_pc; |
| } |
| else |
| { |
| if (opcode == T_MNEM_ldr) |
| opcode = T_MNEM_ldr_sp; |
| else |
| opcode = T_MNEM_str_sp; |
| } |
| inst.instruction = inst.operands[0].reg << 8; |
| } |
| else |
| { |
| inst.instruction = inst.operands[0].reg; |
| inst.instruction |= inst.operands[1].reg << 3; |
| } |
| inst.instruction |= THUMB_OP16 (opcode); |
| if (inst.size_req == 2) |
| inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET; |
| else |
| inst.relax = opcode; |
| return; |
| } |
| } |
| /* Definitely a 32-bit variant. */ |
| |
| /* Warning for Erratum 752419. */ |
| if (opcode == T_MNEM_ldr |
| && inst.operands[0].reg == REG_SP |
| && inst.operands[1].writeback == 1 |
| && !inst.operands[1].immisreg) |
| { |
| if (no_cpu_selected () |
| || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7) |
| && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a) |
| && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r))) |
| as_warn (_("This instruction may be unpredictable " |
| "if executed on M-profile cores " |
| "with interrupts enabled.")); |
| } |
| |
| /* Do some validations regarding addressing modes. */ |
| if (inst.operands[1].immisreg) |
| reject_bad_reg (inst.operands[1].imm); |
| |
| constraint (inst.operands[1].writeback == 1 |
| && inst.operands[0].reg == inst.operands[1].reg, |
| BAD_OVERLAP); |
| |
| inst.instruction = THUMB_OP32 (opcode); |
| inst.instruction |= inst.operands[0].reg << 12; |
| encode_thumb32_addr_mode (1, /*is_t=*/false, /*is_d=*/false); |
| check_ldr_r15_aligned (); |
| return; |
| } |
| |
| constraint (inst.operands[0].reg > 7, BAD_HIREG); |
| |
| if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb) |
| { |
| /* Only [Rn,Rm] is acceptable. */ |
| constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG); |
| constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg |
| || inst.operands[1].postind || inst.operands[1].shifted |
| || inst.operands[1].negative, |
| _("Thumb does not support this addressing mode")); |
| inst.instruction = THUMB_OP16 (inst.instruction); |
| goto op16; |
| } |
| |
| inst.instruction = THUMB_OP16 (inst.instruction); |
| if (!inst.operands[1].isreg) |
| if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/false)) |
| return; |
| |
| constraint (!inst.operands[1].preind |
| || inst.operands[1].shifted |
| || inst.operands[1].writeback, |
| _("Thumb does not support this addressing mode")); |
| if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP) |
| { |
| constraint (inst.instruction & 0x0600, |
| _("byte or halfword not valid for base register")); |
| constraint (inst.operands[1].reg == REG_PC |
| && !(inst.instruction & THUMB_LOAD_BIT), |
| _("r15 based store not allowed")); |
| constraint (inst.operands[1].immisreg, |
| _("invalid base register for register offset")); |
| |
| if (inst.operands[1].reg == REG_PC) |
| inst.instruction = T_OPCODE_LDR_PC; |
| else if (inst.instruction & THUMB_LOAD_BIT) |
| inst.instruction = T_OPCODE_LDR_SP; |
| else |
| inst.instruction = T_OPCODE_STR_SP; |
| |
| inst.instruction |= inst.operands[0].reg << 8; |
| inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET; |
| return; |
| } |
| |
| constraint (inst.operands[1].reg > 7, BAD_HIREG); |
| if (!inst.operands[1].immisreg) |
| { |
| /* Immediate offset. */ |
| inst.instruction |= inst.operands[0].reg; |
| inst.instruction |= inst.operands[1].reg << 3; |
| inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET; |
| return; |
| } |
| |
| /* Register offset. */ |
| constraint (inst.operands[1].imm > 7, BAD_HIREG); |
| constraint (inst.operands[1].negative, |
| _("Thumb does not support this addressing mode")); |
| |
| op16: |
| switch (inst.instruction) |
| { |
| case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break; |
| case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break; |
| case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break; |
| case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break; |
| case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break; |
| case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break; |
| case 0x5600 /* ldrsb */: |
| case 0x5e00 /* ldrsh */: break; |
| default: abort (); |
| } |
| |
| inst.instruction |= inst.operands[0].reg; |
| inst.instruction |= inst.operands[1].reg << 3; |
| inst.instruction |= inst.operands[1].imm << 6; |
| } |
| |
| static void |
| do_t_ldstd (void) |
| { |
| if (!inst.operands[1].present) |
| { |
| inst.operands[1].reg = inst.operands[0].reg + 1; |
| constraint (inst.operands[0].reg == REG_LR, |
| _("r14 not allowed here")); |
| constraint (inst.operands[0].reg == REG_R12, |
| _("r12 not allowed here")); |
| } |
| |
| if (inst.operands[2].writeback |
| && (inst.operands[0].reg == inst.operands[2].reg |
| || inst.operands[1].reg == inst.operands[2].reg)) |
| as_warn (_("base register written back, and overlaps " |
| "one of transfer registers")); |
| |
| inst.instruction |= inst.operands[0].reg << 12; |
| inst.instruction |= inst.operands[1].reg << 8; |
| encode_thumb32_addr_mode (2, /*is_t=*/false, /*is_d=*/true); |
| } |
| |
| static void |
| do_t_ldstt (void) |
| { |
| inst.instruction |= inst.operands[0].reg << 12; |
| encode_thumb32_addr_mode (1, /*is_t=*/true, /*is_d=*/false); |
| } |
| |
| static void |
| do_t_mla (void) |
| { |
| unsigned Rd, Rn, Rm, Ra; |
| |
| Rd = inst.operands[0].reg; |
| Rn = inst.operands[1].reg; |
| Rm = inst.operands[2].reg; |
| Ra = inst.operands[3].reg; |
| |
| reject_bad_reg (Rd); |
| reject_bad_reg (Rn); |
| reject_bad_reg (Rm); |
| reject_bad_reg (Ra); |
| |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rn << 16; |
| inst.instruction |= Rm; |
| inst.instruction |= Ra << 12; |
| } |
| |
| static void |
| do_t_mlal (void) |
| { |
| unsigned RdLo, RdHi, Rn, Rm; |
| |
| RdLo = inst.operands[0].reg; |
| RdHi = inst.operands[1].reg; |
| Rn = inst.operands[2].reg; |
| Rm = inst.operands[3].reg; |
| |
| reject_bad_reg (RdLo); |
| reject_bad_reg (RdHi); |
| reject_bad_reg (Rn); |
| reject_bad_reg (Rm); |
| |
| inst.instruction |= RdLo << 12; |
| inst.instruction |= RdHi << 8; |
| inst.instruction |= Rn << 16; |
| inst.instruction |= Rm; |
| } |
| |
/* Encode Thumb MOV, MOVS and CMP (register or immediate source).
   Chooses among the many 16- and 32-bit encodings based on the syntax
   mode (unified vs. divided), the registers involved, the IT-block
   state and any explicit .n/.w size qualifier.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* An instruction writing or testing PC must end any IT block.  */
  if (Rn == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      /* Field position of the first register: bit 8 for MOV/MOVS (Rd),
	 bit 16 for CMP (Rn).  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bool narrow;
      bool low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Inside an IT block only non-flag-setting forms can be narrow;
	 outside, a narrow MOVS additionally needs low registers.  */
      if (in_pred_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = false;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				 "deprecated when r%u is the destination "
				 "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  /* Outside an IT block, a plain MOV #imm stays wide so the
	     relaxation machinery can pick the final size.  */
	  if (!in_pred_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      /* A Thumb-1 ALU_ABS relocation pins the 16-bit encoding
		 and must not be relaxed.  */
	      if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      /* Switch to the T32 'modified immediate' encoding.  */
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bool flags = (inst.instruction == T_MNEM_movs);

	  if (in_pred_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = false;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = false;

	  /* The narrow shift encodings are destructive (Rd == Rn).  */
	  if (Rn != Rm)
	    narrow = false;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_pred_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = false; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Divided (pre-UAL) syntax: only the classic 16-bit encodings.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
| |
/* Encode Thumb-2 MOVW/MOVT.  The 16-bit immediate is scattered across
   the imm4:i:imm3:imm8 fields of the 32-bit encoding.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bool top;

  /* Bit 23 distinguishes MOVT (writes the top half) from MOVW.  */
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
    {
      /* :lower16: pairs with MOVW only; rewrite to the Thumb reloc.  */
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
    {
      /* :upper16: pairs with MOVT only.  */
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* Constant resolved at assembly time: place imm4 -> <19:16>,
	 i -> <26>, imm3 -> <14:12>, imm8 -> <7:0>.  */
      imm = inst.relocs[0].exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
| |
/* Encode Thumb MVN/MVNS/TST/CMN (register or immediate source),
   choosing between the 16- and 32-bit encodings.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN only read Rn, so only PC is rejected; for the others the
     first register may be neither SP nor PC.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* Field position of the first register: bit 8 for MVN/MVNS (Rd),
	 bit 16 otherwise (Rn).  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bool narrow;

      /* 16-bit encodings need low, unshifted registers, a 16-bit
	 mnemonic opcode and no .w qualifier; flag-setting behaviour
	 must also agree with the IT state.  */
      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = false;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = true;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Switch to the T32 'modified immediate' form.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      /* No register-shifted-register forms exist here.  */
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Divided syntax: 16-bit encoding, unshifted low registers only.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
| |
/* Encode Thumb-2 MRS: read a banked register or an APSR/CPSR/SPSR
   special register into Rd.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* Let the VFP VMRS alias handle FPSCR-style operands.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register (e.g. r8_usr): the parser packed the SYSm
	 value and flag bits into 'reg' — sanity-check the packing
	 before scattering it into the encoding.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bool m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
		      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      /* R bit <20>, SYSm <7:0>; Rn field is 0xf for this form.  */
      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
| |
/* Encode Thumb-2 MSR (register form): move Rn into a status or
   banked register.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* Let the VFP VMSR alias handle FPSCR-style operands.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* A banked register target was parsed into 'reg'; a PSR field mask
     was parsed into 'imm'.  Either way it becomes the flag word.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bool m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* M-profile with the DSP extension permits the _g/_nzcvq field
	 suffixes; without it only the flag field may be written.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the R bit, field mask and SYSm bits into the encoding.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
| |
/* Encode Thumb MUL/MULS.  The two-operand form "MUL Rd, Rm" reuses Rd
   as the first source operand.  */
static void
do_t_mul (void)
{
  bool narrow;
  unsigned Rd, Rn, Rm;

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* The 16-bit encoding is destructive (Rd must overlap a source)
	 and needs low registers; flag-setting behaviour must also
	 agree with the IT state.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = false;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = true;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      /* SP/PC are not permitted in the wide encoding.  */
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
| |
| static void |
| do_t_mull (void) |
| { |
| unsigned RdLo, RdHi, Rn, Rm; |
| |
| RdLo = inst.operands[0].reg; |
| RdHi = inst.operands[1].reg; |
| Rn = inst.operands[2].reg; |
| Rm = inst.operands[3].reg; |
| |
| reject_bad_reg (RdLo); |
| reject_bad_reg (RdHi); |
| reject_bad_reg (Rn); |
| reject_bad_reg (Rm); |
| |
| inst.instruction |= RdLo << 12; |
| inst.instruction |= RdHi << 8; |
| inst.instruction |= Rn << 16; |
| inst.instruction |= Rm; |
| |
| if (RdLo == RdHi) |
| as_tsktsk (_("rdhi and rdlo must be different")); |
| } |
| |
| static void |
| do_t_nop (void) |
| { |
| set_pred_insn_type (NEUTRAL_IT_INSN); |
| |
| if (unified_syntax) |
| { |
| if (inst.size_req == 4 || inst.operands[0].imm > 15) |
| { |
| inst.instruction = THUMB_OP32 (inst.instruction); |
| inst.instruction |= inst.operands[0].imm; |
| } |
| else |
| { |
| /* PR9722: Check for Thumb2 availability before |
| generating a thumb2 nop instruction. */ |
| if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)) |
| { |
| inst.instruction = THUMB_OP16 (inst.instruction); |
| inst.instruction |= inst.operands[0].imm << 4; |
| } |
| else |
| inst.instruction = 0x46c0; |
| } |
| } |
| else |
| { |
| constraint (inst.operands[0].present, |
| _("Thumb does not support NOP with hints")); |
| inst.instruction = 0x46c0; |
| } |
| } |
| |
| static void |
| do_t_neg (void) |
| { |
| if (unified_syntax) |
| { |
| bool narrow; |
| |
| if (THUMB_SETS_FLAGS (inst.instruction)) |
| narrow = !in_pred_block (); |
| else |
| narrow = in_pred_block (); |
| if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7) |
| narrow = false; |
| if (inst.size_req == 4) |
| narrow = false; |
| |
| if (!narrow) |
| { |
| inst.instruction = THUMB_OP32 (inst.instruction); |
| inst.instruction |= inst.operands[0].reg << 8; |
| inst.instruction |= inst.operands[1].reg << 16; |
| } |
| else |
| { |
| inst.instruction = THUMB_OP16 (inst.instruction); |
| inst.instruction |= inst.operands[0].reg; |
| inst.instruction |= inst.operands[1].reg << 3; |
| } |
| } |
| else |
| { |
| constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7, |
| BAD_HIREG); |
| constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32); |
| |
| inst.instruction = THUMB_OP16 (inst.instruction); |
| inst.instruction |= inst.operands[0].reg; |
| inst.instruction |= inst.operands[1].reg << 3; |
| } |
| } |
| |
| static void |
| do_t_orn (void) |
| { |
| unsigned Rd, Rn; |
| |
| Rd = inst.operands[0].reg; |
| Rn = inst.operands[1].present ? inst.operands[1].reg : Rd; |
| |
| reject_bad_reg (Rd); |
| /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */ |
| reject_bad_reg (Rn); |
| |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rn << 16; |
| |
| if (!inst.operands[2].isreg) |
| { |
| inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000; |
| inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE; |
| } |
| else |
| { |
| unsigned Rm; |
| |
| Rm = inst.operands[2].reg; |
| reject_bad_reg (Rm); |
| |
| constraint (inst.operands[2].shifted |
| && inst.operands[2].immisreg, |
| _("shift must be constant")); |
| encode_thumb32_shifted_operand (2); |
| } |
| } |
| |
| static void |
| do_t_pkhbt (void) |
| { |
| unsigned Rd, Rn, Rm; |
| |
| Rd = inst.operands[0].reg; |
| Rn = inst.operands[1].reg; |
| Rm = inst.operands[2].reg; |
| |
| reject_bad_reg (Rd); |
| reject_bad_reg (Rn); |
| reject_bad_reg (Rm); |
| |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rn << 16; |
| inst.instruction |= Rm; |
| if (inst.operands[3].present) |
| { |
| unsigned int val = inst.relocs[0].exp.X_add_number; |
| constraint (inst.relocs[0].exp.X_op != O_constant, |
| _("expression too complex")); |
| inst.instruction |= (val & 0x1c) << 10; |
| inst.instruction |= (val & 0x03) << 6; |
| } |
| } |
| |
| static void |
| do_t_pkhtb (void) |
| { |
| if (!inst.operands[3].present) |
| { |
| unsigned Rtmp; |
| |
| inst.instruction &= ~0x00000020; |
| |
| /* PR 10168. Swap the Rm and Rn registers. */ |
| Rtmp = inst.operands[1].reg; |
| inst.operands[1].reg = inst.operands[2].reg; |
| inst.operands[2].reg = Rtmp; |
| } |
| do_t_pkhbt (); |
| } |
| |
| static void |
| do_t_pld (void) |
| { |
| if (inst.operands[0].immisreg) |
| reject_bad_reg (inst.operands[0].imm); |
| |
| encode_thumb32_addr_mode (0, /*is_t=*/false, /*is_d=*/false); |
| } |
| |
/* Encode Thumb PUSH/POP.  Prefers the 16-bit encodings (low-register
   mask, or low registers plus LR/PC) before falling back to the
   32-bit LDM/STM-based forms.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  /* Low registers only: plain 16-bit encoding.  */
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): 16-bit
	 encoding with the M/P bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else needs the 32-bit STMDB/LDMIA SP! form.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_multi (true /* do_io */, 13, mask, true);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
| |
| static void |
| do_t_clrm (void) |
| { |
| if (unified_syntax) |
| encode_thumb2_multi (false /* do_io */, -1, inst.operands[0].imm, false); |
| else |
| { |
| inst.error = _("invalid register list to push/pop instruction"); |
| return; |
| } |
| } |
| |
| static void |
| do_t_vscclrm (void) |
| { |
| if (inst.operands[0].issingle) |
| { |
| inst.instruction |= (inst.operands[0].reg & 0x1) << 22; |
| inst.instruction |= (inst.operands[0].reg & 0x1e) << 11; |
| inst.instruction |= inst.operands[0].imm; |
| } |
| else |
| { |
| inst.instruction |= (inst.operands[0].reg & 0x10) << 18; |
| inst.instruction |= (inst.operands[0].reg & 0xf) << 12; |
| inst.instruction |= 1 << 8; |
| inst.instruction |= inst.operands[0].imm << 1; |
| } |
| } |
| |
| static void |
| do_t_rbit (void) |
| { |
| unsigned Rd, Rm; |
| |
| Rd = inst.operands[0].reg; |
| Rm = inst.operands[1].reg; |
| |
| reject_bad_reg (Rd); |
| reject_bad_reg (Rm); |
| |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rm << 16; |
| inst.instruction |= Rm; |
| } |
| |
| static void |
| do_t_rev (void) |
| { |
| unsigned Rd, Rm; |
| |
| Rd = inst.operands[0].reg; |
| Rm = inst.operands[1].reg; |
| |
| reject_bad_reg (Rd); |
| reject_bad_reg (Rm); |
| |
| if (Rd <= 7 && Rm <= 7 |
| && inst.size_req != 4) |
| { |
| inst.instruction = THUMB_OP16 (inst.instruction); |
| inst.instruction |= Rd; |
| inst.instruction |= Rm << 3; |
| } |
| else if (unified_syntax) |
| { |
| inst.instruction = THUMB_OP32 (inst.instruction); |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rm << 16; |
| inst.instruction |= Rm; |
| } |
| else |
| inst.error = BAD_HIREG; |
| } |
| |
| static void |
| do_t_rrx (void) |
| { |
| unsigned Rd, Rm; |
| |
| Rd = inst.operands[0].reg; |
| Rm = inst.operands[1].reg; |
| |
| reject_bad_reg (Rd); |
| reject_bad_reg (Rm); |
| |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rm; |
| } |
| |
/* Encode Thumb-2 RSB/RSBS.  The two-operand form reuses Rd as the
   first source.  "RSBS Rd, Rs, #0" may shrink to the 16-bit NEGS
   encoding when the registers and IT state allow.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bool narrow;

      /* Bit 20 is the S bit; the narrow NEGS form always sets flags,
	 so its use must agree with the IT state.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (Rd > 7 || Rs > 7)
	narrow = false;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = false;

      /* Only a literal zero immediate qualifies for NEGS.  */
      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = false;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* Wide form: switch to the T32 'modified immediate'
	     encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
| |
| static void |
| do_t_setend (void) |
| { |
| if (warn_on_deprecated |
| && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) |
| as_tsktsk (_("setend use is deprecated for ARMv8")); |
| |
| set_pred_insn_type (OUTSIDE_PRED_INSN); |
| if (inst.operands[0].imm) |
| inst.instruction |= 0x8; |
| } |
| |
/* Encode Thumb ASR/LSL/LSR/ROR with an immediate or register shift
   count, in both unified and divided syntax.  The two-operand form
   reuses the destination as the value to shift.  */
static void
do_t_shift (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bool narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Narrow encodings set flags, so their use must agree with the
	 IT state.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = false;
      /* There is no 16-bit ROR-by-immediate encoding.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = false;
      /* The narrow register-shift form is destructive (Rd == Rn) and
	 the count register must be low.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = false;
      if (inst.size_req == 4)
	narrow = false;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Wide immediate shifts are encoded as
		 MOV{S} Rd, Rm, <shift> #imm.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Divided syntax: 16-bit encodings only, no flag-setting
	 mnemonic variants.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
| |
| static void |
| do_t_simd (void) |
| { |
| unsigned Rd, Rn, Rm; |
| |
| Rd = inst.operands[0].reg; |
| Rn = inst.operands[1].reg; |
| Rm = inst.operands[2].reg; |
| |
| reject_bad_reg (Rd); |
| reject_bad_reg (Rn); |
| reject_bad_reg (Rm); |
| |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rn << 16; |
| inst.instruction |= Rm; |
| } |
| |
| static void |
| do_t_simd2 (void) |
| { |
| unsigned Rd, Rn, Rm; |
| |
| Rd = inst.operands[0].reg; |
| Rm = inst.operands[1].reg; |
| Rn = inst.operands[2].reg; |
| |
| reject_bad_reg (Rd); |
| reject_bad_reg (Rn); |
| reject_bad_reg (Rm); |
| |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rn << 16; |
| inst.instruction |= Rm; |
| } |
| |
static void
do_t_smc (void)
{
  /* Encode the Thumb SMC (secure monitor call) instruction.  The 4-bit
     immediate goes into bits 19:16.  */
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));

  /* The immediate is encoded directly; no relocation remains.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0x000f) << 16;

  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_pred_insn_type_last ();
}
| |
static void
do_t_hvc (void)
{
  /* Encode the Thumb HVC (hypervisor call) instruction.  The 16-bit
     immediate is split into imm12 (bits 11:0) and imm4 (bits 19:16).
     NOTE(review): unlike do_t_smc there is no O_constant or range
     check here — presumably guaranteed by operand parsing; verify
     against the operand parser.  */
  unsigned int value = inst.relocs[0].exp.X_add_number;

  /* The immediate is encoded directly; no relocation remains.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0x0fff);
  inst.instruction |= (value & 0xf000) << 4;
}
| |
static void
do_t_ssat_usat (int bias)
{
  /* Common encoder for Thumb SSAT (BIAS == 1) and USAT (BIAS == 0).
     Operand 0 is Rd, operand 1 the saturate position (encoded less
     BIAS), operand 2 is Rn, and the optional operand 3 is a shift
     applied to Rn.  */
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      /* The shift amount is encoded in the instruction itself, so no
	 relocation remains.  */
      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  /* ASR is distinguished from LSL by the sh bit.  */
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit. */

	  /* The shift amount is split: bits 4:2 go to bits 14:12 and
	     bits 1:0 go to bits 7:6 of the instruction.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
| |
static void
do_t_ssat (void)
{
  /* SSAT encodes the saturate position as (imm - 1), hence bias 1.  */
  do_t_ssat_usat (1);
}
| |
| static void |
| do_t_ssat16 (void) |
| { |
| unsigned Rd, Rn; |
| |
| Rd = inst.operands[0].reg; |
| Rn = inst.operands[2].reg; |
| |
| reject_bad_reg (Rd); |
| reject_bad_reg (Rn); |
| |
| inst.instruction |= Rd << 8; |
| inst.instruction |= inst.operands[1].imm - 1; |
| inst.instruction |= Rn << 16; |
| } |
| |
static void
do_t_strex (void)
{
  /* Thumb STREX.  The address operand must be a plain pre-indexed
     [Rn{, #imm}] form: no post-index, writeback, register offset,
     shift or negative offset.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  /* Rd (status) in bits 11:8, Rt in bits 15:12, Rn in bits 19:16.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The immediate offset is applied later as an unsigned 8-bit fixup.  */
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
| |
| static void |
| do_t_strexd (void) |
| { |
| if (!inst.operands[2].present) |
| inst.operands[2].reg = inst.operands[1].reg + 1; |
| |
| constraint (inst.operands[0].reg == inst.operands[1].reg |
| || inst.operands[0].reg == inst.operands[2].reg |
| || inst.operands[0].reg == inst.operands[3].reg, |
| BAD_OVERLAP); |
| |
| inst.instruction |= inst.operands[0].reg; |
| inst.instruction |= inst.operands[1].reg << 12; |
| inst.instruction |= inst.operands[2].reg << 8; |
| inst.instruction |= inst.operands[3].reg << 16; |
| } |
| |
| static void |
| do_t_sxtah (void) |
| { |
| unsigned Rd, Rn, Rm; |
| |
| Rd = inst.operands[0].reg; |
| Rn = inst.operands[1].reg; |
| Rm = inst.operands[2].reg; |
| |
| reject_bad_reg (Rd); |
| reject_bad_reg (Rn); |
| reject_bad_reg (Rm); |
| |
| inst.instruction |= Rd << 8; |
| inst.instruction |= Rn << 16; |
| inst.instruction |= Rm; |
| inst.instruction |= inst.operands[3].imm << 4; |
| } |
| |
static void
do_t_sxth (void)
{
  /* Thumb extend instructions (sxth family).  Use the 16-bit narrow
     encoding when the opcode has one, no 32-bit encoding was forced,
     both registers are low, and there is no rotation; otherwise use
     the 32-bit encoding, which carries the rotation in bits 5:4.  */
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      /* 16-bit encoding: Rd in bits 2:0, Rm in bits 5:3.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      /* 32-bit encoding: Rd in bits 11:8, Rm in bits 3:0,
	 rotation in bits 5:4.  */
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      /* Pre-unified syntax only accepts the 16-bit form, which
	 supports neither rotation nor high registers.  */
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
| |
static void
do_t_swi (void)
{
  /* Thumb SWI/SVC: the immediate is filled in by the SWI fixup.  */
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
| |
static void
do_t_tb (void)
{
  /* TBB/TBH (table branch).  Bit 4 of the opcode selects the halfword
     form; only that form accepts a shifted index register.  */
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be the last instruction in an IT block.  */
  set_pred_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* Armv8 relaxes the restriction on SP as the base register.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  /* Rn in bits 19:16, Rm in bits 3:0.  */
  inst.instruction |= (Rn << 16) | Rm;
}
| |
| static void |
| do_t_udf (void) |
| { |
| if (!inst.operands[0].present) |
| inst.operands[0].imm = 0; |
| |
| if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4) |
| { |
| constraint (inst.size_req == 2, |
| _("immediate value out of range")); |
| inst.instruction = THUMB_OP32 (inst.instruction); |
| inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4; |
| inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0; |
| } |
| else |
| { |
| inst.instruction = THUMB_OP16 (inst.instruction); |
| inst.instruction |= inst.operands[0].imm; |
| } |
| |
| set_pred_insn_type (NEUTRAL_IT_INSN); |
| } |
| |
| |
static void
do_t_usat (void)
{
  /* USAT encodes the saturate position directly, hence bias 0.  */
  do_t_ssat_usat (0);
}
| |
| static void |
| do_t_usat16 (void) |
| { |
| unsigned Rd, Rn; |
| |
| Rd = inst.operands[0].reg; |
| Rn = inst.operands[2].reg; |
| |
| reject_bad_reg (Rd); |
| reject_bad_reg (Rn); |
| |
| inst.instruction |= Rd << 8; |
| inst.instruction |= inst.operands[1].imm; |
| inst.instruction |= Rn << 16; |
| } |
| |
| /* Checking the range of the branch offset (VAL) with NBITS bits |
| and IS_SIGNED signedness. Also checks the LSB to be 0. */ |
| static int |
| v8_1_branch_value_check (int val, int nbits, int is_signed) |
| { |
| gas_assert (nbits > 0 && nbits <= 32); |
| if (is_signed) |
| { |
| int cmp = (1 << (nbits - 1)); |
| if ((val < -cmp) || (val >= cmp) || (val & 0x01)) |
| return FAIL; |
| } |
| else |
| { |
| if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1)) |
| return FAIL; |
| } |
| return SUCCESS; |
| } |
| |
/* For branches in Armv8.1-M Mainline.  Encodes the branch-future
   family (BF, BFL, BFCSEL, BFX, BFLX).  Operand 0 is the branch-future
   point; the remaining operands depend on the mnemonic.  */
static void
do_t_branch_future (void)
{
  /* Remember which mnemonic we are encoding before the opcode bits
     are overwritten with the 32-bit template.  */
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  /* Operand 0: the branch-future point, a 5-bit offset whose low bit
     must be clear; encoded (without that bit) in bits 26:23.  */
  if (inst.operands[0].hasreloc == 0)
    {
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, false) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      /* Symbolic operand: defer to a PC-relative relocation.  */
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
    case T_MNEM_bf:
      /* BF: 17-bit signed branch offset, split immA:immB:immC.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 17, true) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  int immA = (val & 0x0001f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfl:
      /* BFL: as BF but with a 19-bit signed offset.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 19, true) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  int immA = (val & 0x0007f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfcsel:
      /* Operand 1.  13-bit offset, split like BF/BFL.  NOTE(review):
	 no range check here, unlike BF/BFL — presumably done during
	 parsing; verify.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  int immA = (val & 0x00001000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	  inst.relocs[1].pc_rel = 1;
	}

      /* Operand 2.  Must lie 2 or 4 bytes after the branch-future
	 point; a distance of 4 sets the T bit.  Both labels must be
	 resolved the same way (constant or reloc).  */
      if (inst.operands[2].hasreloc == 0)
	{
	  constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	  int val2 = inst.operands[2].imm;
	  int val0 = inst.operands[0].imm & 0x1f;
	  int diff = val2 - val0;
	  if (diff == 4)
	    inst.instruction |= 1 << 17;  /* T bit.  */
	  else if (diff != 2)
	    as_bad (_("out of range label-relative fixup value"));
	}
      else
	{
	  constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	  inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	  inst.relocs[2].pc_rel = 1;
	}

      /* Operand 3.  The condition, encoded in bits 21:18; BFCSEL
	 itself must not be conditionalized by an IT block.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
      break;

    case T_MNEM_bfx:
    case T_MNEM_bflx:
      /* Register forms: branch target register in bits 19:16.  */
      inst.instruction |= inst.operands[1].reg << 16;
      break;

    default: abort ();
    }
}
| |
/* Helper function for do_t_loloop to handle relocations.  IS_LE is
   nonzero for LE (loop-end), whose offset is backwards and therefore
   negated before the range check.  */
static void
v8_1_loop_reloc (int is_le)
{
  if (inst.relocs[0].exp.X_op == O_constant)
    {
      /* Constant offset: range-check and encode it directly.  */
      int value = inst.relocs[0].exp.X_add_number;
      value = (is_le) ? -value : value;

      if (v8_1_branch_value_check (value, 12, false) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      int imml, immh;

      /* Split the offset: bits 11:2 and bit 1 (bit 0 must be clear).  */
      immh = (value & 0x00000ffc) >> 2;
      imml = (value & 0x00000002) >> 1;

      inst.instruction |= (imml << 11) | (immh << 1);
    }
  else
    {
      /* Symbolic offset: defer to a PC-relative relocation.  */
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
      inst.relocs[0].pc_rel = 1;
    }
}
| |
/* For shifts with four operands in MVE.  Operands 0 and 1 are the
   RdaLo/RdaHi register pair, operand 2 the shift amount and operand 3
   the shift register.  */
static void
do_mve_scalar_shift1 (void)
{
  unsigned int value = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 8;

  /* Setting the bit for saturation.  A value of 64 presumably means
     "no saturation" — confirm against the operand parser.  */
  inst.instruction |= ((value == 64) ? 0: 1) << 7;

  /* Assuming Rm is already checked not to be 11x1.  The shift register
     must not overlap either destination register.  */
  constraint (inst.operands[3].reg == inst.operands[0].reg, BAD_OVERLAP);
  constraint (inst.operands[3].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[3].reg << 12;
}
| |
/* For shifts in MVE.  Operands 0 and 1 are the destination register
   pair; operand 2 is either a shift register or an immediate shift
   amount.  */
static void
do_mve_scalar_shift (void)
{
  /* With only two operands, the single source becomes operand 2 and
     operand 1's register field is forced to 0xf.  */
  if (!inst.operands[2].present)
    {
      inst.operands[2] = inst.operands[1];
      inst.operands[1].reg = 0xf;
    }

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 8;

  if (inst.operands[2].isreg)
    {
      /* Assuming Rm is already checked not to be 11x1.  The shift
	 register must not overlap the destination registers.  */
      constraint (inst.operands[2].reg == inst.operands[0].reg, BAD_OVERLAP);
      constraint (inst.operands[2].reg == inst.operands[1].reg, BAD_OVERLAP);
      inst.instruction |= inst.operands[2].reg << 12;
    }
  else
    {
      /* Assuming imm is already checked as [1,32].  The amount is
	 split: bits 4:2 into bits 14:12, bits 1:0 into bits 7:6.  */
      unsigned int value = inst.operands[2].imm;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
      /* Change last 4 bits from 0xd to 0xf.  */
      inst.instruction |= 0x2;
    }
}
| |
/* MVE instruction encoder helpers.  Base opcode values for MVE
   mnemonics, used by the encoder functions as inst.instruction
   templates.  */
#define M_MNEM_vabav 0xee800f01
#define M_MNEM_vmladav 0xeef00e00
#define M_MNEM_vmladava 0xeef00e20
#define M_MNEM_vmladavx 0xeef01e00
#define M_MNEM_vmladavax 0xeef01e20
#define M_MNEM_vmlsdav 0xeef00e01
#define M_MNEM_vmlsdava 0xeef00e21
#define M_MNEM_vmlsdavx 0xeef01e01
#define M_MNEM_vmlsdavax 0xeef01e21
#define M_MNEM_vmullt 0xee011e00
#define M_MNEM_vmullb 0xee010e00
#define M_MNEM_vctp 0xf000e801
#define M_MNEM_vst20 0xfc801e00
#define M_MNEM_vst21 0xfc801e20
#define M_MNEM_vst40 0xfc801e01
#define M_MNEM_vst41 0xfc801e21
#define M_MNEM_vst42 0xfc801e41
#define M_MNEM_vst43 0xfc801e61
#define M_MNEM_vld20 0xfc901e00
#define M_MNEM_vld21 0xfc901e20
#define M_MNEM_vld40 0xfc901e01
#define M_MNEM_vld41 0xfc901e21
#define M_MNEM_vld42 0xfc901e41
#define M_MNEM_vld43 0xfc901e61
#define M_MNEM_vstrb 0xec000e00
#define M_MNEM_vstrh 0xec000e10
#define M_MNEM_vstrw 0xec000e40
#define M_MNEM_vstrd 0xec000e50
#define M_MNEM_vldrb 0xec100e00
#define M_MNEM_vldrh 0xec100e10
#define M_MNEM_vldrw 0xec100e40
#define M_MNEM_vldrd 0xec100e50
#define M_MNEM_vmovlt 0xeea01f40
#define M_MNEM_vmovlb 0xeea00f40
#define M_MNEM_vmovnt 0xfe311e81
#define M_MNEM_vmovnb 0xfe310e81
#define M_MNEM_vadc 0xee300f00
#define M_MNEM_vadci 0xee301f00
#define M_MNEM_vbrsr 0xfe011e60
#define M_MNEM_vaddlv 0xee890f00
#define M_MNEM_vaddlva 0xee890f20
#define M_MNEM_vaddv 0xeef10f00
#define M_MNEM_vaddva 0xeef10f20
#define M_MNEM_vddup 0xee011f6e
#define M_MNEM_vdwdup 0xee011f60
#define M_MNEM_vidup 0xee010f6e
#define M_MNEM_viwdup 0xee010f60
#define M_MNEM_vmaxv 0xeee20f00
#define M_MNEM_vmaxav 0xeee00f00
#define M_MNEM_vminv 0xeee20f80
#define M_MNEM_vminav 0xeee00f80
#define M_MNEM_vmlaldav 0xee800e00
#define M_MNEM_vmlaldava 0xee800e20
#define M_MNEM_vmlaldavx 0xee801e00
#define M_MNEM_vmlaldavax 0xee801e20
#define M_MNEM_vmlsldav 0xee800e01
#define M_MNEM_vmlsldava 0xee800e21
#define M_MNEM_vmlsldavx 0xee801e01
#define M_MNEM_vmlsldavax 0xee801e21
#define M_MNEM_vrmlaldavhx 0xee801f00
#define M_MNEM_vrmlaldavhax 0xee801f20
#define M_MNEM_vrmlsldavh 0xfe800e01
#define M_MNEM_vrmlsldavha 0xfe800e21
#define M_MNEM_vrmlsldavhx 0xfe801e01
#define M_MNEM_vrmlsldavhax 0xfe801e21
#define M_MNEM_vqmovnt 0xee331e01
#define M_MNEM_vqmovnb 0xee330e01
#define M_MNEM_vqmovunt 0xee311e81
#define M_MNEM_vqmovunb 0xee310e81
#define M_MNEM_vshrnt 0xee801fc1
#define M_MNEM_vshrnb 0xee800fc1
#define M_MNEM_vrshrnt 0xfe801fc1
#define M_MNEM_vqshrnt 0xee801f40
#define M_MNEM_vqshrnb 0xee800f40
#define M_MNEM_vqshrunt 0xee801fc0
#define M_MNEM_vqshrunb 0xee800fc0
#define M_MNEM_vrshrnb 0xfe800fc1
#define M_MNEM_vqrshrnt 0xee801f41
#define M_MNEM_vqrshrnb 0xee800f41
#define M_MNEM_vqrshrunt 0xfe801fc0
#define M_MNEM_vqrshrunb 0xfe800fc0

/* Bfloat16 instruction encoder helpers.  */
#define B_MNEM_vfmat 0xfc300850
#define B_MNEM_vfmab 0xfc300810
| |
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB; which field is used is selected by the
   NEON_ENC_* accessor macros below.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer/register-form encoding.  */
  unsigned float_or_poly;	/* Float or polynomial-form encoding.  */
  unsigned scalar_or_imm;	/* Scalar or immediate-form encoding.  */
};
| |
/* Map overloaded Neon opcodes to their respective encodings.  Each
   X() entry gives the integer, float/poly and scalar/immediate
   encodings (N_INV where the variant does not exist).  The table is
   expanded twice below: once into enum neon_opc and once into
   neon_enc_tab, so the enum value indexes the table.  */
#define NEON_ENC_TAB \
X(vabd, 0x0000700, 0x1200d00, N_INV), \
X(vabdl, 0x0800700, N_INV, N_INV), \
X(vmax, 0x0000600, 0x0000f00, N_INV), \
X(vmin, 0x0000610, 0x0200f00, N_INV), \
X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
X(vadd, 0x0000800, 0x0000d00, N_INV), \
X(vaddl, 0x0800000, N_INV, N_INV), \
X(vsub, 0x1000800, 0x0200d00, N_INV), \
X(vsubl, 0x0800200, N_INV, N_INV), \
X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
/* Register variants of the following two instructions are encoded as
vcge / vcgt with the operands reversed. */ \
X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
X(vfma, N_INV, 0x0000c10, N_INV), \
X(vfms, N_INV, 0x0200c10, N_INV), \
X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
X(vmlal, 0x0800800, N_INV, 0x0800240), \
X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
X(vshl, 0x0000400, N_INV, 0x0800510), \
X(vqshl, 0x0000410, N_INV, 0x0800710), \
X(vand, 0x0000110, N_INV, 0x0800030), \
X(vbic, 0x0100110, N_INV, 0x0800030), \
X(veor, 0x1000110, N_INV, N_INV), \
X(vorn, 0x0300110, N_INV, 0x0800010), \
X(vorr, 0x0200110, N_INV, 0x0800010), \
X(vmvn, 0x1b00580, N_INV, 0x0800030), \
X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
X(vst1, 0x0000000, 0x0800000, N_INV), \
X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
X(vst2, 0x0000100, 0x0800100, N_INV), \
X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
X(vst3, 0x0000200, 0x0800200, N_INV), \
X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
X(vst4, 0x0000300, 0x0800300, N_INV), \
X(vmovn, 0x1b20200, N_INV, N_INV), \
X(vtrn, 0x1b20080, N_INV, N_INV), \
X(vqmovn, 0x1b20200, N_INV, N_INV), \
X(vqmovun, 0x1b20240, N_INV, N_INV), \
X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
X(vseleq, 0xe000a00, N_INV, N_INV), \
X(vselvs, 0xe100a00, N_INV, N_INV), \
X(vselge, 0xe200a00, N_INV, N_INV), \
X(vselgt, 0xe300a00, N_INV, N_INV), \
X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
X(aes, 0x3b00300, N_INV, N_INV), \
X(sha3op, 0x2000c00, N_INV, N_INV), \
X(sha1h, 0x3b902c0, N_INV, N_INV), \
X(sha2op, 0x3ba0380, N_INV, N_INV)

/* N_MNEM_<opc> constants, one per table row.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encodings themselves, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
| |
/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each selects one field of the neon_enc_tab entry for the opcode
   held in the low 28 bits of X.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* The _SINGLE/_DOUBLE/_FPV8 variants preserve (some of) the high bits
   of X alongside the table encoding.  */
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with its TYPE-variant encoding and mark
   the instruction as Neon.  */
#define NEON_ENCODE(type, inst) \
  do \
    { \
      inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
      inst.is_neon = 1; \
    } \
  while (0)

/* Diagnose a Neon type suffix on an instruction whose encoder never
   marked it as Neon.  Expands a "return;", so it must be used inside
   an encoder function.  */
#define check_neon_suffixes \
  do \
    { \
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
	{ \
	  as_bad (_("invalid neon suffix for non neon instruction")); \
	  return; \
	} \
    } \
  while (0)
| |
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list
     H - VFP S<n> register holding a half-precision (16-bit) value
     P - operand accepted without checking (see the SE_P case in
	 neon_select_shape; L is likewise not checked there)

   This table is used to generate various data:
   - enumerations of the form NS_DDR to be used as arguments to
     neon_select_shape.
   - a table classifying shapes into single, double, quad, mixed.
   - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF \
X(4, (R, R, Q, Q), QUAD), \
X(4, (Q, R, R, I), QUAD), \
X(4, (R, R, S, S), QUAD), \
X(4, (S, S, R, R), QUAD), \
X(3, (Q, R, I), QUAD), \
X(3, (I, Q, Q), QUAD), \
X(3, (I, Q, R), QUAD), \
X(3, (R, Q, Q), QUAD), \
X(3, (D, D, D), DOUBLE), \
X(3, (Q, Q, Q), QUAD), \
X(3, (D, D, I), DOUBLE), \
X(3, (Q, Q, I), QUAD), \
X(3, (D, D, S), DOUBLE), \
X(3, (Q, Q, S), QUAD), \
X(3, (Q, Q, R), QUAD), \
X(3, (R, R, Q), QUAD), \
X(2, (R, Q), QUAD), \
X(2, (D, D), DOUBLE), \
X(2, (Q, Q), QUAD), \
X(2, (D, S), DOUBLE), \
X(2, (Q, S), QUAD), \
X(2, (D, R), DOUBLE), \
X(2, (Q, R), QUAD), \
X(2, (D, I), DOUBLE), \
X(2, (Q, I), QUAD), \
X(3, (P, F, I), SINGLE), \
X(3, (P, D, I), DOUBLE), \
X(3, (P, Q, I), QUAD), \
X(4, (P, F, F, I), SINGLE), \
X(4, (P, D, D, I), DOUBLE), \
X(4, (P, Q, Q, I), QUAD), \
X(5, (P, F, F, F, I), SINGLE), \
X(5, (P, D, D, D, I), DOUBLE), \
X(5, (P, Q, Q, Q, I), QUAD), \
X(3, (D, L, D), DOUBLE), \
X(2, (D, Q), MIXED), \
X(2, (Q, D), MIXED), \
X(3, (D, Q, I), MIXED), \
X(3, (Q, D, I), MIXED), \
X(3, (Q, D, D), MIXED), \
X(3, (D, Q, Q), MIXED), \
X(3, (Q, Q, D), MIXED), \
X(3, (Q, D, S), MIXED), \
X(3, (D, Q, S), MIXED), \
X(4, (D, D, D, I), DOUBLE), \
X(4, (Q, Q, Q, I), QUAD), \
X(4, (D, D, S, I), DOUBLE), \
X(4, (Q, Q, S, I), QUAD), \
X(2, (F, F), SINGLE), \
X(3, (F, F, F), SINGLE), \
X(2, (F, I), SINGLE), \
X(2, (F, D), MIXED), \
X(2, (D, F), MIXED), \
X(3, (F, F, I), MIXED), \
X(4, (R, R, F, F), SINGLE), \
X(4, (F, F, R, R), SINGLE), \
X(3, (D, R, R), DOUBLE), \
X(3, (R, R, D), DOUBLE), \
X(2, (S, R), SINGLE), \
X(2, (R, S), SINGLE), \
X(2, (F, R), SINGLE), \
X(2, (R, F), SINGLE), \
/* Used for MVE tail predicated loop instructions. */\
X(2, (R, R), QUAD), \
/* Half float shape supported so far. */\
X (2, (H, D), MIXED), \
X (2, (D, H), MIXED), \
X (2, (H, F), MIXED), \
X (2, (F, H), MIXED), \
X (2, (H, H), HALF), \
X (2, (H, R), HALF), \
X (2, (R, H), HALF), \
X (2, (H, I), HALF), \
X (3, (H, H, H), HALF), \
X (3, (H, F, I), MIXED), \
X (3, (F, H, I), MIXED), \
X (3, (D, H, H), MIXED), \
X (3, (D, H, S), MIXED)

#define S2(A,B) NS_##A##B
#define S3(A,B,C) NS_##A##B##C
#define S4(A,B,C,D) NS_##A##B##C##D
#define S5(A,B,C,D,E) NS_##A##B##C##D##E

#define X(N, L, C) S##N L

/* NS_* shape names, one per NEON_SHAPE_DEF row, plus NS_NULL as the
   list terminator for neon_select_shape.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
#undef S5
| |
/* Broad classification of each shape, indexed by enum neon_shape.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

/* One element kind per mnemonic character of the shape table.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L,
  SE_P
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H.  */
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0,	/* SE_L.  */
  0	/* SE_P.  */
};

/* Expanded form of one shape: operand count plus the kind of each
   operand, used to drive neon_select_shape.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B) { SE_##A, SE_##B }
#define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
#define S5(A,B,C,D,E) { SE_##A, SE_##B, SE_##C, SE_##D, SE_##E }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
#undef S5
| |
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_BF16 = 0x0400000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* The modifier bits below deliberately reuse the values of the low
     type bits above; they only carry this meaning when N_EQK is set.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Commonly used combinations of the type bits above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)
#define N_I_MVE	   (N_I8 | N_I16 | N_I32)
#define N_F_MVE	   (N_F16 | N_F32)
#define N_SU_MVE   (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
| |
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  The variadic
   argument list must be terminated with NS_NULL.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Walk the candidate shapes in order and stop at the first one whose
     operand kinds all match the parsed operands.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	      /* P and L operands are never checked here.  */
	    case SE_P:
	    case SE_L:
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
| |
| /* True if SHAPE is predominantly a quadword operation (most of the time, this |
| means the Q bit should be set). */ |
| |
| static int |
| neon_quad (enum neon_shape shape) |
| { |
| return neon_shape_class[shape] == SC_QUAD; |
| } |
| |
| static void |
| neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type, |
| unsigned *g_size) |
| { |
| /* Allow modification to be made to types which are constrained to be |
| based on the key element, based on bits set alongside N_EQK. */ |
| if ((typebits & N_EQK) != 0) |
| { |
| if ((typebits & N_HLF) != 0) |
| *g_size /= 2; |
| else if ((typebits & N_DBL) != 0) |
| *g_size *= 2; |
| if ((typebits & N_SGN) != 0) |
| *g_type = NT_signed; |
| else if ((typebits & N_UNS) != 0) |
| *g_type = NT_unsigned; |
| else if ((typebits & N_INT) != 0) |
| *g_type = NT_integer; |
| else if ((typebits & N_FLT) != 0) |
| *g_type = NT_float; |
| else if ((typebits & N_SIZ) != 0) |
| *g_type = NT_untyped; |
| } |
| } |
| |
| /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key" |
| operand type, i.e. the single type specified in a Neon instruction when it |
| is the only one given. */ |
| |
| static struct neon_type_el |
| neon_type_promote (struct neon_type_el *key, unsigned thisarg) |
| { |
| struct neon_type_el dest = *key; |
| |
| gas_assert ((thisarg & N_EQK) != 0); |
| |
| neon_modify_type_size (thisarg, &dest.type, &dest.size); |
| |
| return dest; |
| } |
| |
| /* Convert Neon type and size into compact bitmask representation. */ |
| |
| static enum neon_type_mask |
| type_chk_of_el_type (enum neon_el_type type, unsigned size) |
| { |
| switch (type) |
| { |
| case NT_untyped: |
| switch (size) |
| { |
| case 8: return N_8; |
| case 16: return N_16; |
| case 32: return N_32; |
| case 64: return N_64; |
| default: ; |
| } |
| break; |
| |
| case NT_integer: |
| switch (size) |
| { |
| case 8: return N_I8; |
| case 16: return N_I16; |
| case 32: return N_I32; |
| case 64: return N_I64; |
| default: ; |
| } |
| break; |
| |
| case NT_float: |
| switch (size) |
| { |
| case 16: return N_F16; |
| case 32: return N_F32; |
| case 64: return N_F64; |
| default: ; |
| } |
| break; |
| |
| case NT_poly: |
| switch (size) |
| { |
| case 8: return N_P8; |
| case 16: return N_P16; |
| case 64: return N_P64; |
| default: ; |
| } |
| break; |
| |
| case NT_signed: |
| switch (size) |
| { |
| case 8: return N_S8; |
| case 16: return N_S16; |
| case 32: return N_S32; |
| case 64: return N_S64; |
| default: ; |
| } |
| break; |
| |
| case NT_unsigned: |
| switch (size) |
| { |
| case 8: return N_U8; |
| case 16: return N_U16; |
| case 32 |