| /* Subroutines for insn-output.cc for Motorola 68000 family. |
| Copyright (C) 1987-2022 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #define IN_TARGET_CODE 1 |
| |
| #include "config.h" |
| #define INCLUDE_STRING |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "cfghooks.h" |
| #include "tree.h" |
| #include "stringpool.h" |
| #include "attribs.h" |
| #include "rtl.h" |
| #include "df.h" |
| #include "alias.h" |
| #include "fold-const.h" |
| #include "calls.h" |
| #include "stor-layout.h" |
| #include "varasm.h" |
| #include "regs.h" |
| #include "insn-config.h" |
| #include "conditions.h" |
| #include "output.h" |
| #include "insn-attr.h" |
| #include "recog.h" |
| #include "diagnostic-core.h" |
| #include "flags.h" |
| #include "expmed.h" |
| #include "dojump.h" |
| #include "explow.h" |
| #include "memmodel.h" |
| #include "emit-rtl.h" |
| #include "stmt.h" |
| #include "expr.h" |
| #include "reload.h" |
| #include "tm_p.h" |
| #include "target.h" |
| #include "debug.h" |
| #include "cfgrtl.h" |
| #include "cfganal.h" |
| #include "lcm.h" |
| #include "cfgbuild.h" |
| #include "cfgcleanup.h" |
| /* ??? Need to add a dependency between m68k.o and sched-int.h. */ |
| #include "sched-int.h" |
| #include "insn-codes.h" |
| #include "opts.h" |
| #include "optabs.h" |
| #include "builtins.h" |
| #include "rtl-iter.h" |
| #include "toplev.h" |
| |
| /* This file should be included last. */ |
| #include "target-def.h" |
| |
/* Map from hard register number to register class: registers 0-7 are
   the data registers d0-d7, 8-15 the address registers a0-a7, and
   16-23 the floating-point registers fp0-fp7.  The final ADDR_REGS
   entry is a 25th fake register (presumably the arg pointer; see the
   target headers to confirm).  */
enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};
| |
| |
| /* The minimum number of integer registers that we want to save with the |
| movem instruction. Using two movel instructions instead of a single |
| moveml is about 15% faster for the 68020 and 68030 at no expense in |
| code size. */ |
| #define MIN_MOVEM_REGS 3 |
| |
| /* The minimum number of floating point registers that we want to save |
| with the fmovem instruction. */ |
| #define MIN_FMOVEM_REGS 1 |
| |
/* Structure describing stack frame layout.  Filled in by
   m68k_compute_frame_layout and cached in CURRENT_FRAME.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset, i.e. the combined size of
     the integer and FP register save areas.  */
  HOST_WIDE_INT offset;

  /* Size of the FPU register save area alone.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up to a multiple of 4).  */
  HOST_WIDE_INT size;

  /* Number of saved data/address registers and the bit mask saying
     which ones (bit I set means register D0_REG + I is saved).  */
  int reg_no;
  unsigned int reg_mask;

  /* Likewise for FPU registers (bits relative to FP0_REG).  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* Function which the above information refers to (used as the cache
     validity key).  */
  int funcdef_no;
};
| |
| /* Current frame information calculated by m68k_compute_frame_layout(). */ |
| static struct m68k_frame current_frame; |
| |
/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

   - BASE satisfies m68k_legitimate_base_reg_p
   - INDEX satisfies m68k_legitimate_index_reg_p
   - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  /* UNKNOWN for the general form above, or PRE_DEC/POST_INC for the
     auto-modify forms.  */
  enum rtx_code code;
  rtx base;
  rtx index;
  rtx offset;
  /* Multiplier applied to INDEX.  */
  int scale;
};
| |
| static int m68k_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int, |
| unsigned int); |
| static int m68k_sched_issue_rate (void); |
| static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int); |
| static void m68k_sched_md_init_global (FILE *, int, int); |
| static void m68k_sched_md_finish_global (FILE *, int); |
| static void m68k_sched_md_init (FILE *, int, int); |
| static void m68k_sched_dfa_pre_advance_cycle (void); |
| static void m68k_sched_dfa_post_advance_cycle (void); |
| static int m68k_sched_first_cycle_multipass_dfa_lookahead (void); |
| |
| static bool m68k_can_eliminate (const int, const int); |
| static void m68k_conditional_register_usage (void); |
| static bool m68k_legitimate_address_p (machine_mode, rtx, bool); |
| static void m68k_option_override (void); |
| static void m68k_override_options_after_change (void); |
| static rtx find_addr_reg (rtx); |
| static const char *singlemove_string (rtx *); |
| static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, |
| HOST_WIDE_INT, tree); |
| static rtx m68k_struct_value_rtx (tree, int); |
| static tree m68k_handle_fndecl_attribute (tree *node, tree name, |
| tree args, int flags, |
| bool *no_add_attrs); |
| static void m68k_compute_frame_layout (void); |
| static bool m68k_save_reg (unsigned int regno, bool interrupt_handler); |
| static bool m68k_ok_for_sibcall_p (tree, tree); |
| static bool m68k_tls_symbol_p (rtx); |
| static rtx m68k_legitimize_address (rtx, rtx, machine_mode); |
| static bool m68k_rtx_costs (rtx, machine_mode, int, int, int *, bool); |
| #if M68K_HONOR_TARGET_STRICT_ALIGNMENT |
| static bool m68k_return_in_memory (const_tree, const_tree); |
| #endif |
| static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED; |
| static void m68k_trampoline_init (rtx, tree, rtx); |
| static poly_int64 m68k_return_pops_args (tree, tree, poly_int64); |
| static rtx m68k_delegitimize_address (rtx); |
| static void m68k_function_arg_advance (cumulative_args_t, |
| const function_arg_info &); |
| static rtx m68k_function_arg (cumulative_args_t, const function_arg_info &); |
| static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x); |
| static bool m68k_output_addr_const_extra (FILE *, rtx); |
| static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED; |
| static enum flt_eval_method |
| m68k_excess_precision (enum excess_precision_type); |
| static unsigned int m68k_hard_regno_nregs (unsigned int, machine_mode); |
| static bool m68k_hard_regno_mode_ok (unsigned int, machine_mode); |
| static bool m68k_modes_tieable_p (machine_mode, machine_mode); |
| static machine_mode m68k_promote_function_mode (const_tree, machine_mode, |
| int *, const_tree, int); |
| static void m68k_asm_final_postscan_insn (FILE *, rtx_insn *insn, rtx [], int); |
| |
| /* Initialize the GCC target structure. */ |
| |
| #if INT_OP_GROUP == INT_OP_DOT_WORD |
| #undef TARGET_ASM_ALIGNED_HI_OP |
| #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t" |
| #endif |
| |
| #if INT_OP_GROUP == INT_OP_NO_DOT |
| #undef TARGET_ASM_BYTE_OP |
| #define TARGET_ASM_BYTE_OP "\tbyte\t" |
| #undef TARGET_ASM_ALIGNED_HI_OP |
| #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t" |
| #undef TARGET_ASM_ALIGNED_SI_OP |
| #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t" |
| #endif |
| |
| #if INT_OP_GROUP == INT_OP_DC |
| #undef TARGET_ASM_BYTE_OP |
| #define TARGET_ASM_BYTE_OP "\tdc.b\t" |
| #undef TARGET_ASM_ALIGNED_HI_OP |
| #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t" |
| #undef TARGET_ASM_ALIGNED_SI_OP |
| #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t" |
| #endif |
| |
| #undef TARGET_ASM_UNALIGNED_HI_OP |
| #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP |
| #undef TARGET_ASM_UNALIGNED_SI_OP |
| #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP |
| |
| #undef TARGET_ASM_OUTPUT_MI_THUNK |
| #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk |
| #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK |
| #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true |
| |
| #undef TARGET_ASM_FILE_START_APP_OFF |
| #define TARGET_ASM_FILE_START_APP_OFF true |
| |
| #undef TARGET_LEGITIMIZE_ADDRESS |
| #define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address |
| |
| #undef TARGET_SCHED_ADJUST_COST |
| #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost |
| |
| #undef TARGET_SCHED_ISSUE_RATE |
| #define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate |
| |
| #undef TARGET_SCHED_VARIABLE_ISSUE |
| #define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue |
| |
| #undef TARGET_SCHED_INIT_GLOBAL |
| #define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global |
| |
| #undef TARGET_SCHED_FINISH_GLOBAL |
| #define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global |
| |
| #undef TARGET_SCHED_INIT |
| #define TARGET_SCHED_INIT m68k_sched_md_init |
| |
| #undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE |
| #define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle |
| |
| #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE |
| #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle |
| |
| #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD |
| #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ |
| m68k_sched_first_cycle_multipass_dfa_lookahead |
| |
| #undef TARGET_OPTION_OVERRIDE |
| #define TARGET_OPTION_OVERRIDE m68k_option_override |
| |
| #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE |
| #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change |
| |
| #undef TARGET_RTX_COSTS |
| #define TARGET_RTX_COSTS m68k_rtx_costs |
| |
| #undef TARGET_ATTRIBUTE_TABLE |
| #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table |
| |
| #undef TARGET_PROMOTE_PROTOTYPES |
| #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true |
| |
| #undef TARGET_STRUCT_VALUE_RTX |
| #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx |
| |
| #undef TARGET_CANNOT_FORCE_CONST_MEM |
| #define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem |
| |
| #undef TARGET_FUNCTION_OK_FOR_SIBCALL |
| #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p |
| |
| #if M68K_HONOR_TARGET_STRICT_ALIGNMENT |
| #undef TARGET_RETURN_IN_MEMORY |
| #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory |
| #endif |
| |
| #ifdef HAVE_AS_TLS |
| #undef TARGET_HAVE_TLS |
| #define TARGET_HAVE_TLS (true) |
| |
| #undef TARGET_ASM_OUTPUT_DWARF_DTPREL |
| #define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel |
| #endif |
| |
| #undef TARGET_LRA_P |
| #define TARGET_LRA_P hook_bool_void_false |
| |
| #undef TARGET_LEGITIMATE_ADDRESS_P |
| #define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p |
| |
| #undef TARGET_CAN_ELIMINATE |
| #define TARGET_CAN_ELIMINATE m68k_can_eliminate |
| |
| #undef TARGET_CONDITIONAL_REGISTER_USAGE |
| #define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage |
| |
| #undef TARGET_TRAMPOLINE_INIT |
| #define TARGET_TRAMPOLINE_INIT m68k_trampoline_init |
| |
| #undef TARGET_RETURN_POPS_ARGS |
| #define TARGET_RETURN_POPS_ARGS m68k_return_pops_args |
| |
| #undef TARGET_DELEGITIMIZE_ADDRESS |
| #define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address |
| |
| #undef TARGET_FUNCTION_ARG |
| #define TARGET_FUNCTION_ARG m68k_function_arg |
| |
| #undef TARGET_FUNCTION_ARG_ADVANCE |
| #define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance |
| |
| #undef TARGET_LEGITIMATE_CONSTANT_P |
| #define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p |
| |
| #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA |
| #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra |
| |
| #undef TARGET_C_EXCESS_PRECISION |
| #define TARGET_C_EXCESS_PRECISION m68k_excess_precision |
| |
| /* The value stored by TAS. */ |
| #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL |
| #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128 |
| |
| #undef TARGET_HARD_REGNO_NREGS |
| #define TARGET_HARD_REGNO_NREGS m68k_hard_regno_nregs |
| #undef TARGET_HARD_REGNO_MODE_OK |
| #define TARGET_HARD_REGNO_MODE_OK m68k_hard_regno_mode_ok |
| |
| #undef TARGET_MODES_TIEABLE_P |
| #define TARGET_MODES_TIEABLE_P m68k_modes_tieable_p |
| |
| #undef TARGET_PROMOTE_FUNCTION_MODE |
| #define TARGET_PROMOTE_FUNCTION_MODE m68k_promote_function_mode |
| |
| #undef TARGET_HAVE_SPECULATION_SAFE_VALUE |
| #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed |
| |
| #undef TARGET_ASM_FINAL_POSTSCAN_INSN |
| #define TARGET_ASM_FINAL_POSTSCAN_INSN m68k_asm_final_postscan_insn |
| |
/* Machine-specific function attributes.  All three interrupt-related
   attributes are validated by m68k_handle_fndecl_attribute.  */
static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "interrupt", 0, 0, true, false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  { "interrupt_handler", 0, 0, true, false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  { "interrupt_thread", 0, 0, true, false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  /* Table terminator.  */
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
| |
| struct gcc_target targetm = TARGET_INITIALIZER; |
| |
/* Base flags for 68k ISAs.  Each level is the previous one plus the
   new features it introduces.  */
#define FL_FOR_isa_00 FL_ISA_68000
#define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
		       | FL_BITFIELD | FL_68881 | FL_CAS)
#define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
/* CPU32 gets the 68020 core ISA on top of the 68010 set, but (unlike
   FL_FOR_isa_20) neither bit-field instructions nor a default 68881.  */
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
| |
/* Identifier for each instruction-set level the port can target.  The
   enumerator names are token-pasted onto FL_FOR_ by the device table
   below to pick up the matching flag set.  */
enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  /* Sentinel / number of ISAs; used by the table terminators below.  */
  isa_max
};
| |
/* Information about one of the -march, -mcpu or -mtune arguments.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  /* FL_* capability flags (see the FL_FOR_* sets above).  */
  unsigned long flags;
};
| |
/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.
   Each device's flags are augmented with the base flags of its ISA via
   token-pasting onto FL_FOR_.  */
static const struct m68k_target_selection all_devices[] =
{
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  /* Sentinel terminating the table.  */
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
| |
/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  */
static const struct m68k_target_selection all_isas[] =
{
#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-isas.def"
#undef M68K_ISA
  /* Sentinel terminating the table.  */
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
| |
/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection.  */
static const struct m68k_target_selection all_microarchs[] =
{
#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-microarchs.def"
#undef M68K_MICROARCH
  /* Sentinel terminating the table.  */
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
| |
| /* The entries associated with the -mcpu, -march and -mtune settings, |
| or null for options that have not been used. */ |
| const struct m68k_target_selection *m68k_cpu_entry; |
| const struct m68k_target_selection *m68k_arch_entry; |
| const struct m68k_target_selection *m68k_tune_entry; |
| |
| /* Which CPU we are generating code for. */ |
| enum target_device m68k_cpu; |
| |
| /* Which microarchitecture to tune for. */ |
| enum uarch_type m68k_tune; |
| |
| /* Which FPU to use. */ |
| enum fpu_type m68k_fpu; |
| |
| /* The set of FL_* flags that apply to the target processor. */ |
| unsigned int m68k_cpu_flags; |
| |
| /* The set of FL_* flags that apply to the processor to be tuned for. */ |
| unsigned int m68k_tune_flags; |
| |
| /* Asm templates for calling or jumping to an arbitrary symbolic address, |
| or NULL if such calls or jumps are not supported. The address is held |
| in operand 0. */ |
| const char *m68k_symbolic_call; |
| const char *m68k_symbolic_jump; |
| |
| /* Enum variable that corresponds to m68k_symbolic_call values. */ |
| enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var; |
| |
| |
/* Implement TARGET_OPTION_OVERRIDE.  Resolve the -mcpu/-march/-mtune
   settings into the m68k_cpu/m68k_tune/m68k_fpu globals, derive default
   target flags, validate option combinations, and set up the symbolic
   call/jump templates and scheduling parameters.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  /* Map the -march/-mcpu/-mtune option values (indices into the tables
     above) to their table entries, where given.  */
  if (OPTION_SET_P (m68k_arch_option))
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (OPTION_SET_P (m68k_cpu_option))
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (OPTION_SET_P (m68k_tune_option))
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* User can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
	 or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
	  && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
	      || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
	warning (0, "%<-mcpu=%s%> conflicts with %<-march=%s%>",
		 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  /* Apply the derived defaults, but only for flags the user did not
     set explicitly on the command line.  */
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      m68k_tune_flags = all_devices[dev].flags;
      /* NOTE(review): this branch sets m68k_tune_flags but leaves
	 m68k_tune itself unassigned -- confirm whether m68k_tune should
	 also be taken from all_microarchs[M68K_DEFAULT_TUNE] here.  */
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
	      : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
	      : FPUTYPE_68881);

  /* Sanity check to ensure that msep-data and mid-shared-library are not
   * both specified together.  Doing so simply doesn't make sense.
   */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both %<-msep-data%> and %<-mid-shared-library%>");

  /* If we're generating code for a separate A5 relative data segment,
   * we've got to enable -fPIC as well.  This might be relaxable to
   * -fpic but it hasn't been tested properly.
   */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("%<-mpcrel%> %<-fPIC%> is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  /* Pick the asm templates for symbolic calls and jumps; they stay
     NULL where such references are not supported.  */
  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
	/* No unconditional long branch */;
      else if (TARGET_PCREL)
	m68k_symbolic_jump = "bra%.l %c0";
      else
	m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
	 function call to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
	 clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  /* Translate the enum chosen above into the call template.  */
  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  /* Without ASM_OUTPUT_ALIGN_WITH_NOP, label/loop alignments beyond
     2^2 bytes are rejected and forced back to 2^1.  */
  parse_alignment_opts ();
  int label_alignment = align_labels.levels[0].get_value ();
  if (label_alignment > 2)
    {
      warning (0, "%<-falign-labels=%d%> is not supported", label_alignment);
      str_align_labels = "1";
    }

  int loop_alignment = align_loops.levels[0].get_value ();
  if (loop_alignment > 2)
    {
      warning (0, "%<-falign-loops=%d%> is not supported", loop_alignment);
      str_align_loops = "1";
    }
#endif

  /* -fstack-limit-* is only implemented for 68020+ (see the symbolic
     stack-limit check emitted in the prologue).  */
  if ((opt_fstack_limit_symbol_arg != NULL || opt_fstack_limit_register_no >= 0)
      && !TARGET_68020)
    {
      warning (0, "%<-fstack-limit-%> options are not supported on this cpu");
      opt_fstack_limit_symbol_arg = NULL;
      opt_fstack_limit_register_no = -1;
    }

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Setup scheduling options.  Only the ColdFire V1-V4 pipelines are
     modelled; for anything else, disable the scheduling passes.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
	m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
	m68k_sched_mac = MAC_CF_MAC;
      else
	m68k_sched_mac = MAC_NO;
    }
}
| |
| /* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */ |
| |
| static void |
| m68k_override_options_after_change (void) |
| { |
| if (m68k_sched_cpu == CPU_UNKNOWN) |
| { |
| flag_schedule_insns = 0; |
| flag_schedule_insns_after_reload = 0; |
| flag_modulo_sched = 0; |
| flag_live_range_shrinkage = 0; |
| } |
| } |
| |
| /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the |
| given argument and NAME is the argument passed to -mcpu. Return NULL |
| if -mcpu was not passed. */ |
| |
| const char * |
| m68k_cpp_cpu_ident (const char *prefix) |
| { |
| if (!m68k_cpu_entry) |
| return NULL; |
| return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL); |
| } |
| |
| /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the |
| given argument and NAME is the name of the representative device for |
| the -mcpu argument's family. Return NULL if -mcpu was not passed. */ |
| |
| const char * |
| m68k_cpp_cpu_family (const char *prefix) |
| { |
| if (!m68k_cpu_entry) |
| return NULL; |
| return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL); |
| } |
| |
| /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or |
| "interrupt_handler" attribute and interrupt_thread if FUNC has an |
| "interrupt_thread" attribute. Otherwise, return |
| m68k_fk_normal_function. */ |
| |
| enum m68k_function_kind |
| m68k_get_function_kind (tree func) |
| { |
| tree a; |
| |
| gcc_assert (TREE_CODE (func) == FUNCTION_DECL); |
| |
| a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func)); |
| if (a != NULL_TREE) |
| return m68k_fk_interrupt_handler; |
| |
| a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func)); |
| if (a != NULL_TREE) |
| return m68k_fk_interrupt_handler; |
| |
| a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func)); |
| if (a != NULL_TREE) |
| return m68k_fk_interrupt_thread; |
| |
| return m68k_fk_normal_function; |
| } |
| |
| /* Handle an attribute requiring a FUNCTION_DECL; arguments as in |
| struct attribute_spec.handler. */ |
| static tree |
| m68k_handle_fndecl_attribute (tree *node, tree name, |
| tree args ATTRIBUTE_UNUSED, |
| int flags ATTRIBUTE_UNUSED, |
| bool *no_add_attrs) |
| { |
| if (TREE_CODE (*node) != FUNCTION_DECL) |
| { |
| warning (OPT_Wattributes, "%qE attribute only applies to functions", |
| name); |
| *no_add_attrs = true; |
| } |
| |
| if (m68k_get_function_kind (*node) != m68k_fk_normal_function) |
| { |
| error ("multiple interrupt attributes not allowed"); |
| *no_add_attrs = true; |
| } |
| |
| if (!TARGET_FIDOA |
| && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread")) |
| { |
| error ("%<interrupt_thread%> is available only on fido"); |
| *no_add_attrs = true; |
| } |
| |
| return NULL_TREE; |
| } |
| |
/* Compute the stack-frame layout for the current function into
   CURRENT_FRAME: the rounded local-frame size, which data/address and
   FP registers need saving, and the resulting save masks, counts and
   offsets.  The result is cached per function once reload has
   completed.  */
static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  /* Round the raw frame size up to a multiple of 4 bytes.  */
  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    /* Registers 0-15 are the data and address registers.  */
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
	{
	  mask |= 1 << (regno - D0_REG);
	  saved++;
	}
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
	/* Registers 16-23 are the FP registers.  */
	for (regno = 16; regno < 24; regno++)
	  if (m68k_save_reg (regno, interrupt_handler))
	    {
	      mask |= 1 << (regno - FP0_REG);
	      saved++;
	    }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}
| |
| /* Worker function for TARGET_CAN_ELIMINATE. */ |
| |
| bool |
| m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to) |
| { |
| return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true); |
| } |
| |
| HOST_WIDE_INT |
| m68k_initial_elimination_offset (int from, int to) |
| { |
| int argptr_offset; |
| /* The arg pointer points 8 bytes before the start of the arguments, |
| as defined by FIRST_PARM_OFFSET. This makes it coincident with the |
| frame pointer in most frames. */ |
| argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD; |
| if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM) |
| return argptr_offset; |
| |
| m68k_compute_frame_layout (); |
| |
| gcc_assert (to == STACK_POINTER_REGNUM); |
| switch (from) |
| { |
| case ARG_POINTER_REGNUM: |
| return current_frame.offset + current_frame.size - argptr_offset; |
| case FRAME_POINTER_REGNUM: |
| return current_frame.offset + current_frame.size; |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.

   The order of the checks below is significant: the PIC and EH cases
   can force a save of a register that the later fixed/unused tests
   would otherwise reject.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
	return true;
      if (crtl->uses_pic_offset_table)
	return true;
      /* Reload may introduce constant pool references into a function
	 that thitherto didn't need a PIC register.  Note that the test
	 above will not catch that case because we will only set
	 crtl->uses_pic_offset_table when emitting
	 the address reloads.  */
      if (crtl->uses_const_pool)
	return true;
    }

  /* EH-return data registers must be preserved for the unwinder.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
	return true;

      if (!crtl->is_leaf && call_used_or_fixed_reg_p (regno))
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_or_fixed_reg_p (regno);
}
| |
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  Returns the emitted insn.  */

static rtx_insn *
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  machine_mode mode;

  /* One SET per moved register, plus (optionally) one SET for the
     BASE adjustment.  */
  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* Move BASE down (store) or up (load) by the size of the whole
	 register block.  */
      src = plus_constant (Pmode, base,
			   (count
			    * GET_MODE_SIZE (mode)
			    * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (Pmode, base, offset);
	/* operands[0] is the SET destination: the memory slot when
	   storing (store_p, so !store_p == 0), the register when
	   loading.  */
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
| |
| /* Make INSN a frame-related instruction. */ |
| |
| static void |
| m68k_set_frame_related (rtx_insn *insn) |
| { |
| rtx body; |
| int i; |
| |
| RTX_FRAME_RELATED_P (insn) = 1; |
| body = PATTERN (insn); |
| if (GET_CODE (body) == PARALLEL) |
| for (i = 0; i < XVECLEN (body, 0); i++) |
| RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1; |
| } |
| |
/* Emit RTL for the "prologue" define_expand.

   The prologue (1) optionally checks the stack limit, (2) allocates
   the frame (via link or an explicit sp adjustment), (3) saves the FP
   registers, (4) saves the integer registers, and (5) loads the GOT
   pointer if needed.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      /* The +4 accounts for the return address already pushed.  */
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  /* Materialize the limit in D0 when it cannot be used as an
	     immediate.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      /* Trap if sp < limit.  */
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	/* The whole frame fits in link's displacement (or the 68020
	   long-displacement form is available).  */
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  /* Frame too large for a 16-bit link displacement: link with
	     a zero-sized frame and adjust sp separately.  */
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	/* 68881 fmovem supports pre-decrement: allocate and save in
	   one insn.  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	/* Pre-decrementing movem: allocate and save in one insn.  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  /* Set up the GOT pointer when the function uses the PIC offset table
     and TARGET_SEP_DATA does not make that unnecessary.  */
  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
| |
| /* Return true if a simple (return) instruction is sufficient for this |
| instruction (i.e. if no epilogue is needed). */ |
| |
| bool |
| m68k_use_return_insn (void) |
| { |
| if (!reload_completed || frame_pointer_needed || get_frame_size () != 0) |
| return false; |
| |
| m68k_compute_frame_layout (); |
| return current_frame.offset == 0; |
| } |
| |
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  /* BIG: frame-pointer-relative offsets don't fit in 16 bits, so the
     offset is materialized in A1 and added explicitly.
     RESTORE_FROM_SP: registers are popped off the stack pointer
     rather than loaded frame-pointer-relative.  */
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* Frame-pointer-relative restore with offsets too large for a 16-bit
     displacement.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  Point sp at the save area and pop.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_blockage ());
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  /* Use the -OFFSET(%fp,%a1.l) addressing form below.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      /* Restore the integer registers with a single movem.  */
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	/* Non-ColdFire can post-increment sp within the movem itself.  */
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      /* Restore the FP registers analogously.  */
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  /* Keep the scheduler from moving the restores past the frame
     deallocation.  */
  emit_insn (gen_blockage ());
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  /* Apply the extra stack adjustment requested by __builtin_eh_return.  */
  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
| |
| /* Return true if PARALLEL contains register REGNO. */ |
| static bool |
| m68k_reg_present_p (const_rtx parallel, unsigned int regno) |
| { |
| int i; |
| |
| if (REG_P (parallel) && REGNO (parallel) == regno) |
| return true; |
| |
| if (GET_CODE (parallel) != PARALLEL) |
| return false; |
| |
| for (i = 0; i < XVECLEN (parallel, 0); ++i) |
| { |
| const_rtx x; |
| |
| x = XEXP (XVECEXP (parallel, 0, i), 0); |
| if (REG_P (x) && REGNO (x) == regno) |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */ |
| |
| static bool |
| m68k_ok_for_sibcall_p (tree decl, tree exp) |
| { |
| enum m68k_function_kind kind; |
| |
| /* We cannot use sibcalls for nested functions because we use the |
| static chain register for indirect calls. */ |
| if (CALL_EXPR_STATIC_CHAIN (exp)) |
| return false; |
| |
| if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl)))) |
| { |
| /* Check that the return value locations are the same. For |
| example that we aren't returning a value from the sibling in |
| a D0 register but then need to transfer it to a A0 register. */ |
| rtx cfun_value; |
| rtx call_value; |
| |
| cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)), |
| cfun->decl); |
| call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl); |
| |
| /* Check that the values are equal or that the result the callee |
| function returns is superset of what the current function returns. */ |
| if (!(rtx_equal_p (cfun_value, call_value) |
| || (REG_P (cfun_value) |
| && m68k_reg_present_p (call_value, REGNO (cfun_value))))) |
| return false; |
| } |
| |
| kind = m68k_get_function_kind (current_function_decl); |
| if (kind == m68k_fk_normal_function) |
| /* We can always sibcall from a normal function, because it's |
| undefined if it is calling an interrupt function. */ |
| return true; |
| |
| /* Otherwise we can only sibcall if the function kind is known to be |
| the same. */ |
| if (decl && m68k_get_function_kind (decl) == kind) |
| return true; |
| |
| return false; |
| } |
| |
| /* On the m68k all args are always pushed. */ |
| |
| static rtx |
| m68k_function_arg (cumulative_args_t, const function_arg_info &) |
| { |
| return NULL_RTX; |
| } |
| |
| static void |
| m68k_function_arg_advance (cumulative_args_t cum_v, |
| const function_arg_info &arg) |
| { |
| CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); |
| |
| *cum += (arg.promoted_size_in_bytes () + 3) & ~3; |
| } |
| |
| /* Convert X to a legitimate function call memory reference and return the |
| result. */ |
| |
| rtx |
| m68k_legitimize_call_address (rtx x) |
| { |
| gcc_assert (MEM_P (x)); |
| if (call_operand (XEXP (x, 0), VOIDmode)) |
| return x; |
| return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0))); |
| } |
| |
| /* Likewise for sibling calls. */ |
| |
| rtx |
| m68k_legitimize_sibcall_address (rtx x) |
| { |
| gcc_assert (MEM_P (x)); |
| if (sibcall_operand (XEXP (x, 0), VOIDmode)) |
| return x; |
| |
| emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0)); |
| return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM)); |
| } |
| |
/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* CH records whether X has changed (initially set when a caller
	 already produced a new rtx); COPIED ensures X is copied at
	 most once before being modified in place.  */
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      /* Break any multiplication (scaled index) out into its own
	 register.  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      /* NOTE(review): ColdFire FPU float modes apparently
		 cannot use a REG+REG address here, so the whole sum
		 is forced into one register — confirm against the
		 fmove patterns.  */
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      /* One operand is a register (or a HImode sign-extension of one):
	 load the other operand into a register so the sum becomes
	 REG+REG.  */
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}
| |
/* For eliding comparisons, we remember how the flags were set.
   FLAGS_COMPARE_OP0 and FLAGS_COMPARE_OP1 are remembered for a direct
   comparison, they take priority.  FLAGS_OPERAND1 and FLAGS_OPERAND2
   are used in more cases, they are a fallback for comparisons against
   zero after a move or arithmetic insn.
   FLAGS_VALID is set to FLAGS_VALID_NO if we should not use any of
   these values.  */

/* The two operands of the most recent explicit comparison, if any.  */
static rtx flags_compare_op0, flags_compare_op1;
/* Fallback operands whose comparison against zero the flags reflect.  */
static rtx flags_operand1, flags_operand2;
/* How the remembered operands may be used; FLAGS_VALID_NO disables
   all elision.  */
static attr_flags_valid flags_valid = FLAGS_VALID_NO;
| |
/* Return a code other than UNKNOWN if we can elide a CODE comparison of
   OP0 with OP1, based on the remembered flags state.  A PLUS or MINUS
   return stands for a test of the N flag (set resp. clear).  */

rtx_code
m68k_find_flags_value (rtx op0, rtx op1, rtx_code code)
{
  /* A remembered direct comparison takes priority: reuse the code
     as-is, or swapped when the operands appear reversed.  */
  if (flags_compare_op0 != NULL_RTX)
    {
      if (rtx_equal_p (op0, flags_compare_op0)
	  && rtx_equal_p (op1, flags_compare_op1))
	return code;
      if (rtx_equal_p (op0, flags_compare_op1)
	  && rtx_equal_p (op1, flags_compare_op0))
	return swap_condition (code);
      return UNKNOWN;
    }

  /* The fallback operands only describe comparisons against zero.  */
  machine_mode mode = GET_MODE (op0);
  if (op1 != CONST0_RTX (mode))
    return UNKNOWN;
  /* Comparisons against 0 with these two should have been optimized out.  */
  gcc_assert (code != LTU && code != GEU);
  /* Without a reliable overflow flag, GT and LE cannot be derived.  */
  if (flags_valid == FLAGS_VALID_NOOV && (code == GT || code == LE))
    return UNKNOWN;
  if (rtx_equal_p (flags_operand1, op0) || rtx_equal_p (flags_operand2, op0))
    /* Integer GE/LT against zero reduce to N-flag tests.  */
    return (FLOAT_MODE_P (mode) ? code
	    : code == GE ? PLUS : code == LT ? MINUS : code);
  /* See if we are testing whether the high part of a DImode value is
     positive or negative and we have the full value as a remembered
     operand.  */
  if (code != GE && code != LT)
    return UNKNOWN;
  if (mode == SImode
      && flags_operand1 != NULL_RTX && GET_MODE (flags_operand1) == DImode
      && REG_P (flags_operand1) && REG_P (op0)
      && hard_regno_nregs (REGNO (flags_operand1), DImode) == 2
      && REGNO (flags_operand1) == REGNO (op0))
    return code == GE ? PLUS : MINUS;
  if (mode == SImode
      && flags_operand2 != NULL_RTX && GET_MODE (flags_operand2) == DImode
      && REG_P (flags_operand2) && REG_P (op0)
      && hard_regno_nregs (REGNO (flags_operand2), DImode) == 2
      && REGNO (flags_operand2) == REGNO (op0))
    return code == GE ? PLUS : MINUS;
  return UNKNOWN;
}
| |
| /* Called through CC_STATUS_INIT, which is invoked by final whenever a |
| label is encountered. */ |
| |
| void |
| m68k_init_cc () |
| { |
| flags_compare_op0 = flags_compare_op1 = NULL_RTX; |
| flags_operand1 = flags_operand2 = NULL_RTX; |
| flags_valid = FLAGS_VALID_NO; |
| } |
| |
/* Update flags for a move operation with OPERANDS.  Called for move
   operations where attr_flags_valid returns "set".  */

static void
handle_flags_for_move (rtx *operands)
{
  /* Any remembered direct comparison is invalidated.  */
  flags_compare_op0 = flags_compare_op1 = NULL_RTX;
  if (!ADDRESS_REG_P (operands[0]))
    {
      /* The move sets the flags from its operands: remember both
	 destination and source as compare-against-zero candidates,
	 unless they have side effects.  */
      flags_valid = FLAGS_VALID_MOVE;
      flags_operand1 = side_effects_p (operands[0]) ? NULL_RTX : operands[0];
      if (side_effects_p (operands[1])
	  /* ??? For mem->mem moves, this can discard the source as a
	     valid compare operand.  If you assume aligned moves, this
	     is unnecessary, but in theory, we could have an unaligned
	     move overwriting parts of its source.  */
	  || modified_in_p (operands[1], current_output_insn))
	flags_operand2 = NULL_RTX;
      else
	flags_operand2 = operands[1];
      return;
    }
  /* Move to an address register: FLAGS_VALID is left alone, but drop
     any remembered operand the insn overwrites.  */
  if (flags_operand1 != NULL_RTX
      && modified_in_p (flags_operand1, current_output_insn))
    flags_operand1 = NULL_RTX;
  if (flags_operand2 != NULL_RTX
      && modified_in_p (flags_operand2, current_output_insn))
    flags_operand2 = NULL_RTX;
}
| |
/* Process INSN to remember flag operands if possible.  Implements
   TARGET_ASM_FINAL_POSTSCAN_INSN; runs after each insn is output.  */

static void
m68k_asm_final_postscan_insn (FILE *, rtx_insn *insn, rtx [], int)
{
  enum attr_flags_valid v = get_attr_flags_valid (insn);
  /* FLAGS_VALID_SET insns maintain the remembered state themselves
     (e.g. moves via handle_flags_for_move).  */
  if (v == FLAGS_VALID_SET)
    return;
  /* Comparisons use FLAGS_VALID_SET, so we can be sure we need to clear these
     now.  */
  flags_compare_op0 = flags_compare_op1 = NULL_RTX;

  if (v == FLAGS_VALID_NO)
    {
      /* Flags clobbered unpredictably: forget everything.  */
      flags_operand1 = flags_operand2 = NULL_RTX;
      return;
    }
  else if (v == FLAGS_VALID_UNCHANGED)
    {
      /* Flags survive, but a remembered operand the insn overwrites is
	 no longer usable.  */
      if (flags_operand1 != NULL_RTX && modified_in_p (flags_operand1, insn))
	flags_operand1 = NULL_RTX;
      if (flags_operand2 != NULL_RTX && modified_in_p (flags_operand2, insn))
	flags_operand2 = NULL_RTX;
      return;
    }

  /* Remaining cases are single sets that leave the flags reflecting
     the destination (and, for moves, possibly the source).  */
  flags_valid = v;
  rtx set = single_set (insn);
  rtx dest = SET_DEST (set);
  rtx src = SET_SRC (set);
  if (side_effects_p (dest))
    dest = NULL_RTX;

  switch (v)
    {
    case FLAGS_VALID_YES:
    case FLAGS_VALID_NOOV:
      flags_operand1 = dest;
      flags_operand2 = NULL_RTX;
      break;
    case FLAGS_VALID_MOVE:
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      if (dest != NULL_RTX
	  && !FP_REG_P (dest)
	  && (FP_REG_P (src)
	      || GET_CODE (src) == FIX
	      || FLOAT_MODE_P (GET_MODE (dest))))
	flags_operand1 = flags_operand2 = NULL_RTX;
      else
	{
	  flags_operand1 = dest;
	  if (GET_MODE (src) != VOIDmode && !side_effects_p (src)
	      && !modified_in_p (src, insn))
	    flags_operand2 = src;
	  else
	    flags_operand2 = NULL_RTX;
	}
      break;
    default:
      gcc_unreachable ();
    }
  return;
}
| |
/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).
   OPERANDS are as in the two peepholes.  CODE is the code
   returned by m68k_output_branch_<mode>.  PLUS and MINUS denote tests
   of the N flag, as produced by m68k_find_flags_value.  */

void
output_dbcc_and_branch (rtx *operands, rtx_code code)
{
  switch (code)
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    case PLUS:
      /* NOTE(review): both N-flag cases jump with jle rather than
	 jpl/jmi — confirm this is intended in the peepholes that emit
	 this sequence.  */
      output_asm_insn ("dbpl %0,%l1\n\tjle %l2", operands);
      break;

    case MINUS:
      output_asm_insn ("dbmi %0,%l1\n\tjle %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case E_SImode:
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case E_HImode:
      break;

    default:
      gcc_unreachable ();
    }
}
| |
/* Output assembly to store the result of the DImode comparison
   OPERAND1 OP OPERAND2 into DEST with an scc instruction.  The high
   words are compared first; if they differ we branch straight to the
   scc, otherwise the low words are compared too.  Returns "" since all
   output is done through output_asm_insn.  */

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0]/[1] are the high/low words of operand1,
     loperands[2]/[3] likewise for operand2 (when nonzero),
     loperands[4] is the label reached when the high words differ,
     loperands[5] is DEST and loperands[6] an extra label for the
     signed cases.  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* Compare against zero with tst, or cmp.w #0 for address
	 registers on CPUs where tst cannot take them.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  /* For the unsigned (and equality) codes a single scc after label 4
     suffices.  The signed codes need an unsigned scc on the low-word
     path and a signed one on the high-word path, joined at label 6.  */
  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
| |
/* Output code to test bit COUNTOP of DATAOP.  SIGNPOS is the bit
   number of the sign bit of the storage unit being examined (e.g. 7
   for a byte).  CODE is the comparison (EQ/NE or a flag test) and the
   return value is the condition code the caller should actually branch
   on; PLUS and MINUS denote tests of the N flag.  */

rtx_code
m68k_output_btst (rtx countop, rtx dataop, rtx_code code, int signpos)
{
  rtx ops[2];
  ops[0] = countop;
  ops[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
	 advance to the containing unit of same size.  */
      if (count > signpos)
	{
	  int offset = (count & ~signpos) / 8;
	  count = count & signpos;
	  ops[1] = dataop = adjust_address (dataop, QImode, offset);
	}

      /* Testing the sign bit of a longword, word or byte reduces to a
	 tst of the appropriate width; the result is then an N-flag
	 test.  */
      if (code == EQ || code == NE)
	{
	  if (count == 31)
	    {
	      output_asm_insn ("tst%.l %1", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	  if (count == 15)
	    {
	      output_asm_insn ("tst%.w %1", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	  if (count == 7)
	    {
	      output_asm_insn ("tst%.b %1", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	}
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
	 On some m68k variants unfortunately that's slower than btst.
	 On 68000 and higher, that should also work for all HImode operands.  */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
	{
	  /* Bit 3 lands in the N flag, bit 2 in the Z flag (inverting
	     the sense of the test).  */
	  if (count == 3 && DATA_REG_P (ops[1]) && (code == EQ || code == NE))
	    {
	      output_asm_insn ("move%.w %1,%%ccr", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	  if (count == 2 && DATA_REG_P (ops[1]) && (code == EQ || code == NE))
	    {
	      output_asm_insn ("move%.w %1,%%ccr", ops);
	      return code == EQ ? NE : EQ;
	    }
	  /* count == 1 followed by bvc/bvs and
	     count == 0 followed by bcc/bcs are also possible, but need
	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags.  */
	}
    }
  /* General case: an explicit btst, keeping CODE unchanged.  */
  output_asm_insn ("btst %0,%1", ops);
  return code;
}
| |
| /* Output a bftst instruction for a zero_extract with ZXOP0, ZXOP1 and ZXOP2 |
| operands. CODE is the code of the comparison, and we return the code to |
| be actually used in the jump. */ |
| |
| rtx_code |
| m68k_output_bftst (rtx zxop0, rtx zxop1, rtx zxop2, rtx_code code) |
| { |
| if (zxop1 == const1_rtx && GET_CODE (zxop2) == CONST_INT) |
| { |
| int width = GET_CODE (zxop0) == REG ? 31 : 7; |
| /* Pass 1000 as SIGNPOS argument so that btst will |
| not think we are testing the sign bit for an `and' |
| and assume that nonzero implies a negative result. */ |
| return m68k_output_btst (GEN_INT (width - INTVAL (zxop2)), zxop0, code, 1000); |
| } |
| rtx ops[3] = { zxop0, zxop1, zxop2 }; |
| output_asm_insn ("bftst %0{%b2:%b1}", ops); |
| return code; |
| } |
| |
| /* Return true if X is a legitimate base register. STRICT_P says |
| whether we need strict checking. */ |
| |
| bool |
| m68k_legitimate_base_reg_p (rtx x, bool strict_p) |
| { |
| /* Allow SUBREG everywhere we allow REG. This results in better code. */ |
| if (!strict_p && GET_CODE (x) == SUBREG) |
| x = SUBREG_REG (x); |
| |
| return (REG_P (x) |
| && (strict_p |
| ? REGNO_OK_FOR_BASE_P (REGNO (x)) |
| : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x)))); |
| } |
| |
| /* Return true if X is a legitimate index register. STRICT_P says |
| whether we need strict checking. */ |
| |
| bool |
| m68k_legitimate_index_reg_p (rtx x, bool strict_p) |
| { |
| if (!strict_p && GET_CODE (x) == SUBREG) |
| x = SUBREG_REG (x); |
| |
| return (REG_P (x) |
| && (strict_p |
| ? REGNO_OK_FOR_INDEX_P (REGNO (x)) |
| : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x)))); |
| } |
| |
| /* Return true if X is a legitimate index expression for a (d8,An,Xn) or |
| (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of |
| ADDRESS if so. STRICT_P says whether we need strict checking. */ |
| |
| static bool |
| m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address) |
| { |
| int scale; |
| |
| /* Check for a scale factor. */ |
| scale = 1; |
| if ((TARGET_68020 || TARGET_COLDFIRE) |
| && GET_CODE (x) == MULT |
| && GET_CODE (XEXP (x, 1)) == CONST_INT |
| && (INTVAL (XEXP (x, 1)) == 2 |
| || INTVAL (XEXP (x, 1)) == 4 |
| || (INTVAL (XEXP (x, 1)) == 8 |
| && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE)))) |
| { |
| scale = INTVAL (XEXP (x, 1)); |
| x = XEXP (x, 0); |
| } |
| |
| /* Check for a word extension. */ |
| if (!TARGET_COLDFIRE |
| && GET_CODE (x) == SIGN_EXTEND |
| && GET_MODE (XEXP (x, 0)) == HImode) |
| x = XEXP (x, 0); |
| |
| if (m68k_legitimate_index_reg_p (x, strict_p)) |
| { |
| address->scale = scale; |
| address->index = x; |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Return true if X is an illegitimate symbolic constant. */ |
| |
| bool |
| m68k_illegitimate_symbolic_constant_p (rtx x) |
| { |
| rtx base, offset; |
| |
| if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P) |
| { |
| split_const (x, &base, &offset); |
| if (GET_CODE (base) == SYMBOL_REF |
| && !offset_within_block_p (base, INTVAL (offset))) |
| return true; |
| } |
| return m68k_tls_reference_p (x, false); |
| } |
| |
| /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */ |
| |
static bool
m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  /* Forbid spilling X into the constant pool whenever it is an
     illegitimate symbolic constant (out-of-block offset or raw TLS
     reference).  */
  return m68k_illegitimate_symbolic_constant_p (x);
}
| |
| /* Return true if X is a legitimate constant address that can reach |
| bytes in the range [X, X + REACH). STRICT_P says whether we need |
| strict checking. */ |
| |
| static bool |
| m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p) |
| { |
| rtx base, offset; |
| |
| if (!CONSTANT_ADDRESS_P (x)) |
| return false; |
| |
| if (flag_pic |
| && !(strict_p && TARGET_PCREL) |
| && symbolic_operand (x, VOIDmode)) |
| return false; |
| |
| if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1) |
| { |
| split_const (x, &base, &offset); |
| if (GET_CODE (base) == SYMBOL_REF |
| && !offset_within_block_p (base, INTVAL (offset) + reach - 1)) |
| return false; |
| } |
| |
| return !m68k_tls_reference_p (x, false); |
| } |
| |
| /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced |
| labels will become jump tables. */ |
| |
| static bool |
| m68k_jump_table_ref_p (rtx x) |
| { |
| if (GET_CODE (x) != LABEL_REF) |
| return false; |
| |
| rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0)); |
| if (!NEXT_INSN (insn) && !PREV_INSN (insn)) |
| return true; |
| |
| insn = next_nonnote_insn (insn); |
| return insn && JUMP_TABLE_DATA_P (insn); |
| } |
| |
| /* Return true if X is a legitimate address for values of mode MODE. |
| STRICT_P says whether strict checking is needed. If the address |
| is valid, describe its components in *ADDRESS. */ |
| |
static bool
m68k_decompose_address (machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  /* REACH is the number of bytes the address must be able to cover;
     BLKmode accesses only need to reach their first byte.  */
  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  The upper bound on the displacement
     is tightened by REACH so the last accessed byte stays in range.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
	 they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
	{
	  address->base = XEXP (x, 0);
	  address->offset = XEXP (x, 1);
	  return true;
	}
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  Pre-68020 only allows
	 an 8-bit displacement in indexed modes.  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index; try both operand
     orders, since canonicalization does not fix which side holds the
     base register.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }
  return false;
}
| |
| /* Return true if X is a legitimate address for values of mode MODE. |
| STRICT_P says whether strict checking is needed. */ |
| |
| bool |
| m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p) |
| { |
| struct m68k_address address; |
| |
| return m68k_decompose_address (mode, x, strict_p, &address); |
| } |
| |
| /* Return true if X is a memory, describing its address in ADDRESS if so. |
| Apply strict checking if called during or after reload. */ |
| |
| static bool |
| m68k_legitimate_mem_p (rtx x, struct m68k_address *address) |
| { |
| return (MEM_P (x) |
| && m68k_decompose_address (GET_MODE (x), XEXP (x, 0), |
| reload_in_progress || reload_completed, |
| address)); |
| } |
| |
| /* Implement TARGET_LEGITIMATE_CONSTANT_P. */ |
| |
| bool |
| m68k_legitimate_constant_p (machine_mode mode, rtx x) |
| { |
| return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x); |
| } |
| |
| /* Return true if X matches the 'Q' constraint. It must be a memory |
| with a base address and no constant offset or index. */ |
| |
| bool |
| m68k_matches_q_p (rtx x) |
| { |
| struct m68k_address address; |
| |
| return (m68k_legitimate_mem_p (x, &address) |
| && address.code == UNKNOWN |
| && address.base |
| && !address.offset |
| && !address.index); |
| } |
| |
| /* Return true if X matches the 'U' constraint. It must be a base address |
| with a constant offset and no index. */ |
| |
| bool |
| m68k_matches_u_p (rtx x) |
| { |
| struct m68k_address address; |
| |
| return (m68k_legitimate_mem_p (x, &address) |
| && address.code == UNKNOWN |
| && address.base |
| && address.offset |
| && !address.index); |
| } |
| |
| /* Return GOT pointer. */ |
| |
| static rtx |
| m68k_get_gp (void) |
| { |
| if (pic_offset_table_rtx == NULL_RTX) |
| pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG); |
| |
| crtl->uses_pic_offset_table = 1; |
| |
| return pic_offset_table_rtx; |
| } |
| |
| /* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC |
| wrappers. */ |
| enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO, |
| RELOC_TLSIE, RELOC_TLSLE }; |
| |
| #define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT) |
| |
| /* Wrap symbol X into unspec representing relocation RELOC. |
| BASE_REG - register that should be added to the result. |
| TEMP_REG - if non-null, temporary register. */ |
| |
| static rtx |
| m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg) |
| { |
| bool use_x_p; |
| |
| use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS; |
| |
| if (TARGET_COLDFIRE && use_x_p) |
| /* When compiling with -mx{got, tls} switch the code will look like this: |
| |
| move.l <X>@<RELOC>,<TEMP_REG> |
| add.l <BASE_REG>,<TEMP_REG> */ |
| { |
| /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra |
| to put @RELOC after reference. */ |
| x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)), |
| UNSPEC_RELOC32); |
| x = gen_rtx_CONST (Pmode, x); |
| |
| if (temp_reg == NULL) |
| { |
| gcc_assert (can_create_pseudo_p ()); |
| temp_reg = gen_reg_rtx (Pmode); |
| } |
| |
| emit_move_insn (temp_reg, x); |
| emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg)); |
| x = temp_reg; |
| } |
| else |
| { |
| x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)), |
| UNSPEC_RELOC16); |
| x = gen_rtx_CONST (Pmode, x); |
| |
| x = gen_rtx_PLUS (Pmode, base_reg, x); |
| } |
| |
| return x; |
| } |
| |
| /* Helper for m68k_unwrap_symbol. |
| Also, if unwrapping was successful (that is if (ORIG != <return value>)), |
| sets *RELOC_PTR to relocation type for the symbol. */ |
| |
| static rtx |
| m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p, |
| enum m68k_reloc *reloc_ptr) |
| { |
| if (GET_CODE (orig) == CONST) |
| { |
| rtx x; |
| enum m68k_reloc dummy; |
| |
| x = XEXP (orig, 0); |
| |
| if (reloc_ptr == NULL) |
| reloc_ptr = &dummy; |
| |
| /* Handle an addend. */ |
| if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS) |
| && CONST_INT_P (XEXP (x, 1))) |
| x = XEXP (x, 0); |
| |
| if (GET_CODE (x) == UNSPEC) |
| { |
| switch (XINT (x, 1)) |
| { |
| case UNSPEC_RELOC16: |
| orig = XVECEXP (x, 0, 0); |
| *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1)); |
| break; |
| |
| case UNSPEC_RELOC32: |
| if (unwrap_reloc32_p) |
| { |
| orig = XVECEXP (x, 0, 0); |
| *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1)); |
| } |
| break; |
| |
| default: |
| break; |
| } |
| } |
| } |
| |
| return orig; |
| } |
| |
| /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p, |
| UNSPEC_RELOC32 wrappers. */ |
| |
rtx
m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
{
  /* Delegate to the worker, discarding the relocation type.  */
  return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
}
| |
/* Adjust a decorated (@GOT/@TLS) address operand before outputting
   assembler for it.  */
| |
static void
m68k_adjust_decorated_operand (rtx op)
{
  /* Combine and, possibly, other optimizations may do good job
     converting
       (const (unspec [(symbol)]))
     into
       (const (plus (unspec [(symbol)])
		    (const_int N))).
     The problem with this is emitting @TLS or @GOT decorations.
     The decoration is emitted when processing (unspec), so the
     result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".

     It seems that the easiest solution to this is to convert such
     operands to
       (const (unspec [(plus (symbol)
			     (const_int N))])).
     Note, that the top level of operand remains intact, so we don't have
     to patch up anything outside of the operand.  */

  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
    {
      rtx x = *iter;
      /* m68k_unwrap_symbol returns something different from X exactly
	 when X is a (const ...) containing a RELOC16/RELOC32 unspec,
	 possibly with an addend.  */
      if (m68k_unwrap_symbol (x, true) != x)
	{
	  rtx plus;

	  gcc_assert (GET_CODE (x) == CONST);
	  plus = XEXP (x, 0);

	  /* Only the addend form needs rearranging; a bare
	     (const (unspec ...)) is already correct.  */
	  if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
	    {
	      rtx unspec;
	      rtx addend;

	      unspec = XEXP (plus, 0);
	      gcc_assert (GET_CODE (unspec) == UNSPEC);
	      addend = XEXP (plus, 1);
	      gcc_assert (CONST_INT_P (addend));

	      /* We now have all the pieces, rearrange them.  */

	      /* Move symbol to plus.  */
	      XEXP (plus, 0) = XVECEXP (unspec, 0, 0);

	      /* Move plus inside unspec.  */
	      XVECEXP (unspec, 0, 0) = plus;

	      /* Move unspec to top level of const.  */
	      XEXP (x, 0) = unspec;
	    }
	  /* The subexpressions of a wrapped symbol need no further
	     adjustment.  */
	  iter.skip_subrtxes ();
	}
    }
}
| |
| /* Move X to a register and add REG_EQUAL note pointing to ORIG. |
| If REG is non-null, use it; generate new pseudo otherwise. */ |
| |
| static rtx |
| m68k_move_to_reg (rtx x, rtx orig, rtx reg) |
| { |
| rtx_insn *insn; |
| |
| if (reg == NULL_RTX) |
| { |
| gcc_assert (can_create_pseudo_p ()); |
| reg = gen_reg_rtx (Pmode); |
| } |
| |
| insn = emit_move_insn (reg, x); |
| /* Put a REG_EQUAL note on this insn, so that it can be optimized |
| by loop. */ |
| set_unique_reg_note (insn, REG_EQUAL, orig); |
| |
| return reg; |
| } |
| |
| /* Does the same as m68k_wrap_symbol, but returns a memory reference to |
| GOT slot. */ |
| |
| static rtx |
| m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg) |
| { |
| x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg); |
| |
| x = gen_rtx_MEM (Pmode, x); |
| MEM_READONLY_P (x) = 1; |
| |
| return x; |
| } |
| |
| /* Legitimize PIC addresses. If the address is already |
| position-independent, we return ORIG. Newly generated |
| position-independent addresses go to REG. If we need more |
| than one register, we lose. |
| |
| An address is legitimized by making an indirect reference |
| through the Global Offset Table with the name of the symbol |
| used as an offset. |
| |
| The assembler and linker are responsible for placing the |
| address of the symbol in the GOT. The function prologue |
| is responsible for initializing a5 to the starting address |
| of the GOT. |
| |
| The assembler is also responsible for translating a symbol name |
| into a constant displacement from the start of the GOT. |
| |
| A quick example may make things a little clearer: |
| |
| When not generating PIC code to store the value 12345 into _foo |
| we would generate the following code: |
| |
| movel #12345, _foo |
| |
| When generating PIC two transformations are made. First, the compiler |
| loads the address of foo into a register. So the first transformation makes: |
| |
| lea _foo, a0 |
| movel #12345, a0@ |
| |
| The code in movsi will intercept the lea instruction and call this |
| routine which will transform the instructions into: |
| |
| movel a5@(_foo:w), a0 |
| movel #12345, a0@ |
| |
| |
| That (in a nutshell) is how *all* symbol and label references are |
| handled. */ |
| |
| rtx |
| legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED, |
| rtx reg) |
| { |
| rtx pic_ref = orig; |
| |
| /* First handle a simple SYMBOL_REF or LABEL_REF */ |
| if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF) |
| { |
| gcc_assert (reg); |
| |
| pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg); |
| pic_ref = m68k_move_to_reg (pic_ref, orig, reg); |
| } |
| else if (GET_CODE (orig) == CONST) |
| { |
| rtx base; |
| |
| /* Make sure this has not already been legitimized. */ |
| if (m68k_unwrap_symbol (orig, true) != orig) |
| return orig; |
| |
| gcc_assert (reg); |
| |
| /* legitimize both operands of the PLUS */ |
| gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS); |
| |
| base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg); |
| orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode, |
| base == reg ? 0 : reg); |
| |
| if (GET_CODE (orig) == CONST_INT) |
| pic_ref = plus_constant (Pmode, base, INTVAL (orig)); |
| else |
| pic_ref = gen_rtx_PLUS (Pmode, base, orig); |
| } |
| |
| return pic_ref; |
| } |
| |
| /* The __tls_get_addr symbol. */ |
| static GTY(()) rtx m68k_tls_get_addr; |
| |
| /* Return SYMBOL_REF for __tls_get_addr. */ |
| |
| static rtx |
| m68k_get_tls_get_addr (void) |
| { |
| if (m68k_tls_get_addr == NULL_RTX) |
| m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr"); |
| |
| return m68k_tls_get_addr; |
| } |
| |
| /* Return libcall result in A0 instead of usual D0. */ |
| static bool m68k_libcall_value_in_a0_p = false; |
| |
| /* Emit instruction sequence that calls __tls_get_addr. X is |
| the TLS symbol we are referencing and RELOC is the symbol type to use |
| (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence |
| emitted. A pseudo register with result of __tls_get_addr call is |
| returned. */ |
| |
static rtx
m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
{
  rtx a0;
  rtx_insn *insns;
  rtx dest;

  /* Emit the call sequence.  */
  start_sequence ();

  /* FIXME: Unfortunately, emit_library_call_value does not
     consider (plus (%a5) (const (unspec))) to be a good enough
     operand for push, so it forces it into a register.  The bad
     thing about this is that combiner, due to copy propagation and other
     optimizations, sometimes cannot later fix this.  As a consequence,
     additional register may be allocated resulting in a spill.
     For reference, see args processing loops in
     calls.cc:emit_library_call_value_1.
     For testcase, see gcc.target/m68k/tls-{gd, ld}.c  */
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);

  /* __tls_get_addr() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __tls_get_addr() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
				Pmode, x, Pmode);
  m68k_libcall_value_in_a0_p = false;

  insns = get_insns ();
  end_sequence ();

  /* Wrap the sequence in a libcall block so the whole call can be
     CSEd/moved as a unit, with EQV as its known value.  */
  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
| |
/* The __m68k_read_tp symbol.  */
| static GTY(()) rtx m68k_read_tp; |
| |
| /* Return SYMBOL_REF for __m68k_read_tp. */ |
| |
| static rtx |
| m68k_get_m68k_read_tp (void) |
| { |
| if (m68k_read_tp == NULL_RTX) |
| m68k_read_tp = init_one_libfunc ("__m68k_read_tp"); |
| |
| return m68k_read_tp; |
| } |
| |
| /* Emit instruction sequence that calls __m68k_read_tp. |
| A pseudo register with result of __m68k_read_tp call is returned. */ |
| |
static rtx
m68k_call_m68k_read_tp (void)
{
  rtx a0;
  rtx eqv;
  rtx_insn *insns;
  rtx dest;

  start_sequence ();

  /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __m68k_read_tp() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  /* Emit the call sequence.  */
  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
				Pmode);
  m68k_libcall_value_in_a0_p = false;
  insns = get_insns ();
  end_sequence ();

  /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
     share the m68k_read_tp result with other IE/LE model accesses.
     (const1_rtx distinguishes this from the TLSLDM equivalence, which
     uses const0_rtx.)  */
  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);

  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
| |
| /* Return a legitimized address for accessing TLS SYMBOL_REF X. |
| For explanations on instructions sequences see TLS/NPTL ABI for m68k and |
| ColdFire. */ |
| |
rtx
m68k_legitimize_tls_address (rtx orig)
{
  switch (SYMBOL_REF_TLS_MODEL (orig))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      /* GD: the address is the result of __tls_get_addr (sym@TLSGD).  */
      orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      {
	rtx eqv;
	rtx a0;
	rtx x;

	/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	   share the LDM result with other LD model accesses.  */
	eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			      UNSPEC_RELOC32);

	/* LD: one __tls_get_addr (sym@TLSLDM) call gives the module
	   base; each symbol is then sym@TLSLDO off that base.  */
	a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);

	x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_INITIAL_EXEC:
      {
	rtx a0;
	rtx x;

	/* IE: load the offset from the GOT slot (sym@TLSIE) and add
	   the thread pointer obtained from __m68k_read_tp.  */
	a0 = m68k_call_m68k_read_tp ();

	x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
	x = gen_rtx_PLUS (Pmode, x, a0);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_LOCAL_EXEC:
      {
	rtx a0;
	rtx x;

	/* LE: the offset (sym@TLSLE) is a link-time constant added
	   directly to the thread pointer.  */
	a0 = m68k_call_m68k_read_tp ();

	x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    default:
      gcc_unreachable ();
    }

  return orig;
}
| |
| /* Return true if X is a TLS symbol. */ |
| |
| static bool |
| m68k_tls_symbol_p (rtx x) |
| { |
| if (!TARGET_HAVE_TLS) |
| return false; |
| |
| if (GET_CODE (x) != SYMBOL_REF) |
| return false; |
| |
| return SYMBOL_REF_TLS_MODEL (x) != 0; |
| } |
| |
| /* If !LEGITIMATE_P, return true if X is a TLS symbol reference, |
| though illegitimate one. |
| If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */ |
| |
| bool |
| m68k_tls_reference_p (rtx x, bool legitimate_p) |
| { |
| if (!TARGET_HAVE_TLS) |
| return false; |
| |
| if (!legitimate_p) |
| { |
| subrtx_var_iterator::array_type array; |
| FOR_EACH_SUBRTX_VAR (iter, array, x, ALL) |
| { |
| rtx x = *iter; |
| |
| /* Note: this is not the same as m68k_tls_symbol_p. */ |
| if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0) |
| return true; |
| |
| /* Don't recurse into legitimate TLS references. */ |
| if (m68k_tls_reference_p (x, true)) |
| iter.skip_subrtxes (); |
| } |
| return false; |
| } |
| else |
| { |
| enum m68k_reloc reloc = RELOC_GOT; |
| |
| return (m68k_unwrap_symbol_1 (x, true, &reloc) != x |
| && TLS_RELOC_P (reloc)); |
| } |
| } |
| |
| |
| |
| #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255) |
| |
| /* Return the type of move that should be used for integer I. */ |
| |
| M68K_CONST_METHOD |
| m68k_const_method (HOST_WIDE_INT i) |
| { |
| unsigned u; |
| |
| if (USE_MOVQ (i)) |
| return MOVQ; |
| |
| /* The ColdFire doesn't have byte or word operations. */ |
| /* FIXME: This may not be useful for the m68060 either. */ |
| if (!TARGET_COLDFIRE) |
| { |
| /* if -256 < N < 256 but N is not in range for a moveq |
| N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */ |
| if (USE_MOVQ (i ^ 0xff)) |
| return NOTB; |
| /* Likewise, try with not.w */ |
| if (USE_MOVQ (i ^ 0xffff)) |
| return NOTW; |
| /* This is the only value where neg.w is useful */ |
| if (i == -65408) |
| return NEGW; |
| } |
| |
| /* Try also with swap. */ |
| u = i; |
| if (USE_MOVQ ((u >> 16) | (u << 16))) |
| return SWAP; |
| |
| if (TARGET_ISAB) |
| { |
| /* Try using MVZ/MVS with an immediate value to load constants. */ |
| if (i >= 0 && i <= 65535) |
| return MVZ; |
| if (i >= -32768 && i <= 32767) |
| return MVS; |
| } |
| |
| /* Otherwise, use move.l */ |
| return MOVL; |
| } |
| |
| /* Return the cost of moving constant I into a data register. */ |
| |
| static int |
| const_int_cost (HOST_WIDE_INT i) |
| { |
| switch (m68k_const_method (i)) |
| { |
| case MOVQ: |
| /* Constants between -128 and 127 are cheap due to moveq. */ |
| return 0; |
| case MVZ: |
| case MVS: |
| case NOTB: |
| case NOTW: |
| case NEGW: |
| case SWAP: |
| /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */ |
| return 1; |
| case MOVL: |
| return 2; |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
/* Compute the cost of rtx X, whose outer code is OUTER_CODE.  On
   success store the cost in *TOTAL and return true; return false to
   let the caller fall back to the default costing.  */

static bool
m68k_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
	 encourage creating tstsf and tstdf insns.  */
      if ((GET_RTX_CLASS (outer_code) == RTX_COMPARE
	   || GET_RTX_CLASS (outer_code) == RTX_COMM_COMPARE)
	  && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 4		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 3				\
   : TUNE_68000_10 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 2		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST				\
  (TARGET_CF_HWDIV ? 11				\
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (mode == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	{
	    /* lea an@(dx:l:i),am */
	    *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	    return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
	{
          *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (TUNE_68000_10)
	{
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      if (INTVAL (XEXP (x, 1)) < 16)
	        *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
	        /* We're using clrw + swap for these cases.  */
	        *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      /* A widening multiply (extended operand) costs the same as a
	 word multiply.  */
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && mode == SImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else if (mode == QImode || mode == HImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else
	*total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (mode == QImode || mode == HImode)
	*total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
	*total = COSTS_N_INSNS (18);
      else
	*total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    case ZERO_EXTRACT:
      /* A bit test used directly in a comparison is free (btst sets
	 the condition codes).  */
      if (GET_RTX_CLASS (outer_code) == RTX_COMPARE
	  || GET_RTX_CLASS (outer_code) == RTX_COMM_COMPARE)
	*total = 0;
      return false;

    default:
      return false;
    }
}
| |
| /* Return an instruction to move CONST_INT OPERANDS[1] into data register |
| OPERANDS[0]. */ |
| |
static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      return "mvzw %1,%0";
    case MVS:
      return "mvsw %1,%0";
    case MOVQ:
      return "moveq %1,%0";
    case NOTB:
      /* Two-insn sequence: invalidate any tracked condition-code
	 state.  Load the byte-complemented value with moveq, then
	 not.b restores the intended low byte.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      /* As NOTB, but complementing the low word.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      /* -65408 = neg.w of moveq #-128 (see m68k_const_method).  */
      CC_STATUS_INIT;
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	unsigned u = i;

	/* Load the halfword-swapped value, then swap the halves back.  */
	operands[1] = GEN_INT ((u << 16) | (u >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
| |
| /* Return true if I can be handled by ISA B's mov3q instruction. */ |
| |
| bool |
| valid_mov3q_const (HOST_WIDE_INT i) |
| { |
| return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7)); |
| } |
| |
/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0],
   which may be a register or a memory location.  */
| |
| static const char * |
| output_move_simode_const (rtx *operands) |
| { |
| rtx dest; |
| HOST_WIDE_INT src; |
| |
| dest = operands[0]; |
| src = INTVAL (operands[1]); |
| if (src == 0 |
| && (DATA_REG_P (dest) || MEM_P (dest)) |
| /* clr insns on 68000 read before writing. */ |
| && ((TARGET_68010 || TARGET_COLDFIRE) |
| || !(MEM_P (dest) && MEM_VOLATILE_P (dest)))) |
| return "clr%.l %0"; |
| else if (GET_MODE (dest) == SImode && valid_mov3q_const (src)) |
| return "mov3q%.l %1,%0"; |
| else if (src == 0 && ADDRESS_REG_P (dest)) |
| return "sub%.l %0,%0"; |
| else if (DATA_REG_P (dest)) |
| return output_move_const_into_data_reg (operands); |
| else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff)) |
| { |
| if (valid_mov3q_const (src)) |
| return "mov3q%.l %1,%0"; |
| return "move%.w %1,%0"; |
| } |
| else if (MEM_P (dest) |
| && GET_CODE (XEXP (dest, 0)) == PRE_DEC |
| && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM |
| && IN_RANGE (src, -0x8000, 0x7fff)) |
| { |
| if (valid_mov3q_const (src)) |
| return "mov3q%.l %1,%-"; |
|