| /* Output routines for GCC for Renesas / SuperH SH. |
| Copyright (C) 1993-2022 Free Software Foundation, Inc. |
| Contributed by Steve Chamberlain (sac@cygnus.com). |
| Improved by Jim Wilson (wilson@cygnus.com). |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include <sstream> |
| |
| #define IN_TARGET_CODE 1 |
| |
| #include "config.h" |
| #define INCLUDE_VECTOR |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "cfghooks.h" |
| #include "df.h" |
| #include "memmodel.h" |
| #include "tm_p.h" |
| #include "stringpool.h" |
| #include "attribs.h" |
| #include "optabs.h" |
| #include "emit-rtl.h" |
| #include "recog.h" |
| #include "diagnostic-core.h" |
| #include "alias.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "calls.h" |
| #include "varasm.h" |
| #include "flags.h" |
| #include "explow.h" |
| #include "expr.h" |
| #include "reload.h" |
| #include "output.h" |
| #include "insn-attr.h" |
| #include "dwarf2.h" |
| #include "langhooks.h" |
| #include "cfgrtl.h" |
| #include "intl.h" |
| #include "sched-int.h" |
| #include "gimplify.h" |
| #include "tm-constrs.h" |
| #include "opts.h" |
| #include "tree-pass.h" |
| #include "context.h" |
| #include "builtins.h" |
| #include "rtl-iter.h" |
| #include "regs.h" |
| #include "toplev.h" |
| |
| /* This file should be included last. */ |
| #include "target-def.h" |
| |
| int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch; |
| |
| #define CONST_OK_FOR_ADD(size) CONST_OK_FOR_I08 (size) |
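| /* Shorthands for the SImode move/add/sub expanders; these keep the stack |
| adjustment code later in this file readable. */ |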
| #define GEN_MOV (*(gen_movsi)) |
| #define GEN_ADD3 (*(gen_addsi3)) |
| #define GEN_SUB3 (*(gen_subsi3)) |
| |
| /* Used to simplify the logic below. Find the attributes wherever |
| they may be. */ |
| #define SH_ATTRIBUTES(decl) \ |
| (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \ |
| : DECL_ATTRIBUTES (decl) \ |
| ? (DECL_ATTRIBUTES (decl)) \ |
| : TYPE_ATTRIBUTES (TREE_TYPE (decl)) |
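| /* E.g. SH_ATTRIBUTES (fndecl) yields DECL_ATTRIBUTES (fndecl) when the |
| decl has attributes, otherwise the attributes of the decl's type. */ |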
| |
| /* Set to true by expand_prologue() when the function is an |
| interrupt handler. */ |
| bool current_function_interrupt; |
| |
| tree sh_deferred_function_attributes; |
| tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes; |
| |
| /* Global variables for machine-dependent things. */ |
| |
| /* Which cpu are we scheduling for. */ |
| enum processor_type sh_cpu; |
| |
| /* Definitions used in ready queue reordering for first scheduling pass. */ |
| |
| /* Reg weights arrays for modes SImode and SFmode, indexed by insn UID. */ |
| static short *regmode_weight[2]; |
| |
| /* Total SFmode and SImode weights of scheduled insns. */ |
| static int curr_regmode_pressure[2]; |
| |
| /* Number of r0 life regions. */ |
| static int r0_life_regions; |
| |
| /* If true, skip cycles for Q -> R movement. */ |
| static int skip_cycles = 0; |
| |
| /* Cached value of can_issue_more. This is cached in sh_variable_issue hook |
| and returned from sh_reorder2. */ |
| static short cached_can_issue_more; |
| |
| /* Unique number for UNSPEC_BBR pattern. */ |
| static unsigned int unspec_bbr_uid = 1; |
| |
| /* Provides the class number of the smallest class containing |
| reg number. */ |
| enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] = |
| { |
| R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, |
| FP0_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| FP_REGS, FP_REGS, FP_REGS, FP_REGS, |
| TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS, |
| TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS, |
| DF_REGS, DF_REGS, DF_REGS, DF_REGS, |
| DF_REGS, DF_REGS, DF_REGS, DF_REGS, |
| NO_REGS, GENERAL_REGS, PR_REGS, T_REGS, |
| MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS, |
| GENERAL_REGS, GENERAL_REGS, |
| }; |
| |
| char sh_register_names[FIRST_PSEUDO_REGISTER] \ |
| [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER; |
| |
| char sh_additional_register_names[ADDREGNAMES_SIZE] \ |
| [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1] |
| = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER; |
| |
| int assembler_dialect; |
| |
| static void split_branches (rtx_insn *); |
| static int branch_dest (rtx); |
| static void print_slot (rtx_sequence *); |
| static rtx_code_label *add_constant (rtx, machine_mode, rtx); |
| static void dump_table (rtx_insn *, rtx_insn *); |
| static bool broken_move (rtx_insn *); |
| static bool mova_p (rtx_insn *); |
| static rtx_insn *find_barrier (int, rtx_insn *, rtx_insn *); |
| static bool noncall_uses_reg (rtx, rtx_insn *, rtx *); |
| static rtx_insn *gen_block_redirect (rtx_insn *, int, int); |
| static void sh_reorg (void); |
| static void sh_option_override (void); |
| static void sh_override_options_after_change (void); |
| static void output_stack_adjust (int, rtx, int, HARD_REG_SET *, bool); |
| static rtx_insn* emit_frame_insn (rtx); |
| static rtx push (int); |
| static void pop (int); |
| static void push_regs (HARD_REG_SET* mask, bool interrupt_handler); |
| static int calc_live_regs (HARD_REG_SET *); |
| static HOST_WIDE_INT rounded_frame_size (int); |
| static bool sh_frame_pointer_required (void); |
| static void sh_emit_mode_set (int, int, int, HARD_REG_SET); |
| static int sh_mode_needed (int, rtx_insn *); |
| static int sh_mode_after (int, int, rtx_insn *); |
| static int sh_mode_entry (int); |
| static int sh_mode_exit (int); |
| static int sh_mode_priority (int entity, int n); |
| |
| static rtx mark_constant_pool_use (rtx); |
| static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, |
| int, bool *); |
| static tree sh_handle_resbank_handler_attribute (tree *, tree, |
| tree, int, bool *); |
| static tree sh2a_handle_function_vector_handler_attribute (tree *, tree, |
| tree, int, bool *); |
| static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *); |
| static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *); |
| static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *); |
| static void sh_print_operand (FILE *, rtx, int); |
| static void sh_print_operand_address (FILE *, machine_mode, rtx); |
| static bool sh_print_operand_punct_valid_p (unsigned char code); |
| static bool sh_asm_output_addr_const_extra (FILE *file, rtx x); |
| static void sh_output_function_epilogue (FILE *); |
| static void sh_insert_attributes (tree, tree *); |
| static const char *sh_check_pch_target_flags (int); |
| static int sh_register_move_cost (machine_mode, reg_class_t, reg_class_t); |
| static int sh_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int); |
| static int sh_issue_rate (void); |
| static int sh_dfa_new_cycle (FILE *, int, rtx_insn *, int, int, int *sort_p); |
| static short find_set_regmode_weight (rtx, machine_mode); |
| static short find_insn_regmode_weight (rtx, machine_mode); |
| static void find_regmode_weight (basic_block, machine_mode); |
| static int find_r0_life_regions (basic_block); |
| static void sh_md_init_global (FILE *, int, int); |
| static void sh_md_finish_global (FILE *, int); |
| static int rank_for_reorder (const void *, const void *); |
| static void swap_reorder (rtx_insn **, int); |
| static void ready_reorder (rtx_insn **, int); |
| static bool high_pressure (machine_mode); |
| static int sh_reorder (FILE *, int, rtx_insn **, int *, int); |
| static int sh_reorder2 (FILE *, int, rtx_insn **, int *, int); |
| static void sh_md_init (FILE *, int, int); |
| static int sh_variable_issue (FILE *, int, rtx_insn *, int); |
| |
| static bool sh_function_ok_for_sibcall (tree, tree); |
| |
| static bool sh_can_follow_jump (const rtx_insn *, const rtx_insn *); |
| static bool sh_ms_bitfield_layout_p (const_tree); |
| |
| static void sh_init_builtins (void); |
| static tree sh_builtin_decl (unsigned, bool); |
| static rtx sh_expand_builtin (tree, rtx, rtx, machine_mode, int); |
| static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, |
| HOST_WIDE_INT, tree); |
| static void sh_file_start (void); |
| static bool sh_assemble_integer (rtx, unsigned int, int); |
| static bool flow_dependent_p (rtx_insn *, rtx_insn *); |
| static void flow_dependent_p_1 (rtx, const_rtx, void *); |
| static int shiftcosts (rtx); |
| static int and_xor_ior_costs (rtx, int); |
| static int addsubcosts (rtx); |
| static int multcosts (rtx); |
| static bool unspec_caller_rtx_p (rtx); |
| static bool sh_cannot_copy_insn_p (rtx_insn *); |
| static bool sh_cannot_force_const_mem_p (machine_mode, rtx); |
| static bool sh_rtx_costs (rtx, machine_mode, int, int, int *, bool); |
| static int sh_address_cost (rtx, machine_mode, addr_space_t, bool); |
| static int sh_pr_n_sets (void); |
| static rtx sh_allocate_initial_value (rtx); |
| static reg_class_t sh_preferred_reload_class (rtx, reg_class_t); |
| static reg_class_t sh_secondary_reload (bool, rtx, reg_class_t, |
| machine_mode, |
| struct secondary_reload_info *); |
| static bool sh_legitimate_address_p (machine_mode, rtx, bool); |
| static rtx sh_legitimize_address (rtx, rtx, machine_mode); |
| static rtx sh_delegitimize_address (rtx); |
| static bool sh_cannot_substitute_mem_equiv_p (rtx); |
| static bool sh_legitimize_address_displacement (rtx *, rtx *, |
| poly_int64, machine_mode); |
| static int scavenge_reg (HARD_REG_SET *s); |
| |
| static rtx sh_struct_value_rtx (tree, int); |
| static rtx sh_function_value (const_tree, const_tree, bool); |
| static bool sh_function_value_regno_p (const unsigned int); |
| static rtx sh_libcall_value (machine_mode, const_rtx); |
| static bool sh_return_in_memory (const_tree, const_tree); |
| static rtx sh_builtin_saveregs (void); |
| static void sh_setup_incoming_varargs (cumulative_args_t, |
| const function_arg_info &, int *, int); |
| static bool sh_strict_argument_naming (cumulative_args_t); |
| static bool sh_pretend_outgoing_varargs_named (cumulative_args_t); |
| static void sh_atomic_assign_expand_fenv (tree *, tree *, tree *); |
| static tree sh_build_builtin_va_list (void); |
| static void sh_va_start (tree, rtx); |
| static tree sh_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *); |
| static bool sh_promote_prototypes (const_tree); |
| static machine_mode sh_promote_function_mode (const_tree type, |
| machine_mode, |
| int *punsignedp, |
| const_tree funtype, |
| int for_return); |
| static bool sh_pass_by_reference (cumulative_args_t, |
| const function_arg_info &); |
| static bool sh_callee_copies (cumulative_args_t, const function_arg_info &); |
| static int sh_arg_partial_bytes (cumulative_args_t, const function_arg_info &); |
| static void sh_function_arg_advance (cumulative_args_t, |
| const function_arg_info &); |
| static rtx sh_function_arg (cumulative_args_t, const function_arg_info &); |
| static int sh_dwarf_calling_convention (const_tree); |
| static void sh_encode_section_info (tree, rtx, int); |
| static bool sh2a_function_vector_p (tree); |
| static void sh_trampoline_init (rtx, tree, rtx); |
| static rtx sh_trampoline_adjust_address (rtx); |
| static void sh_conditional_register_usage (void); |
| static bool sh_legitimate_constant_p (machine_mode, rtx); |
| static int mov_insn_size (machine_mode, bool); |
| static int mov_insn_alignment_mask (machine_mode, bool); |
| static bool sh_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT, |
| unsigned int, |
| enum by_pieces_operation, |
| bool); |
| static bool sequence_insn_p (rtx_insn *); |
| static void sh_canonicalize_comparison (int *, rtx *, rtx *, bool); |
| static void sh_canonicalize_comparison (enum rtx_code&, rtx&, rtx&, |
| machine_mode, bool); |
| static bool sh_legitimate_combined_insn (rtx_insn* insn); |
| |
| static bool sh_fixed_condition_code_regs (unsigned int* p1, unsigned int* p2); |
| |
| static void sh_init_sync_libfuncs (void) ATTRIBUTE_UNUSED; |
| static unsigned int sh_hard_regno_nregs (unsigned int, machine_mode); |
| static bool sh_hard_regno_mode_ok (unsigned int, machine_mode); |
| static bool sh_modes_tieable_p (machine_mode, machine_mode); |
| static bool sh_can_change_mode_class (machine_mode, machine_mode, reg_class_t); |
| |
| static const struct attribute_spec sh_attribute_table[] = |
| { |
| /* { name, min_len, max_len, decl_req, type_req, fn_type_req, |
| affects_type_identity, handler, exclude } */ |
| { "interrupt_handler", 0, 0, true, false, false, false, |
| sh_handle_interrupt_handler_attribute, NULL }, |
| { "sp_switch", 1, 1, true, false, false, false, |
| sh_handle_sp_switch_attribute, NULL }, |
| { "trap_exit", 1, 1, true, false, false, false, |
| sh_handle_trap_exit_attribute, NULL }, |
| { "renesas", 0, 0, false, true, false, false, |
| sh_handle_renesas_attribute, NULL }, |
| { "trapa_handler", 0, 0, true, false, false, false, |
| sh_handle_interrupt_handler_attribute, NULL }, |
| { "nosave_low_regs", 0, 0, true, false, false, false, |
| sh_handle_interrupt_handler_attribute, NULL }, |
| { "resbank", 0, 0, true, false, false, false, |
| sh_handle_resbank_handler_attribute, NULL }, |
| { "function_vector", 1, 1, true, false, false, false, |
| sh2a_handle_function_vector_handler_attribute, NULL }, |
| { NULL, 0, 0, false, false, false, false, NULL, NULL } |
| }; |
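| /* For reference, typical uses of these attributes in user code look like |
| the following (illustrative declarations): |
| void __attribute__ ((interrupt_handler)) isr (void); |
| void __attribute__ ((trap_exit (4))) trap_isr (void); */ |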
| |
| /* Initialize the GCC target structure. */ |
| #undef TARGET_ATTRIBUTE_TABLE |
| #define TARGET_ATTRIBUTE_TABLE sh_attribute_table |
| |
| /* The next two are used for debug info when compiling with -gdwarf. */ |
| #undef TARGET_ASM_UNALIGNED_HI_OP |
| #define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t" |
| #undef TARGET_ASM_UNALIGNED_SI_OP |
| #define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t" |
| |
| #undef TARGET_OPTION_OVERRIDE |
| #define TARGET_OPTION_OVERRIDE sh_option_override |
| |
| #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE |
| #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \ |
| sh_override_options_after_change |
| |
| #undef TARGET_PRINT_OPERAND |
| #define TARGET_PRINT_OPERAND sh_print_operand |
| #undef TARGET_PRINT_OPERAND_ADDRESS |
| #define TARGET_PRINT_OPERAND_ADDRESS sh_print_operand_address |
| #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P |
| #define TARGET_PRINT_OPERAND_PUNCT_VALID_P sh_print_operand_punct_valid_p |
| #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA |
| #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA sh_asm_output_addr_const_extra |
| |
| #undef TARGET_ASM_FUNCTION_EPILOGUE |
| #define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue |
| |
| #undef TARGET_ASM_OUTPUT_MI_THUNK |
| #define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk |
| |
| #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK |
| #define TARGET_ASM_CAN_OUTPUT_MI_THUNK \ |
| hook_bool_const_tree_hwi_hwi_const_tree_true |
| |
| #undef TARGET_ASM_FILE_START |
| #define TARGET_ASM_FILE_START sh_file_start |
| #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE |
| #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true |
| |
| #undef TARGET_ASM_INTEGER |
| #define TARGET_ASM_INTEGER sh_assemble_integer |
| |
| #undef TARGET_REGISTER_MOVE_COST |
| #define TARGET_REGISTER_MOVE_COST sh_register_move_cost |
| |
| #undef TARGET_INSERT_ATTRIBUTES |
| #define TARGET_INSERT_ATTRIBUTES sh_insert_attributes |
| |
| #undef TARGET_SCHED_ADJUST_COST |
| #define TARGET_SCHED_ADJUST_COST sh_adjust_cost |
| |
| #undef TARGET_SCHED_ISSUE_RATE |
| #define TARGET_SCHED_ISSUE_RATE sh_issue_rate |
| |
| /* The following scheduler hooks have been implemented to re-enable sched1. |
| With the help of these macros we limit the movement of insns in sched1 in |
| order to reduce register pressure. The overall idea is to keep count of |
| the SImode and SFmode regs required by already scheduled insns. When these |
| counts cross some threshold values, give priority to insns that free |
| registers. The insn that frees registers is most likely the insn with the |
| lowest LUID (original insn order); but such an insn might be sitting in |
| the stalled queue (Q) instead of the ready queue (R). To solve this, we |
| skip up to a maximum of 8 cycles so that such insns can move from Q -> R. |
| |
| The hooks are described below: |
| |
| TARGET_SCHED_INIT_GLOBAL: A new target hook in the generic scheduler; it |
| is called inside the sched_init function just after the call to |
| find_insn_reg_weights. It is used to calculate the SImode and SFmode |
| weights of the insns of basic blocks, much like what |
| find_insn_reg_weights does. |
| TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook. |
| |
| TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is |
| indicated by TARGET_SCHED_REORDER2; doing this may move insns from |
| (Q)->(R). |
| |
| TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is |
| high, reorder the ready queue so that the insn with the lowest LUID will |
| be issued next. |
| |
| TARGET_SCHED_REORDER2: If the register pressure is high, indicate to |
| TARGET_SCHED_DFA_NEW_CYCLE to skip cycles. |
| |
| TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it |
| can be returned from TARGET_SCHED_REORDER2. |
| |
| TARGET_SCHED_INIT: Reset the register pressure counting variables. */ |
| |
| #undef TARGET_SCHED_DFA_NEW_CYCLE |
| #define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle |
| |
| #undef TARGET_SCHED_INIT_GLOBAL |
| #define TARGET_SCHED_INIT_GLOBAL sh_md_init_global |
| |
| #undef TARGET_SCHED_FINISH_GLOBAL |
| #define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global |
| |
| #undef TARGET_SCHED_VARIABLE_ISSUE |
| #define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue |
| |
| #undef TARGET_SCHED_REORDER |
| #define TARGET_SCHED_REORDER sh_reorder |
| |
| #undef TARGET_SCHED_REORDER2 |
| #define TARGET_SCHED_REORDER2 sh_reorder2 |
| |
| #undef TARGET_SCHED_INIT |
| #define TARGET_SCHED_INIT sh_md_init |
| |
| #undef TARGET_DELEGITIMIZE_ADDRESS |
| #define TARGET_DELEGITIMIZE_ADDRESS sh_delegitimize_address |
| |
| #undef TARGET_LEGITIMIZE_ADDRESS |
| #define TARGET_LEGITIMIZE_ADDRESS sh_legitimize_address |
| |
| #undef TARGET_CAN_FOLLOW_JUMP |
| #define TARGET_CAN_FOLLOW_JUMP sh_can_follow_jump |
| |
| #undef TARGET_MS_BITFIELD_LAYOUT_P |
| #define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p |
| |
| #undef TARGET_INIT_BUILTINS |
| #define TARGET_INIT_BUILTINS sh_init_builtins |
| #undef TARGET_BUILTIN_DECL |
| #define TARGET_BUILTIN_DECL sh_builtin_decl |
| #undef TARGET_EXPAND_BUILTIN |
| #define TARGET_EXPAND_BUILTIN sh_expand_builtin |
| |
| #undef TARGET_FUNCTION_OK_FOR_SIBCALL |
| #define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall |
| |
| #undef TARGET_CANNOT_COPY_INSN_P |
| #define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p |
| #undef TARGET_RTX_COSTS |
| #define TARGET_RTX_COSTS sh_rtx_costs |
| #undef TARGET_ADDRESS_COST |
| #define TARGET_ADDRESS_COST sh_address_cost |
| #undef TARGET_ALLOCATE_INITIAL_VALUE |
| #define TARGET_ALLOCATE_INITIAL_VALUE sh_allocate_initial_value |
| |
| #undef TARGET_MACHINE_DEPENDENT_REORG |
| #define TARGET_MACHINE_DEPENDENT_REORG sh_reorg |
| |
| #undef TARGET_DWARF_REGISTER_SPAN |
| #define TARGET_DWARF_REGISTER_SPAN sh_dwarf_register_span |
| |
| #ifdef HAVE_AS_TLS |
| #undef TARGET_HAVE_TLS |
| #define TARGET_HAVE_TLS true |
| #endif |
| |
| #undef TARGET_PROMOTE_PROTOTYPES |
| #define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes |
| #undef TARGET_PROMOTE_FUNCTION_MODE |
| #define TARGET_PROMOTE_FUNCTION_MODE sh_promote_function_mode |
| |
| #undef TARGET_FUNCTION_VALUE |
| #define TARGET_FUNCTION_VALUE sh_function_value |
| #undef TARGET_FUNCTION_VALUE_REGNO_P |
| #define TARGET_FUNCTION_VALUE_REGNO_P sh_function_value_regno_p |
| #undef TARGET_LIBCALL_VALUE |
| #define TARGET_LIBCALL_VALUE sh_libcall_value |
| #undef TARGET_STRUCT_VALUE_RTX |
| #define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx |
| #undef TARGET_RETURN_IN_MEMORY |
| #define TARGET_RETURN_IN_MEMORY sh_return_in_memory |
| |
| #undef TARGET_EXPAND_BUILTIN_SAVEREGS |
| #define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs |
| #undef TARGET_SETUP_INCOMING_VARARGS |
| #define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs |
| #undef TARGET_STRICT_ARGUMENT_NAMING |
| #define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming |
| #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED |
| #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named |
| #undef TARGET_MUST_PASS_IN_STACK |
| #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size |
| #undef TARGET_PASS_BY_REFERENCE |
| #define TARGET_PASS_BY_REFERENCE sh_pass_by_reference |
| #undef TARGET_CALLEE_COPIES |
| #define TARGET_CALLEE_COPIES sh_callee_copies |
| #undef TARGET_ARG_PARTIAL_BYTES |
| #define TARGET_ARG_PARTIAL_BYTES sh_arg_partial_bytes |
| #undef TARGET_FUNCTION_ARG |
| #define TARGET_FUNCTION_ARG sh_function_arg |
| #undef TARGET_FUNCTION_ARG_ADVANCE |
| #define TARGET_FUNCTION_ARG_ADVANCE sh_function_arg_advance |
| |
| #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV |
| #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sh_atomic_assign_expand_fenv |
| |
| #undef TARGET_BUILD_BUILTIN_VA_LIST |
| #define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list |
| #undef TARGET_EXPAND_BUILTIN_VA_START |
| #define TARGET_EXPAND_BUILTIN_VA_START sh_va_start |
| #undef TARGET_GIMPLIFY_VA_ARG_EXPR |
| #define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr |
| |
| #undef TARGET_VECTOR_MODE_SUPPORTED_P |
| #define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p |
| |
| #undef TARGET_CHECK_PCH_TARGET_FLAGS |
| #define TARGET_CHECK_PCH_TARGET_FLAGS sh_check_pch_target_flags |
| |
| #undef TARGET_DWARF_CALLING_CONVENTION |
| #define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention |
| |
| #undef TARGET_FRAME_POINTER_REQUIRED |
| #define TARGET_FRAME_POINTER_REQUIRED sh_frame_pointer_required |
| |
| #undef TARGET_MODE_EMIT |
| #define TARGET_MODE_EMIT sh_emit_mode_set |
| |
| #undef TARGET_MODE_NEEDED |
| #define TARGET_MODE_NEEDED sh_mode_needed |
| |
| #undef TARGET_MODE_AFTER |
| #define TARGET_MODE_AFTER sh_mode_after |
| |
| #undef TARGET_MODE_ENTRY |
| #define TARGET_MODE_ENTRY sh_mode_entry |
| |
| #undef TARGET_MODE_EXIT |
| #define TARGET_MODE_EXIT sh_mode_exit |
| |
| #undef TARGET_MODE_PRIORITY |
| #define TARGET_MODE_PRIORITY sh_mode_priority |
| |
| /* Return regmode weight for insn. */ |
| #define INSN_REGMODE_WEIGHT(INSN, MODE)\ |
| regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)] |
| |
| /* Return current register pressure for regmode. */ |
| #define CURR_REGMODE_PRESSURE(MODE)\ |
| curr_regmode_pressure[((MODE) == SImode) ? 0 : 1] |
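| /* In both of the above, index 0 tracks SImode and index 1 tracks SFmode, |
| matching the layout of regmode_weight and curr_regmode_pressure. */ |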
| |
| #undef TARGET_ENCODE_SECTION_INFO |
| #define TARGET_ENCODE_SECTION_INFO sh_encode_section_info |
| |
| #undef TARGET_LRA_P |
| #define TARGET_LRA_P sh_lra_p |
| |
| #undef TARGET_SECONDARY_RELOAD |
| #define TARGET_SECONDARY_RELOAD sh_secondary_reload |
| |
| #undef TARGET_PREFERRED_RELOAD_CLASS |
| #define TARGET_PREFERRED_RELOAD_CLASS sh_preferred_reload_class |
| |
| #undef TARGET_CONDITIONAL_REGISTER_USAGE |
| #define TARGET_CONDITIONAL_REGISTER_USAGE sh_conditional_register_usage |
| |
| #undef TARGET_LEGITIMATE_ADDRESS_P |
| #define TARGET_LEGITIMATE_ADDRESS_P sh_legitimate_address_p |
| |
| #undef TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P |
| #define TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P sh_cannot_substitute_mem_equiv_p |
| |
| #undef TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT |
| #define TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT \ |
| sh_legitimize_address_displacement |
| |
| #undef TARGET_TRAMPOLINE_INIT |
| #define TARGET_TRAMPOLINE_INIT sh_trampoline_init |
| #undef TARGET_TRAMPOLINE_ADJUST_ADDRESS |
| #define TARGET_TRAMPOLINE_ADJUST_ADDRESS sh_trampoline_adjust_address |
| |
| #undef TARGET_LEGITIMATE_CONSTANT_P |
| #define TARGET_LEGITIMATE_CONSTANT_P sh_legitimate_constant_p |
| |
| #undef TARGET_CANONICALIZE_COMPARISON |
| #define TARGET_CANONICALIZE_COMPARISON sh_canonicalize_comparison |
| |
| #undef TARGET_LEGITIMATE_COMBINED_INSN |
| #define TARGET_LEGITIMATE_COMBINED_INSN sh_legitimate_combined_insn |
| |
| #undef TARGET_FIXED_CONDITION_CODE_REGS |
| #define TARGET_FIXED_CONDITION_CODE_REGS sh_fixed_condition_code_regs |
| |
| #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P |
| #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \ |
| sh_use_by_pieces_infrastructure_p |
| |
| /* Machine-specific symbol_ref flags. */ |
| #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0) |
| |
| /* The tas.b instruction sets bit 7 in the byte, i.e. 0x80. This value |
| is used by optabs.cc atomic op expansion code as well as in sync.md. */ |
| #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL |
| #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0x80 |
| |
| #undef TARGET_CANNOT_FORCE_CONST_MEM |
| #define TARGET_CANNOT_FORCE_CONST_MEM sh_cannot_force_const_mem_p |
| |
| #undef TARGET_HARD_REGNO_NREGS |
| #define TARGET_HARD_REGNO_NREGS sh_hard_regno_nregs |
| #undef TARGET_HARD_REGNO_MODE_OK |
| #define TARGET_HARD_REGNO_MODE_OK sh_hard_regno_mode_ok |
| |
| #undef TARGET_MODES_TIEABLE_P |
| #define TARGET_MODES_TIEABLE_P sh_modes_tieable_p |
| |
| #undef TARGET_CAN_CHANGE_MODE_CLASS |
| #define TARGET_CAN_CHANGE_MODE_CLASS sh_can_change_mode_class |
| |
| #undef TARGET_CONSTANT_ALIGNMENT |
| #define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings |
| |
| #undef TARGET_HAVE_SPECULATION_SAFE_VALUE |
| #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed |
| |
| struct gcc_target targetm = TARGET_INITIALIZER; |
| |
| |
| /* Information on the currently selected atomic model. |
| This is initialized in sh_option_override. */ |
| static sh_atomic_model selected_atomic_model_; |
| |
| const sh_atomic_model& |
| selected_atomic_model (void) |
| { |
| return selected_atomic_model_; |
| } |
| |
| static sh_atomic_model |
| parse_validate_atomic_model_option (const char* str) |
| { |
| const char* model_names[sh_atomic_model::num_models]; |
| model_names[sh_atomic_model::none] = "none"; |
| model_names[sh_atomic_model::soft_gusa] = "soft-gusa"; |
| model_names[sh_atomic_model::hard_llcs] = "hard-llcs"; |
| model_names[sh_atomic_model::soft_tcb] = "soft-tcb"; |
| model_names[sh_atomic_model::soft_imask] = "soft-imask"; |
| |
| const char* model_cdef_names[sh_atomic_model::num_models]; |
| model_cdef_names[sh_atomic_model::none] = "NONE"; |
| model_cdef_names[sh_atomic_model::soft_gusa] = "SOFT_GUSA"; |
| model_cdef_names[sh_atomic_model::hard_llcs] = "HARD_LLCS"; |
| model_cdef_names[sh_atomic_model::soft_tcb] = "SOFT_TCB"; |
| model_cdef_names[sh_atomic_model::soft_imask] = "SOFT_IMASK"; |
| |
| sh_atomic_model ret; |
| ret.type = sh_atomic_model::none; |
| ret.name = model_names[sh_atomic_model::none]; |
| ret.cdef_name = model_cdef_names[sh_atomic_model::none]; |
| ret.strict = false; |
| ret.tcb_gbr_offset = -1; |
| |
| /* Handle empty string as 'none'. */ |
| if (str == NULL || *str == '\0') |
| return ret; |
| |
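| /* err_ret emits the diagnostic and returns the model assembled so far; |
| since RET starts out as 'none', a bad option degrades to the 'none' |
| model after the error message. */ |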
| #define err_ret(...) do { error (__VA_ARGS__); return ret; } while (0) |
| |
| std::vector<std::string> tokens; |
| for (std::stringstream ss (str); ss.good (); ) |
| { |
| tokens.push_back (std::string ()); |
| std::getline (ss, tokens.back (), ','); |
| } |
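| /* At this point TOKENS holds the comma separated pieces of the option |
| string, e.g. "soft-tcb,gbr-offset=16" -> { "soft-tcb", "gbr-offset=16" }. */ |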
| |
| if (tokens.empty ()) |
| err_ret ("invalid atomic model option"); |
| |
| /* The first token must be the atomic model name. */ |
| { |
| for (size_t i = 0; i < sh_atomic_model::num_models; ++i) |
| if (tokens.front () == model_names[i]) |
| { |
| ret.type = (sh_atomic_model::enum_type)i; |
| ret.name = model_names[i]; |
| ret.cdef_name = model_cdef_names[i]; |
| goto got_mode_name; |
| } |
| |
| err_ret ("invalid atomic model name %qs", tokens.front ().c_str ()); |
| got_mode_name:; |
| } |
| |
| /* Go through the remaining tokens. */ |
| for (size_t i = 1; i < tokens.size (); ++i) |
| { |
| if (tokens[i] == "strict") |
| ret.strict = true; |
| else if (!tokens[i].compare (0, strlen ("gbr-offset="), "gbr-offset=")) |
| { |
| std::string offset_str = tokens[i].substr (strlen ("gbr-offset=")); |
| ret.tcb_gbr_offset = integral_argument (offset_str.c_str ()); |
| if (offset_str.empty () || ret.tcb_gbr_offset == -1) |
| err_ret ("could not parse gbr-offset value %qs in atomic model " |
| "option", offset_str.c_str ()); |
| } |
| else |
| err_ret ("unknown parameter %qs in atomic model option", |
| tokens[i].c_str ()); |
| } |
| |
| /* Check that the selection makes sense. */ |
| if (ret.type == sh_atomic_model::soft_gusa && !TARGET_SH3) |
| err_ret ("atomic model %s is only available on SH3 and SH4 targets", |
| ret.name); |
| |
| if (ret.type == sh_atomic_model::hard_llcs && !TARGET_SH4A) |
| err_ret ("atomic model %s is only available on SH4A targets", ret.name); |
| |
| if (ret.type == sh_atomic_model::soft_tcb && ret.tcb_gbr_offset == -1) |
| err_ret ("atomic model %s requires gbr-offset parameter", ret.name); |
| |
| if (ret.type == sh_atomic_model::soft_tcb |
| && (ret.tcb_gbr_offset < 0 || ret.tcb_gbr_offset > 1020 |
| || (ret.tcb_gbr_offset & 3) != 0)) |
| err_ret ("invalid gbr-offset value \"%d\" for atomic model %s; it must be " |
| "a multiple of 4 in the range 0-1020", ret.tcb_gbr_offset, |
| ret.name); |
| |
| if (ret.type == sh_atomic_model::soft_imask && TARGET_USERMODE) |
| err_ret ("cannot use atomic model %s in user mode", ret.name); |
| |
| return ret; |
| |
| #undef err_ret |
| } |
| |
| /* Register SH specific RTL passes. */ |
| extern opt_pass* make_pass_sh_treg_combine (gcc::context* ctx, bool split_insns, |
| const char* name); |
| extern opt_pass* make_pass_sh_optimize_sett_clrt (gcc::context* ctx, |
| const char* name); |
| static void |
| register_sh_passes (void) |
| { |
| /* Running the sh_treg_combine pass after ce1 generates better code when |
| comparisons are combined and reg-reg moves are introduced, because |
| reg-reg moves will be eliminated afterwards. However, there are quite |
| a few cases where combine will be unable to fold comparison related |
| insns, so for now don't do it. |
| register_pass (make_pass_sh_treg_combine (g, false, "sh_treg_combine1"), |
| PASS_POS_INSERT_AFTER, "ce1", 1); |
| */ |
| |
| /* Run sh_treg_combine pass after combine but before register allocation. */ |
| register_pass (make_pass_sh_treg_combine (g, true, "sh_treg_combine2"), |
| PASS_POS_INSERT_AFTER, "split1", 1); |
| |
| /* Run sh_treg_combine pass after register allocation and basic block |
| reordering as this sometimes creates new opportunities. */ |
| register_pass (make_pass_sh_treg_combine (g, true, "sh_treg_combine3"), |
| PASS_POS_INSERT_AFTER, "split3", 1); |
| |
| /* Optimize sett and clrt insns, by e.g. removing them if the T bit value |
| is known after a conditional branch. |
| This must be done after basic blocks and branch conditions have |
| stabilized and won't be changed by further passes. */ |
| register_pass (make_pass_sh_optimize_sett_clrt (g, "sh_optimize_sett_clrt"), |
| PASS_POS_INSERT_BEFORE, "sched2", 1); |
| } |
| |
| /* Implement TARGET_OPTION_OVERRIDE macro. Validate and override |
| various options, and do some machine dependent initialization. */ |
| static void |
| sh_option_override (void) |
| { |
| int regno; |
| |
| SUBTARGET_OVERRIDE_OPTIONS; |
| |
| sh_cpu = PROCESSOR_SH1; |
| assembler_dialect = 0; |
| if (TARGET_SH2) |
| sh_cpu = PROCESSOR_SH2; |
| if (TARGET_SH2E) |
| sh_cpu = PROCESSOR_SH2E; |
| if (TARGET_SH2A) |
| sh_cpu = PROCESSOR_SH2A; |
| if (TARGET_SH3) |
| sh_cpu = PROCESSOR_SH3; |
| if (TARGET_SH3E) |
| sh_cpu = PROCESSOR_SH3E; |
| if (TARGET_SH4) |
| { |
| assembler_dialect = 1; |
| sh_cpu = PROCESSOR_SH4; |
| } |
| if (TARGET_SH4A) |
| { |
| assembler_dialect = 1; |
| sh_cpu = PROCESSOR_SH4A; |
| } |
| |
| /* User/privileged mode is supported only on SH3* and SH4*. |
| Disable it for everything else. */ |
| if (!TARGET_SH3 && TARGET_USERMODE) |
| TARGET_USERMODE = false; |
| |
| if (! strcmp (sh_div_str, "call-div1")) |
| sh_div_strategy = SH_DIV_CALL_DIV1; |
| else if (! strcmp (sh_div_str, "call-fp") && TARGET_FPU_ANY) |
| sh_div_strategy = SH_DIV_CALL_FP; |
| else if (! strcmp (sh_div_str, "call-table") && TARGET_DYNSHIFT) |
| sh_div_strategy = SH_DIV_CALL_TABLE; |
| else |
| { |
| /* Pick one that makes most sense for the target in general. |
| It is not much good to use different functions depending on -Os, |
| since then we'll end up with two different functions when some of |
| the code is compiled for size, and some for speed. */ |
| |
| /* SH4 tends to emphasize speed. */ |
| if (TARGET_HARD_SH4) |
| sh_div_strategy = SH_DIV_CALL_TABLE; |
| /* These have their own way of doing things. */ |
| else if (TARGET_SH2A) |
| sh_div_strategy = SH_DIV_INTRINSIC; |
| /* SH1 .. SH3 cores often go into small-footprint systems, so |
| default to the smallest implementation available. */ |
| else |
| sh_div_strategy = SH_DIV_CALL_DIV1; |
| } |
| |
| if (sh_divsi3_libfunc[0]) |
| ; /* User supplied - leave it alone. */ |
| else if (TARGET_DIVIDE_CALL_FP) |
| sh_divsi3_libfunc = "__sdivsi3_i4"; |
| else if (TARGET_DIVIDE_CALL_TABLE) |
| sh_divsi3_libfunc = "__sdivsi3_i4i"; |
| else |
| sh_divsi3_libfunc = "__sdivsi3"; |
| |
| if (sh_branch_cost == -1) |
| { |
| /* The SH1 does not have delay slots, hence we get a pipeline stall |
| at every branch. The SH4 is superscalar, so the single delay slot |
| is not sufficient to keep both pipelines filled. |
| In any case, set the default branch cost to '2', as it results in |
| slightly smaller code overall and also enables some if conversions |
| that are required for matching special T bit related insns. */ |
| sh_branch_cost = 2; |
| } |
| |
| /* Set -mzdcbranch for SH4 / SH4A if not otherwise specified by the user. */ |
| if (! OPTION_SET_P (TARGET_ZDCBRANCH) && TARGET_HARD_SH4) |
| TARGET_ZDCBRANCH = 1; |
| |
| /* FDPIC code is a special form of PIC, and the vast majority of code |
| generation constraints that apply to PIC also apply to FDPIC, so we |
| set flag_pic to avoid the need to check TARGET_FDPIC everywhere |
| flag_pic is checked. */ |
| if (TARGET_FDPIC && !flag_pic) |
| flag_pic = 2; |
| |
| for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) |
| if (! VALID_REGISTER_P (regno)) |
| sh_register_names[regno][0] = '\0'; |
| |
| for (regno = 0; regno < ADDREGNAMES_SIZE; regno++) |
| if (! VALID_REGISTER_P (ADDREGNAMES_REGNO (regno))) |
| sh_additional_register_names[regno][0] = '\0'; |
| |
| if (flag_pic && ! TARGET_PREFERGOT) |
| flag_no_function_cse = 1; |
| |
| if (targetm.small_register_classes_for_mode_p (VOIDmode)) |
| { |
| /* Never run scheduling before reload, since that can |
| break global alloc, and generates slower code anyway due |
| to the pressure on R0. */ |
| /* Enable sched1 for SH4 if the user explicitly requests it. |
| When sched1 is enabled, the ready queue will be reordered by |
| the target hooks if pressure is high. We cannot do this for |
| PIC, SH3 and lower as they give spill failures for R0. */ |
| if (!TARGET_HARD_SH4 || flag_pic) |
| flag_schedule_insns = 0; |
| /* ??? Current exception handling places basic block boundaries |
| after call_insns. It causes the high pressure on R0 and gives |
| spill failures for R0 in reload. See PR 22553 and the thread |
| on gcc-patches |
| <http://gcc.gnu.org/ml/gcc-patches/2005-10/msg00816.html>. */ |
| else if (flag_exceptions) |
| { |
| if (flag_schedule_insns && OPTION_SET_P (flag_schedule_insns)) |
| warning (0, "ignoring %<-fschedule-insns%> because of exception " |
| "handling bug"); |
| flag_schedule_insns = 0; |
| } |
| else if (flag_schedule_insns |
| && !OPTION_SET_P (flag_schedule_insns)) |
| flag_schedule_insns = 0; |
| } |
| |
| /* Unwind info is not correct around the CFG unless either a frame |
| pointer is present or -maccumulate-outgoing-args is in effect. Fixing |
| this requires rewriting unwind info generation to be aware of the |
| CFG and propagating states around edges. */ |
| if ((flag_unwind_tables || flag_asynchronous_unwind_tables |
| || flag_exceptions || flag_non_call_exceptions) |
| && flag_omit_frame_pointer && !TARGET_ACCUMULATE_OUTGOING_ARGS) |
| { |
| warning (0, "unwind tables currently require either a frame pointer " |
| "or %<-maccumulate-outgoing-args%> for correctness"); |
| TARGET_ACCUMULATE_OUTGOING_ARGS = 1; |
| } |
| |
| if (flag_unsafe_math_optimizations) |
| { |
| /* Enable fsca insn for SH4A if not otherwise specified by the user. */ |
| if (OPTION_SET_P (TARGET_FSCA) == 0 |
| && (TARGET_SH4A_FP || TARGET_FPU_SH4_300)) |
| TARGET_FSCA = 1; |
| |
| /* Enable fsrra insn for SH4A if not otherwise specified by the user. */ |
| if (OPTION_SET_P (TARGET_FSRRA) == 0 |
| && (TARGET_SH4A_FP || TARGET_FPU_SH4_300)) |
| TARGET_FSRRA = 1; |
| } |
| |
| /* Allow the fsrra insn only if both -funsafe-math-optimizations and |
| -ffinite-math-only are enabled. */ |
| TARGET_FSRRA = TARGET_FSRRA |
| && flag_unsafe_math_optimizations |
| && flag_finite_math_only; |
| |
| /* If the -mieee option was not explicitly set by the user, turn it on |
| unless -ffinite-math-only was specified. See also PR 33135. */ |
| if (! OPTION_SET_P (TARGET_IEEE)) |
| TARGET_IEEE = ! flag_finite_math_only; |
| |
| if (sh_fixed_range_str) |
| sh_fix_range (sh_fixed_range_str); |
| |
| /* This target defaults to strict volatile bitfields. */ |
| if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2)) |
| flag_strict_volatile_bitfields = 1; |
| |
| sh_override_options_after_change (); |
| |
| /* Parse atomic model option and make sure it is valid for the current |
| target CPU. */ |
| selected_atomic_model_ |
| = parse_validate_atomic_model_option (sh_atomic_model_str); |
| |
| register_sh_passes (); |
| } |
| |
| /* Implement targetm.override_options_after_change. */ |
| |
| static void |
| sh_override_options_after_change (void) |
| { |
| /* Adjust loop, jump and function alignment values (in bytes), if those |
| were not specified by the user using -falign-loops, -falign-jumps |
| and -falign-functions options. |
| 32 bit alignment is better for speed, because instructions can be |
| fetched as a pair from a longword boundary. For size use 16 bit |
| alignment to get more compact code. |
| Aligning all jumps increases the code size, even if it might |
| result in slightly faster code. Thus, it is set to the smallest |
| alignment possible if not specified by the user. */ |
| if (flag_align_loops && !str_align_loops) |
| str_align_loops = optimize_size ? "2" : "4"; |
| |
| /* Parse values so that we can compare for current value. */ |
| parse_alignment_opts (); |
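| /* SH instructions are 2 bytes long, so a branch target always needs at |
| least 2 byte alignment. */ |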
| if (flag_align_jumps && !str_align_jumps) |
| str_align_jumps = "2"; |
| else if (align_jumps.levels[0].get_value () < 2) |
| str_align_jumps = "2"; |
| |
| if (flag_align_functions && !str_align_functions) |
| str_align_functions = optimize_size ? "2" : "4"; |
| |
| /* The linker relaxation code breaks when a function contains |
| alignments larger than the alignment at the start of a |
| compilation unit. */ |
| if (TARGET_RELAX) |
| { |
| /* Parse values so that we can compare for current value. */ |
| parse_alignment_opts (); |
| int min_align = MAX (align_loops.levels[0].get_value (), |
| align_jumps.levels[0].get_value ()); |
| |
| /* Also take possible .long constants / mova tables into account. */ |
| if (min_align < 4) |
| min_align = 4; |
| if (align_functions.levels[0].get_value () < min_align) |
| { |
| char *r = XNEWVEC (char, 16); |
| sprintf (r, "%d", min_align); |
| str_align_functions = r; |
| } |
| } |
| } |
| |
| /* Print the operand address in x to the stream. */ |
| static void |
| sh_print_operand_address (FILE *stream, machine_mode /*mode*/, rtx x) |
| { |
| switch (GET_CODE (x)) |
| { |
| case REG: |
| case SUBREG: |
| fprintf (stream, "@%s", reg_names[true_regnum (x)]); |
| break; |
| |
| case PLUS: |
| { |
| rtx base = XEXP (x, 0); |
| rtx index = XEXP (x, 1); |
| |
| switch (GET_CODE (index)) |
| { |
| case CONST_INT: |
| fprintf (stream, "@(%d,%s)", (int) INTVAL (index), |
| reg_names[true_regnum (base)]); |
| break; |
| |
| case REG: |
| case SUBREG: |
| { |
| int base_num = true_regnum (base); |
| int index_num = true_regnum (index); |
| |
| /* If base or index is R0, make sure that it comes first. |
| Usually one of them will be R0, but the order might be wrong. |
| If neither base nor index is R0, it's an error and we just |
| pass it on to the assembler. This avoids silent wrong code |
| bugs. */ |
| if (base_num == 0 && index_num != 0) |
| std::swap (base_num, index_num); |
| |
| fprintf (stream, "@(%s,%s)", reg_names[index_num], |
| reg_names[base_num]); |
| break; |
| } |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| break; |
| |
| case PRE_DEC: |
| fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]); |
| break; |
| |
| case POST_INC: |
| fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]); |
| break; |
| |
| default: |
| x = mark_constant_pool_use (x); |
| output_addr_const (stream, x); |
| break; |
| } |
| } |
| |
| /* Print operand x (an rtx) in assembler syntax to file stream |
| according to modifier code. |
| |
| '.' print a .s if insn needs delay slot |
| ',' print LOCAL_LABEL_PREFIX |
| '@' print trapa, rte or rts depending on the interrupt attributes |
| '#' output a nop if there is nothing to put in the delay slot |
| ''' print likelihood suffix (/u for unlikely). |
| '>' print branch target if -fverbose-asm |
| 'O' print a constant without the # |
| 'R' print the LSW of a dp value - changes if in little endian |
| 'S' print the MSW of a dp value - changes if in little endian |
| 'T' print the next word of a dp value - same as 'R' in big endian mode. |
| 'M' print .b / .w / .l / .s / .d suffix if operand is a MEM. |
| 'N' print 'r63' if the operand is (const_int 0). |
| 'd' print a V2SF reg as dN instead of fpN. |
| 'm' print a pair `base,offset' or `base,index', for LD and ST. |
| 'U' Likewise for {LD,ST}{HI,LO}. |
| 'V' print the position of a single bit set. |
| 'W' print the position of a single bit cleared. |
| 't' print a memory address which is a register. |
| 'u' prints the lowest 16 bits of CONST_INT, as an unsigned value. |
| 'o' output an operator. */ |
| static void |
| sh_print_operand (FILE *stream, rtx x, int code) |
| { |
| int regno; |
| machine_mode mode; |
| |
| switch (code) |
| { |
| tree trapa_attr; |
| |
| case '.': |
| if (final_sequence |
| && ! INSN_ANNULLED_BRANCH_P (final_sequence->insn (0)) |
| && get_attr_length (final_sequence->insn (1))) |
| fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s"); |
| break; |
| case ',': |
| fprintf (stream, "%s", LOCAL_LABEL_PREFIX); |
| break; |
| case '@': |
| trapa_attr = lookup_attribute ("trap_exit", |
| DECL_ATTRIBUTES (current_function_decl)); |
| if (trapa_attr) |
| fprintf (stream, "trapa #%ld", |
| (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr)))); |
| else if (sh_cfun_interrupt_handler_p ()) |
| { |
| if (sh_cfun_resbank_handler_p ()) |
| fprintf (stream, "resbank\n"); |
| fprintf (stream, "rte"); |
| } |
| else |
| fprintf (stream, "rts"); |
| break; |
| case '#': |
| /* Output a nop if there's nothing in the delay slot. */ |
| if (dbr_sequence_length () == 0) |
| fprintf (stream, "\n\tnop"); |
| break; |
| case '\'': |
| { |
| rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0); |
| |
| if (note |
| && profile_probability::from_reg_br_prob_note (XINT (note, 0)) |
| < profile_probability::even ()) |
| fputs ("/u", stream); |
| break; |
| } |
| case '>': |
| if (flag_verbose_asm && JUMP_LABEL (current_output_insn)) |
| { |
| fputs ("\t! target: ", stream); |
| output_addr_const (stream, JUMP_LABEL (current_output_insn)); |
| } |
| break; |
| case 'O': |
| x = mark_constant_pool_use (x); |
| output_addr_const (stream, x); |
| break; |
| /* N.B.: %R / %S / %T adjust memory addresses by four. |
| While they can be used to access 64 bit parts of a larger value |
| held in general purpose registers, that won't work with memory, |
| nor with fp registers, since the frxx names are used. */ |
| case 'R': |
| if (REG_P (x) || GET_CODE (x) == SUBREG) |
| { |
| regno = true_regnum (x); |
| regno += FP_REGISTER_P (regno) ? 1 : SH_REG_LSW_OFFSET; |
| fputs (reg_names[regno], (stream)); |
| } |
| else if (MEM_P (x)) |
| { |
| x = adjust_address (x, SImode, 4 * SH_REG_LSW_OFFSET); |
| sh_print_operand_address (stream, GET_MODE (x), XEXP (x, 0)); |
| } |
| else |
| { |
| rtx sub = NULL_RTX; |
| |
| mode = GET_MODE (x); |
| if (mode == VOIDmode) |
| mode = DImode; |
| if (GET_MODE_SIZE (mode) >= 8) |
| sub = simplify_subreg (SImode, x, mode, 4 * SH_REG_LSW_OFFSET); |
| if (sub) |
| sh_print_operand (stream, sub, 0); |
| else |
| output_operand_lossage ("invalid operand to %%R"); |
| } |
| break; |
| case 'S': |
| if (REG_P (x) || GET_CODE (x) == SUBREG) |
| { |
| regno = true_regnum (x); |
| regno += FP_REGISTER_P (regno) ? 0 : SH_REG_MSW_OFFSET; |
| fputs (reg_names[regno], (stream)); |
| } |
| else if (MEM_P (x)) |
| { |
| x = adjust_address (x, SImode, 4 * SH_REG_MSW_OFFSET); |
| sh_print_operand_address (stream, GET_MODE (x), XEXP (x, 0)); |
| } |
| else |
| { |
| rtx sub = NULL_RTX; |
| |
| mode = GET_MODE (x); |
| if (mode == VOIDmode) |
| mode = DImode; |
| if (GET_MODE_SIZE (mode) >= 8) |
| sub = simplify_subreg (SImode, x, mode, 4 * SH_REG_MSW_OFFSET); |
| if (sub) |
| sh_print_operand (stream, sub, 0); |
| else |
| output_operand_lossage ("invalid operand to %%S"); |
| } |
| break; |
| case 'T': |
| /* Next word of a double. */ |
| switch (GET_CODE (x)) |
| { |
| case REG: |
| fputs (reg_names[REGNO (x) + 1], (stream)); |
| break; |
| case MEM: |
| { |
| machine_mode mode = GET_MODE (x); |
| if (GET_CODE (XEXP (x, 0)) != PRE_DEC |
| && GET_CODE (XEXP (x, 0)) != POST_INC) |
| x = adjust_address (x, SImode, 4); |
| sh_print_operand_address (stream, mode, XEXP (x, 0)); |
| } |
| break; |
| default: |
| break; |
| } |
| break; |
| |
| case 't': |
| gcc_assert (MEM_P (x)); |
| x = XEXP (x, 0); |
| switch (GET_CODE (x)) |
| { |
| case REG: |
| case SUBREG: |
| sh_print_operand (stream, x, 0); |
| break; |
| default: |
| break; |
| } |
| break; |
| |
| case 'o': |
| switch (GET_CODE (x)) |
| { |
| case PLUS: fputs ("add", stream); break; |
| case MINUS: fputs ("sub", stream); break; |
| case MULT: fputs ("mul", stream); break; |
| case DIV: fputs ("div", stream); break; |
| case EQ: fputs ("eq", stream); break; |
| case NE: fputs ("ne", stream); break; |
| case GT: case LT: fputs ("gt", stream); break; |
| case GE: case LE: fputs ("ge", stream); break; |
| case GTU: case LTU: fputs ("gtu", stream); break; |
| case GEU: case LEU: fputs ("geu", stream); break; |
| default: |
| break; |
| } |
| break; |
| case 'M': |
| if (MEM_P (x)) |
| { |
| switch (GET_MODE (x)) |
| { |
| case E_QImode: fputs (".b", stream); break; |
| case E_HImode: fputs (".w", stream); break; |
| case E_SImode: fputs (".l", stream); break; |
| case E_SFmode: fputs (".s", stream); break; |
| case E_DFmode: fputs (".d", stream); break; |
| default: gcc_unreachable (); |
| } |
| } |
| break; |
| |
| case 'm': |
| gcc_assert (MEM_P (x)); |
| x = XEXP (x, 0); |
| /* Fall through. */ |
| case 'U': |
| switch (GET_CODE (x)) |
| { |
| case REG: |
| case SUBREG: |
| sh_print_operand (stream, x, 0); |
| fputs (", 0", stream); |
| break; |
| |
| case PLUS: |
| sh_print_operand (stream, XEXP (x, 0), 0); |
| fputs (", ", stream); |
| sh_print_operand (stream, XEXP (x, 1), 0); |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| break; |
| |
| case 'V': |
| { |
| int num = exact_log2 (INTVAL (x)); |
| gcc_assert (num >= 0); |
| fprintf (stream, "#%d", num); |
| } |
| break; |
| |
| case 'W': |
| { |
| int num = exact_log2 (~INTVAL (x)); |
| gcc_assert (num >= 0); |
| fprintf (stream, "#%d", num); |
| } |
| break; |
| |
| case 'd': |
| gcc_assert (REG_P (x) && GET_MODE (x) == V2SFmode); |
| |
| fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1); |
| break; |
| |
| case 'N': |
| if (x == CONST0_RTX (GET_MODE (x))) |
| { |
| fprintf ((stream), "r63"); |
| break; |
| } |
| goto default_output; |
| case 'u': |
| if (CONST_INT_P (x)) |
| { |
| fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1)); |
| break; |
| } |
| /* Fall through. */ |
| |
| default_output: |
| default: |
| regno = 0; |
| mode = GET_MODE (x); |
| |
| switch (GET_CODE (x)) |
| { |
| case TRUNCATE: |
| { |
| rtx inner = XEXP (x, 0); |
| int offset = 0; |
| machine_mode inner_mode; |
| |
| /* We might see SUBREGs with vector mode registers inside. */ |
| if (GET_CODE (inner) == SUBREG |
| && (GET_MODE_SIZE (GET_MODE (inner)) |
| == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner)))) |
| && subreg_lowpart_p (inner)) |
| inner = SUBREG_REG (inner); |
| if (CONST_INT_P (inner)) |
| { |
| x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x))); |
| goto default_output; |
| } |
| inner_mode = GET_MODE (inner); |
| if (GET_CODE (inner) == SUBREG |
| && (GET_MODE_SIZE (GET_MODE (inner)) |
| < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner)))) |
| && REG_P (SUBREG_REG (inner))) |
| { |
| offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)), |
| GET_MODE (SUBREG_REG (inner)), |
| SUBREG_BYTE (inner), |
| GET_MODE (inner)); |
| inner = SUBREG_REG (inner); |
| } |
| if (!REG_P (inner) || GET_MODE_SIZE (inner_mode) > 8) |
| abort (); |
| /* Floating point register pairs are always big endian; |
| general purpose registers are 64 bit wide. */ |
| regno = REGNO (inner); |
| regno = (hard_regno_nregs (regno, inner_mode) |
| - hard_regno_nregs (regno, mode)) |
| + offset; |
| x = inner; |
| goto reg; |
| } |
| case SIGN_EXTEND: |
| x = XEXP (x, 0); |
| goto reg; |
| case SUBREG: |
| gcc_assert (SUBREG_BYTE (x) == 0 |
| && REG_P (SUBREG_REG (x))); |
| |
| x = SUBREG_REG (x); |
| /* Fall through. */ |
| |
| reg: |
| case REG: |
| regno += REGNO (x); |
| if (FP_REGISTER_P (regno) |
| && mode == V16SFmode) |
| fprintf ((stream), "mtrx%s", reg_names[regno] + 2); |
| else if (FP_REGISTER_P (REGNO (x)) |
| && mode == V4SFmode) |
| fprintf ((stream), "fv%s", reg_names[regno] + 2); |
| else if (REG_P (x) |
| && mode == V2SFmode) |
| fprintf ((stream), "fp%s", reg_names[regno] + 2); |
| else if (FP_REGISTER_P (REGNO (x)) |
| && GET_MODE_SIZE (mode) > 4) |
| fprintf ((stream), "d%s", reg_names[regno] + 1); |
| else |
| fputs (reg_names[regno], (stream)); |
| break; |
| |
| case MEM: |
| output_address (GET_MODE (x), XEXP (x, 0)); |
| break; |
| |
| default: |
| fputc ('#', stream); |
| output_addr_const (stream, x); |
| break; |
| } |
| break; |
| } |
| } |
| |
| static bool |
| sh_print_operand_punct_valid_p (unsigned char code) |
| { |
| return (code == '.' || code == '#' || code == '@' || code == ',' |
| || code == '$' || code == '\'' || code == '>'); |
| } |
| |
| /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */ |
| static bool |
| sh_asm_output_addr_const_extra (FILE *file, rtx x) |
| { |
| if (GET_CODE (x) == UNSPEC) |
| { |
| switch (XINT (x, 1)) |
| { |
| case UNSPEC_PIC: |
| /* GLOBAL_OFFSET_TABLE or local symbols, no suffix. */ |
| output_addr_const (file, XVECEXP (x, 0, 0)); |
| break; |
| case UNSPEC_GOT: |
| output_addr_const (file, XVECEXP (x, 0, 0)); |
| fputs ("@GOT", file); |
| break; |
| case UNSPEC_GOTOFF: |
| output_addr_const (file, XVECEXP (x, 0, 0)); |
| fputs ("@GOTOFF", file); |
| break; |
| case UNSPEC_PLT: |
| output_addr_const (file, XVECEXP (x, 0, 0)); |
| fputs ("@PLT", file); |
| break; |
| case UNSPEC_GOTPLT: |
| output_addr_const (file, XVECEXP (x, 0, 0)); |
| fputs ("@GOTPLT", file); |
| break; |
| case UNSPEC_PCREL: |
| output_addr_const (file, XVECEXP (x, 0, 0)); |
| fputs ("@PCREL", file); |
| break; |
| case UNSPEC_DTPOFF: |
| output_addr_const (file, XVECEXP (x, 0, 0)); |
| fputs ("@DTPOFF", file); |
| break; |
| case UNSPEC_GOTTPOFF: |
| output_addr_const (file, XVECEXP (x, 0, 0)); |
| fputs ("@GOTTPOFF", file); |
| break; |
| case UNSPEC_TPOFF: |
| output_addr_const (file, XVECEXP (x, 0, 0)); |
| fputs ("@TPOFF", file); |
| break; |
| case UNSPEC_CALLER: |
| { |
| char name[32]; |
| /* LPCS stands for Label for PIC Call Site. */ |
| targetm.asm_out.generate_internal_label (name, "LPCS", |
| INTVAL (XVECEXP (x, 0, 0))); |
| assemble_name (file, name); |
| } |
| break; |
| case UNSPEC_SYMOFF: |
| output_addr_const (file, XVECEXP (x, 0, 0)); |
| fputc ('-', file); |
| if (GET_CODE (XVECEXP (x, 0, 1)) == CONST) |
| { |
| fputc ('(', file); |
| output_addr_const (file, XVECEXP (x, 0, 1)); |
| fputc (')', file); |
| } |
| else |
| output_addr_const (file, XVECEXP (x, 0, 1)); |
| break; |
| case UNSPEC_PCREL_SYMOFF: |
| output_addr_const (file, XVECEXP (x, 0, 0)); |
| fputs ("-(", file); |
| output_addr_const (file, XVECEXP (x, 0, 1)); |
| fputs ("-.)", file); |
| break; |
| case UNSPEC_GOTFUNCDESC: |
| output_addr_const (file, XVECEXP (x, 0, 0)); |
| fputs ("@GOTFUNCDESC", file); |
| break; |
| case UNSPEC_GOTOFFFUNCDESC: |
| output_addr_const (file, XVECEXP (x, 0, 0)); |
| fputs ("@GOTOFFFUNCDESC", file); |
| break; |
| default: |
| return false; |
| } |
| return true; |
| } |
| else |
| return false; |
| } |
| |
| /* Encode symbol attributes of a SYMBOL_REF into its |
| SYMBOL_REF_FLAGS. */ |
| static void |
| sh_encode_section_info (tree decl, rtx rtl, int first) |
| { |
| default_encode_section_info (decl, rtl, first); |
| |
| if (TREE_CODE (decl) == FUNCTION_DECL |
| && sh2a_function_vector_p (decl) && TARGET_SH2A) |
| SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FUNCVEC_FUNCTION; |
| } |
| |
| /* Prepare operands for a move define_expand; specifically, one of the |
| operands must be in a register. */ |
| void |
| prepare_move_operands (rtx operands[], machine_mode mode) |
| { |
| if ((mode == SImode || mode == DImode) |
| && flag_pic |
| && ! ((mode == Pmode || mode == ptr_mode) |
| && tls_symbolic_operand (operands[1], Pmode) != TLS_MODEL_NONE)) |
| { |
| rtx temp; |
| if (SYMBOLIC_CONST_P (operands[1])) |
| { |
| if (MEM_P (operands[0])) |
| operands[1] = force_reg (Pmode, operands[1]); |
| else |
| { |
| temp = (!can_create_pseudo_p () |
| ? operands[0] |
| : gen_reg_rtx (Pmode)); |
| operands[1] = legitimize_pic_address (operands[1], mode, temp); |
| } |
| } |
| else if (GET_CODE (operands[1]) == CONST |
| && GET_CODE (XEXP (operands[1], 0)) == PLUS |
| && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0))) |
| { |
| temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode); |
| temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0), |
| mode, temp); |
| operands[1] = expand_binop (mode, add_optab, temp, |
| XEXP (XEXP (operands[1], 0), 1), |
| (!can_create_pseudo_p () |
| ? temp |
| : gen_reg_rtx (Pmode)), |
| 0, OPTAB_LIB_WIDEN); |
| } |
| } |
| |
| if (! reload_in_progress && ! reload_completed) |
| { |
| /* Copy the source to a register if neither operand is a register. */ |
| if (! register_operand (operands[0], mode) |
| && ! register_operand (operands[1], mode)) |
| operands[1] = copy_to_mode_reg (mode, operands[1]); |
| |
| if (MEM_P (operands[0]) && ! memory_operand (operands[0], mode)) |
| { |
| /* This is like change_address_1 (operands[0], mode, 0, 1), |
| except that we can't use that function because it is static. */ |
| rtx new_rtx = change_address (operands[0], mode, 0); |
| MEM_COPY_ATTRIBUTES (new_rtx, operands[0]); |
| operands[0] = new_rtx; |
| } |
| |
| /* This case can happen while generating code to move the result |
| of a library call to the target. Reject `st r0,@(rX,rY)' because |
| reload will fail to find a spill register for rX, since r0 is already |
| being used for the source. */ |
| else if (refers_to_regno_p (R0_REG, operands[1]) |
| && MEM_P (operands[0]) |
| && GET_CODE (XEXP (operands[0], 0)) == PLUS |
| && REG_P (XEXP (XEXP (operands[0], 0), 1))) |
| operands[1] = copy_to_mode_reg (mode, operands[1]); |
| |
/* When displacement addressing is used, the RA will assign r0 to
the pseudo register operand for the QI/HImode load/store.
This tends to make a long live range for R0 and might cause
anomalous register spills in some cases with LRA. See PR
target/55212.
We split a possible load/store into two move insns via r0 so as to
shorten the R0 live range. This makes some code worse but
wins on average for LRA.
Also, when base+index addressing is used and the index term is
a subreg, LRA assumes that more hard registers can be available
in some situations. That isn't the case for SH in the problematic
case. We can pre-allocate R0 for that index term to avoid
the issue. See PR target/66591. */
| else if (sh_lra_p () |
| && ! TARGET_SH2A |
| && ((REG_P (operands[0]) && MEM_P (operands[1])) |
| || (REG_P (operands[1]) && MEM_P (operands[0])))) |
| { |
| bool load_p = REG_P (operands[0]); |
| rtx reg = operands[load_p ? 0 : 1]; |
| rtx adr = XEXP (operands[load_p ? 1 : 0], 0); |
| |
| if ((mode == QImode || mode == HImode) |
| && REGNO (reg) >= FIRST_PSEUDO_REGISTER |
| && GET_CODE (adr) == PLUS |
| && REG_P (XEXP (adr, 0)) |
| && (REGNO (XEXP (adr, 0)) >= FIRST_PSEUDO_REGISTER) |
| && CONST_INT_P (XEXP (adr, 1)) |
| && INTVAL (XEXP (adr, 1)) != 0 |
| && sh_legitimate_index_p (mode, XEXP (adr, 1), false, true)) |
| { |
| rtx r0_rtx = gen_rtx_REG (mode, R0_REG); |
| emit_move_insn (r0_rtx, operands[1]); |
| operands[1] = r0_rtx; |
| } |
| if (REGNO (reg) >= FIRST_PSEUDO_REGISTER |
| && GET_CODE (adr) == PLUS |
| && REG_P (XEXP (adr, 0)) |
| && (REGNO (XEXP (adr, 0)) >= FIRST_PSEUDO_REGISTER) |
| && SUBREG_P (XEXP (adr, 1)) |
| && REG_P (SUBREG_REG (XEXP (adr, 1)))) |
| { |
| rtx r0_rtx = gen_rtx_REG (GET_MODE (XEXP (adr, 1)), R0_REG); |
| emit_move_insn (r0_rtx, XEXP (adr, 1)); |
| XEXP (adr, 1) = r0_rtx; |
| } |
| } |
| } |
| |
| if (mode == Pmode || mode == ptr_mode) |
| { |
| rtx op0 = operands[0]; |
| rtx op1 = operands[1]; |
| rtx opc; |
| if (GET_CODE (op1) == CONST |
| && GET_CODE (XEXP (op1, 0)) == PLUS |
| && (tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode) |
| != TLS_MODEL_NONE)) |
| { |
| opc = XEXP (XEXP (op1, 0), 1); |
| op1 = XEXP (XEXP (op1, 0), 0); |
| } |
| else |
| opc = NULL_RTX; |
| |
| enum tls_model tls_kind; |
| |
| if (! reload_in_progress && ! reload_completed |
| && (tls_kind = tls_symbolic_operand (op1, Pmode)) != TLS_MODEL_NONE) |
| { |
| rtx tga_op1, tga_ret, tmp, tmp2; |
| |
| if (! flag_pic |
| && (tls_kind == TLS_MODEL_GLOBAL_DYNAMIC |
| || tls_kind == TLS_MODEL_LOCAL_DYNAMIC |
| || tls_kind == TLS_MODEL_INITIAL_EXEC)) |
| { |
| static int got_labelno; |
/* Don't schedule insns for getting the GOT address when
the first scheduling pass is enabled, to avoid spill
failures for R0. */
| if (flag_schedule_insns) |
| emit_insn (gen_blockage ()); |
| emit_insn (gen_GOTaddr2picreg (GEN_INT (++got_labelno))); |
| emit_use (gen_rtx_REG (SImode, PIC_REG)); |
| if (flag_schedule_insns) |
| emit_insn (gen_blockage ()); |
| } |
| |
| switch (tls_kind) |
| { |
| case TLS_MODEL_GLOBAL_DYNAMIC: |
| tga_ret = gen_rtx_REG (Pmode, R0_REG); |
| if (TARGET_FDPIC) |
| emit_move_insn (gen_rtx_REG (Pmode, PIC_REG), |
| sh_get_fdpic_reg_initial_val ()); |
| emit_call_insn (gen_tls_global_dynamic (tga_ret, op1)); |
| tmp = gen_reg_rtx (Pmode); |
| emit_move_insn (tmp, tga_ret); |
| op1 = tmp; |
| break; |
| |
| case TLS_MODEL_LOCAL_DYNAMIC: |
| tga_ret = gen_rtx_REG (Pmode, R0_REG); |
| if (TARGET_FDPIC) |
| emit_move_insn (gen_rtx_REG (Pmode, PIC_REG), |
| sh_get_fdpic_reg_initial_val ()); |
| emit_call_insn (gen_tls_local_dynamic (tga_ret, op1)); |
| |
| tmp = gen_reg_rtx (Pmode); |
| emit_move_insn (tmp, tga_ret); |
| |
| if (register_operand (op0, Pmode)) |
| tmp2 = op0; |
| else |
| tmp2 = gen_reg_rtx (Pmode); |
| |
| emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp)); |
| op1 = tmp2; |
| break; |
| |
| case TLS_MODEL_INITIAL_EXEC: |
| tga_op1 = !can_create_pseudo_p () ? op0 : gen_reg_rtx (Pmode); |
| tmp = gen_sym2GOTTPOFF (op1); |
| if (TARGET_FDPIC) |
| emit_move_insn (gen_rtx_REG (Pmode, PIC_REG), |
| sh_get_fdpic_reg_initial_val ()); |
| emit_insn (gen_tls_initial_exec (tga_op1, tmp)); |
| op1 = tga_op1; |
| break; |
| |
| case TLS_MODEL_LOCAL_EXEC: |
| tmp2 = gen_reg_rtx (Pmode); |
| emit_insn (gen_store_gbr (tmp2)); |
| tmp = gen_reg_rtx (Pmode); |
| emit_insn (gen_symTPOFF2reg (tmp, op1)); |
| |
| if (register_operand (op0, Pmode)) |
| op1 = op0; |
| else |
| op1 = gen_reg_rtx (Pmode); |
| |
| emit_insn (gen_addsi3 (op1, tmp, tmp2)); |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| if (opc) |
| emit_insn (gen_addsi3 (op1, op1, force_reg (SImode, opc))); |
| operands[1] = op1; |
| } |
| } |
| |
| if (SH_OFFSETS_MUST_BE_WITHIN_SECTIONS_P) |
| { |
| rtx base, offset; |
| split_const (operands[1], &base, &offset); |
| |
| if (GET_CODE (base) == SYMBOL_REF |
| && !offset_within_block_p (base, INTVAL (offset))) |
| { |
| rtx tmp = can_create_pseudo_p () ? gen_reg_rtx (mode) : operands[0]; |
| emit_move_insn (tmp, base); |
| if (!arith_operand (offset, mode)) |
| offset = force_reg (mode, offset); |
| emit_insn (gen_add3_insn (operands[0], tmp, offset)); |
| } |
| } |
| } |
| |
/* Implement the canonicalize_comparison target hook for the combine
pass. For the target hook this function is invoked via the
sh_canonicalize_comparison wrapper below. This function is also
re-used to canonicalize comparisons in cbranch pattern expanders. */
| static void |
| sh_canonicalize_comparison (enum rtx_code& cmp, rtx& op0, rtx& op1, |
| machine_mode mode, |
| bool op0_preserve_value) |
| { |
| /* When invoked from within the combine pass the mode is not specified, |
| so try to get it from one of the operands. */ |
| if (mode == VOIDmode) |
| mode = GET_MODE (op0); |
| if (mode == VOIDmode) |
| mode = GET_MODE (op1); |
| |
| // We need to have a mode to do something useful here. |
| if (mode == VOIDmode) |
| return; |
| |
| // Currently, we don't deal with floats here. |
| if (GET_MODE_CLASS (mode) == MODE_FLOAT) |
| return; |
| |
| // Make sure that the constant operand is the second operand. |
| if (CONST_INT_P (op0) && !CONST_INT_P (op1)) |
| { |
| if (op0_preserve_value) |
| return; |
| |
| std::swap (op0, op1); |
| cmp = swap_condition (cmp); |
| } |
| |
| if (CONST_INT_P (op1)) |
| { |
| /* Try to adjust the constant operand in such a way that available |
| comparison insns can be utilized better and the constant can be |
| loaded with a 'mov #imm,Rm' insn. This avoids a load from the |
| constant pool. */ |
| const HOST_WIDE_INT val = INTVAL (op1); |
| |
| /* x > -1 --> x >= 0 |
| x > 0xFFFFFF7F --> x >= 0xFFFFFF80 |
| x <= -1 --> x < 0 |
| x <= 0xFFFFFF7F --> x < 0xFFFFFF80 */ |
| if ((val == -1 || val == -0x81) && (cmp == GT || cmp == LE)) |
| { |
| cmp = cmp == GT ? GE : LT; |
| op1 = gen_int_mode (val + 1, mode); |
| } |
| |
| /* x >= 1 --> x > 0 |
| x >= 0x80 --> x > 0x7F |
| x < 1 --> x <= 0 |
| x < 0x80 --> x <= 0x7F */ |
| else if ((val == 1 || val == 0x80) && (cmp == GE || cmp == LT)) |
| { |
| cmp = cmp == GE ? GT : LE; |
| op1 = gen_int_mode (val - 1, mode); |
| } |
| |
| /* unsigned x >= 1 --> x != 0 |
| unsigned x < 1 --> x == 0 */ |
| else if (val == 1 && (cmp == GEU || cmp == LTU)) |
| { |
| cmp = cmp == GEU ? NE : EQ; |
| op1 = CONST0_RTX (mode); |
| } |
| |
/* unsigned x >= 0x80 --> unsigned x > 0x7F
unsigned x < 0x80 --> unsigned x <= 0x7F */
| else if (val == 0x80 && (cmp == GEU || cmp == LTU)) |
| { |
| cmp = cmp == GEU ? GTU : LEU; |
| op1 = gen_int_mode (val - 1, mode); |
| } |
| |
| /* unsigned x > 0 --> x != 0 |
| unsigned x <= 0 --> x == 0 */ |
| else if (val == 0 && (cmp == GTU || cmp == LEU)) |
| cmp = cmp == GTU ? NE : EQ; |
| |
| /* unsigned x > 0x7FFFFFFF --> signed x < 0 |
| unsigned x <= 0x7FFFFFFF --> signed x >= 0 */ |
| else if (mode == SImode && (cmp == GTU || cmp == LEU) |
| && val == 0x7FFFFFFF) |
| { |
| cmp = cmp == GTU ? LT : GE; |
| op1 = const0_rtx; |
| } |
| |
| /* unsigned x >= 0x80000000 --> signed x < 0 |
| unsigned x < 0x80000000 --> signed x >= 0 */ |
| else if (mode == SImode && (cmp == GEU || cmp == LTU) |
| && (unsigned HOST_WIDE_INT)val |
| == ((unsigned HOST_WIDE_INT)0x7FFFFFFF + 1)) |
| { |
| cmp = cmp == GEU ? LT : GE; |
| op1 = const0_rtx; |
| } |
| } |
| } |
| |
/* This function implements the canonicalize_comparison target hook.
This wrapper around the internally used sh_canonicalize_comparison
function is needed to do the enum rtx_code <-> int conversion.
Target hooks cannot use enum rtx_code in their definitions. */
| static void |
| sh_canonicalize_comparison (int *code, rtx *op0, rtx *op1, |
| bool op0_preserve_value) |
| { |
| enum rtx_code tmp_code = (enum rtx_code)*code; |
| sh_canonicalize_comparison (tmp_code, *op0, *op1, |
| VOIDmode, op0_preserve_value); |
| *code = (int)tmp_code; |
| } |
| |
| /* This function implements the legitimate_combined_insn target hook, |
| which the combine pass uses to early reject combined insns, before |
| it tries to recog the insn and determine its cost. */ |
| static bool |
| sh_legitimate_combined_insn (rtx_insn* insn) |
| { |
| /* Reject combinations of memory loads and zero extensions, as these |
| interfere with other combine patterns such as zero extracts and bit |
| tests. The SH2A movu.{b|w} insns are formed later in the |
| 'sh_optimize_extu_exts' pass after combine/split1. */ |
| rtx p = PATTERN (insn); |
| if (GET_CODE (p) == SET |
| && REG_P (XEXP (p, 0)) && GET_MODE (XEXP (p, 0)) == SImode |
| && GET_CODE (XEXP (p, 1)) == ZERO_EXTEND |
| && MEM_P (XEXP (XEXP (p, 1), 0))) |
| return false; |
| |
| return true; |
| } |
| |
| bool |
| sh_fixed_condition_code_regs (unsigned int* p1, unsigned int* p2) |
| { |
| *p1 = T_REG; |
| *p2 = INVALID_REGNUM; |
| return true; |
| } |
| |
| /* Try to calculate the branch distance of a conditional branch in bytes. |
| |
| FIXME: Because of PR 59189 we can't use the CFG here. Instead just |
| walk from this insn into the next (fall-through) basic block and see if |
| we hit the label. */ |
| unsigned int |
| sh_cbranch_distance (rtx_insn* _cbranch_insn, unsigned int max_dist) |
| { |
| rtx_jump_insn* cbranch_insn = safe_as_a<rtx_jump_insn*> (_cbranch_insn); |
| |
| if (dump_file) |
| { |
| fprintf (dump_file, "sh_cbranch_distance insn = \n"); |
| print_rtl_single (dump_file, cbranch_insn); |
| } |
| |
| unsigned int dist = 0; |
| |
| for (rtx_insn* i = next_nonnote_insn (cbranch_insn); |
| i != NULL && dist < max_dist; i = next_nonnote_insn (i)) |
| { |
| const unsigned int i_len = get_attr_length (i); |
| dist += i_len; |
| |
| if (dump_file) |
| fprintf (dump_file, " insn %d length = %u dist = %u\n", |
| INSN_UID (i), i_len, dist); |
| |
| if (rtx_code_label* l = dyn_cast<rtx_code_label*> (i)) |
| { |
| if (l == cbranch_insn->jump_target ()) |
| { |
| if (dump_file) |
| fprintf (dump_file, " cbranch dist = %u\n", dist); |
| return dist; |
| } |
| break; |
| } |
| } |
| |
| if (dump_file) |
| fprintf (dump_file, " cbranch dist = unknown\n"); |
| |
| return unknown_cbranch_distance; |
| } |
| |
| enum rtx_code |
| prepare_cbranch_operands (rtx *operands, machine_mode mode, |
| enum rtx_code comparison) |
| { |
| gcc_assert (can_create_pseudo_p ()); |
| |
| if (comparison == LAST_AND_UNUSED_RTX_CODE) |
| comparison = GET_CODE (operands[0]); |
| |
| sh_canonicalize_comparison (comparison, operands[1], operands[2], |
| mode, false); |
| |
| rtx op1 = operands[1]; |
| operands[1] = force_reg (mode, op1); |
| |
| /* When we are handling DImode comparisons, we want to keep constants so |
| that we can optimize the component comparisons; however, memory loads |
| are better issued as a whole so that they can be scheduled well. |
| SImode equality comparisons allow I08 constants, but only when they |
| compare r0. Hence, if operands[1] has to be loaded from somewhere else |
| into a register, that register might as well be r0, and we allow the |
| constant. If it is already in a register, this is likely to be |
| allocated to a different hard register, thus we load the constant into |
| a register unless it is zero. */ |
| if (!REG_P (operands[2]) |
| && (!CONST_INT_P (operands[2]) |
| || (mode == SImode && operands[2] != CONST0_RTX (SImode) |
| && ((comparison != EQ && comparison != NE) |
| || (REG_P (op1) && REGNO (op1) != R0_REG) |
| || !satisfies_constraint_I08 (operands[2]))))) |
| operands[2] = force_reg (mode, operands[2]); |
| |
| return comparison; |
| } |
| |
| static void |
| expand_cbranchsi4 (rtx *operands, enum rtx_code comparison, |
| profile_probability probability) |
| { |
| rtx (*branch_expander) (rtx) = gen_branch_true; |
| comparison = prepare_cbranch_operands (operands, SImode, comparison); |
| switch (comparison) |
| { |
| case NE: case LT: case LE: case LTU: case LEU: |
| comparison = reverse_condition (comparison); |
| branch_expander = gen_branch_false; |
| default: ; |
| } |
| emit_insn (gen_rtx_SET (get_t_reg_rtx (), |
| gen_rtx_fmt_ee (comparison, SImode, |
| operands[1], operands[2]))); |
| rtx_insn *jump = emit_jump_insn (branch_expander (operands[3])); |
| if (probability.initialized_p ()) |
| add_reg_br_prob_note (jump, probability); |
| } |
| |
| void |
| expand_cbranchsi4 (rtx *operands, enum rtx_code comparison) |
| { |
| expand_cbranchsi4 (operands, comparison, |
| profile_probability::uninitialized ()); |
| } |
| |
/* ??? How should we distribute probabilities when more than one branch
is generated?  So far we only have some ad-hoc observations:
| - If the operands are random, they are likely to differ in both parts. |
| - If comparing items in a hash chain, the operands are random or equal; |
| operation should be EQ or NE. |
| - If items are searched in an ordered tree from the root, we can expect |
| the highpart to be unequal about half of the time; operation should be |
| an inequality comparison, operands non-constant, and overall probability |
| about 50%. Likewise for quicksort. |
- Range checks will often be made against constants. Even if we assume for
| simplicity an even distribution of the non-constant operand over a |
| sub-range here, the same probability could be generated with differently |
| wide sub-ranges - as long as the ratio of the part of the subrange that |
| is before the threshold to the part that comes after the threshold stays |
| the same. Thus, we can't really tell anything here; |
| assuming random distribution is at least simple. |
| */ |
| bool |
| expand_cbranchdi4 (rtx *operands, enum rtx_code comparison) |
| { |
| enum rtx_code msw_taken, msw_skip, lsw_taken; |
| rtx_code_label *skip_label = NULL; |
| rtx op1h, op1l, op2h, op2l; |
| int num_branches; |
| profile_probability prob, rev_prob; |
| profile_probability msw_taken_prob = profile_probability::uninitialized (), |
| msw_skip_prob = profile_probability::uninitialized (), |
| lsw_taken_prob = profile_probability::uninitialized (); |
| |
| comparison = prepare_cbranch_operands (operands, DImode, comparison); |
| op1h = gen_highpart_mode (SImode, DImode, operands[1]); |
| op2h = gen_highpart_mode (SImode, DImode, operands[2]); |
| op1l = gen_lowpart (SImode, operands[1]); |
| op2l = gen_lowpart (SImode, operands[2]); |
| msw_taken = msw_skip = lsw_taken = LAST_AND_UNUSED_RTX_CODE; |
| prob = split_branch_probability; |
| rev_prob = prob.invert (); |
| switch (comparison) |
| { |
| case EQ: |
| msw_skip = NE; |
| lsw_taken = EQ; |
| if (prob.initialized_p ()) |
| { |
/* FIXME: This is not optimal. We do not really know the probability
that values differ by MSW only, but we should probably distribute
probabilities more evenly. */
| msw_skip_prob = rev_prob; |
| lsw_taken_prob = prob > profile_probability::never () |
| ? profile_probability::guessed_always () |
| : profile_probability::guessed_never (); |
| } |
| break; |
| case NE: |
| msw_taken = NE; |
| msw_taken_prob = prob; |
| lsw_taken = NE; |
| lsw_taken_prob = profile_probability::guessed_never (); |
| break; |
| case GTU: case GT: |
| msw_taken = comparison; |
| if (CONST_INT_P (op2l) && INTVAL (op2l) == -1) |
| break; |
| if (comparison != GTU || op2h != CONST0_RTX (SImode)) |
| msw_skip = swap_condition (msw_taken); |
| lsw_taken = GTU; |
| break; |
| case GEU: case GE: |
| if (op2l == CONST0_RTX (SImode)) |
| msw_taken = comparison; |
| else |
| { |
| msw_taken = comparison == GE ? GT : GTU; |
| msw_skip = swap_condition (msw_taken); |
| lsw_taken = GEU; |
| } |
| break; |
| case LTU: case LT: |
| msw_taken = comparison; |
| if (op2l == CONST0_RTX (SImode)) |
| break; |
| msw_skip = swap_condition (msw_taken); |
| lsw_taken = LTU; |
| break; |
| case LEU: case LE: |
| if (CONST_INT_P (op2l) && INTVAL (op2l) == -1) |
| msw_taken = comparison; |
| else |
| { |
| lsw_taken = LEU; |
| if (comparison == LE) |
| msw_taken = LT; |
| else if (op2h != CONST0_RTX (SImode)) |
| msw_taken = LTU; |
| else |
| { |
| msw_skip = swap_condition (LTU); |
| break; |
| } |
| msw_skip = swap_condition (msw_taken); |
| } |
| break; |
| default: return false; |
| } |
| num_branches = ((msw_taken != LAST_AND_UNUSED_RTX_CODE) |
| + (msw_skip != LAST_AND_UNUSED_RTX_CODE) |
| + (lsw_taken != LAST_AND_UNUSED_RTX_CODE)); |
| if (comparison != EQ && comparison != NE && num_branches > 1) |
| { |
| if (!CONSTANT_P (operands[2]) |
| && prob.initialized_p () |
| && prob.to_reg_br_prob_base () >= (int) (REG_BR_PROB_BASE * 3 / 8U) |
| && prob.to_reg_br_prob_base () <= (int) (REG_BR_PROB_BASE * 5 / 8U)) |
| { |
| msw_taken_prob = prob.apply_scale (1, 2); |
| msw_skip_prob = rev_prob.apply_scale (REG_BR_PROB_BASE, |
| rev_prob.to_reg_br_prob_base () |
| + REG_BR_PROB_BASE); |
| lsw_taken_prob = prob; |
| } |
| else |
| { |
| msw_taken_prob = prob; |
| msw_skip_prob = profile_probability::guessed_always (); |
| /* ??? If we have a constant op2h, should we use that when |
| calculating lsw_taken_prob? */ |
| lsw_taken_prob = prob; |
| } |
| } |
| operands[1] = op1h; |
| operands[2] = op2h; |
| |
| if (msw_taken != LAST_AND_UNUSED_RTX_CODE) |
| expand_cbranchsi4 (operands, msw_taken, msw_taken_prob); |
| if (msw_skip != LAST_AND_UNUSED_RTX_CODE) |
| { |
| rtx taken_label = operands[3]; |
| |
| /* Operands were possibly modified, but msw_skip doesn't expect this. |
| Always use the original ones. */ |
| if (msw_taken != LAST_AND_UNUSED_RTX_CODE) |
| { |
| operands[1] = op1h; |
| operands[2] = op2h; |
| } |
| |
| operands[3] = skip_label = gen_label_rtx (); |
| expand_cbranchsi4 (operands, msw_skip, msw_skip_prob); |
| operands[3] = taken_label; |
| } |
| operands[1] = op1l; |
| operands[2] = op2l; |
| if (lsw_taken != LAST_AND_UNUSED_RTX_CODE) |
| expand_cbranchsi4 (operands, lsw_taken, lsw_taken_prob); |
| if (msw_skip != LAST_AND_UNUSED_RTX_CODE) |
| emit_label (skip_label); |
| return true; |
| } |
| |
/* Given an operand, return 1 if the evaluated operand plugged into an
if_then_else will result in a branch_true, 0 if branch_false, or
-1 if neither applies. The truth table goes like this:
| |
| op | cmpval | code | result |
| ---------+--------+---------+-------------------- |
| T (0) | 0 | EQ (1) | 0 = 0 ^ (0 == 1) |
| T (0) | 1 | EQ (1) | 1 = 0 ^ (1 == 1) |
| T (0) | 0 | NE (0) | 1 = 0 ^ (0 == 0) |
| T (0) | 1 | NE (0) | 0 = 0 ^ (1 == 0) |
| !T (1) | 0 | EQ (1) | 1 = 1 ^ (0 == 1) |
| !T (1) | 1 | EQ (1) | 0 = 1 ^ (1 == 1) |
| !T (1) | 0 | NE (0) | 0 = 1 ^ (0 == 0) |
| !T (1) | 1 | NE (0) | 1 = 1 ^ (1 == 0) */ |
| int |
| sh_eval_treg_value (rtx op) |
| { |
| if (t_reg_operand (op, GET_MODE (op))) |
| return 1; |
| if (negt_reg_operand (op, GET_MODE (op))) |
| return 0; |
| |
| rtx_code code = GET_CODE (op); |
| if ((code != EQ && code != NE) || !CONST_INT_P (XEXP (op, 1))) |
| return -1; |
| |
| int cmpop = code == EQ ? 1 : 0; |
| int cmpval = INTVAL (XEXP (op, 1)); |
| if (cmpval != 0 && cmpval != 1) |
| return -1; |
| |
| int t; |
| if (t_reg_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0)))) |
| t = 0; |
| else if (negt_reg_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0)))) |
| t = 1; |
| else |
| return -1; |
| |
| return t ^ (cmpval == cmpop); |
| } |
| |
/* Emit INSN, possibly in a PARALLEL with a USE/CLOBBER of FPSCR bits in case
of floating-point comparisons. */
| static void |
| sh_emit_set_t_insn (rtx insn, machine_mode mode) |
| { |
| if (TARGET_FPU_ANY && GET_MODE_CLASS (mode) == MODE_FLOAT |
| && GET_CODE (insn) != PARALLEL) |
| { |
| insn = gen_rtx_PARALLEL (VOIDmode, |
| gen_rtvec (3, insn, |
| gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, FPSCR_STAT_REG)), |
| gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, FPSCR_MODES_REG)))); |
| } |
| emit_insn (insn); |
| } |
| |
| /* Prepare the operands for an scc instruction; make sure that the |
| compare has been done and the result is in T_REG. */ |
| void |
| sh_emit_scc_to_t (enum rtx_code code, rtx op0, rtx op1) |
| { |
| rtx t_reg = get_t_reg_rtx (); |
| enum rtx_code oldcode = code; |
| |
| /* First need a compare insn. */ |
| switch (code) |
| { |
| case NE: |
| /* It isn't possible to handle this case. */ |
| gcc_unreachable (); |
| case LT: |
| code = GT; |
| break; |
| case LE: |
| code = GE; |
| break; |
| case LTU: |
| code = GTU; |
| break; |
| case LEU: |
| code = GEU; |
| break; |
| default: |
| break; |
| } |
| if (code != oldcode) |
| std::swap (op0, op1); |
| |
| machine_mode mode = GET_MODE (op0); |
| if (mode == VOIDmode) |
| mode = GET_MODE (op1); |
| |
| op0 = force_reg (mode, op0); |
| if ((code != EQ && code != NE |
| && (op1 != const0_rtx |
| || code == GTU || code == GEU || code == LTU || code == LEU)) |
| || (mode == DImode && op1 != const0_rtx) |
| || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT)) |
| op1 = force_reg (mode, op1); |
| |
| sh_emit_set_t_insn (gen_rtx_SET (t_reg, |
| gen_rtx_fmt_ee (code, SImode, op0, op1)), |
| mode); |
| } |
| |
| /* Called from the md file, set up the operands of a compare instruction. */ |
| void |
| sh_emit_compare_and_branch (rtx *operands, machine_mode mode) |
| { |
| enum rtx_code code = GET_CODE (operands[0]); |
| enum rtx_code branch_code; |
| rtx op0 = operands[1]; |
| rtx op1 = operands[2]; |
| rtx insn; |
| bool need_ccmpeq = false; |
| |
| if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT) |
| { |
| op0 = force_reg (mode, op0); |
| op1 = force_reg (mode, op1); |
| } |
| else |
| { |
| if (code != EQ || mode == DImode) |
| { |
| /* Force args into regs, since we can't use constants here. */ |
| op0 = force_reg (mode, op0); |
| if (op1 != const0_rtx || code == GTU || code == GEU) |
| op1 = force_reg (mode, op1); |
| } |
| } |
| |
| if (GET_MODE_CLASS (mode) == MODE_FLOAT) |
| { |
| if (code == LT |
| || (code == LE && TARGET_IEEE && TARGET_SH2E) |
| || (code == GE && !(TARGET_IEEE && TARGET_SH2E))) |
| { |
| std::swap (op0, op1); |
| code = swap_condition (code); |
| } |
| |
| /* GE becomes fcmp/gt+fcmp/eq, for SH2E and TARGET_IEEE only. */ |
| if (code == GE) |
| { |
| gcc_assert (TARGET_IEEE && TARGET_SH2E); |
| need_ccmpeq = true; |
| code = GT; |
| } |
| |
| /* Now we can have EQ, NE, GT, LE. NE and LE are then transformed |
| to EQ/GT respectively. */ |
| gcc_assert (code == EQ || code == GT || code == NE || code == LE); |
| } |
| |
| switch (code) |
| { |
| case EQ: |
| case GT: |
| case GE: |
| case GTU: |
| case GEU: |
| branch_code = code; |
| break; |
| case NE: |
| case LT: |
| case LE: |
| case LTU: |
| case LEU: |
| branch_code = reverse_condition (code); |
| break; |
| default: |
| gcc_unreachable (); |
| } |
| |
| insn = gen_rtx_SET (get_t_reg_rtx (), |
| gen_rtx_fmt_ee (branch_code, SImode, op0, op1)); |
| |
| sh_emit_set_t_insn (insn, mode); |
| if (need_ccmpeq) |
| sh_emit_set_t_insn (gen_ieee_ccmpeqsf_t (op0, op1), mode); |
| |
| if (branch_code == code) |
| emit_jump_insn (gen_branch_true (operands[3])); |
| else |
| emit_jump_insn (gen_branch_false (operands[3])); |
| } |
| |
| void |
| sh_emit_compare_and_set (rtx *operands, machine_mode mode) |
| { |
| enum rtx_code code = GET_CODE (operands[1]); |
| rtx op0 = operands[2]; |
| rtx op1 = operands[3]; |
| rtx_code_label *lab = NULL; |
| bool invert = false; |
| |
| op0 = force_reg (mode, op0); |
| if ((code != EQ && code != NE |
| && (op1 != const0_rtx |
| || code == GTU || code == GEU || code == LTU || code == LEU)) |
| || (mode == DImode && op1 != const0_rtx) |
| || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT)) |
| op1 = force_reg (mode, op1); |
| |
| if (GET_MODE_CLASS (mode) == MODE_FLOAT) |
| { |
| if (code == LT || code == LE) |
| { |
| std::swap (op0, op1); |
| code = swap_condition (code); |
| } |
| if (code == GE) |
| { |
| if (TARGET_IEEE) |
| { |
| lab = gen_label_rtx (); |
| sh_emit_scc_to_t (EQ, op0, op1); |
| emit_jump_insn (gen_branch_true (lab)); |
| code = GT; |
| } |
| else |
| { |
| code = LT; |
| invert = true; |
| } |
| } |
| } |
| |
| if (code == NE) |
| { |
| code = EQ; |
| invert = true; |
| } |
| |
| sh_emit_scc_to_t (code, op0, op1); |
| if (lab) |
| emit_label (lab); |
| if (invert) |
| emit_insn (gen_movnegt (operands[0], get_t_reg_rtx ())); |
| else |
| emit_move_insn (operands[0], get_t_reg_rtx ()); |
| } |
| |
| /* Functions to output assembly code. */ |
| |
| /* Return a sequence of instructions to perform DI or DF move. |
| |
| Since the SH cannot move a DI or DF in one instruction, we have |
| to take care when we see overlapping source and dest registers. */ |
| const char * |
| output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[], |
| machine_mode mode) |
| { |
| rtx dst = operands[0]; |
| rtx src = operands[1]; |
| |
| if (MEM_P (dst) |
| && GET_CODE (XEXP (dst, 0)) == PRE_DEC) |
| return "mov.l %T1,%0" "\n" |
| " mov.l %1,%0"; |
| |
| if (register_operand (dst, mode) |
| && register_operand (src, mode)) |
| { |
| if (REGNO (src) == MACH_REG) |
| return "sts mach,%S0" "\n" |
| " sts macl,%R0"; |
| |
/* For mov.d r1,r2 do r2->r3 first, then r1->r2;
for mov.d r1,r0 do r1->r0 first, then r2->r1. */
| if (REGNO (src) + 1 == REGNO (dst)) |
| return "mov %T1,%T0" "\n" |
| " mov %1,%0"; |
| else |
| return "mov %1,%0" "\n" |
| " mov %T1,%T0"; |
| } |
| else if (CONST_INT_P (src)) |
| { |
| if (INTVAL (src) < 0) |
| output_asm_insn ("mov #-1,%S0", operands); |
| else |
| output_asm_insn ("mov #0,%S0", operands); |
| |
| return "mov %1,%R0"; |
| } |
| else if (MEM_P (src)) |
| { |
| int ptrreg = -1; |
| int dreg = REGNO (dst); |
| rtx inside = XEXP (src, 0); |
| |
| switch (GET_CODE (inside)) |
| { |
| case REG: |
| ptrreg = REGNO (inside); |
| break; |
| |
| case SUBREG: |
| ptrreg = subreg_regno (inside); |
| break; |
| |
| case PLUS: |
| ptrreg = REGNO (XEXP (inside, 0)); |
/* ??? An r0+REG address shouldn't be possible here, because it isn't
an offsettable address. Unfortunately, offsettable addresses use
QImode to check the offset, and a QImode offsettable address
requires r0 for the other operand, which is not currently
supported, so we can't use the 'o' constraint.
Thus we must check for and handle r0+REG addresses here.
We punt for now, since this is likely very rare. */
| gcc_assert (!REG_P (XEXP (inside, 1))); |
| break; |
| |
| case LABEL_REF: |
| return "mov.l %1,%0" "\n" |
| " mov.l %1+4,%T0"; |
| case POST_INC: |
| return "mov.l %1,%0" "\n" |
| " mov.l %1,%T0"; |
| default: |
| gcc_unreachable (); |
| } |
| |
| /* Work out the safe way to copy. Copy into the second half first. */ |
| if (dreg == ptrreg) |
| return "mov.l %T1,%T0" "\n" |
| " mov.l %1,%0"; |
| } |
| |
| return "mov.l %1,%0" "\n" |
| " mov.l %T1,%T0"; |
| } |
| |
| /* Print an instruction which would have gone into a delay slot after |
| another instruction, but couldn't because the other instruction expanded |
| into a sequence where putting the slot insn at the end wouldn't work. */ |
| static void |
| print_slot (rtx_sequence *seq) |
| { |
| final_scan_insn (seq->insn (1), asm_out_file, optimize, 1, NULL); |
| |
| seq->insn (1)->set_deleted (); |
| } |
| |
| const char * |
| output_far_jump (rtx_insn *insn, rtx op) |
| { |
| struct { rtx lab, reg, op; } this_jmp; |
| rtx_code_label *braf_base_lab = NULL; |
| const char *jump; |
| int far; |
| int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn)); |
| rtx_insn *prev; |
| |
| this_jmp.lab = gen_label_rtx (); |
| |
| if (TARGET_SH2 |
| && offset >= -32764 |
| && offset - get_attr_length (insn) <= 32766 |
| && ! CROSSING_JUMP_P (insn)) |
| { |
| far = 0; |
| jump = "mov.w %O0,%1" "\n" |
| " braf %1"; |
| } |
| else |
| { |
| far = 1; |
| if (flag_pic) |
| { |
| if (TARGET_SH2) |
| jump = "mov.l %O0,%1" "\n" |
| " braf %1"; |
| else |
| jump = "mov.l r0,@-r15" "\n" |
| " mova %O0,r0" "\n" |
| " mov.l @r0,%1" "\n" |
| " add r0,%1" "\n" |
| " mov.l @r15+,r0" "\n" |
| " jmp @%1"; |
| } |
| else |
| jump = "mov.l %O0,%1" "\n" |
| " jmp @%1"; |
| } |
| /* If we have a scratch register available, use it. */ |
| if (NONJUMP_INSN_P ((prev = prev_nonnote_insn (insn))) |
| && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch) |
| { |
| this_jmp.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0)); |
| if (REGNO (this_jmp.reg) == R0_REG && flag_pic && ! TARGET_SH2) |
| jump = "mov.l r1,@-r15" "\n" |
| " mova %O0,r0" "\n" |
| " mov.l @r0,r1" "\n" |
| " add r1,r0" "\n" |
| " mov.l @r15+,r1" "\n" |
| " jmp @%1"; |
| output_asm_insn (jump, &this_jmp.lab); |
| if (dbr_sequence_length ()) |
| print_slot (final_sequence); |
| else |
| output_asm_insn ("nop", 0); |
| } |
| else |
| { |
| /* Output the delay slot insn first if any. */ |
| if (dbr_sequence_length ()) |
| print_slot (final_sequence); |
| |
| this_jmp.reg = gen_rtx_REG (SImode, 13); |
| output_asm_insn ("mov.l r13,@-r15", 0); |
| output_asm_insn (jump, &this_jmp.lab); |
| output_asm_insn ("mov.l @r15+,r13", 0); |
| } |
| if (far && flag_pic && TARGET_SH2) |
| { |
| braf_base_lab = gen_label_rtx (); |
| (*targetm.asm_out.internal_label) (asm_out_file, "L", |
| CODE_LABEL_NUMBER (braf_base_lab)); |
| } |
| if (far) |
| output_asm_insn (".align 2", 0); |
| (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this_jmp.lab)); |
| this_jmp.op = op; |
| if (far && flag_pic) |
| { |
| if (TARGET_SH2) |
| this_jmp.lab = braf_base_lab; |
| output_asm_insn (".long %O2-%O0", &this_jmp.lab); |
| } |
| else |
| output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this_jmp.lab); |
| return ""; |
| } |
| |
| /* Local label counter, used for constants in the pool and inside |
| pattern branches. */ |
| static int lf = 100; |
| |
| /* Output code for ordinary branches. */ |
| const char * |
| output_branch (int logic, rtx_insn *insn, rtx *operands) |
| { |
| switch (get_attr_length (insn)) |
| { |
| case 6: |
/* This can happen if filling the delay slot has caused a forward
branch to exceed its range (we could reverse it, but only
when we know we won't overextend other branches; this should
best be handled by relaxation).
It can also happen when other condbranches hoist delay slot insns
from their destination, thus leading to code size increase.
But the branch will still be in the range -4092..+4098 bytes. */
| if (! TARGET_RELAX) |
| { |
| int label = lf++; |
| /* The call to print_slot will clobber the operands. */ |
| rtx op0 = operands[0]; |
| |
| /* If the instruction in the delay slot is annulled (true), then |
| there is no delay slot where we can put it now. The only safe |
| place for it is after the label. final will do that by default. */ |
| |
| if (final_sequence |
| && ! INSN_ANNULLED_BRANCH_P (final_sequence->insn (0)) |
| && get_attr_length (final_sequence->insn (1))) |
| { |
| asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t", |
| ASSEMBLER_DIALECT ? "/" : ".", label); |
| print_slot (final_sequence); |
| } |
| else |
| asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label); |
| |
| output_asm_insn ("bra\t%l0", &op0); |
| fprintf (asm_out_file, "\tnop\n"); |
| (*targetm.asm_out.internal_label) (asm_out_file, "LF", label); |
| |
| return ""; |
| } |
| /* FALLTHRU */ |
| /* When relaxing, handle this like a short branch. The linker |
| will fix it up if it still doesn't fit after relaxation. */ |
| case 2: |
| return logic ? "bt%.\t%l0" : "bf%.\t%l0"; |
| |
| /* These are for SH2e, in which we have to account for the |
| extra nop because of the hardware bug in annulled branches. */ |
| case 8: |
| if (! TARGET_RELAX) |
| { |
| int label = lf++; |
| |
| gcc_assert (!final_sequence |
| || !(INSN_ANNULLED_BRANCH_P |
| (XVECEXP (final_sequence, 0, 0)))); |
| asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n", |
| logic ? "f" : "t", |
| ASSEMBLER_DIALECT ? "/" : ".", label); |
| fprintf (asm_out_file, "\tnop\n"); |
| output_asm_insn ("bra\t%l0", operands); |
| fprintf (asm_out_file, "\tnop\n"); |
| (*targetm.asm_out.internal_label) (asm_out_file, "LF", label); |
| |
| return ""; |
| } |
| /* FALLTHRU */ |
| case 4: |
| { |
| char buffer[10]; |
| |
| sprintf (buffer, "b%s%ss\t%%l0", |
| logic ? "t" : "f", |
| ASSEMBLER_DIALECT ? "/" : "."); |
| output_asm_insn (buffer, &operands[0]); |
| return "nop"; |
| } |
| |
| default: |
/* There should be no branches longer than this now - that would
indicate that something has destroyed the branches set
up in machine_dependent_reorg. */
| gcc_unreachable (); |
| } |
| } |
| |
/* Output a code sequence for INSN using TEMPL with OPERANDS; but before,
fill in operands[9] as a label to the successor insn.
We try to use jump threading where possible.
If CODE matches the comparison in the IF_THEN_ELSE of a following jump,
we assume the jump is taken. I.e. EQ means follow jmp and bf, NE means
follow jmp and bt, if the address is in range. */
| const char * |
| output_branchy_insn (enum rtx_code code, const char *templ, |
| rtx_insn *insn, rtx *operands) |
| { |
| rtx_insn *next_insn = NEXT_INSN (insn); |
| |
| if (next_insn && JUMP_P (next_insn) && condjump_p (next_insn)) |
| { |
| rtx src = SET_SRC (PATTERN (next_insn)); |
| if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code) |
| { |
| /* Following branch not taken */ |
| rtx_code_label *lab = gen_label_rtx (); |
| emit_label_after (lab, next_insn); |
| INSN_ADDRESSES_NEW (lab, |
| INSN_ADDRESSES (INSN_UID (next_insn)) |
| + get_attr_length (next_insn)); |
| operands[9] = lab; |
| return templ; |
| } |
| else |
| { |
| int offset = (branch_dest (next_insn) |
| - INSN_ADDRESSES (INSN_UID (next_insn)) + 4); |
| if (offset >= -252 && offset <= 258) |
| { |
| if (GET_CODE (src) == IF_THEN_ELSE) |
| /* branch_true */ |
| src = XEXP (src, 1); |
| operands[9] = src; |
| return templ; |
| } |
| } |
| } |
| rtx_code_label *lab = gen_label_rtx (); |
| emit_label_after (lab, insn); |
| INSN_ADDRESSES_NEW (lab, |
| INSN_ADDRESSES (INSN_UID (insn)) |
| + get_attr_length (insn)); |
| operands[9] = lab; |
| return templ; |
| } |
| |
| const char * |
| output_ieee_ccmpeq (rtx_insn *insn, rtx *operands) |
| { |
| return output_branchy_insn (NE, "bt %l9" "\n" |
| " fcmp/eq %1,%0", |
| insn, operands); |
| } |
| |
| /* Output the start of the assembler file. */ |
| static void |
| sh_file_start (void) |
| { |
| default_file_start (); |
| |
| if (TARGET_ELF) |
/* We need to show the text section with the proper
attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
emits it without attributes, or else GAS
will complain. We could teach GAS specifically about the
default attributes for our choice of text section, but
then we would have to change GAS again if/when we change
the text section name. */
| fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP); |
| else |
| /* Switch to the data section so that the coffsem symbol |
| isn't in the text section. */ |
| switch_to_section (data_section); |
| |
| if (TARGET_LITTLE_ENDIAN) |
| fputs ("\t.little\n", asm_out_file); |
| } |
| |
| /* Implementation of TARGET_ASM_INTEGER for SH. Pointers to functions |
| need to be output as pointers to function descriptors for |
| FDPIC. */ |
| |
| static bool |
| sh_assemble_integer (rtx value, unsigned int size, int aligned_p) |
| { |
| if (TARGET_FDPIC && size == UNITS_PER_WORD |
| && GET_CODE (value) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (value)) |
| { |
| fputs ("\t.long\t", asm_out_file); |
| output_addr_const (asm_out_file, value); |
| fputs ("@FUNCDESC\n", asm_out_file); |
| return true; |
| } |
| return default_assemble_integer (value, size, aligned_p); |
| } |
| |
| /* Check if PAT includes UNSPEC_CALLER unspec pattern. */ |
| static bool |
| unspec_caller_rtx_p (rtx pat) |
| { |
| rtx base, offset; |
| split_const (pat, &base, &offset); |
| |
| if (GET_CODE (base) == UNSPEC) |
| { |
| if (XINT (base, 1) == UNSPEC_CALLER) |
| return true; |
| for (int i = 0; i < XVECLEN (base, 0); i++) |
| if (unspec_caller_rtx_p (XVECEXP (base, 0, i))) |
| return true; |
| } |
| return false; |
| } |
| |
/* Indicate that INSN cannot be duplicated. This is true for insns
that generate a unique label. */
| static bool |
| sh_cannot_copy_insn_p (rtx_insn *insn) |
| { |
| if (!reload_completed || !flag_pic) |
| return false; |
| |
| if (!NONJUMP_INSN_P (insn)) |
| return false; |
| if (asm_noperands (insn) >= 0) |
| return false; |
| |
| rtx pat = PATTERN (insn); |
| |
| if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == USE) |
| return false; |
| |
| if (TARGET_FDPIC && GET_CODE (pat) == PARALLEL) |
| { |
| rtx t = XVECEXP (pat, 0, XVECLEN (pat, 0) - 1); |
| if (GET_CODE (t) == USE && unspec_caller_rtx_p (XEXP (t, 0))) |
| return true; |
| } |
| |
| if (GET_CODE (pat) != SET) |
| return false; |
| pat = SET_SRC (pat); |
| |
| if (unspec_caller_rtx_p (pat)) |
| return true; |
| |
| return false; |
| } |
| |
| /* Number of instructions used to make an arithmetic right shift by N. */ |
| static const char ashiftrt_insns[] = |
| { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2}; |
| |
/* Description of a logical left or right shift, when expanded to a sequence
of 1/2/8/16 shifts.
Notice that one-bit right shifts clobber the T bit. One-bit left shifts
are done with an 'add Rn,Rm' insn and thus do not clobber the T bit. */
| enum |
| { |
| ASHL_CLOBBERS_T = 1 << 0, |
| LSHR_CLOBBERS_T = 1 << 1 |
| }; |
| |
| struct ashl_lshr_sequence |
| { |
| char insn_count; |
| signed char amount[6]; |
| char clobbers_t; |
| }; |
| |
| static const struct ashl_lshr_sequence ashl_lshr_seq[32] = |
| { |
| { 0, { 0 }, 0 }, // 0 |
| { 1, { 1 }, LSHR_CLOBBERS_T }, |
| { 1, { 2 }, 0 }, |
| { 2, { 2, 1 }, LSHR_CLOBBERS_T }, |
| { 2, { 2, 2 }, 0 }, // 4 |
| { 3, { 2, 1, 2 }, LSHR_CLOBBERS_T }, |
| { 3, { 2, 2, 2 }, 0 }, |
| { 4, { 2, 2, 1, 2 }, LSHR_CLOBBERS_T }, |
| { 1, { 8 }, 0 }, // 8 |
| { 2, { 8, 1 }, LSHR_CLOBBERS_T }, |
| { 2, { 8, 2 }, 0 }, |
| { 3, { 8, 1, 2 }, LSHR_CLOBBERS_T }, |
| { 3, { 8, 2, 2 }, 0 }, // 12 |
| { 4, { 8, 2, 1, 2 }, LSHR_CLOBBERS_T }, |
| { 3, { 8, -2, 8 }, 0 }, |
| { 3, { 8, -1, 8 }, ASHL_CLOBBERS_T }, |
| { 1, { 16 }, 0 }, // 16 |
| { 2, { 16, 1 }, LSHR_CLOBBERS_T }, |
| { 2, { 16, 2 }, 0 }, |
| { 3, { 16, 1, 2 }, LSHR_CLOBBERS_T }, |
| { 3, { 16, 2, 2 }, 0 }, // 20 |
| { 4, { 16, 2, 1, 2 }, LSHR_CLOBBERS_T }, |
| { 3, { 16, -2, 8 }, 0 }, |
| { 3, { 16, -1, 8 }, ASHL_CLOBBERS_T }, |
| { 2, { 16, 8 }, 0 }, // 24 |
| { 3, { 16, 1, 8 }, LSHR_CLOBBERS_T }, |
| { 3, { 16, 8, 2 }, 0 }, |
| { 4, { 16, 8, 1, 2 }, LSHR_CLOBBERS_T }, |
| { 4, { 16, 8, 2, 2 }, 0 }, // 28 |
| { 4, { 16, -1, -2, 16 }, ASHL_CLOBBERS_T }, |
| { 3, { 16, -2, 16 }, 0 }, |
| |
/* For a right shift by 31 a 2 insn shll-movt sequence can be used.
For a left shift by 31 a 2 insn and-rotl sequence can be used.
However, the shift-and combiner code needs this entry here to be in
terms of real shift insns. */
| { 3, { 16, -1, 16 }, ASHL_CLOBBERS_T } |
| }; |
| |
/* Individual shift amounts for shift amounts < 16, where up to the three
highmost bits might be clobbered. This is typically used when combined
with some kind of sign or zero extension. */
| static const struct ashl_lshr_sequence ext_ashl_lshr_seq[32] = |
| { |
| { 0, { 0 }, 0 }, // 0 |
| { 1, { 1 }, LSHR_CLOBBERS_T }, |
| { 1, { 2 }, 0 }, |
| { 2, { 2, 1 }, LSHR_CLOBBERS_T }, |
| { 2, { 2, 2 }, 0 }, // 4 |
| { 3, { 2, 1, 2 }, LSHR_CLOBBERS_T }, |
| { 2, { 8, -2 }, 0 }, |
| { 2, { 8, -1 }, ASHL_CLOBBERS_T }, |
| { 1, { 8 }, 0 }, // 8 |
| { 2, { 8, 1 }, LSHR_CLOBBERS_T }, |
| { 2, { 8, 2 }, 0 }, |
| { 3, { 8, 1, 2 }, LSHR_CLOBBERS_T }, |
| { 3, { 8, 2, 2 }, 0 }, // 12 |
| { 3, { 16, -2, -1 }, ASHL_CLOBBERS_T }, |
| { 2, { 16, -2 }, 0 }, |
| { 2, { 16, -1 }, ASHL_CLOBBERS_T }, |
| { 1, { 16 }, 0 }, // 16 |
| { 2, { 16, 1 }, LSHR_CLOBBERS_T }, |
| { 2, { 16, 2 }, 0 }, |
| |