| /* Subroutines for insn-output.c for SPARC. |
| Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, |
| 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. |
| Contributed by Michael Tiemann (tiemann@cygnus.com) |
| 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans, |
| at Cygnus Support. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 2, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING. If not, write to |
| the Free Software Foundation, 59 Temple Place - Suite 330, |
| Boston, MA 02111-1307, USA. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "tm.h" |
| #include "tree.h" |
| #include "rtl.h" |
| #include "regs.h" |
| #include "hard-reg-set.h" |
| #include "real.h" |
| #include "insn-config.h" |
| #include "conditions.h" |
| #include "output.h" |
| #include "insn-attr.h" |
| #include "flags.h" |
| #include "function.h" |
| #include "expr.h" |
| #include "optabs.h" |
| #include "recog.h" |
| #include "toplev.h" |
| #include "ggc.h" |
| #include "tm_p.h" |
| #include "debug.h" |
| #include "target.h" |
| #include "target-def.h" |
| #include "cfglayout.h" |
| |
| /* 1 if the caller has placed an "unimp" insn immediately after the call. |
| This is used in v8 code when calling a function that returns a structure. |
| v9 doesn't have this. Be careful to have this test be the same as that |
| used on the call. */ |
| |
| #define SKIP_CALLERS_UNIMP_P \ |
| (!TARGET_ARCH64 && current_function_returns_struct \ |
| && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))) \ |
| && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl))) \ |
| == INTEGER_CST)) |
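| /* For example, a v8 caller of a function returning a structure |
| emits roughly: |
| |
| call foo |
| nop ! delay slot |
| unimp <size> ! low 12 bits hold the aggregate size |
| |
| and the callee returns to %o7+12 instead of the usual %o7+8 in |
| order to skip the unimp.  */ |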
| |
| /* Global variables for machine-dependent things. */ |
| |
| /* Size of frame. Need to know this to emit return insns from leaf procedures. |
| ACTUAL_FSIZE is set by compute_frame_size() which is called during the |
| reload pass. This is important as the value is later used in insn |
| scheduling (to see what can go in a delay slot). |
| APPARENT_FSIZE is the size of the stack less the register save area and less |
| the outgoing argument area. It is used when saving call preserved regs. */ |
| static HOST_WIDE_INT apparent_fsize; |
| static HOST_WIDE_INT actual_fsize; |
| |
| /* Number of live general or floating point registers needed to be |
| saved (as 4-byte quantities). */ |
| static int num_gfregs; |
| |
| /* Save the operands last given to a compare for use when we |
| generate a scc or bcc insn. */ |
| rtx sparc_compare_op0, sparc_compare_op1; |
| |
| /* Coordinate with the md file wrt special insns created by |
| sparc_nonflat_function_epilogue. */ |
| bool sparc_emitting_epilogue; |
| |
| /* Vector to say how input registers are mapped to output registers. |
| HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to |
| eliminate it. You must use -fomit-frame-pointer to get that. */ |
| char leaf_reg_remap[] = |
| { 0, 1, 2, 3, 4, 5, 6, 7, |
| -1, -1, -1, -1, -1, -1, 14, -1, |
| -1, -1, -1, -1, -1, -1, -1, -1, |
| 8, 9, 10, 11, 12, 13, -1, 15, |
| |
| 32, 33, 34, 35, 36, 37, 38, 39, |
| 40, 41, 42, 43, 44, 45, 46, 47, |
| 48, 49, 50, 51, 52, 53, 54, 55, |
| 56, 57, 58, 59, 60, 61, 62, 63, |
| 64, 65, 66, 67, 68, 69, 70, 71, |
| 72, 73, 74, 75, 76, 77, 78, 79, |
| 80, 81, 82, 83, 84, 85, 86, 87, |
| 88, 89, 90, 91, 92, 93, 94, 95, |
| 96, 97, 98, 99, 100}; |
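| /* Reading the table above: in a leaf function the incoming registers |
| %i0-%i5 (24-29) are renumbered as %o0-%o5 (8-13) and %i7 (31) as |
| %o7 (15), while entries of -1 mark registers that may not appear |
| in a leaf function at all.  */ |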
| |
| /* Vector, indexed by hard register number, which contains 1 |
| for a register that is allowable in a candidate for leaf |
| function treatment. */ |
| char sparc_leaf_regs[] = |
| { 1, 1, 1, 1, 1, 1, 1, 1, |
| 0, 0, 0, 0, 0, 0, 1, 0, |
| 0, 0, 0, 0, 0, 0, 0, 0, |
| 1, 1, 1, 1, 1, 1, 0, 1, |
| 1, 1, 1, 1, 1, 1, 1, 1, |
| 1, 1, 1, 1, 1, 1, 1, 1, |
| 1, 1, 1, 1, 1, 1, 1, 1, |
| 1, 1, 1, 1, 1, 1, 1, 1, |
| 1, 1, 1, 1, 1, 1, 1, 1, |
| 1, 1, 1, 1, 1, 1, 1, 1, |
| 1, 1, 1, 1, 1, 1, 1, 1, |
| 1, 1, 1, 1, 1, 1, 1, 1, |
| 1, 1, 1, 1, 1}; |
| |
| struct machine_function GTY(()) |
| { |
| /* Some local-dynamic TLS symbol name. */ |
| const char *some_ld_name; |
| }; |
| |
| /* Name of where we pretend to think the frame pointer points. |
| Normally, this is "%fp", but if we are in a leaf procedure, |
| this is "%sp+something". We record "something" separately as it may be |
| too big for reg+constant addressing. */ |
| |
| static const char *frame_base_name; |
| static HOST_WIDE_INT frame_base_offset; |
| |
| static void sparc_init_modes (void); |
| static int save_regs (FILE *, int, int, const char *, int, int, HOST_WIDE_INT); |
| static int restore_regs (FILE *, int, int, const char *, int, int); |
| static void build_big_number (FILE *, HOST_WIDE_INT, const char *); |
| static void scan_record_type (tree, int *, int *, int *); |
| static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode, |
| tree, int, int, int *, int *); |
| |
| static int supersparc_adjust_cost (rtx, rtx, rtx, int); |
| static int hypersparc_adjust_cost (rtx, rtx, rtx, int); |
| |
| static void sparc_output_addr_vec (rtx); |
| static void sparc_output_addr_diff_vec (rtx); |
| static void sparc_output_deferred_case_vectors (void); |
| static int check_return_regs (rtx); |
| static int epilogue_renumber (rtx *, int); |
| static bool sparc_assemble_integer (rtx, unsigned int, int); |
| static int set_extends (rtx); |
| static void output_restore_regs (FILE *, int); |
| static void sparc_output_function_prologue (FILE *, HOST_WIDE_INT); |
| static void sparc_output_function_epilogue (FILE *, HOST_WIDE_INT); |
| static void sparc_flat_function_epilogue (FILE *, HOST_WIDE_INT); |
| static void sparc_flat_function_prologue (FILE *, HOST_WIDE_INT); |
| static void sparc_flat_save_restore (FILE *, const char *, int, |
| unsigned long, unsigned long, |
| const char *, const char *, |
| HOST_WIDE_INT); |
| static void sparc_nonflat_function_epilogue (FILE *, HOST_WIDE_INT, int); |
| static void sparc_nonflat_function_prologue (FILE *, HOST_WIDE_INT, int); |
| #ifdef OBJECT_FORMAT_ELF |
| static void sparc_elf_asm_named_section (const char *, unsigned int); |
| #endif |
| static void sparc_aout_select_section (tree, int, unsigned HOST_WIDE_INT) |
| ATTRIBUTE_UNUSED; |
| static void sparc_aout_select_rtx_section (enum machine_mode, rtx, |
| unsigned HOST_WIDE_INT) |
| ATTRIBUTE_UNUSED; |
| |
| static int sparc_adjust_cost (rtx, rtx, rtx, int); |
| static int sparc_issue_rate (void); |
| static void sparc_sched_init (FILE *, int, int); |
| static int sparc_use_dfa_pipeline_interface (void); |
| static int sparc_use_sched_lookahead (void); |
| |
| static void emit_soft_tfmode_libcall (const char *, int, rtx *); |
| static void emit_soft_tfmode_binop (enum rtx_code, rtx *); |
| static void emit_soft_tfmode_unop (enum rtx_code, rtx *); |
| static void emit_soft_tfmode_cvt (enum rtx_code, rtx *); |
| static void emit_hard_tfmode_operation (enum rtx_code, rtx *); |
| |
| static bool sparc_function_ok_for_sibcall (tree, tree); |
| static void sparc_init_libfuncs (void); |
| static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, |
| HOST_WIDE_INT, tree); |
| static struct machine_function * sparc_init_machine_status (void); |
| static bool sparc_cannot_force_const_mem (rtx); |
| static rtx sparc_tls_get_addr (void); |
| static rtx sparc_tls_got (void); |
| static const char *get_some_local_dynamic_name (void); |
| static int get_some_local_dynamic_name_1 (rtx *, void *); |
| static bool sparc_rtx_costs (rtx, int, int, int *); |
| |
| /* Option handling. */ |
| |
| /* Code model option as passed by user. */ |
| const char *sparc_cmodel_string; |
| /* Parsed value. */ |
| enum cmodel sparc_cmodel; |
| |
| char sparc_hard_reg_printed[8]; |
| |
| struct sparc_cpu_select sparc_select[] = |
| { |
| /* switch name, tune arch */ |
| { (char *)0, "default", 1, 1 }, |
| { (char *)0, "-mcpu=", 1, 1 }, |
| { (char *)0, "-mtune=", 1, 0 }, |
| { 0, 0, 0, 0 } |
| }; |
| |
| /* CPU type. This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx. */ |
| enum processor_type sparc_cpu; |
| |
| /* Initialize the GCC target structure. */ |
| |
| /* The sparc default is to use .half rather than .short for aligned |
| HI objects. Use .word instead of .long on non-ELF systems. */ |
| #undef TARGET_ASM_ALIGNED_HI_OP |
| #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t" |
| #ifndef OBJECT_FORMAT_ELF |
| #undef TARGET_ASM_ALIGNED_SI_OP |
| #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t" |
| #endif |
| |
| #undef TARGET_ASM_UNALIGNED_HI_OP |
| #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t" |
| #undef TARGET_ASM_UNALIGNED_SI_OP |
| #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t" |
| #undef TARGET_ASM_UNALIGNED_DI_OP |
| #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t" |
| |
| /* The target hook has to handle DI-mode values. */ |
| #undef TARGET_ASM_INTEGER |
| #define TARGET_ASM_INTEGER sparc_assemble_integer |
| |
| #undef TARGET_ASM_FUNCTION_PROLOGUE |
| #define TARGET_ASM_FUNCTION_PROLOGUE sparc_output_function_prologue |
| #undef TARGET_ASM_FUNCTION_EPILOGUE |
| #define TARGET_ASM_FUNCTION_EPILOGUE sparc_output_function_epilogue |
| |
| #undef TARGET_SCHED_ADJUST_COST |
| #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost |
| #undef TARGET_SCHED_ISSUE_RATE |
| #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate |
| #undef TARGET_SCHED_INIT |
| #define TARGET_SCHED_INIT sparc_sched_init |
| #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE |
| #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE sparc_use_dfa_pipeline_interface |
| #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD |
| #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead |
| |
| #undef TARGET_FUNCTION_OK_FOR_SIBCALL |
| #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall |
| |
| #undef TARGET_INIT_LIBFUNCS |
| #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs |
| |
| #ifdef HAVE_AS_TLS |
| #undef TARGET_HAVE_TLS |
| #define TARGET_HAVE_TLS true |
| #endif |
| #undef TARGET_CANNOT_FORCE_CONST_MEM |
| #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem |
| |
| #undef TARGET_ASM_OUTPUT_MI_THUNK |
| #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk |
| #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK |
| #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall |
| |
| #undef TARGET_RTX_COSTS |
| #define TARGET_RTX_COSTS sparc_rtx_costs |
| #undef TARGET_ADDRESS_COST |
| #define TARGET_ADDRESS_COST hook_int_rtx_0 |
| |
| struct gcc_target targetm = TARGET_INITIALIZER; |
| |
| /* Validate and override various options, and do some machine dependent |
| initialization. */ |
| |
| void |
| sparc_override_options (void) |
| { |
| static struct code_model { |
| const char *const name; |
| const int value; |
| } const cmodels[] = { |
| { "32", CM_32 }, |
| { "medlow", CM_MEDLOW }, |
| { "medmid", CM_MEDMID }, |
| { "medany", CM_MEDANY }, |
| { "embmedany", CM_EMBMEDANY }, |
| { 0, 0 } |
| }; |
| const struct code_model *cmodel; |
| /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */ |
| static struct cpu_default { |
| const int cpu; |
| const char *const name; |
| } const cpu_default[] = { |
| /* There must be one entry here for each TARGET_CPU value. */ |
| { TARGET_CPU_sparc, "cypress" }, |
| { TARGET_CPU_sparclet, "tsc701" }, |
| { TARGET_CPU_sparclite, "f930" }, |
| { TARGET_CPU_v8, "v8" }, |
| { TARGET_CPU_hypersparc, "hypersparc" }, |
| { TARGET_CPU_sparclite86x, "sparclite86x" }, |
| { TARGET_CPU_supersparc, "supersparc" }, |
| { TARGET_CPU_v9, "v9" }, |
| { TARGET_CPU_ultrasparc, "ultrasparc" }, |
| { TARGET_CPU_ultrasparc3, "ultrasparc3" }, |
| { 0, 0 } |
| }; |
| const struct cpu_default *def; |
| /* Table of values for -m{cpu,tune}=. */ |
| static struct cpu_table { |
| const char *const name; |
| const enum processor_type processor; |
| const int disable; |
| const int enable; |
| } const cpu_table[] = { |
| { "v7", PROCESSOR_V7, MASK_ISA, 0 }, |
| { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 }, |
| { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 }, |
| /* TI TMS390Z55 supersparc */ |
| { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 }, |
| { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE }, |
| /* The Fujitsu MB86930 is the original sparclite chip, with no fpu. |
| The Fujitsu MB86934 is the recent sparclite chip, with an fpu. */ |
| { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE }, |
| { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU }, |
| { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU }, |
| { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU, |
| MASK_SPARCLITE }, |
| { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET }, |
| /* TEMIC sparclet */ |
| { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET }, |
| { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 }, |
| /* TI ultrasparc I, II, IIi */ |
| { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9 |
| /* Although insns using %y are deprecated, it is a clear win on current |
| ultrasparcs. */ |
| |MASK_DEPRECATED_V8_INSNS}, |
| /* TI ultrasparc III */ |
| /* ??? Check if %y issue still holds true in ultra3. */ |
| { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS}, |
| { 0, 0, 0, 0 } |
| }; |
| const struct cpu_table *cpu; |
| const struct sparc_cpu_select *sel; |
| int fpu; |
| |
| #ifndef SPARC_BI_ARCH |
| /* Check for unsupported architecture size. */ |
| if (! TARGET_64BIT != DEFAULT_ARCH32_P) |
| error ("%s is not supported by this configuration", |
| DEFAULT_ARCH32_P ? "-m64" : "-m32"); |
| #endif |
| |
| /* We force all 64-bit archs to use 128-bit long double.  */ |
| if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128) |
| { |
| error ("-mlong-double-64 not allowed with -m64"); |
| target_flags |= MASK_LONG_DOUBLE_128; |
| } |
| |
| /* Code model selection. */ |
| sparc_cmodel = SPARC_DEFAULT_CMODEL; |
| |
| #ifdef SPARC_BI_ARCH |
| if (TARGET_ARCH32) |
| sparc_cmodel = CM_32; |
| #endif |
| |
| if (sparc_cmodel_string != NULL) |
| { |
| if (TARGET_ARCH64) |
| { |
| for (cmodel = &cmodels[0]; cmodel->name; cmodel++) |
| if (strcmp (sparc_cmodel_string, cmodel->name) == 0) |
| break; |
| if (cmodel->name == NULL) |
| error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string); |
| else |
| sparc_cmodel = cmodel->value; |
| } |
| else |
| error ("-mcmodel= is not supported on 32 bit systems"); |
| } |
| |
| fpu = TARGET_FPU; /* save current -mfpu status */ |
| |
| /* Set the default CPU. */ |
| for (def = &cpu_default[0]; def->name; ++def) |
| if (def->cpu == TARGET_CPU_DEFAULT) |
| break; |
| if (! def->name) |
| abort (); |
| sparc_select[0].string = def->name; |
| |
| for (sel = &sparc_select[0]; sel->name; ++sel) |
| { |
| if (sel->string) |
| { |
| for (cpu = &cpu_table[0]; cpu->name; ++cpu) |
| if (! strcmp (sel->string, cpu->name)) |
| { |
| if (sel->set_tune_p) |
| sparc_cpu = cpu->processor; |
| |
| if (sel->set_arch_p) |
| { |
| target_flags &= ~cpu->disable; |
| target_flags |= cpu->enable; |
| } |
| break; |
| } |
| |
| if (! cpu->name) |
| error ("bad value (%s) for %s switch", sel->string, sel->name); |
| } |
| } |
| |
| /* If -mfpu or -mno-fpu was explicitly used, don't override with |
| the processor default. Clear MASK_FPU_SET to avoid confusing |
| the reverse mapping from switch values to names. */ |
| if (TARGET_FPU_SET) |
| { |
| target_flags = (target_flags & ~MASK_FPU) | fpu; |
| target_flags &= ~MASK_FPU_SET; |
| } |
| |
| /* Don't allow -mvis if FPU is disabled. */ |
| if (! TARGET_FPU) |
| target_flags &= ~MASK_VIS; |
| |
| /* -mvis assumes UltraSPARC+, so we are sure v9 instructions |
| are available. |
| -m64 also implies v9. */ |
| if (TARGET_VIS || TARGET_ARCH64) |
| { |
| target_flags |= MASK_V9; |
| target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE); |
| } |
| |
| /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */ |
| if (TARGET_V9 && TARGET_ARCH32) |
| target_flags |= MASK_DEPRECATED_V8_INSNS; |
| |
| /* V8PLUS requires V9, makes no sense in 64 bit mode. */ |
| if (! TARGET_V9 || TARGET_ARCH64) |
| target_flags &= ~MASK_V8PLUS; |
| |
| /* Don't use stack biasing in 32 bit mode. */ |
| if (TARGET_ARCH32) |
| target_flags &= ~MASK_STACK_BIAS; |
| |
| /* Supply a default value for align_functions. */ |
| if (align_functions == 0 |
| && (sparc_cpu == PROCESSOR_ULTRASPARC |
| || sparc_cpu == PROCESSOR_ULTRASPARC3)) |
| align_functions = 32; |
| |
| /* Validate PCC_STRUCT_RETURN. */ |
| if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN) |
| flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1); |
| |
| /* Only use .uaxword when compiling for a 64-bit target. */ |
| if (!TARGET_ARCH64) |
| targetm.asm_out.unaligned_op.di = NULL; |
| |
| /* Do various machine dependent initializations. */ |
| sparc_init_modes (); |
| |
| /* Set up function hooks. */ |
| init_machine_status = sparc_init_machine_status; |
| } |
| |
| /* Miscellaneous utilities. */ |
| |
| /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move |
| or branch on register contents instructions. */ |
| |
| int |
| v9_regcmp_p (enum rtx_code code) |
| { |
| return (code == EQ || code == NE || code == GE || code == LT |
| || code == LE || code == GT); |
| } |
| |
| |
| /* Operand constraints. */ |
| |
| /* Return nonzero only if OP is a register of mode MODE, |
| or const0_rtx. */ |
| |
| int |
| reg_or_0_operand (rtx op, enum machine_mode mode) |
| { |
| if (register_operand (op, mode)) |
| return 1; |
| if (op == const0_rtx) |
| return 1; |
| if (GET_MODE (op) == VOIDmode && GET_CODE (op) == CONST_DOUBLE |
| && CONST_DOUBLE_HIGH (op) == 0 |
| && CONST_DOUBLE_LOW (op) == 0) |
| return 1; |
| if (fp_zero_operand (op, mode)) |
| return 1; |
| return 0; |
| } |
| |
| /* Return nonzero only if OP is const1_rtx. */ |
| |
| int |
| const1_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| return op == const1_rtx; |
| } |
| |
| /* Nonzero if OP is a floating point value with value 0.0. */ |
| |
| int |
| fp_zero_operand (rtx op, enum machine_mode mode) |
| { |
| if (GET_MODE_CLASS (GET_MODE (op)) != MODE_FLOAT) |
| return 0; |
| return op == CONST0_RTX (mode); |
| } |
| |
| /* Nonzero if OP is a register operand in floating point register. */ |
| |
| int |
| fp_register_operand (rtx op, enum machine_mode mode) |
| { |
| if (! register_operand (op, mode)) |
| return 0; |
| if (GET_CODE (op) == SUBREG) |
| op = SUBREG_REG (op); |
| return GET_CODE (op) == REG && SPARC_FP_REG_P (REGNO (op)); |
| } |
| |
| /* Nonzero if OP is a floating point constant which can |
| be loaded into an integer register using a single |
| sethi instruction. */ |
| |
| int |
| fp_sethi_p (rtx op) |
| { |
| if (GET_CODE (op) == CONST_DOUBLE) |
| { |
| REAL_VALUE_TYPE r; |
| long i; |
| |
| REAL_VALUE_FROM_CONST_DOUBLE (r, op); |
| if (REAL_VALUES_EQUAL (r, dconst0) |
| && ! REAL_VALUE_MINUS_ZERO (r)) |
| return 0; |
| REAL_VALUE_TO_TARGET_SINGLE (r, i); |
| if (SPARC_SETHI_P (i)) |
| return 1; |
| } |
| |
| return 0; |
| } |
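| /* Note that sethi loads a 22-bit immediate into bits 31:10 of a |
| register, so SPARC_SETHI_P above accepts exactly those values |
| whose low 10 bits are all zero.  */ |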
| |
| /* Nonzero if OP is a floating point constant which can |
| be loaded into an integer register using a single |
| mov instruction. */ |
| |
| int |
| fp_mov_p (rtx op) |
| { |
| if (GET_CODE (op) == CONST_DOUBLE) |
| { |
| REAL_VALUE_TYPE r; |
| long i; |
| |
| REAL_VALUE_FROM_CONST_DOUBLE (r, op); |
| if (REAL_VALUES_EQUAL (r, dconst0) |
| && ! REAL_VALUE_MINUS_ZERO (r)) |
| return 0; |
| REAL_VALUE_TO_TARGET_SINGLE (r, i); |
| if (SPARC_SIMM13_P (i)) |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
| /* Nonzero if OP is a floating point constant which can |
| be loaded into an integer register using a high/losum |
| instruction sequence. */ |
| |
| int |
| fp_high_losum_p (rtx op) |
| { |
| /* The constraints calling this should only be in |
| SFmode move insns, so any constant which cannot |
| be moved using a single insn will do. */ |
| if (GET_CODE (op) == CONST_DOUBLE) |
| { |
| REAL_VALUE_TYPE r; |
| long i; |
| |
| REAL_VALUE_FROM_CONST_DOUBLE (r, op); |
| if (REAL_VALUES_EQUAL (r, dconst0) |
| && ! REAL_VALUE_MINUS_ZERO (r)) |
| return 0; |
| REAL_VALUE_TO_TARGET_SINGLE (r, i); |
| if (! SPARC_SETHI_P (i) |
| && ! SPARC_SIMM13_P (i)) |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
| /* Nonzero if OP is an integer register. */ |
| |
| int |
| intreg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| return (register_operand (op, SImode) |
| || (TARGET_ARCH64 && register_operand (op, DImode))); |
| } |
| |
| /* Nonzero if OP is a floating point condition code register. */ |
| |
| int |
| fcc_reg_operand (rtx op, enum machine_mode mode) |
| { |
| /* This can happen when recog is called from combine. Op may be a MEM. |
| Fail instead of calling abort in this case. */ |
| if (GET_CODE (op) != REG) |
| return 0; |
| |
| if (mode != VOIDmode && mode != GET_MODE (op)) |
| return 0; |
| if (mode == VOIDmode |
| && (GET_MODE (op) != CCFPmode && GET_MODE (op) != CCFPEmode)) |
| return 0; |
| |
| #if 0 /* ??? ==> 1 when %fcc0-3 are pseudos first. See gen_compare_reg(). */ |
| if (reg_renumber == 0) |
| return REGNO (op) >= FIRST_PSEUDO_REGISTER; |
| return REGNO_OK_FOR_CCFP_P (REGNO (op)); |
| #else |
| return (unsigned) REGNO (op) - SPARC_FIRST_V9_FCC_REG < 4; |
| #endif |
| } |
| |
| /* Nonzero if OP is a floating point condition code fcc0 register. */ |
| |
| int |
| fcc0_reg_operand (rtx op, enum machine_mode mode) |
| { |
| /* This can happen when recog is called from combine. Op may be a MEM. |
| Fail instead of calling abort in this case. */ |
| if (GET_CODE (op) != REG) |
| return 0; |
| |
| if (mode != VOIDmode && mode != GET_MODE (op)) |
| return 0; |
| if (mode == VOIDmode |
| && (GET_MODE (op) != CCFPmode && GET_MODE (op) != CCFPEmode)) |
| return 0; |
| |
| return REGNO (op) == SPARC_FCC_REG; |
| } |
| |
| /* Nonzero if OP is an integer or floating point condition code register. */ |
| |
| int |
| icc_or_fcc_reg_operand (rtx op, enum machine_mode mode) |
| { |
| if (GET_CODE (op) == REG && REGNO (op) == SPARC_ICC_REG) |
| { |
| if (mode != VOIDmode && mode != GET_MODE (op)) |
| return 0; |
| if (mode == VOIDmode |
| && GET_MODE (op) != CCmode && GET_MODE (op) != CCXmode) |
| return 0; |
| return 1; |
| } |
| |
| return fcc_reg_operand (op, mode); |
| } |
| |
| /* Nonzero if OP can appear as the dest of a RESTORE insn. */ |
| int |
| restore_operand (rtx op, enum machine_mode mode) |
| { |
| return (GET_CODE (op) == REG && GET_MODE (op) == mode |
| && (REGNO (op) < 8 || (REGNO (op) >= 24 && REGNO (op) < 32))); |
| } |
| |
| /* Call insn on SPARC can take a PC-relative constant address, or any regular |
| memory address. */ |
| |
| int |
| call_operand (rtx op, enum machine_mode mode) |
| { |
| if (GET_CODE (op) != MEM) |
| abort (); |
| op = XEXP (op, 0); |
| return (symbolic_operand (op, mode) || memory_address_p (Pmode, op)); |
| } |
| |
| int |
| call_operand_address (rtx op, enum machine_mode mode) |
| { |
| return (symbolic_operand (op, mode) || memory_address_p (Pmode, op)); |
| } |
| |
| /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode, |
| otherwise return 0. */ |
| |
| int |
| tls_symbolic_operand (rtx op) |
| { |
| if (GET_CODE (op) != SYMBOL_REF) |
| return 0; |
| return SYMBOL_REF_TLS_MODEL (op); |
| } |
| |
| int |
| tgd_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| return tls_symbolic_operand (op) == TLS_MODEL_GLOBAL_DYNAMIC; |
| } |
| |
| int |
| tld_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| return tls_symbolic_operand (op) == TLS_MODEL_LOCAL_DYNAMIC; |
| } |
| |
| int |
| tie_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| return tls_symbolic_operand (op) == TLS_MODEL_INITIAL_EXEC; |
| } |
| |
| int |
| tle_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| return tls_symbolic_operand (op) == TLS_MODEL_LOCAL_EXEC; |
| } |
| |
| /* Returns 1 if OP is either a symbol reference or a sum of a symbol |
| reference and a constant. */ |
| |
| int |
| symbolic_operand (register rtx op, enum machine_mode mode) |
| { |
| enum machine_mode omode = GET_MODE (op); |
| |
| if (omode != mode && omode != VOIDmode && mode != VOIDmode) |
| return 0; |
| |
| switch (GET_CODE (op)) |
| { |
| case SYMBOL_REF: |
| return !SYMBOL_REF_TLS_MODEL (op); |
| |
| case LABEL_REF: |
| return 1; |
| |
| case CONST: |
| op = XEXP (op, 0); |
| return (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF |
| && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0))) |
| || GET_CODE (XEXP (op, 0)) == LABEL_REF) |
| && GET_CODE (XEXP (op, 1)) == CONST_INT); |
| |
| default: |
| return 0; |
| } |
| } |
| |
| /* Return truth value of statement that OP is a symbolic memory |
| operand of mode MODE. */ |
| |
| int |
| symbolic_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| if (GET_CODE (op) == SUBREG) |
| op = SUBREG_REG (op); |
| if (GET_CODE (op) != MEM) |
| return 0; |
| op = XEXP (op, 0); |
| return ((GET_CODE (op) == SYMBOL_REF && !SYMBOL_REF_TLS_MODEL (op)) |
| || GET_CODE (op) == CONST || GET_CODE (op) == HIGH |
| || GET_CODE (op) == LABEL_REF); |
| } |
| |
| /* Return truth value of statement that OP is a LABEL_REF of mode MODE. */ |
| |
| int |
| label_ref_operand (rtx op, enum machine_mode mode) |
| { |
| if (GET_CODE (op) != LABEL_REF) |
| return 0; |
| if (GET_MODE (op) != mode) |
| return 0; |
| return 1; |
| } |
| |
| /* Return 1 if the operand is an argument used in generating pic references |
| in either the medium/low or medium/anywhere code models of sparc64. */ |
| |
| int |
| sp64_medium_pic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| /* Check for (const (minus (symbol_ref:GOT) |
| (const (minus (label) (pc))))). */ |
| if (GET_CODE (op) != CONST) |
| return 0; |
| op = XEXP (op, 0); |
| if (GET_CODE (op) != MINUS) |
| return 0; |
| if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF) |
| return 0; |
| /* ??? Ensure symbol is GOT. */ |
| if (GET_CODE (XEXP (op, 1)) != CONST) |
| return 0; |
| if (GET_CODE (XEXP (XEXP (op, 1), 0)) != MINUS) |
| return 0; |
| return 1; |
| } |
| |
| /* Return 1 if the operand is a data segment reference. This includes |
| the readonly data segment, or in other words anything but the text segment. |
| This is needed in the medium/anywhere code model on v9. These values |
| are accessed with EMBMEDANY_BASE_REG. */ |
| |
| int |
| data_segment_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| switch (GET_CODE (op)) |
| { |
| case SYMBOL_REF : |
| return ! SYMBOL_REF_FUNCTION_P (op); |
| case PLUS : |
| /* Assume canonical format of symbol + constant. |
| Fall through. */ |
| case CONST : |
| return data_segment_operand (XEXP (op, 0), VOIDmode); |
| default : |
| return 0; |
| } |
| } |
| |
| /* Return 1 if the operand is a text segment reference. |
| This is needed in the medium/anywhere code model on v9. */ |
| |
| int |
| text_segment_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| switch (GET_CODE (op)) |
| { |
| case LABEL_REF : |
| return 1; |
| case SYMBOL_REF : |
| return SYMBOL_REF_FUNCTION_P (op); |
| case PLUS : |
| /* Assume canonical format of symbol + constant. |
| Fall through. */ |
| case CONST : |
| return text_segment_operand (XEXP (op, 0), VOIDmode); |
| default : |
| return 0; |
| } |
| } |
| |
| /* Return 1 if the operand is either a register or a memory operand that is |
| not symbolic. */ |
| |
| int |
| reg_or_nonsymb_mem_operand (register rtx op, enum machine_mode mode) |
| { |
| if (register_operand (op, mode)) |
| return 1; |
| |
| if (memory_operand (op, mode) && ! symbolic_memory_operand (op, mode)) |
| return 1; |
| |
| return 0; |
| } |
| |
| int |
| splittable_symbolic_memory_operand (rtx op, |
| enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| if (GET_CODE (op) != MEM) |
| return 0; |
| if (! symbolic_operand (XEXP (op, 0), Pmode)) |
| return 0; |
| return 1; |
| } |
| |
| int |
| splittable_immediate_memory_operand (rtx op, |
| enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| if (GET_CODE (op) != MEM) |
| return 0; |
| if (! immediate_operand (XEXP (op, 0), Pmode)) |
| return 0; |
| return 1; |
| } |
| |
| /* Return truth value of whether OP is EQ or NE. */ |
| |
| int |
| eq_or_neq (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| return (GET_CODE (op) == EQ || GET_CODE (op) == NE); |
| } |
| |
| /* Return 1 if this is a comparison operator, but not an EQ, NE, GEU, |
| or LTU for non-floating-point. We handle those specially. */ |
| |
| int |
| normal_comp_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| enum rtx_code code = GET_CODE (op); |
| |
| if (GET_RTX_CLASS (code) != '<') |
| return 0; |
| |
| if (GET_MODE (XEXP (op, 0)) == CCFPmode |
| || GET_MODE (XEXP (op, 0)) == CCFPEmode) |
| return 1; |
| |
| return (code != NE && code != EQ && code != GEU && code != LTU); |
| } |
| |
| /* Return 1 if this is a comparison operator. This allows the use of |
| MATCH_OPERATOR to recognize all the branch insns. */ |
| |
| int |
| noov_compare_op (register rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| enum rtx_code code = GET_CODE (op); |
| |
| if (GET_RTX_CLASS (code) != '<') |
| return 0; |
| |
| if (GET_MODE (XEXP (op, 0)) == CC_NOOVmode |
| || GET_MODE (XEXP (op, 0)) == CCX_NOOVmode) |
| /* These are the only branches which work with CC_NOOVmode. */ |
| return (code == EQ || code == NE || code == GE || code == LT); |
| return 1; |
| } |
| |
| /* Return 1 if this is a 64-bit comparison operator. This allows the use of |
| MATCH_OPERATOR to recognize all the branch insns. */ |
| |
| int |
| noov_compare64_op (register rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| enum rtx_code code = GET_CODE (op); |
| |
| if (! TARGET_V9) |
| return 0; |
| |
| if (GET_RTX_CLASS (code) != '<') |
| return 0; |
| |
| if (GET_MODE (XEXP (op, 0)) == CCX_NOOVmode) |
| /* These are the only branches which work with CCX_NOOVmode. */ |
| return (code == EQ || code == NE || code == GE || code == LT); |
| return (GET_MODE (XEXP (op, 0)) == CCXmode); |
| } |
| |
| /* Nonzero if OP is a comparison operator suitable for use in v9 |
| conditional move or branch on register contents instructions. */ |
| |
| int |
| v9_regcmp_op (register rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| enum rtx_code code = GET_CODE (op); |
| |
| if (GET_RTX_CLASS (code) != '<') |
| return 0; |
| |
| return v9_regcmp_p (code); |
| } |
| |
| /* Return 1 if this is a SIGN_EXTEND or ZERO_EXTEND operation. */ |
| |
| int |
| extend_op (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| return GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND; |
| } |
| |
| /* Return nonzero if OP is an operator of mode MODE which can set |
| the condition codes explicitly. We do not include PLUS and MINUS |
| because these require CC_NOOVmode, which we handle explicitly. */ |
| |
| int |
| cc_arithop (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| if (GET_CODE (op) == AND |
| || GET_CODE (op) == IOR |
| || GET_CODE (op) == XOR) |
| return 1; |
| |
| return 0; |
| } |
| |
| /* Return nonzero if OP is an operator of mode MODE which can bitwise |
| complement its second operand and set the condition codes explicitly. */ |
| |
| int |
| cc_arithopn (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| /* XOR is not here because combine canonicalizes (xor (not ...) ...) |
| and (xor ... (not ...)) to (not (xor ...)). */ |
| return (GET_CODE (op) == AND |
| || GET_CODE (op) == IOR); |
| } |
| |
| /* Return true if OP is a register, or is a CONST_INT that can fit in a |
| signed 13 bit immediate field. This is an acceptable SImode operand for |
| most 3 address instructions. */ |
| |
| int |
| arith_operand (rtx op, enum machine_mode mode) |
| { |
| if (register_operand (op, mode)) |
| return 1; |
| if (GET_CODE (op) != CONST_INT) |
| return 0; |
| return SMALL_INT32 (op); |
| } |
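| /* The simm13 field spans -4096..4095, so e.g. "add %o1, 4095, %o0" |
| encodes directly, while an addend of 4096 needs the special case |
| in arith_4096_operand below.  */ |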
| |
| /* Return true if OP is a constant 4096.  */ |
| |
| int |
| arith_4096_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| if (GET_CODE (op) != CONST_INT) |
| return 0; |
| else |
| return INTVAL (op) == 4096; |
| } |
| |
| /* Return true if OP is suitable as second operand for add/sub.  */ |
| |
| int |
| arith_add_operand (rtx op, enum machine_mode mode) |
| { |
| return arith_operand (op, mode) || arith_4096_operand (op, mode); |
| } |
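| /* 4096 is allowed as an addend even though it does not fit the |
| signed 13-bit field: "add X, 4096" can be output as "sub X, -4096", |
| and -4096 does fit.  The md patterns are expected to perform that |
| rewrite.  */ |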
| |
| /* Return true if OP is a CONST_INT or a CONST_DOUBLE which can fit in the |
| immediate field of OR and XOR instructions. Used for 64-bit |
| constant formation patterns. */ |
| int |
| const64_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| return ((GET_CODE (op) == CONST_INT |
| && SPARC_SIMM13_P (INTVAL (op))) |
| #if HOST_BITS_PER_WIDE_INT != 64 |
| || (GET_CODE (op) == CONST_DOUBLE |
| && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op)) |
| && (CONST_DOUBLE_HIGH (op) == |
| ((CONST_DOUBLE_LOW (op) & 0x80000000) != 0 ? |
| (HOST_WIDE_INT)-1 : 0))) |
| #endif |
| ); |
| } |
| |
| /* The same, but only for sethi instructions. */ |
| int |
| const64_high_operand (rtx op, enum machine_mode mode) |
| { |
| return ((GET_CODE (op) == CONST_INT |
| && (INTVAL (op) & ~(HOST_WIDE_INT)0x3ff) != 0 |
| && SPARC_SETHI_P (INTVAL (op) & GET_MODE_MASK (mode)) |
| ) |
| || (GET_CODE (op) == CONST_DOUBLE |
| && CONST_DOUBLE_HIGH (op) == 0 |
| && (CONST_DOUBLE_LOW (op) & ~(HOST_WIDE_INT)0x3ff) != 0 |
| && SPARC_SETHI_P (CONST_DOUBLE_LOW (op)))); |
| } |
| |
| /* Return true if OP is a register, or is a CONST_INT that can fit in a |
| signed 11 bit immediate field. This is an acceptable SImode operand for |
| the movcc instructions. */ |
| |
| int |
| arith11_operand (rtx op, enum machine_mode mode) |
| { |
| return (register_operand (op, mode) |
| || (GET_CODE (op) == CONST_INT && SPARC_SIMM11_P (INTVAL (op)))); |
| } |
| |
| /* Return true if OP is a register, or is a CONST_INT that can fit in a |
| signed 10 bit immediate field. This is an acceptable SImode operand for |
| the movrcc instructions. */ |
| |
| int |
| arith10_operand (rtx op, enum machine_mode mode) |
| { |
| return (register_operand (op, mode) |
| || (GET_CODE (op) == CONST_INT && SPARC_SIMM10_P (INTVAL (op)))); |
| } |
| |
| /* Return true if OP is a register, is a CONST_INT that fits in a 13 bit |
| immediate field, or is a CONST_DOUBLE whose both parts fit in a 13 bit |
| immediate field. |
| v9: Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that |
| can fit in a 13 bit immediate field. This is an acceptable DImode operand |
| for most 3 address instructions. */ |
| |
| int |
| arith_double_operand (rtx op, enum machine_mode mode) |
| { |
| return (register_operand (op, mode) |
| || (GET_CODE (op) == CONST_INT && SMALL_INT (op)) |
| || (! TARGET_ARCH64 |
| && GET_CODE (op) == CONST_DOUBLE |
| && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000 |
| && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_HIGH (op) + 0x1000) < 0x2000) |
| || (TARGET_ARCH64 |
| && GET_CODE (op) == CONST_DOUBLE |
| && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000 |
| && ((CONST_DOUBLE_HIGH (op) == -1 |
| && (CONST_DOUBLE_LOW (op) & 0x1000) == 0x1000) |
| || (CONST_DOUBLE_HIGH (op) == 0 |
| && (CONST_DOUBLE_LOW (op) & 0x1000) == 0)))); |
| } |
| |
| /* Return true if OP is a constant 4096 for DImode on ARCH64.  */ |
| |
| int |
| arith_double_4096_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| return (TARGET_ARCH64 && |
| ((GET_CODE (op) == CONST_INT && INTVAL (op) == 4096) || |
| (GET_CODE (op) == CONST_DOUBLE && |
| CONST_DOUBLE_LOW (op) == 4096 && |
| CONST_DOUBLE_HIGH (op) == 0))); |
| } |
| |
| /* Return true if OP is suitable as second operand for add/sub in DImode.  */ |
| |
| int |
| arith_double_add_operand (rtx op, enum machine_mode mode) |
| { |
| return arith_double_operand (op, mode) || arith_double_4096_operand (op, mode); |
| } |
| |
| /* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that |
| can fit in an 11 bit immediate field. This is an acceptable DImode |
| operand for the movcc instructions. */ |
| /* ??? Replace with arith11_operand? */ |
| |
| int |
| arith11_double_operand (rtx op, enum machine_mode mode) |
| { |
| return (register_operand (op, mode) |
| || (GET_CODE (op) == CONST_DOUBLE |
| && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode) |
| && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x400) < 0x800 |
| && ((CONST_DOUBLE_HIGH (op) == -1 |
| && (CONST_DOUBLE_LOW (op) & 0x400) == 0x400) |
| || (CONST_DOUBLE_HIGH (op) == 0 |
| && (CONST_DOUBLE_LOW (op) & 0x400) == 0))) |
| || (GET_CODE (op) == CONST_INT |
| && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode) |
| && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x400) < 0x800)); |
| } |
| |
| /* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that |
| can fit in a 10 bit immediate field. This is an acceptable DImode |
| operand for the movrcc instructions. */ |
| /* ??? Replace with arith10_operand? */ |
| |
| int |
| arith10_double_operand (rtx op, enum machine_mode mode) |
| { |
| return (register_operand (op, mode) |
| || (GET_CODE (op) == CONST_DOUBLE |
| && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode) |
| && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x200) < 0x400 |
| && ((CONST_DOUBLE_HIGH (op) == -1 |
| && (CONST_DOUBLE_LOW (op) & 0x200) == 0x200) |
| || (CONST_DOUBLE_HIGH (op) == 0 |
| && (CONST_DOUBLE_LOW (op) & 0x200) == 0))) |
| || (GET_CODE (op) == CONST_INT |
| && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode) |
| && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x200) < 0x400)); |
| } |
| |
| /* Return truth value of whether OP is an integer which fits the |
| range constraining immediate operands in most three-address insns, |
| which have a 13 bit immediate field. */ |
| |
| int |
| small_int (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| return (GET_CODE (op) == CONST_INT && SMALL_INT (op)); |
| } |
| |
| int |
| small_int_or_double (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| return ((GET_CODE (op) == CONST_INT && SMALL_INT (op)) |
| || (GET_CODE (op) == CONST_DOUBLE |
| && CONST_DOUBLE_HIGH (op) == 0 |
| && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op)))); |
| } |
| |
| /* Recognize operand values for the umul instruction. That instruction sign |
| extends immediate values just like all other sparc instructions, but |
| interprets the extended result as an unsigned number. */ |
| |
| int |
| uns_small_int (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| #if HOST_BITS_PER_WIDE_INT > 32 |
| /* All allowed constants will fit a CONST_INT. */ |
| return (GET_CODE (op) == CONST_INT |
| && ((INTVAL (op) >= 0 && INTVAL (op) < 0x1000) |
| || (INTVAL (op) >= 0xFFFFF000 |
| && INTVAL (op) <= 0xFFFFFFFF))); |
| #else |
| return ((GET_CODE (op) == CONST_INT && (unsigned) INTVAL (op) < 0x1000) |
| || (GET_CODE (op) == CONST_DOUBLE |
| && CONST_DOUBLE_HIGH (op) == 0 |
| && (unsigned) CONST_DOUBLE_LOW (op) - 0xFFFFF000 < 0x1000)); |
| #endif |
| } |
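| /* E.g. the immediate -4096 sign-extends to the 32-bit pattern |
| 0xFFFFF000, which umul then treats as the unsigned value |
| 4294963200; hence the 0xFFFFF000..0xFFFFFFFF window above.  */ |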
| |
| int |
| uns_arith_operand (rtx op, enum machine_mode mode) |
| { |
| return register_operand (op, mode) || uns_small_int (op, mode); |
| } |
| |
| /* Return truth value of statement that OP is a call-clobbered register. */ |
| int |
| clobbered_register (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
| { |
| return (GET_CODE (op) == REG && call_used_regs[REGNO (op)]); |
| } |
| |
| /* Return 1 if OP is a valid operand for the source of a move insn. */ |
| |
| int |
| input_operand (rtx op, enum machine_mode mode) |
| { |
| /* If both modes are non-void they must be the same. */ |
| if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op)) |
| return 0; |
| |
| /* Accept CONSTANT_P_RTX, since it will be gone by CSE1 and result in 0/1. */ |
| if (GET_CODE (op) == CONSTANT_P_RTX) |
| return 1; |
| |
| /* Allow any one instruction integer constant, and all CONST_INT |
| variants when we are working in DImode and !arch64. */ |
| if (GET_MODE_CLASS (mode) == MODE_INT |
| && ((GET_CODE (op) == CONST_INT |
| && (SPARC_SETHI_P (INTVAL (op) & GET_MODE_MASK (mode)) |
| || SPARC_SIMM13_P (INTVAL (op)) |
| || (mode == DImode |
| && ! TARGET_ARCH64))) |
| || (TARGET_ARCH64 |
| && GET_CODE (op) == CONST_DOUBLE |
| && ((CONST_DOUBLE_HIGH (op) == 0 |
| && SPARC_SETHI_P (CONST_DOUBLE_LOW (op))) |
| || |
| #if HOST_BITS_PER_WIDE_INT == 64 |
| (CONST_DOUBLE_HIGH (op) == 0 |
| && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))) |
| #else |
| (SPARC_SIMM13_P (CONST_DOUBLE_LOW (op)) |
| && (((CONST_DOUBLE_LOW (op) & 0x80000000) == 0 |
| && CONST_DOUBLE_HIGH (op) == 0) |
| || (CONST_DOUBLE_HIGH (op) == -1 |
| && (CONST_DOUBLE_LOW (op) & 0x80000000) != 0))) |
| #endif |
| )))) |
| return 1; |
| |
| /* If !arch64 and this is a DImode const, allow it so that |
| the splits can be generated. */ |
| if (! TARGET_ARCH64 |
| && mode == DImode |
| && GET_CODE (op) == CONST_DOUBLE) |
| return 1; |
| |
| if (register_operand (op, mode)) |
| return 1; |
| |
| if (GET_MODE_CLASS (mode) == MODE_FLOAT |
| && GET_CODE (op) == CONST_DOUBLE) |
| return 1; |
| |
| /* If this is a SUBREG, look inside so that we handle |
| paradoxical ones. */ |
| if (GET_CODE (op) == SUBREG) |
| op = SUBREG_REG (op); |
| |
| /* Check for valid MEM forms. */ |
| if (GET_CODE (op) == MEM) |
| { |
| rtx inside = XEXP (op, 0); |
| |
| if (GET_CODE (inside) == LO_SUM) |
| { |
| /* We can't allow these because all of the splits |
| (eventually as they trickle down into DFmode |
| splits) require offsettable memory references. */ |
| if (! TARGET_V9 |
| && GET_MODE (op) == TFmode) |
| return 0; |
| |
| return (register_operand (XEXP (inside, 0), Pmode) |
| && CONSTANT_P (XEXP (inside, 1))); |
| } |
| return memory_address_p (mode, inside); |
| } |
| |
| return 0; |
| } |
| |
| /* Return 1 if OP is valid for the lhs of a compare insn. */ |
| |
| int |
| compare_operand (rtx op, enum machine_mode mode) |
| { |
| if (GET_CODE (op) == ZERO_EXTRACT) |
| return (register_operand (XEXP (op, 0), mode) |
| && small_int_or_double (XEXP (op, 1), mode) |
| && small_int_or_double (XEXP (op, 2), mode) |
| /* This matches cmp_zero_extract. */ |
| && ((mode == SImode |
| && ((GET_CODE (XEXP (op, 2)) == CONST_INT |
| && INTVAL (XEXP (op, 2)) > 19) |
| || (GET_CODE (XEXP (op, 2)) == CONST_DOUBLE |
| && CONST_DOUBLE_LOW (XEXP (op, 2)) > 19))) |
| /* This matches cmp_zero_extract_sp64. */ |
| || (mode == DImode |
| && TARGET_ARCH64 |
| && ((GET_CODE (XEXP (op, 2)) == CONST_INT |
| && INTVAL (XEXP (op, 2)) > 51) |
| || (GET_CODE (XEXP (op, 2)) == CONST_DOUBLE |
| && CONST_DOUBLE_LOW (XEXP (op, 2)) > 51))))); |
| else |
| return register_operand (op, mode); |
| } |
| |
| |
| /* We know it can't be done in one insn when we get here; |
| the movsi expander guarantees this.  */ |
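| /* E.g. the constant 0x12345678 is emitted roughly as |
| |
| sethi %hi(0x12345678), %temp |
| or %temp, %lo(0x12345678), %op0 |
| |
| with the sethi expressed as a plain move of the masked value so |
| that CSE can see and reuse the intermediate result.  */ |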
| void |
| sparc_emit_set_const32 (rtx op0, rtx op1) |
| { |
| enum machine_mode mode = GET_MODE (op0); |
| rtx temp; |
| |
| if (GET_CODE (op1) == CONST_INT) |
| { |
| HOST_WIDE_INT value = INTVAL (op1); |
| |
| if (SPARC_SETHI_P (value & GET_MODE_MASK (mode)) |
| || SPARC_SIMM13_P (value)) |
| abort (); |
| } |
| |
| /* Full 2-insn decomposition is needed. */ |
| if (reload_in_progress || reload_completed) |
| temp = op0; |
| else |
| temp = gen_reg_rtx (mode); |
| |
| if (GET_CODE (op1) == CONST_INT) |
| { |
| /* Emit them as real moves instead of a HIGH/LO_SUM; |
| this way CSE can see everything and reuse intermediate |
| values if it wants.  */ |
| if (TARGET_ARCH64 |
| && HOST_BITS_PER_WIDE_INT != 64 |
| && (INTVAL (op1) & 0x80000000) != 0) |
| emit_insn (gen_rtx_SET |
| (VOIDmode, temp, |
| immed_double_const (INTVAL (op1) & ~(HOST_WIDE_INT)0x3ff, |
| 0, DImode))); |
| else |
| emit_insn (gen_rtx_SET (VOIDmode, temp, |
| GEN_INT (INTVAL (op1) |
| & ~(HOST_WIDE_INT)0x3ff))); |
| |
| emit_insn (gen_rtx_SET (VOIDmode, |
| op0, |
| gen_rtx_IOR (mode, temp, |
| GEN_INT (INTVAL (op1) & 0x3ff)))); |
| } |
| else |
| { |
| /* A symbol, emit in the traditional way. */ |
| emit_insn (gen_rtx_SET (VOIDmode, temp, |
| gen_rtx_HIGH (mode, op1))); |
| emit_insn (gen_rtx_SET (VOIDmode, |
| op0, gen_rtx_LO_SUM (mode, temp, op1))); |
| |
| } |
| } |
| |
| |
| /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register. |
| If TEMP is non-zero, we are forbidden to use any other scratch |
| registers. Otherwise, we are allowed to generate them as needed. |
| |
| Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY |
| or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */ |
| void |
| sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp) |
| { |
| rtx temp1, temp2, temp3, temp4, temp5; |
| rtx ti_temp = 0; |
| |
| if (temp && GET_MODE (temp) == TImode) |
| { |
| ti_temp = temp; |
| temp = gen_rtx_REG (DImode, REGNO (temp)); |
| } |
| |
| /* SPARC-V9 code-model support. */ |
| switch (sparc_cmodel) |
| { |
| case CM_MEDLOW: |
| /* The range spanned by all instructions in the object is less |
| than 2^31 bytes (2GB) and the distance from any instruction |
| to the location of the label _GLOBAL_OFFSET_TABLE_ is less |
| than 2^31 bytes (2GB). |
| |
| The executable must be in the low 4GB of the virtual address |
| space. |
| |
| sethi %hi(symbol), %temp1 |
| or %temp1, %lo(symbol), %reg */ |
| if (temp) |
| temp1 = temp; /* op0 is allowed. */ |
| else |
| temp1 = gen_reg_rtx (DImode); |
| |
| emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1))); |
| emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1))); |
| break; |
| |
| case CM_MEDMID: |
| /* The range spanned by all instructions in the object is less |
| than 2^31 bytes (2GB) and the distance from any instruction |
| to the location of the label _GLOBAL_OFFSET_TABLE_ is less |
| than 2^31 bytes (2GB). |
| |
| The executable must be in the low 16TB of the virtual address |
| space. |
| |
| sethi %h44(symbol), %temp1 |
| or %temp1, %m44(symbol), %temp2 |
| sllx %temp2, 12, %temp3 |
| or %temp3, %l44(symbol), %reg */ |
| if (temp) |
| { |
| temp1 = op0; |
| temp2 = op0; |
| temp3 = temp; /* op0 is allowed. */ |
| } |
| else |
| { |
| temp1 = gen_reg_rtx (DImode); |
| temp2 = gen_reg_rtx (DImode); |
| temp3 = gen_reg_rtx (DImode); |
| } |
| |
| emit_insn (gen_seth44 (temp1, op1)); |
| emit_insn (gen_setm44 (temp2, temp1, op1)); |
| emit_insn (gen_rtx_SET (VOIDmode, temp3, |
| gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12)))); |
| emit_insn (gen_setl44 (op0, temp3, op1)); |
| break; |
| |
| case CM_MEDANY: |
| /* The range spanned by all instructions in the object is less |
| than 2^31 bytes (2GB) and the distance from any instruction |
| to the location of the label _GLOBAL_OFFSET_TABLE_ is less |
| than 2^31 bytes (2GB). |
| |
| The executable can be placed anywhere in the virtual address |
| space. |
| |
| sethi %hh(symbol), %temp1 |
| sethi %lm(symbol), %temp2 |
| or %temp1, %hm(symbol), %temp3 |
| sllx %temp3, 32, %temp4 |
| or %temp4, %temp2, %temp5 |
| or %temp5, %lo(symbol), %reg */ |
| if (temp) |
| { |
| /* It is possible that one of the registers we got for operands[2] |
| might coincide with that of operands[0] (which is why we made |
| it TImode). Pick the other one to use as our scratch. */ |
| if (rtx_equal_p (temp, op0)) |
| { |
| if (ti_temp) |
| temp = gen_rtx_REG (DImode, REGNO (temp) + 1); |
| else |
| abort(); |
| } |
| temp1 = op0; |
| temp2 = temp; /* op0 is _not_ allowed, see above. */ |
| temp3 = op0; |
| temp4 = op0; |
| temp5 = op0; |
| } |
| else |
| { |
| temp1 = gen_reg_rtx (DImode); |
| temp2 = gen_reg_rtx (DImode); |
| temp3 = gen_reg_rtx (DImode); |
| temp4 = gen_reg_rtx (DImode); |
| temp5 = gen_reg_rtx (DImode); |
| } |
| |
| emit_insn (gen_sethh (temp1, op1)); |
| emit_insn (gen_setlm (temp2, op1)); |
| emit_insn (gen_sethm (temp3, temp1, op1)); |
| emit_insn (gen_rtx_SET (VOIDmode, temp4, |
| gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32)))); |
| emit_insn (gen_rtx_SET (VOIDmode, temp5, |
| gen_rtx_PLUS (DImode, temp4, temp2))); |
| emit_insn (gen_setlo (op0, temp5, op1)); |
| break; |
| |
| case CM_EMBMEDANY: |
| /* Old old old backwards compatibility cruft here. |
| Essentially it is MEDLOW with a fixed 64-bit |
| virtual base added to all data segment addresses. |
| Text-segment stuff is computed like MEDANY, we can't |
| reuse the code above because the relocation knobs |
| look different. |
| |
| Data segment: sethi %hi(symbol), %temp1 |
| add %temp1, EMBMEDANY_BASE_REG, %temp2 |
| or %temp2, %lo(symbol), %reg */ |
| if (data_segment_operand (op1, GET_MODE (op1))) |
| { |
| if (temp) |
| { |
| temp1 = temp; /* op0 is allowed. */ |
| temp2 = op0; |
| } |
| else |
| { |
| temp1 = gen_reg_rtx (DImode); |
| temp2 = gen_reg_rtx (DImode); |
| } |
| |
| emit_insn (gen_embmedany_sethi (temp1, op1)); |
| emit_insn (gen_embmedany_brsum (temp2, temp1)); |
| emit_insn (gen_embmedany_losum (op0, temp2, op1)); |
| } |
| |
| /* Text segment: sethi %uhi(symbol), %temp1 |
| sethi %hi(symbol), %temp2 |
| or %temp1, %ulo(symbol), %temp3 |
| sllx %temp3, 32, %temp4 |
| or %temp4, %temp2, %temp5 |
| or %temp5, %lo(symbol), %reg */ |
| else |
| { |
| if (temp) |
| { |
| /* It is possible that one of the registers we got for operands[2] |
| might coincide with that of operands[0] (which is why we made |
| it TImode). Pick the other one to use as our scratch. */ |
| if (rtx_equal_p (temp, op0)) |
| { |
| if (ti_temp) |
| temp = gen_rtx_REG (DImode, REGNO (temp) + 1); |
| else |
| abort(); |
| } |
| temp1 = op0; |
| temp2 = temp; /* op0 is _not_ allowed, see above. */ |
| temp3 = op0; |
| temp4 = op0; |
| temp5 = op0; |
| } |
| else |
| { |
| temp1 = gen_reg_rtx (DImode); |
| temp2 = gen_reg_rtx (DImode); |
| temp3 = gen_reg_rtx (DImode); |
| temp4 = gen_reg_rtx (DImode); |
| temp5 = gen_reg_rtx (DImode); |
| } |
| |
| emit_insn (gen_embmedany_textuhi (temp1, op1)); |
| emit_insn (gen_embmedany_texthi (temp2, op1)); |
| emit_insn (gen_embmedany_textulo (temp3, temp1, op1)); |
| emit_insn (gen_rtx_SET (VOIDmode, temp4, |
| gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32)))); |
| emit_insn (gen_rtx_SET (VOIDmode, temp5, |
| gen_rtx_PLUS (DImode, temp4, temp2))); |
| emit_insn (gen_embmedany_textlo (op0, temp5, op1)); |
| } |
| break; |
| |
| default: |
| abort(); |
| } |
| } |
| |
| /* These avoid problems when cross compiling. If we do not |
| go through all this hair then the optimizer will see |
| invalid REG_EQUAL notes or in some cases none at all. */ |
| static void sparc_emit_set_safe_HIGH64 (rtx, HOST_WIDE_INT); |
| static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT); |
| static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT); |
| static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT); |
| |
| #if HOST_BITS_PER_WIDE_INT == 64 |
| #define GEN_HIGHINT64(__x) GEN_INT ((__x) & ~(HOST_WIDE_INT)0x3ff) |
| #define GEN_INT64(__x) GEN_INT (__x) |
| #else |
| #define GEN_HIGHINT64(__x) \ |
| immed_double_const ((__x) & ~(HOST_WIDE_INT)0x3ff, 0, DImode) |
| #define GEN_INT64(__x) \ |
| immed_double_const ((__x) & 0xffffffff, \ |
| ((__x) & 0x80000000 ? -1 : 0), DImode) |
| #endif |
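| /* E.g. with a 32-bit HOST_WIDE_INT, GEN_HIGHINT64 (0x80000000) must |
| be a CONST_DOUBLE with a zero high word, i.e. the zero-extended |
| value 0x0000000080000000 that sethi actually produces; a plain |
| GEN_INT would read as a negative (sign-extended) constant.  */ |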
| |
| /* The optimizer is not to assume anything about exactly |
| which bits are set for a HIGH; they are unspecified. |
| Unfortunately this leads to many missed optimizations |
| during CSE.  We mask out the non-HIGH bits and match |
| a plain movdi to alleviate this problem.  */ |
| static void |
| sparc_emit_set_safe_HIGH64 (rtx dest, HOST_WIDE_INT val) |
| { |
| emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_HIGHINT64 (val))); |
| } |
| |
| static rtx |
| gen_safe_SET64 (rtx dest, HOST_WIDE_INT val) |
| { |
| return gen_rtx_SET (VOIDmode, dest, GEN_INT64 (val)); |
| } |
| |
| static rtx |
| gen_safe_OR64 (rtx src, HOST_WIDE_INT val) |
| { |
| return gen_rtx_IOR (DImode, src, GEN_INT64 (val)); |
| } |
| |
| static rtx |
| gen_safe_XOR64 (rtx src, HOST_WIDE_INT val) |
| { |
| return gen_rtx_XOR (DImode, src, GEN_INT64 (val)); |
| } |
| |
| /* Worker routines for 64-bit constant formation on arch64. |
| One of the key things to be doing in these emissions is |
| to create as many temp REGs as possible. This makes it |
| possible for half-built constants to be used later when |
| such values are similar to something required later on. |
| Without doing this, the optimizer cannot see such |
| opportunities. */ |
| |
| static void sparc_emit_set_const64_quick1 (rtx, rtx, |
| unsigned HOST_WIDE_INT, int); |
| |
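| /* Build a 64-bit constant whose upper 32 bits are all zero (or, |
| with IS_NEG, all one) in two instructions: a sethi for the upper |
| 22 bits of the low word, then an or for the low 10 bits |
| (respectively an xor with a negative simm13, whose sign extension |
| sets the upper word).  */ |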
| static void |
| sparc_emit_set_const64_quick1 (rtx op0, rtx temp, |
| unsigned HOST_WIDE_INT low_bits, int is_neg) |
| { |
| unsigned HOST_WIDE_INT high_bits; |
| |
| if (is_neg) |
| high_bits = (~low_bits) & 0xffffffff; |
| else |
| high_bits = low_bits; |
| |
| sparc_emit_set_safe_HIGH64 (temp, high_bits); |
| if (!is_neg) |
| { |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_safe_OR64 (temp, (high_bits & 0x3ff)))); |
| } |
| else |
| { |
| /* If we are XOR'ing with -1, then we should emit a one's complement |
| instead. This way the combiner will notice logical operations |
| such as ANDN later on and substitute. */ |
| if ((low_bits & 0x3ff) == 0x3ff) |
| { |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_rtx_NOT (DImode, temp))); |
| } |
| else |
| { |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_safe_XOR64 (temp, |
| (-(HOST_WIDE_INT)0x400 |
| | (low_bits & 0x3ff))))); |
| } |
| } |
| } |
| |
| static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT, |
| unsigned HOST_WIDE_INT, int); |
| |
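| /* Build the constant (HIGH_BITS << SHIFT_COUNT) or'd with |
| LOW_IMMEDIATE.  HIGH_BITS is materialized with sethi (plus an or |
| if its low 10 bits are nonzero, or with a single move if it fits |
| simm13), shifted into place, and the low piece is or'd in last.  */ |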
| static void |
| sparc_emit_set_const64_quick2 (rtx op0, rtx temp, |
| unsigned HOST_WIDE_INT high_bits, |
| unsigned HOST_WIDE_INT low_immediate, |
| int shift_count) |
| { |
| rtx temp2 = op0; |
| |
| if ((high_bits & 0xfffffc00) != 0) |
| { |
| sparc_emit_set_safe_HIGH64 (temp, high_bits); |
| if ((high_bits & ~0xfffffc00) != 0) |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_safe_OR64 (temp, (high_bits & 0x3ff)))); |
| else |
| temp2 = temp; |
| } |
| else |
| { |
| emit_insn (gen_safe_SET64 (temp, high_bits)); |
| temp2 = temp; |
| } |
| |
| /* Now shift it up into place. */ |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_rtx_ASHIFT (DImode, temp2, |
| GEN_INT (shift_count)))); |
| |
| /* If there is a low immediate part piece, finish up by |
| putting that in as well. */ |
| if (low_immediate != 0) |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_safe_OR64 (op0, low_immediate))); |
| } |
| |
| static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT, |
| unsigned HOST_WIDE_INT); |
| |
| /* Full 64-bit constant decomposition. Even though this is the |
| 'worst' case, we still optimize a few things away. */ |
| static void |
| sparc_emit_set_const64_longway (rtx op0, rtx temp, |
| unsigned HOST_WIDE_INT high_bits, |
| unsigned HOST_WIDE_INT low_bits) |
| { |
| rtx sub_temp; |
| |
| if (reload_in_progress || reload_completed) |
| sub_temp = op0; |
| else |
| sub_temp = gen_reg_rtx (DImode); |
| |
| if ((high_bits & 0xfffffc00) != 0) |
| { |
| sparc_emit_set_safe_HIGH64 (temp, high_bits); |
| if ((high_bits & ~0xfffffc00) != 0) |
| emit_insn (gen_rtx_SET (VOIDmode, |
| sub_temp, |
| gen_safe_OR64 (temp, (high_bits & 0x3ff)))); |
| else |
| sub_temp = temp; |
| } |
| else |
| { |
| emit_insn (gen_safe_SET64 (temp, high_bits)); |
| sub_temp = temp; |
| } |
| |
| if (!reload_in_progress && !reload_completed) |
| { |
| rtx temp2 = gen_reg_rtx (DImode); |
| rtx temp3 = gen_reg_rtx (DImode); |
| rtx temp4 = gen_reg_rtx (DImode); |
| |
| emit_insn (gen_rtx_SET (VOIDmode, temp4, |
| gen_rtx_ASHIFT (DImode, sub_temp, |
| GEN_INT (32)))); |
| |
| sparc_emit_set_safe_HIGH64 (temp2, low_bits); |
| if ((low_bits & ~0xfffffc00) != 0) |
| { |
| emit_insn (gen_rtx_SET (VOIDmode, temp3, |
| gen_safe_OR64 (temp2, (low_bits & 0x3ff)))); |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_rtx_PLUS (DImode, temp4, temp3))); |
| } |
| else |
| { |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_rtx_PLUS (DImode, temp4, temp2))); |
| } |
| } |
| else |
| { |
| rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff); |
| rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff); |
| rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff); |
| int to_shift = 12; |
| |
| /* We are in the middle of reload, so this is really |
| painful. However we do still make an attempt to |
| avoid emitting truly stupid code. */ |
| if (low1 != const0_rtx) |
| { |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_rtx_ASHIFT (DImode, sub_temp, |
| GEN_INT (to_shift)))); |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_rtx_IOR (DImode, op0, low1))); |
| sub_temp = op0; |
| to_shift = 12; |
| } |
| else |
| { |
| to_shift += 12; |
| } |
| if (low2 != const0_rtx) |
| { |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_rtx_ASHIFT (DImode, sub_temp, |
| GEN_INT (to_shift)))); |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_rtx_IOR (DImode, op0, low2))); |
| sub_temp = op0; |
| to_shift = 8; |
| } |
| else |
| { |
| to_shift += 8; |
| } |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_rtx_ASHIFT (DImode, sub_temp, |
| GEN_INT (to_shift)))); |
| if (low3 != const0_rtx) |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_rtx_IOR (DImode, op0, low3))); |
| /* phew... */ |
| } |
| } |
| |
| /* Analyze a 64-bit constant, returning in *HBSP and *LBSP the |
| positions of its highest and lowest set bits, and in *ABBASP |
| whether all the bits in between are set. */ |
| static void analyze_64bit_constant (unsigned HOST_WIDE_INT, |
| unsigned HOST_WIDE_INT, |
| int *, int *, int *); |
| |
| static void |
| analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits, |
| unsigned HOST_WIDE_INT low_bits, |
| int *hbsp, int *lbsp, int *abbasp) |
| { |
| int lowest_bit_set, highest_bit_set, all_bits_between_are_set; |
| int i; |
| |
| lowest_bit_set = highest_bit_set = -1; |
| i = 0; |
| do |
| { |
| if ((lowest_bit_set == -1) |
| && ((low_bits >> i) & 1)) |
| lowest_bit_set = i; |
| if ((highest_bit_set == -1) |
| && ((high_bits >> (32 - i - 1)) & 1)) |
| highest_bit_set = (64 - i - 1); |
| } |
| while (++i < 32 |
| && ((highest_bit_set == -1) |
| || (lowest_bit_set == -1))); |
| if (i == 32) |
| { |
| i = 0; |
| do |
| { |
| if ((lowest_bit_set == -1) |
| && ((high_bits >> i) & 1)) |
| lowest_bit_set = i + 32; |
| if ((highest_bit_set == -1) |
| && ((low_bits >> (32 - i - 1)) & 1)) |
| highest_bit_set = 32 - i - 1; |
| } |
| while (++i < 32 |
| && ((highest_bit_set == -1) |
| || (lowest_bit_set == -1))); |
| } |
| /* If there are no bits set this should have gone out |
| as one instruction! */ |
| if (lowest_bit_set == -1 |
| || highest_bit_set == -1) |
| abort (); |
| all_bits_between_are_set = 1; |
| for (i = lowest_bit_set; i <= highest_bit_set; i++) |
| { |
| if (i < 32) |
| { |
| if ((low_bits & (1 << i)) != 0) |
| continue; |
| } |
| else |
| { |
| if ((high_bits & (1 << (i - 32))) != 0) |
| continue; |
| } |
| all_bits_between_are_set = 0; |
| break; |
| } |
| *hbsp = highest_bit_set; |
| *lbsp = lowest_bit_set; |
| *abbasp = all_bits_between_are_set; |
| } |
| |
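| /* Return nonzero if the 64-bit constant HIGH_BITS:LOW_BITS can be |
| loaded in at most two instructions: the high word is all zeros or |
| all ones, the set bits form one run touching bit 0 or bit 63, or |
| all the set bits fit in a window narrow enough for a single sethi |
| followed by a shift. */ |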
| static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT); |
| |
| static int |
| const64_is_2insns (unsigned HOST_WIDE_INT high_bits, |
| unsigned HOST_WIDE_INT low_bits) |
| { |
| int highest_bit_set, lowest_bit_set, all_bits_between_are_set; |
| |
| if (high_bits == 0 |
| || high_bits == 0xffffffff) |
| return 1; |
| |
| analyze_64bit_constant (high_bits, low_bits, |
| &highest_bit_set, &lowest_bit_set, |
| &all_bits_between_are_set); |
| |
| if ((highest_bit_set == 63 |
| || lowest_bit_set == 0) |
| && all_bits_between_are_set != 0) |
| return 1; |
| |
| if ((highest_bit_set - lowest_bit_set) < 21) |
| return 1; |
| |
| return 0; |
| } |
| |
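| /* Shift the 64-bit constant HIGH_BITS:LOW_BITS right by |
| LOWEST_BIT_SET and then left by SHIFT, "focusing" the interesting |
| run of bits at bit position SHIFT. The abort below is a sanity |
| check that the two halves do not overlap after refocusing. */ |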
| static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT, |
| unsigned HOST_WIDE_INT, |
| int, int); |
| |
| static unsigned HOST_WIDE_INT |
| create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits, |
| unsigned HOST_WIDE_INT low_bits, |
| int lowest_bit_set, int shift) |
| { |
| HOST_WIDE_INT hi, lo; |
| |
| if (lowest_bit_set < 32) |
| { |
| lo = (low_bits >> lowest_bit_set) << shift; |
| hi = ((high_bits << (32 - lowest_bit_set)) << shift); |
| } |
| else |
| { |
| lo = 0; |
| hi = ((high_bits >> (lowest_bit_set - 32)) << shift); |
| } |
| if (hi & lo) |
| abort (); |
| return (hi | lo); |
| } |
| |
| /* Here we are sure to be arch64 and this is an integer constant |
| being loaded into a register. Emit the most efficient |
| insn sequence possible. Detection of all the 1-insn cases |
| has been done already. */ |
| void |
| sparc_emit_set_const64 (rtx op0, rtx op1) |
| { |
| unsigned HOST_WIDE_INT high_bits, low_bits; |
| int lowest_bit_set, highest_bit_set; |
| int all_bits_between_are_set; |
| rtx temp = 0; |
| |
| /* Sanity check that we know what we are working with. */ |
| if (! TARGET_ARCH64) |
| abort (); |
| |
| if (GET_CODE (op0) != SUBREG) |
| { |
| if (GET_CODE (op0) != REG |
| || (REGNO (op0) >= SPARC_FIRST_FP_REG |
| && REGNO (op0) <= SPARC_LAST_V9_FP_REG)) |
| abort (); |
| } |
| |
| if (reload_in_progress || reload_completed) |
| temp = op0; |
| |
| if (GET_CODE (op1) != CONST_DOUBLE |
| && GET_CODE (op1) != CONST_INT) |
| { |
| sparc_emit_set_symbolic_const64 (op0, op1, temp); |
| return; |
| } |
| |
| if (! temp) |
| temp = gen_reg_rtx (DImode); |
| |
| if (GET_CODE (op1) == CONST_DOUBLE) |
| { |
| #if HOST_BITS_PER_WIDE_INT == 64 |
| high_bits = (CONST_DOUBLE_LOW (op1) >> 32) & 0xffffffff; |
| low_bits = CONST_DOUBLE_LOW (op1) & 0xffffffff; |
| #else |
| high_bits = CONST_DOUBLE_HIGH (op1); |
| low_bits = CONST_DOUBLE_LOW (op1); |
| #endif |
| } |
| else |
| { |
| #if HOST_BITS_PER_WIDE_INT == 64 |
| high_bits = ((INTVAL (op1) >> 32) & 0xffffffff); |
| low_bits = (INTVAL (op1) & 0xffffffff); |
| #else |
| high_bits = ((INTVAL (op1) < 0) ? |
| 0xffffffff : |
| 0x00000000); |
| low_bits = INTVAL (op1); |
| #endif |
| } |
| |
| /* low_bits bits 0 --> 31 |
| high_bits bits 32 --> 63 */ |
| |
| analyze_64bit_constant (high_bits, low_bits, |
| &highest_bit_set, &lowest_bit_set, |
| &all_bits_between_are_set); |
| |
| /* First try for a 2-insn sequence. */ |
| |
| /* These situations are preferred because the optimizer can |
| * do more things with them: |
| * 1) mov -1, %reg |
| * sllx %reg, shift, %reg |
| * 2) mov -1, %reg |
| * srlx %reg, shift, %reg |
| * 3) mov some_small_const, %reg |
| * sllx %reg, shift, %reg |
| */ |
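| /* For instance, 0xfff8000000000000 becomes |
| "mov -1, %reg; sllx %reg, 51, %reg" via case 1 above, and |
| 0x00000001ffffffff becomes "mov -1, %reg; srlx %reg, 31, %reg" |
| via case 2. */ |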
| if (((highest_bit_set == 63 |
| || lowest_bit_set == 0) |
| && all_bits_between_are_set != 0) |
| || ((highest_bit_set - lowest_bit_set) < 12)) |
| { |
| HOST_WIDE_INT the_const = -1; |
| int shift = lowest_bit_set; |
| |
| if ((highest_bit_set != 63 |
| && lowest_bit_set != 0) |
| || all_bits_between_are_set == 0) |
| { |
| the_const = |
| create_simple_focus_bits (high_bits, low_bits, |
| lowest_bit_set, 0); |
| } |
| else if (lowest_bit_set == 0) |
| shift = -(63 - highest_bit_set); |
| |
| if (! SPARC_SIMM13_P (the_const)) |
| abort (); |
| |
| emit_insn (gen_safe_SET64 (temp, the_const)); |
| if (shift > 0) |
| emit_insn (gen_rtx_SET (VOIDmode, |
| op0, |
| gen_rtx_ASHIFT (DImode, |
| temp, |
| GEN_INT (shift)))); |
| else if (shift < 0) |
| emit_insn (gen_rtx_SET (VOIDmode, |
| op0, |
| gen_rtx_LSHIFTRT (DImode, |
| temp, |
| GEN_INT (-shift)))); |
| else |
| abort (); |
| return; |
| } |
| |
| /* Now a range of 22 or fewer bits set somewhere. |
| * 1) sethi %hi(focus_bits), %reg |
| * sllx %reg, shift, %reg |
| * 2) sethi %hi(focus_bits), %reg |
| * srlx %reg, shift, %reg |
| */ |
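| /* For instance, 0x000ffff000000000 has its 16 set bits in such a |
| narrow window, so it becomes |
| "sethi %hi(0x3fffc00), %reg; sllx %reg, 26, %reg". */ |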
| if ((highest_bit_set - lowest_bit_set) < 21) |
| { |
| unsigned HOST_WIDE_INT focus_bits = |
| create_simple_focus_bits (high_bits, low_bits, |
| lowest_bit_set, 10); |
| |
| if (! SPARC_SETHI_P (focus_bits)) |
| abort (); |
| |
| sparc_emit_set_safe_HIGH64 (temp, focus_bits); |
| |
| /* If lowest_bit_set == 10 then a sethi alone could have done it. */ |
| if (lowest_bit_set < 10) |
| emit_insn (gen_rtx_SET (VOIDmode, |
| op0, |
| gen_rtx_LSHIFTRT (DImode, temp, |
| GEN_INT (10 - lowest_bit_set)))); |
| else if (lowest_bit_set > 10) |
| emit_insn (gen_rtx_SET (VOIDmode, |
| op0, |
| gen_rtx_ASHIFT (DImode, temp, |
| GEN_INT (lowest_bit_set - 10)))); |
| else |
| abort (); |
| return; |
| } |
| |
| /* 1) sethi %hi(low_bits), %reg |
| * or %reg, %lo(low_bits), %reg |
| * 2) sethi %hi(~low_bits), %reg |
| * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg |
| */ |
| if (high_bits == 0 |
| || high_bits == 0xffffffff) |
| { |
| sparc_emit_set_const64_quick1 (op0, temp, low_bits, |
| (high_bits == 0xffffffff)); |
| return; |
| } |
| |
| /* Now, try 3-insn sequences. */ |
| |
| /* 1) sethi %hi(high_bits), %reg |
| * or %reg, %lo(high_bits), %reg |
| * sllx %reg, 32, %reg |
| */ |
| if (low_bits == 0) |
| { |
| sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32); |
| return; |
| } |
| |
| /* We may be able to do something quick |
| when the constant is negated, so try that. */ |
| if (const64_is_2insns ((~high_bits) & 0xffffffff, |
| (~low_bits) & 0xfffffc00)) |
| { |
| /* NOTE: The trailing bits get XOR'd so we need the |
| non-negated bits, not the negated ones. */ |
| unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff; |
| |
| if ((((~high_bits) & 0xffffffff) == 0 |
| && ((~low_bits) & 0x80000000) == 0) |
| || (((~high_bits) & 0xffffffff) == 0xffffffff |
| && ((~low_bits) & 0x80000000) != 0)) |
| { |
| int fast_int = (~low_bits & 0xffffffff); |
| |
| if ((SPARC_SETHI_P (fast_int) |
| && (~high_bits & 0xffffffff) == 0) |
| || SPARC_SIMM13_P (fast_int)) |
| emit_insn (gen_safe_SET64 (temp, fast_int)); |
| else |
| sparc_emit_set_const64 (temp, GEN_INT64 (fast_int)); |
| } |
| else |
| { |
| rtx negated_const; |
| #if HOST_BITS_PER_WIDE_INT == 64 |
| negated_const = GEN_INT (((~low_bits) & 0xfffffc00) | |
| (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32)); |
| #else |
| negated_const = immed_double_const ((~low_bits) & 0xfffffc00, |
| (~high_bits) & 0xffffffff, |
| DImode); |
| #endif |
| sparc_emit_set_const64 (temp, negated_const); |
| } |
| |
| /* If we are XOR'ing with -1, then we should emit a one's complement |
| instead. This way the combiner will notice logical operations |
| such as ANDN later on and substitute. */ |
| if (trailing_bits == 0x3ff) |
| { |
| emit_insn (gen_rtx_SET (VOIDmode, op0, |
| gen_rtx_NOT (DImode, temp))); |
| } |
| else |
| { |
| emit_insn (gen_rtx_SET (VOIDmode, |
| op0, |
| gen_safe_XOR64 (temp, |
| (-0x400 | trailing_bits)))); |
| } |
| return; |
| } |
| |
| /* 1) sethi %hi(xxx), %reg |
| * or %reg, %lo(xxx), %reg |
| * sllx %reg, yyy, %reg |
| * |
| * ??? This is just a generalized version of the low_bits==0 |
| * thing above, FIXME... |
| */ |
| if ((highest_bit_set - lowest_bit_set) < 32) |
| { |
| unsigned HOST_WIDE_INT focus_bits = |
| create_simple_focus_bits (high_bits, low_bits, |
| lowest_bit_set, 0); |
| |
| /* We can't get here in this state. */ |
| if (highest_bit_set < 32 |
| || lowest_bit_set >= 32) |
| abort (); |
| |
| /* So what we know is that the set bits straddle the |
| middle of the 64-bit word. */ |
| sparc_emit_set_const64_quick2 (op0, temp, |
| focus_bits, 0, |
| lowest_bit_set); |
| return; |
| } |
| |
| /* 1) sethi %hi(high_bits), %reg |
| * or %reg, %lo(high_bits), %reg |
| * sllx %reg, 32, %reg |
| * or %reg, low_bits, %reg |
| */ |
| if (SPARC_SIMM13_P (low_bits) |
| && ((int)low_bits > 0)) |
| { |
| sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32); |
| return; |
| } |
| |
| /* The easiest way when all else fails is full decomposition. */ |
| #if 0 |
| printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n", |
| high_bits, low_bits, ~high_bits, ~low_bits); |
| #endif |
| sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits); |
| } |
| |
| /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE, |
| return the mode to be used for the comparison. For floating-point, |
| CCFP[E]mode is used. CC_NOOVmode should be used when the first operand |
| is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special |
| processing is needed. */ |
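| /* For example, an SImode equality test of a PLUS against zero gets |
| CC_NOOVmode, since the overflow bit left by the arithmetic insn is |
| not meaningful there, while LT/LE/GT/GE comparisons of |
| floating-point operands get CCFPEmode so that the signaling |
| compare instructions are used. */ |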
| |
| enum machine_mode |
| select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED) |
| { |
| if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT) |
| { |
| switch (op) |
| { |
| case EQ: |
| case NE: |
| case UNORDERED: |
| case ORDERED: |
| case UNLT: |
| case UNLE: |
| case UNGT: |
| case UNGE: |
| case UNEQ: |
| case LTGT: |
| return CCFPmode; |
| |
| case LT: |
| case LE: |
| case GT: |
| case GE: |
| return CCFPEmode; |
| |
| default: |
| abort (); |
| } |
| } |
| else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS |
| || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT) |
| { |
| if (TARGET_ARCH64 && GET_MODE (x) == DImode) |
| return CCX_NOOVmode; |
| else |
| return CC_NOOVmode; |
| } |
| else |
| { |
| if (TARGET_ARCH64 && GET_MODE (x) == DImode) |
| return CCXmode; |
| else |
| return CCmode; |
| } |
| } |
| |
| /* X and Y are two things to compare using CODE. Emit the compare insn and |
| return the rtx for the cc reg in the proper mode. */ |
| |
| rtx |
| gen_compare_reg (enum rtx_code code, rtx x, rtx y) |
| { |
| enum machine_mode mode = SELECT_CC_MODE (code, x, y); |
| rtx cc_reg; |
| |
| /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the |
| fcc regs (cse can't tell they're really call clobbered regs and will |
| remove a duplicate comparison even if there is an intervening function |
| call - it will then try to reload the cc reg via an int reg which is why |
| we need the movcc patterns). It is possible to provide the movcc |
| patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two |
| registers (say %g1,%g5) and it takes about 6 insns. A better fix would be |
| to tell cse that CCFPE mode registers (even pseudos) are call |
| clobbered. */ |
| |
| /* ??? This is an experiment. Rather than making changes to cse which may |
| or may not be easy/clean, we do our own cse. This is possible because |
| we will generate hard registers. Cse knows they're call clobbered (it |
| doesn't know the same thing about pseudos). If we guess wrong, no big |
| deal, but if we win, great! */ |
| |
| if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT) |
| #if 1 /* experiment */ |
| { |
| int reg; |
| /* We cycle through the registers to ensure they're all exercised. */ |
| static int next_fcc_reg = 0; |
| /* Previous x,y for each fcc reg. */ |
| static rtx prev_args[4][2]; |
| |
| /* Scan prev_args for x,y. */ |
| for (reg = 0; reg < 4; reg++) |
| if (prev_args[reg][0] == x && prev_args[reg][1] == y) |
| break; |
| if (reg == 4) |
| { |
| reg = next_fcc_reg; |
| prev_args[reg][0] = x; |
| prev_args[reg][1] = y; |
| next_fcc_reg = (next_fcc_reg + 1) & 3; |
| } |
| cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG); |
| } |
| #else |
| cc_reg = gen_reg_rtx (mode); |
| #endif /* ! experiment */ |
| else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT) |
| cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG); |
| else |
| cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG); |
| |
| emit_insn (gen_rtx_SET (VOIDmode, cc_reg, |
| gen_rtx_COMPARE (mode, x, y))); |
| |
| return cc_reg; |
| } |
| |
| /* This function is used for v9 only. |
| CODE is the code for an Scc's comparison. |
| OPERANDS[0] is the target of the Scc insn. |
| OPERANDS[1] is the value we compare against const0_rtx (which hasn't |
| been generated yet). |
| |
| This function is needed to turn |
| |
| (set (reg:SI 110) |
| (gt (reg:CCX 100 %icc) |
| (const_int 0))) |
| into |
| (set (reg:SI 110) |
| (gt:DI (reg:CCX 100 %icc) |
| (const_int 0))) |
| |
| I.e., the instruction recognizer needs to see the mode of the |
| comparison to find the right instruction. We could use "gt:DI" |
| right in the define_expand, but leaving it out allows us to |
| handle DI, SI, etc. |
| |
| We refer to the global sparc compare operands sparc_compare_op0 and |
| sparc_compare_op1. */ |
| |
| int |
| gen_v9_scc (enum rtx_code compare_code, register rtx *operands) |
| { |
| rtx temp, op0, op1; |
| |
| if (! TARGET_ARCH64 |
| && (GET_MODE (sparc_compare_op0) == DImode |
| || GET_MODE (operands[0]) == DImode)) |
| return 0; |
| |
| op0 = sparc_compare_op0; |
| op1 = sparc_compare_op1; |
| |
| /* Try to use the movrCC insns. */ |
| if (TARGET_ARCH64 |
| && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT |
| && op1 == const0_rtx |
| && v9_regcmp_p (compare_code)) |
| { |
| /* Special case for op0 != 0. This can be done with one instruction if |
| operands[0] == sparc_compare_op0. */ |
| |
| if (compare_code == NE |
| && GET_MODE (operands[0]) == DImode |
| && rtx_equal_p (op0, operands[0])) |
| { |
| emit_insn (gen_rtx_SET (VOIDmode, operands[0], |
| gen_rtx_IF_THEN_ELSE (DImode, |
| gen_rtx_fmt_ee (compare_code, DImode, |
| op0, const0_rtx), |
| const1_rtx, |
| operands[0]))); |
| return 1; |
| } |
| |
| if (reg_overlap_mentioned_p (operands[0], op0)) |
| { |
| /* Handle the case where operands[0] == sparc_compare_op0. |
| We "early clobber" the result. */ |
| op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0)); |
| emit_move_insn (op0, sparc_compare_op0); |
| } |
| |
| emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx)); |
| if (GET_MODE (op0) != DImode) |
| { |
| temp = gen_reg_rtx (DImode); |
| convert_move (temp, op0, 0); |
| } |
| else |
| temp = op0; |
| emit_insn (gen_rtx_SET (VOIDmode, operands[0], |
| gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), |
| gen_rtx_fmt_ee (compare_code, DImode, |
| temp, const0_rtx), |
| const1_rtx, |
| operands[0]))); |
| return 1; |
| } |
| else |
| { |
| operands[1] = gen_compare_reg (compare_code, op0, op1); |
| |
| switch (GET_MODE (operands[1])) |
| { |
| case CCmode : |
| case CCXmode : |
| case CCFPEmode : |
| case CCFPmode : |
| break; |
| default : |
| abort (); |
| } |
| emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx)); |
| emit_insn (gen_rtx_SET (VOIDmode, operands[0], |
| gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), |
| gen_rtx_fmt_ee (compare_code, |
| GET_MODE (operands[1]), |
| operands[1], const0_rtx), |
| const1_rtx, operands[0]))); |
| return 1; |
| } |
| } |
| |
| /* Emit a conditional jump insn for the v9 architecture using comparison code |
| CODE and jump target LABEL. |
| This function exists to take advantage of the v9 brxx insns. */ |
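| /* For example, an EQ test of a DImode register against zero can then |
| be emitted as a single "brz" instruction. */ |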
| |
| void |
| emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label) |
| { |
| emit_jump_insn (gen_rtx_SET (VOIDmode, |
| pc_rtx, |
| gen_rtx_IF_THEN_ELSE (VOIDmode, |
| gen_rtx_fmt_ee (code, GET_MODE (op0), |
| op0, const0_rtx), |
| gen_rtx_LABEL_REF (VOIDmode, label), |
| pc_rtx))); |
| } |
| |
| /* Generate a DFmode part of a hard TFmode register. |
| REG is the TFmode hard register, LOW is 1 for the |
| low 64 bits of the register and 0 otherwise. */ |
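| /* E.g., for a TFmode value held in the quad starting at %f0 on this |
| big-endian target, LOW == 1 yields the %f2:%f3 half and LOW == 0 |
| the %f0:%f1 half. */ |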
| rtx |
| gen_df_reg (rtx reg, int low) |
| { |
| int regno = REGNO (reg); |
| |
| if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0)) |
| regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2; |
| return gen_rtx_REG (DFmode, regno); |
| } |
| |
| /* Generate a call to FUNC_NAME with OPERANDS. Operand 0 is the return value. |
| Unlike normal calls, TFmode operands are passed by reference. It is |
| assumed that no more than 3 operands are required. */ |
| |
| static void |
| emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands) |
| { |
| rtx ret_slot = NULL, arg[3], func_sym; |
| int i; |
| |
| /* We only expect to be called for conversions, unary, and binary ops. */ |
| if (nargs < 2 || nargs > 3) |
| abort (); |
| |
| for (i = 0; i < nargs; ++i) |
| { |
| rtx this_arg = operands[i]; |
| rtx this_slot; |
| |
| /* TFmode arguments and return values are passed by reference. */ |
| if (GET_MODE (this_arg) == TFmode) |
| { |
| int force_stack_temp; |
| |
| force_stack_temp = 0; |
| if (TARGET_BUGGY_QP_LIB && i == 0) |
| force_stack_temp = 1; |
| |
| if (GET_CODE (this_arg) == MEM |
| && ! force_stack_temp) |
| this_arg = XEXP (this_arg, 0); |
| else if (CONSTANT_P (this_arg) |
| && ! force_stack_temp) |
| { |
| this_slot = force_const_mem (TFmode, this_arg); |
| this_arg = XEXP (this_slot, 0); |
| } |
| else |
| { |
| this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0); |
| |
| /* Operand 0 is the return value. We'll copy it out later. */ |
| if (i > 0) |
| emit_move_insn (this_slot, this_arg); |
| else |
| ret_slot = this_slot; |
| |
| this_arg = XEXP (this_slot, 0); |
| } |
| } |
| |
| arg[i] = this_arg; |
| } |
| |
| func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name); |
| |
| if (GET_MODE (operands[0]) == TFmode) |
| { |
| if (nargs == 2) |
| emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2, |
| arg[0], GET_MODE (arg[0]), |
| arg[1], GET_MODE (arg[1])); |
| else |
| emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3, |
| arg[0], GET_MODE (arg[0]), |
| arg[1], GET_MODE (arg[1]), |
| arg[2], GET_MODE (arg[2])); |
| |
| if (ret_slot) |
| emit_move_insn (operands[0], ret_slot); |
| } |
| else |
| { |
| rtx ret; |
| |
| if (nargs != 2) |
| abort (); |
| |
| ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL, |
| GET_MODE (operands[0]), 1, |
| arg[1], GET_MODE (arg[1])); |
| |
| if (ret != operands[0]) |
| emit_move_insn (operands[0], ret); |
| } |
| } |
| |
| /* Expand soft-float TFmode calls to SPARC ABI routines. */ |
| |
| static void |
| emit_soft_tfmode_binop (enum rtx_code code, rtx *operands) |
| { |
| const char *func; |
| |
| switch (code) |
| { |
| case PLUS: |
| func = "_Qp_add"; |
| break; |
| case MINUS: |
| func = "_Qp_sub"; |
| break; |
| case MULT: |
| func = "_Qp_mul"; |
| break; |
| case DIV: |
| func = "_Qp_div"; |
| break; |
| default: |
| abort (); |
| } |
| |
| emit_soft_tfmode_libcall (func, 3, operands); |
| } |
| |
| static void |
| emit_soft_tfmode_unop (enum rtx_code code, rtx *operands) |
| { |
| const char *func; |
| |
| switch (code) |
| { |
| case SQRT: |
| func = "_Qp_sqrt"; |
| break; |
| default: |
| abort (); |
| } |
| |
| emit_soft_tfmode_libcall (func, 2, operands); |
| } |
| |
| static void |
| emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands) |
| { |
| const char *func; |
| |
| switch (code) |
| { |
| case FLOAT_EXTEND: |
| switch (GET_MODE (operands[1])) |
| { |
| case SFmode: |
| func = "_Qp_stoq"; |
| break; |
| case DFmode: |
| func = "_Qp_dtoq"; |
| break; |
| default: |
| abort (); |
| } |
| break; |
| |
| case FLOAT_TRUNCATE: |
| switch (GET_MODE (operands[0])) |
| { |
| case SFmode: |
| func = "_Qp_qtos"; |
| break; |
| case DFmode: |
| func = "_Qp_qtod"; |
| break; |
| default: |
| abort (); |
| } |
| break; |
| |
| case FLOAT: |
| switch (GET_MODE (operands[1])) |
| { |
| case SImode: |
| func = "_Qp_itoq"; |
| break; |
| case DImode: |
| func = "_Qp_xtoq"; |
| break; |
| default: |
| abort (); |
| } |
| break; |
| |
| case UNSIGNED_FLOAT: |
| switch (GET_MODE (operands[1])) |
| { |
| case SImode: |
| func = "_Qp_uitoq"; |
| break; |
| case DImode: |
| func = "_Qp_uxtoq"; |
| break; |
| default: |
| abort (); |
| } |
| break; |
| |
| case FIX: |
| switch (GET_MODE (operands[0])) |
| { |
| case SImode: |
| func = "_Qp_qtoi"; |
| break; |
| case DImode: |
| func = "_Qp_qtox"; |
| break; |
| default: |
| abort (); |
| } |
| break; |
| |
| case UNSIGNED_FIX: |
| switch (GET_MODE (operands[0])) |
| { |
| case SImode: |
| func = "_Qp_qtoui"; |
| break; |
| case DImode: |
| func = "_Qp_qtoux"; |
| break; |
| default: |
| abort (); |
| } |
| break; |
| |
| default: |
| abort (); |
| } |
| |
| emit_soft_tfmode_libcall (func, 2, operands); |
| } |
| |
| /* Expand a hard-float TFmode operation. All arguments must be in |
| registers. */ |
| |
| static void |
| emit_hard_tfmode_operation (enum rtx_code code, rtx *operands) |
| { |
| rtx op, dest; |
| |
| if (GET_RTX_CLASS (code) == '1') |
| { |
| operands[1] = force_reg (GET_MODE (operands[1]), operands[1]); |
| op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]); |
| } |
| else |
| { |
| operands[1] = force_reg (GET_MODE (operands[1]), operands[1]); |
| operands[2] = force_reg (GET_MODE (operands[2]), operands[2]); |
| op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]), |
| operands[1], operands[2]); |
| } |
| |
| if (register_operand (operands[0], VOIDmode)) |
| dest = operands[0]; |
| else |
| dest = gen_reg_rtx (GET_MODE (operands[0])); |
| |
| emit_insn (gen_rtx_SET (VOIDmode, dest, op)); |
| |
| if (dest != operands[0]) |
| emit_move_insn (operands[0], dest); |
| } |
| |
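| /* Expand a TFmode binary operation, using the hardware quad |
| instructions if available and the soft-float emulation routines |
| otherwise. The unop and cvt entry points below do likewise. */ |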
| void |
| emit_tfmode_binop (enum rtx_code code, rtx *operands) |
| { |
| if (TARGET_HARD_QUAD) |
| emit_hard_tfmode_operation (code, operands); |
| else |
| emit_soft_tfmode_binop (code, operands); |
| } |
| |
| void |
| emit_tfmode_unop (enum rtx_code code, rtx *operands) |
| { |
| if (TARGET_HARD_QUAD) |
| emit_hard_tfmode_operation (code, operands); |
| else |
| emit_soft_tfmode_unop (code, operands); |
| } |
| |
| void |
| emit_tfmode_cvt (enum rtx_code code, rtx *operands) |
| { |
| if (TARGET_HARD_QUAD) |
| emit_hard_tfmode_operation (code, operands); |
| else |
| emit_soft_tfmode_cvt (code, operands); |
| } |
| |
| /* Return nonzero if it is OK for a return peephole to merge the |
| return with the setting of an output register. */ |
| int |
| leaf_return_peephole_ok (void) |
| { |
| return (actual_fsize == 0); |
| } |
| |
| /* Return nonzero if a branch/jump/call instruction will emit a |
| nop into its delay slot. */ |
| |
| int |
| empty_delay_slot (rtx insn) |
| { |
| rtx seq; |
| |
| /* If there is no previous instruction (should not happen), return true. */ |
| if (PREV_INSN (insn) == NULL) |
| return 1; |
| |
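| /* When a delay slot has been filled, the branch and its delay insn |
| are wrapped in a SEQUENCE, in which case NEXT_INSN (PREV_INSN (insn)) |
| returns that SEQUENCE rather than INSN itself. */ |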
| seq = NEXT_INSN (PREV_INSN (insn)); |
| if (GET_CODE (PATTERN (seq)) == SEQUENCE) |
| return 0; |
| |
| return 1; |
| } |
| |
| /* Return nonzero if TRIAL can go into the function epilogue's |
| delay slot. SLOT is the slot we are trying to fill. */ |
| |
| int |
| eligible_for_epilogue_delay (rtx trial, int slot) |
| { |
| rtx pat, src; |
| |
| if (slot >= 1) |
| return 0; |
| |
| if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET) |
| return 0; |
| |
| if (get_attr_length (trial) != 1) |
| return 0; |
| |
| /* If there are any call-saved registers, we would have to scan |
| TRIAL to make sure it does not reference them. For now just |
| punt and reject it. */ |
| if (num_gfregs) |
| return 0; |
| |
| /* If the function uses __builtin_eh_return, the eh_return machinery |
| occupies the delay slot. */ |
| if (current_function_calls_eh_return) |
| return 0; |
| |
| /* In the case of a true leaf function, anything can go into the delay slot. |
| A delay slot only exists, however, if the frame size is zero; otherwise |
| we will put an insn to adjust the stack after the return. */ |
| if (current_function_uses_only_leaf_regs) |
| { |
| if (leaf_return_peephole_ok ()) |
| return ((get_attr_in_uncond_branch_delay (trial) |
| == IN_BRANCH_DELAY_TRUE)); |
| return 0; |
| } |
| |
| pat = PATTERN (trial); |
| |
| /* Otherwise, only operations which can be done in tandem with |
| a `restore' or `return' insn can go into the delay slot. */ |
| if (GET_CODE (SET_DEST (pat)) != REG |
| || REGNO (SET_DEST (pat)) < 24) |
| return 0; |
| |
| /* If this instruction sets a floating-point register and we have a |
| `return' instruction, it can probably go in. But `restore' will |
| not work with FP_REGS. */ |
| if (REGNO (SET_DEST (pat)) >= 32) |
| { |
| if (TARGET_V9 && ! epilogue_renumber (&pat, 1) |
| && (get_attr_in_uncond_branch_delay (trial) == IN_BRANCH_DELAY_TRUE)) |
| return 1; |
| return 0; |
| } |
| |
| /* The set of insns matched here must agree precisely with the set of |
| patterns paired with a RETURN in sparc.md. */ |
| |
| src = SET_SRC (pat); |
| |
| /* This matches "*return_[qhs]i" or even "*return_di" on TARGET_ARCH64. */ |
| if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT |
| && arith_operand (src, GET_MODE (src))) |
| { |
| if (TARGET_ARCH64) |
| return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode); |
| else |
| return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode); |
| } |
| |
| /* This matches "*return_di". */ |
| else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT |
| && arith_double_operand (src, GET_MODE (src))) |
| return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode); |
| |
| /* This matches "*return_sf_no_fpu". */ |
| else if (! TARGET_FPU && restore_operand (SET_DEST (pat), SFmode) |
| && register_operand (src, SFmode)) |
| return 1; |
| |
| /* If we have a `return' instruction, anything that does not use |
| local or output registers and can go into a delay slot wins. */ |
| else if (TARGET_V9 && ! epilogue_renumber (&pat, 1) |
| && (get_attr_in_uncond_branch_delay (trial) == IN_BRANCH_DELAY_TRUE)) |
| return 1; |
| |
| /* This matches "*return_addsi". */ |
| else if (GET_CODE (src) == PLUS |
| && arith_operand (XEXP (src, 0), SImode) |
| && arith_operand (XEXP (src, 1), SImode) |
| && (register_operand (XEXP (src, 0), SImode) |
| || register_operand (XEXP (src, 1), SImode))) |
| return 1; |
| |
| /* This matches "*return_adddi". */ |
| else if (GET_CODE (src) == PLUS |
| && arith_double_operand (XEXP (src, 0), DImode) |
| && arith_double_operand (XEXP (src, 1), DImode) |
| && (register_operand (XEXP (src, 0), DImode) |
| || register_operand (XEXP (src, 1), DImode))) |
| return 1; |
| |
| /* This can match "*return_losum_[sd]i". |
| Catch only some cases, so that return_losum* don't have |
| to be too big. */ |
| else if (GET_CODE (src) == LO_SUM |
| && ! TARGET_CM_MEDMID |
| && ((register_operand (XEXP (src, 0), SImode) |
| && immediate_operand (XEXP (src, 1), SImode)) |
| || (TARGET_ARCH64 |
| && register_operand (XEXP (src, 0), DImode) |
| && immediate_operand (XEXP (src, 1), DImode)))) |
| return 1; |
| |
| /* sll{,x} reg,1,reg2 is add reg,reg,reg2 as well. */ |
| else if (GET_CODE (src) == ASHIFT |
| && (register_operand (XEXP (src, 0), SImode) |
| || register_operand (XEXP (src, 0), DImode)) |
| && XEXP (src, 1) == const1_rtx) |
| return 1; |
| |
| return 0; |
| } |
| |
| /* Return nonzero if TRIAL can go into the call delay slot. */ |
| int |
| tls_call_delay (rtx trial) |
| { |
| rtx pat, unspec; |
| |
| /* Binutils allows |
| call __tls_get_addr, %tgd_call (foo) |
| add %l7, %o0, %o0, %tgd_add (foo) |
| while Sun as/ld does not. */ |
| if (TARGET_GNU_TLS || !TARGET_TLS) |
| return 1; |
| |
| pat = PATTERN (trial); |
| if (GET_CODE (pat) != SET || GET_CODE (SET_DEST (pat)) != PLUS) |
| return 1; |
| |
| unspec = XEXP (SET_DEST (pat), 1); |
| if (GET_CODE (unspec) != UNSPEC |
| || (XINT (unspec, 1) != UNSPEC_TLSGD |
| && XINT (unspec, 1) != UNSPEC_TLSLDM)) |
| return 1; |
| |
| return 0; |
| } |
| |
| /* Return nonzero if TRIAL can go into the sibling call |
| delay slot. */ |
| |
| int |
| eligible_for_sibcall_delay (rtx trial) |
| { |
| rtx pat, src; |
| |
| if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET) |
| return 0; |
| |
| if (get_attr_length (trial) != 1) |
| return 0; |
| |
| pat = PATTERN (trial); |
| |
| if (current_function_uses_only_leaf_regs) |
| { |
| /* If the tail call is done using the call instruction, |
| we have to restore %o7 in the delay slot. */ |
| if ((TARGET_ARCH64 && ! TARGET_CM_MEDLOW) || flag_pic) |
| return 0; |
| |
| /* %g1 is used to build the function address. */ |
| if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat)) |
| return 0; |
| |
| return 1; |
| } |
| |
| /* Otherwise, only operations which can be done in tandem with |
| a `restore' insn can go into the delay slot. */ |
| if (GET_CODE (SET_DEST (pat)) != REG |
| || REGNO (SET_DEST (pat)) < 24 |
| || REGNO (SET_DEST (pat)) >= 32) |
| return 0; |
| |
| /* If it mentions %o7, it can't go in, because sibcall will clobber it |
| in most cases. */ |
| if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat)) |
| return 0; |
| |
| src = SET_SRC (pat); |
| |
| if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT |
| && arith_operand (src, GET_MODE (src))) |
| { |
| if (TARGET_ARCH64) |
| return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode); |
| else |
| return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode); |
| } |
| |
| else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT |
| && arith_double_operand (src, GET_MODE (src))) |
| return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode); |
| |
| else if (! TARGET_FPU && restore_operand (SET_DEST (pat), SFmode) |
| && register_operand (src, SFmode)) |
| return 1; |
| |
| else if (GET_CODE (src) == PLUS |
| && arith_operand (XEXP (src, 0), SImode) |
| && arith_operand (XEXP (src, 1), SImode) |
| && (register_operand (XEXP (src, 0), SImode) |
| || register_operand (XEXP (src, 1), SImode))) |
| return 1; |
| |
| else if (GET_CODE (src) == PLUS |
| && arith_double_operand (XEXP (src, 0), DImode) |
| && arith_double_operand (XEXP (src, 1), DImode) |
| && (register_operand (XEXP (src, 0), DImode) |
| || register_operand (XEXP (src, 1), DImode))) |
| return 1; |
| |
| else if (GET_CODE (src) == LO_SUM |
| && ! TARGET_CM_MEDMID |
| && ((register_operand (XEXP (src, 0), SImode) |
| && immediate_operand (XEXP (src, 1), SImode)) |
| || (TARGET_ARCH64 |
| && register_operand (XEXP (src, 0), DImode) |
| && immediate_operand (XEXP (src, 1), DImode)))) |
| return 1; |
| |
| else if (GET_CODE (src) == ASHIFT |
| && (register_operand (XEXP (src, 0), SImode) |
| || register_operand (XEXP (src, 0), DImode)) |
| && XEXP (src, 1) == const1_rtx) |
| return 1; |
| |
| return 0; |
| } |
| |
| static int |
| check_return_regs (rtx x) |
| { |
| switch (GET_CODE (x)) |
| { |
| case REG: |
| return IN_OR_GLOBAL_P (x); |
| |
| case CONST_INT: |
| case CONST_DOUBLE: |
| case CONST: |
| case SYMBOL_REF: |
| case LABEL_REF: |
| return 1; |
| |
| case SET: |
| case IOR: |
| case AND: |
| case XOR: |
| case |