| /* Target Code for TI C6X |
| Copyright (C) 2010-2015 Free Software Foundation, Inc. |
| Contributed by Andrew Jenner <andrew@codesourcery.com> |
| Contributed by Bernd Schmidt <bernds@codesourcery.com> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it |
| under the terms of the GNU General Public License as published |
| by the Free Software Foundation; either version 3, or (at your |
| option) any later version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT |
| ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
| or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public |
| License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "tm.h" |
| #include "rtl.h" |
| #include "hash-set.h" |
| #include "machmode.h" |
| #include "vec.h" |
| #include "double-int.h" |
| #include "input.h" |
| #include "alias.h" |
| #include "symtab.h" |
| #include "wide-int.h" |
| #include "inchash.h" |
| #include "tree.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "varasm.h" |
| #include "calls.h" |
| #include "stringpool.h" |
| #include "insn-flags.h" |
| #include "output.h" |
| #include "insn-attr.h" |
| #include "insn-codes.h" |
| #include "hashtab.h" |
| #include "hard-reg-set.h" |
| #include "function.h" |
| #include "flags.h" |
| #include "statistics.h" |
| #include "real.h" |
| #include "fixed-value.h" |
| #include "insn-config.h" |
| #include "expmed.h" |
| #include "dojump.h" |
| #include "explow.h" |
| #include "emit-rtl.h" |
| #include "stmt.h" |
| #include "expr.h" |
| #include "regs.h" |
| #include "optabs.h" |
| #include "recog.h" |
| #include "ggc.h" |
| #include "dominance.h" |
| #include "cfg.h" |
| #include "cfgrtl.h" |
| #include "cfganal.h" |
| #include "lcm.h" |
| #include "cfgbuild.h" |
| #include "cfgcleanup.h" |
| #include "predict.h" |
| #include "basic-block.h" |
| #include "sched-int.h" |
| #include "timevar.h" |
| #include "tm_p.h" |
| #include "tm-preds.h" |
| #include "tm-constrs.h" |
| #include "df.h" |
| #include "diagnostic-core.h" |
| #include "hash-map.h" |
| #include "is-a.h" |
| #include "plugin-api.h" |
| #include "ipa-ref.h" |
| #include "cgraph.h" |
| #include "langhooks.h" |
| #include "target.h" |
| #include "target-def.h" |
| #include "sel-sched.h" |
| #include "debug.h" |
| #include "opts.h" |
| #include "hw-doloop.h" |
| #include "regrename.h" |
| #include "dumpfile.h" |
| #include "gimple-expr.h" |
| #include "builtins.h" |
| |
/* Table of supported architecture variants.  */
typedef struct
{
  /* Variant name as accepted by -march=.  */
  const char *arch;
  /* Representative CPU type for this variant.  */
  enum c6x_cpu_type type;
  /* Feature bits for this variant, ORed into c6x_insn_mask by
     c6x_option_override.  */
  unsigned short features;
} c6x_arch_table;

/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  Entries come from c6x-isas.def; the list
   is terminated by a sentinel with a NULL name.  */
static const c6x_arch_table all_isas[] =
{
#define C6X_ISA(NAME,DEVICE,FLAGS) \
  { NAME, DEVICE, FLAGS },
#include "c6x-isas.def"
#undef C6X_ISA
  { NULL, C6X_CPU_C62X, 0 }
};
| |
/* This is the parsed result of the "-march=" option, if given.  */
enum c6x_cpu_type c6x_arch = C6X_DEFAULT_ARCH;

/* A mask of insn types that are allowed by the architecture selected by
   the -march option.  */
unsigned long c6x_insn_mask = C6X_DEFAULT_INSN_MASK;

/* The instruction that is being output (as obtained from FINAL_PRESCAN_INSN).
   */
static rtx_insn *c6x_current_insn = NULL;

/* A decl we build to access __c6xabi_DSBT_base.  */
static GTY(()) tree dsbt_decl;

/* Determines whether we run our final scheduling pass or not.  We always
   avoid the normal second scheduling pass.  Saved from
   flag_schedule_insns_after_reload in c6x_option_override.  */
static int c6x_flag_schedule_insns2;

/* Determines whether we run variable tracking in machine dependent
   reorganization.  Saved from flag_var_tracking in c6x_file_start.  */
static int c6x_flag_var_tracking;

/* Determines whether we use modulo scheduling.  Saved from
   flag_modulo_sched in c6x_option_override.  */
static int c6x_flag_modulo_sched;

/* Record the state of flag_pic before we set it to 1 for DSBT.  */
int c6x_initial_flag_pic;

/* Per-insn information recorded by the scheduler; see the INSN_INFO
   accessors below.  */
typedef struct
{
  /* We record the clock cycle for every insn during scheduling.  */
  int clock;
  /* After scheduling, we run assign_reservations to choose unit
     reservations for all insns.  These are recorded here.  */
  int reservation;
  /* Records the new condition for insns which must be made
     conditional after scheduling.  An entry of NULL_RTX means no such
     change is necessary.  */
  rtx new_cond;
  /* True for the first insn that was scheduled in an ebb.  */
  bool ebb_start;
  /* The scheduler state after the insn, transformed into a mask of UNIT_QID
     bits rather than storing the state.  Meaningful only for the last
     insn in a cycle.  */
  unsigned int unit_mask;
} c6x_sched_insn_info;


/* Record a c6x_sched_insn_info structure for every insn in the function.  */
static vec<c6x_sched_insn_info> insn_info;

#define INSN_INFO_LENGTH (insn_info).length ()
#define INSN_INFO_ENTRY(N) (insn_info[(N)])

/* True once the .cfi_sections directive has been emitted for this
   translation unit; reset in c6x_file_start, set in
   c6x_output_file_unwind.  */
static bool done_cfi_sections;

/* Bit flags for the D, L, S and M functional units; the combined
   values name reservations that may be satisfied by more than one
   unit.  */
#define RESERVATION_FLAG_D 1
#define RESERVATION_FLAG_L 2
#define RESERVATION_FLAG_S 4
#define RESERVATION_FLAG_M 8
#define RESERVATION_FLAG_DL (RESERVATION_FLAG_D | RESERVATION_FLAG_L)
#define RESERVATION_FLAG_DS (RESERVATION_FLAG_D | RESERVATION_FLAG_S)
#define RESERVATION_FLAG_LS (RESERVATION_FLAG_L | RESERVATION_FLAG_S)
#define RESERVATION_FLAG_DLS (RESERVATION_FLAG_D | RESERVATION_FLAG_LS)

/* The DFA names of the units.  */
static const char *const c6x_unit_names[] =
{
  "d1", "l1", "s1", "m1", "fps1", "fpl1", "adddps1", "adddpl1",
  "d2", "l2", "s2", "m2", "fps2", "fpl2", "adddps2", "adddpl2"
};

/* The DFA unit number for each unit in c6x_unit_names[].  Filled in by
   c6x_option_override via get_cpu_unit_code.  */
static int c6x_unit_codes[ARRAY_SIZE (c6x_unit_names)];

/* Unit query IDs.  Side-2 units use the same IDs plus
   UNIT_QID_SIDE_OFFSET, matching the layout of c6x_unit_names.  */
#define UNIT_QID_D1 0
#define UNIT_QID_L1 1
#define UNIT_QID_S1 2
#define UNIT_QID_M1 3
#define UNIT_QID_FPS1 4
#define UNIT_QID_FPL1 5
#define UNIT_QID_ADDDPS1 6
#define UNIT_QID_ADDDPL1 7
#define UNIT_QID_SIDE_OFFSET 8

/* NOTE(review): these look like the reservation numbers for the S1/S2
   units used by the code that records reservations — confirm against
   the insn reservation numbering (not visible in this chunk).  */
#define RESERVATION_S1 2
#define RESERVATION_S2 10

/* An enum for the unit requirements we count in the UNIT_REQS table.  */
enum unitreqs
{
  UNIT_REQ_D,
  UNIT_REQ_L,
  UNIT_REQ_S,
  UNIT_REQ_M,
  UNIT_REQ_DL,
  UNIT_REQ_DS,
  UNIT_REQ_LS,
  UNIT_REQ_DLS,
  UNIT_REQ_T,
  UNIT_REQ_X,
  UNIT_REQ_MAX
};

/* A table used to count unit requirements.  Used when computing minimum
   iteration intervals.  The first index is presumably the machine side
   (A = 0, B = 1) — confirm against the code filling it in.  */
typedef int unit_req_table[2][UNIT_REQ_MAX];
static unit_req_table unit_reqs;

/* Register map for debugging.  Entries of -1 (wrapping to all-ones in
   this unsigned array) mark registers with no debug equivalent.  */
unsigned const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* A0 - A15.  */
  37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, /* A16 - A32.  */
  50, 51, 52,
  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, /* B0 - B15.  */
  29, 30, 31,
  53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, /* B16 - B32.  */
  66, 67, 68,
  -1, -1, -1 /* FP, ARGP, ILC.  */
};
| |
/* Allocate a new, cleared machine_function structure.  Installed as
   init_machine_status by c6x_option_override.  */

static struct machine_function *
c6x_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
| |
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
c6x_option_override (void)
{
  unsigned i;

  /* If -march= was given, select the CPU type and replace the
     per-CPU feature bits in c6x_insn_mask with the chosen ISA's.  */
  if (global_options_set.x_c6x_arch_option)
    {
      c6x_arch = all_isas[c6x_arch_option].type;
      c6x_insn_mask &= ~C6X_INSNS_ALL_CPU_BITS;
      c6x_insn_mask |= all_isas[c6x_arch_option].features;
    }

  /* We run our own final scheduling pass; remember the user's setting
     and suppress the generic second scheduling pass.  */
  c6x_flag_schedule_insns2 = flag_schedule_insns_after_reload;
  flag_schedule_insns_after_reload = 0;

  /* Likewise for modulo scheduling.  */
  c6x_flag_modulo_sched = flag_modulo_sched;
  flag_modulo_sched = 0;

  init_machine_status = c6x_init_machine_status;

  /* Translate the DFA unit names into unit codes for the scheduler.  */
  for (i = 0; i < ARRAY_SIZE (c6x_unit_names); i++)
    c6x_unit_codes[i] = get_cpu_unit_code (c6x_unit_names[i]);

  /* PIC is only supported via DSBT on this target.  Record the original
     flag_pic in c6x_initial_flag_pic, then force PIC on when DSBT is
     enabled.  */
  if (flag_pic && !TARGET_DSBT)
    {
      error ("-fpic and -fPIC not supported without -mdsbt on this target");
      flag_pic = 0;
    }
  c6x_initial_flag_pic = flag_pic;
  if (TARGET_DSBT && !flag_pic)
    flag_pic = 1;
}
| |
| |
/* Implement the TARGET_CONDITIONAL_REGISTER_USAGE hook.  */

static void
c6x_conditional_register_usage (void)
{
  int i;
  /* On C62x and C67x, registers 16-31 of each file (A16-A31, B16-B31)
     do not exist; mark them fixed.  */
  if (c6x_arch == C6X_CPU_C62X || c6x_arch == C6X_CPU_C67X)
    for (i = 16; i < 32; i++)
      {
	fixed_regs[i] = 1;
	fixed_regs[32 + i] = 1;
      }
  /* With the C64x+ instruction set, A0 moves from the nonpredicate
     register classes into the predicate classes.  */
  if (TARGET_INSNS_64)
    {
      SET_HARD_REG_BIT (reg_class_contents[(int)PREDICATE_A_REGS],
			REG_A0);
      SET_HARD_REG_BIT (reg_class_contents[(int)PREDICATE_REGS],
			REG_A0);
      CLEAR_HARD_REG_BIT (reg_class_contents[(int)NONPREDICATE_A_REGS],
			  REG_A0);
      CLEAR_HARD_REG_BIT (reg_class_contents[(int)NONPREDICATE_REGS],
			  REG_A0);
    }
}
| |
/* Symbol refs for the renamed ABI comparison and block-move library
   functions.  All are initialized in c6x_init_libfuncs.  */
static GTY(()) rtx eqdf_libfunc;
static GTY(()) rtx nedf_libfunc;
static GTY(()) rtx ledf_libfunc;
static GTY(()) rtx ltdf_libfunc;
static GTY(()) rtx gedf_libfunc;
static GTY(()) rtx gtdf_libfunc;
static GTY(()) rtx eqsf_libfunc;
static GTY(()) rtx nesf_libfunc;
static GTY(()) rtx lesf_libfunc;
static GTY(()) rtx ltsf_libfunc;
static GTY(()) rtx gesf_libfunc;
static GTY(()) rtx gtsf_libfunc;
static GTY(()) rtx strasgi_libfunc;
static GTY(()) rtx strasgi64p_libfunc;
| |
/* Implement the TARGET_INIT_LIBFUNCS macro.  We use this to rename library
   functions to match the C6x ABI.  */

static void
c6x_init_libfuncs (void)
{
  /* Double-precision floating-point arithmetic.  */
  set_optab_libfunc (add_optab, DFmode, "__c6xabi_addd");
  set_optab_libfunc (sdiv_optab, DFmode, "__c6xabi_divd");
  set_optab_libfunc (smul_optab, DFmode, "__c6xabi_mpyd");
  set_optab_libfunc (neg_optab, DFmode, "__c6xabi_negd");
  set_optab_libfunc (sub_optab, DFmode, "__c6xabi_subd");

  /* Single-precision floating-point arithmetic.  */
  set_optab_libfunc (add_optab, SFmode, "__c6xabi_addf");
  set_optab_libfunc (sdiv_optab, SFmode, "__c6xabi_divf");
  set_optab_libfunc (smul_optab, SFmode, "__c6xabi_mpyf");
  set_optab_libfunc (neg_optab, SFmode, "__c6xabi_negf");
  set_optab_libfunc (sub_optab, SFmode, "__c6xabi_subf");

  /* Floating-point comparisons.  Kept in the statics above so the
     comparison expanders can reference them directly.  */
  eqsf_libfunc = init_one_libfunc ("__c6xabi_eqf");
  nesf_libfunc = init_one_libfunc ("__c6xabi_neqf");
  lesf_libfunc = init_one_libfunc ("__c6xabi_lef");
  ltsf_libfunc = init_one_libfunc ("__c6xabi_ltf");
  gesf_libfunc = init_one_libfunc ("__c6xabi_gef");
  gtsf_libfunc = init_one_libfunc ("__c6xabi_gtf");
  eqdf_libfunc = init_one_libfunc ("__c6xabi_eqd");
  nedf_libfunc = init_one_libfunc ("__c6xabi_neqd");
  ledf_libfunc = init_one_libfunc ("__c6xabi_led");
  ltdf_libfunc = init_one_libfunc ("__c6xabi_ltd");
  gedf_libfunc = init_one_libfunc ("__c6xabi_ged");
  gtdf_libfunc = init_one_libfunc ("__c6xabi_gtd");

  /* Clear the optab entries for the ordered comparisons so that generic
     code does not call them directly; only NE and UNORD keep named
     libfuncs.  */
  set_optab_libfunc (eq_optab, SFmode, NULL);
  set_optab_libfunc (ne_optab, SFmode, "__c6xabi_neqf");
  set_optab_libfunc (gt_optab, SFmode, NULL);
  set_optab_libfunc (ge_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, NULL);
  set_optab_libfunc (le_optab, SFmode, NULL);
  set_optab_libfunc (unord_optab, SFmode, "__c6xabi_unordf");
  set_optab_libfunc (eq_optab, DFmode, NULL);
  set_optab_libfunc (ne_optab, DFmode, "__c6xabi_neqd");
  set_optab_libfunc (gt_optab, DFmode, NULL);
  set_optab_libfunc (ge_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, NULL);
  set_optab_libfunc (le_optab, DFmode, NULL);
  set_optab_libfunc (unord_optab, DFmode, "__c6xabi_unordd");

  /* Floating-point to integer conversions.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__c6xabi_fixdi");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__c6xabi_fixdu");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__c6xabi_fixdlli");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__c6xabi_fixdull");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__c6xabi_fixfi");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__c6xabi_fixfu");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__c6xabi_fixflli");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__c6xabi_fixfull");

  /* Conversions between floating types.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__c6xabi_cvtdf");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__c6xabi_cvtfd");

  /* Integer to floating-point conversions.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__c6xabi_fltid");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__c6xabi_fltud");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__c6xabi_fltllid");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__c6xabi_fltulld");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__c6xabi_fltif");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__c6xabi_fltuf");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__c6xabi_fltllif");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__c6xabi_fltullf");

  /* Long long.  */
  set_optab_libfunc (smul_optab, DImode, "__c6xabi_mpyll");
  set_optab_libfunc (ashl_optab, DImode, "__c6xabi_llshl");
  set_optab_libfunc (lshr_optab, DImode, "__c6xabi_llshru");
  set_optab_libfunc (ashr_optab, DImode, "__c6xabi_llshr");

  /* Integer division and remainder.  */
  set_optab_libfunc (sdiv_optab, SImode, "__c6xabi_divi");
  set_optab_libfunc (udiv_optab, SImode, "__c6xabi_divu");
  set_optab_libfunc (smod_optab, SImode, "__c6xabi_remi");
  set_optab_libfunc (umod_optab, SImode, "__c6xabi_remu");
  set_optab_libfunc (sdivmod_optab, SImode, "__c6xabi_divremi");
  set_optab_libfunc (udivmod_optab, SImode, "__c6xabi_divremu");
  set_optab_libfunc (sdiv_optab, DImode, "__c6xabi_divlli");
  set_optab_libfunc (udiv_optab, DImode, "__c6xabi_divull");
  set_optab_libfunc (smod_optab, DImode, "__c6xabi_remlli");
  set_optab_libfunc (umod_optab, DImode, "__c6xabi_remull");
  set_optab_libfunc (udivmod_optab, DImode, "__c6xabi_divremull");

  /* Block move.  */
  strasgi_libfunc = init_one_libfunc ("__c6xabi_strasgi");
  strasgi64p_libfunc = init_one_libfunc ("__c6xabi_strasgi_64plus");
}
| |
/* Begin the assembly file.  Emits the c6xabi build attributes after the
   default file prologue.  */

static void
c6x_file_start (void)
{
  /* Variable tracking should be run after all optimizations which change order
     of insns.  It also needs a valid CFG.  This can't be done in
     c6x_override_options, because flag_var_tracking is finalized after
     that.  */
  c6x_flag_var_tracking = flag_var_tracking;
  flag_var_tracking = 0;

  /* Reset so c6x_output_file_unwind emits .cfi_sections once per file.  */
  done_cfi_sections = false;
  default_file_start ();

  /* Arrays are aligned to 8-byte boundaries.  */
  asm_fprintf (asm_out_file,
	       "\t.c6xabi_attribute Tag_ABI_array_object_alignment, 0\n");
  asm_fprintf (asm_out_file,
	       "\t.c6xabi_attribute Tag_ABI_array_object_align_expected, 0\n");

  /* Stack alignment is 8 bytes.  */
  asm_fprintf (asm_out_file,
	       "\t.c6xabi_attribute Tag_ABI_stack_align_needed, 0\n");
  asm_fprintf (asm_out_file,
	       "\t.c6xabi_attribute Tag_ABI_stack_align_preserved, 0\n");

#if 0 /* FIXME: Reenable when TI's tools are fixed.  */
  /* ??? Ideally we'd check flag_short_wchar somehow.  */
  asm_fprintf (asm_out_file, "\t.c6xabi_attribute Tag_ABI_wchar_t, %d\n", 2);
#endif

  /* We conform to version 1.0 of the ABI.  */
  asm_fprintf (asm_out_file,
	       "\t.c6xabi_attribute Tag_ABI_conformance, \"1.0\"\n");

}
| |
/* The LTO frontend only enables exceptions when it sees a function that
   uses it.  This changes the return value of dwarf2out_do_frame, so we
   have to check before every function.  Emits the .cfi_sections
   directive at most once per file (tracked by done_cfi_sections).  */

void
c6x_output_file_unwind (FILE * f)
{
  if (done_cfi_sections)
    return;

  /* Output a .cfi_sections directive.  */
  if (dwarf2out_do_frame ())
    {
      if (flag_unwind_tables || flag_exceptions)
	{
	  /* Unwinding info goes in the EABI .c6xabi.exidx section; add
	     .debug_frame too when DWARF debug info was requested.  */
	  if (write_symbols == DWARF2_DEBUG
	      || write_symbols == VMS_AND_DWARF2_DEBUG)
	    asm_fprintf (f, "\t.cfi_sections .debug_frame, .c6xabi.exidx\n");
	  else
	    asm_fprintf (f, "\t.cfi_sections .c6xabi.exidx\n");
	}
      else
	asm_fprintf (f, "\t.cfi_sections .debug_frame\n");
      done_cfi_sections = true;
    }
}
| |
| /* Output unwind directives at the end of a function. */ |
| |
| static void |
| c6x_output_fn_unwind (FILE * f) |
| { |
| /* Return immediately if we are not generating unwinding tables. */ |
| if (! (flag_unwind_tables || flag_exceptions)) |
| return; |
| |
| /* If this function will never be unwound, then mark it as such. */ |
| if (!(flag_unwind_tables || crtl->uses_eh_lsda) |
| && (TREE_NOTHROW (current_function_decl) |
| || crtl->all_throwers_are_sibcalls)) |
| fputs("\t.cantunwind\n", f); |
| |
| fputs ("\t.endp\n", f); |
| } |
| |
| |
/* Stack and Calling.  */

/* The ten argument-passing registers, in slot order; arguments
   alternate between the A and B register files.  */
int argument_registers[10] =
{
  REG_A4, REG_B4,
  REG_A6, REG_B6,
  REG_A8, REG_B8,
  REG_A10, REG_B10,
  REG_A12, REG_B12
};
| |
/* Implements the macro INIT_CUMULATIVE_ARGS defined in c6x.h.
   CUM is the state to initialize, FNTYPE the called function's type (may
   be NULL), LIBNAME is nonnull for libcalls.  N_NAMED_ARGS is unusable
   because it is -1 for incoming arguments (see comment below).  */

void
c6x_init_cumulative_args (CUMULATIVE_ARGS *cum, const_tree fntype, rtx libname,
			  int n_named_args ATTRIBUTE_UNUSED)
{
  cum->count = 0;
  /* At most ten arguments go in registers (see argument_registers).  */
  cum->nregs = 10;
  if (!libname && fntype)
    {
      /* We need to find out the number of named arguments.  Unfortunately,
	 for incoming arguments, N_NAMED_ARGS is set to -1.  */
      if (stdarg_p (fntype))
	cum->nregs = type_num_arguments (fntype) - 1;
      if (cum->nregs > 10)
	cum->nregs = 10;
    }
}
| |
| /* Implements the macro FUNCTION_ARG defined in c6x.h. */ |
| |
| static rtx |
| c6x_function_arg (cumulative_args_t cum_v, machine_mode mode, |
| const_tree type, bool named ATTRIBUTE_UNUSED) |
| { |
| CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); |
| if (cum->count >= cum->nregs) |
| return NULL_RTX; |
| if (type) |
| { |
| HOST_WIDE_INT size = int_size_in_bytes (type); |
| if (TARGET_BIG_ENDIAN && AGGREGATE_TYPE_P (type)) |
| { |
| if (size > 4) |
| { |
| rtx reg1 = gen_rtx_REG (SImode, argument_registers[cum->count] + 1); |
| rtx reg2 = gen_rtx_REG (SImode, argument_registers[cum->count]); |
| rtvec vec = gen_rtvec (2, gen_rtx_EXPR_LIST (VOIDmode, reg1, const0_rtx), |
| gen_rtx_EXPR_LIST (VOIDmode, reg2, GEN_INT (4))); |
| return gen_rtx_PARALLEL (mode, vec); |
| } |
| } |
| } |
| return gen_rtx_REG (mode, argument_registers[cum->count]); |
| } |
| |
| static void |
| c6x_function_arg_advance (cumulative_args_t cum_v, |
| machine_mode mode ATTRIBUTE_UNUSED, |
| const_tree type ATTRIBUTE_UNUSED, |
| bool named ATTRIBUTE_UNUSED) |
| { |
| CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); |
| cum->count++; |
| } |
| |
| |
| /* Return true if BLOCK_REG_PADDING (MODE, TYPE, FIRST) should return |
| upward rather than downward. */ |
| |
| bool |
| c6x_block_reg_pad_upward (machine_mode mode ATTRIBUTE_UNUSED, |
| const_tree type, bool first) |
| { |
| HOST_WIDE_INT size; |
| |
| if (!TARGET_BIG_ENDIAN) |
| return true; |
| if (!first) |
| return true; |
| if (!type) |
| return true; |
| size = int_size_in_bytes (type); |
| return size == 3; |
| } |
| |
/* Implement TARGET_FUNCTION_ARG_BOUNDARY.  Returns the alignment in
   bits for an argument of the given MODE and TYPE.  */

static unsigned int
c6x_function_arg_boundary (machine_mode mode, const_tree type)
{
  unsigned int boundary = type ? TYPE_ALIGN (type) : GET_MODE_BITSIZE (mode);

  /* Anything more aligned than a word is capped at two words.  */
  if (boundary > BITS_PER_WORD)
    return 2 * BITS_PER_WORD;

  if (mode == BLKmode)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if (size > 4)
	return 2 * BITS_PER_WORD;
      if (boundary < BITS_PER_WORD)
	{
	  if (size >= 3)
	    return BITS_PER_WORD;
	  if (size >= 2)
	    /* NOTE(review): this is 2 bits, not 2 bytes — presumably
	       2 * BITS_PER_UNIT was intended to mean halfword (16-bit)
	       alignment only if BITS_PER_UNIT were 8; as written it
	       yields 16.  Confirm intent before changing.  */
	    return 2 * BITS_PER_UNIT;
	}
    }
  return boundary;
}
| |
/* Implement TARGET_FUNCTION_ARG_ROUND_BOUNDARY.  Stack argument slots
   are rounded to the same boundary used for argument alignment.  */
static unsigned int
c6x_function_arg_round_boundary (machine_mode mode, const_tree type)
{
  return c6x_function_arg_boundary (mode, type);
}
| |
| /* TARGET_FUNCTION_VALUE implementation. Returns an RTX representing the place |
| where function FUNC returns or receives a value of data type TYPE. */ |
| |
| static rtx |
| c6x_function_value (const_tree type, const_tree func ATTRIBUTE_UNUSED, |
| bool outgoing ATTRIBUTE_UNUSED) |
| { |
| /* Functions return values in register A4. When returning aggregates, we may |
| have to adjust for endianness. */ |
| if (TARGET_BIG_ENDIAN && type && AGGREGATE_TYPE_P (type)) |
| { |
| HOST_WIDE_INT size = int_size_in_bytes (type); |
| if (size > 4) |
| { |
| |
| rtx reg1 = gen_rtx_REG (SImode, REG_A4 + 1); |
| rtx reg2 = gen_rtx_REG (SImode, REG_A4); |
| rtvec vec = gen_rtvec (2, gen_rtx_EXPR_LIST (VOIDmode, reg1, const0_rtx), |
| gen_rtx_EXPR_LIST (VOIDmode, reg2, GEN_INT (4))); |
| return gen_rtx_PARALLEL (TYPE_MODE (type), vec); |
| } |
| } |
| return gen_rtx_REG (TYPE_MODE (type), REG_A4); |
| } |
| |
/* Implement TARGET_LIBCALL_VALUE.  Libcall results come back in A4,
   like ordinary function values.  */

static rtx
c6x_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, REG_A4);
}
| |
/* TARGET_STRUCT_VALUE_RTX implementation.  The hidden pointer for a
   structure return value is passed in A3.  */

static rtx
c6x_struct_value_rtx (tree type ATTRIBUTE_UNUSED, int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, REG_A3);
}
| |
/* Implement TARGET_FUNCTION_VALUE_REGNO_P.  A4 is the only register
   used to return values.  */

static bool
c6x_function_value_regno_p (const unsigned int regno)
{
  return regno == REG_A4;
}
| |
| /* Types larger than 64 bit, and variable sized types, are passed by |
| reference. The callee must copy them; see c6x_callee_copies. */ |
| |
| static bool |
| c6x_pass_by_reference (cumulative_args_t cum_v ATTRIBUTE_UNUSED, |
| machine_mode mode, const_tree type, |
| bool named ATTRIBUTE_UNUSED) |
| { |
| int size = -1; |
| if (type) |
| size = int_size_in_bytes (type); |
| else if (mode != VOIDmode) |
| size = GET_MODE_SIZE (mode); |
| return size > 2 * UNITS_PER_WORD || size == -1; |
| } |
| |
| /* Decide whether a type should be returned in memory (true) |
| or in a register (false). This is called by the macro |
| TARGET_RETURN_IN_MEMORY. */ |
| |
| static bool |
| c6x_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED) |
| { |
| int size = int_size_in_bytes (type); |
| return size > 2 * UNITS_PER_WORD || size == -1; |
| } |
| |
| /* Values which must be returned in the most-significant end of the return |
| register. */ |
| |
| static bool |
| c6x_return_in_msb (const_tree valtype) |
| { |
| HOST_WIDE_INT size = int_size_in_bytes (valtype); |
| return TARGET_BIG_ENDIAN && AGGREGATE_TYPE_P (valtype) && size == 3; |
| } |
| |
/* Implement TARGET_CALLEE_COPIES.  Arguments passed by reference (see
   c6x_pass_by_reference) are copied by the callee, so always return
   true.  */

static bool
c6x_callee_copies (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED,
		   const_tree type ATTRIBUTE_UNUSED,
		   bool named ATTRIBUTE_UNUSED)
{
  return true;
}
| |
/* Return the type to use as __builtin_va_list.  A plain character
   pointer suffices on this target.  */
static tree
c6x_build_builtin_va_list (void)
{
  return build_pointer_type (char_type_node);
}
| |
/* Emit the fixed part of the trampoline: load the target address into
   B0 and the static chain into A2, then branch to B0.  The mvkl/mvkh
   immediate fields are patched in by c6x_initialize_trampoline.  */
static void
c6x_asm_trampoline_template (FILE *f)
{
  fprintf (f, "\t.long\t0x0000002b\n"); /* mvkl .s2 fnlow,B0 */
  fprintf (f, "\t.long\t0x01000028\n"); /* || mvkl .s1 sclow,A2 */
  fprintf (f, "\t.long\t0x0000006b\n"); /* mvkh .s2 fnhigh,B0 */
  fprintf (f, "\t.long\t0x01000068\n"); /* || mvkh .s1 schigh,A2 */
  fprintf (f, "\t.long\t0x00000362\n"); /* b .s2 B0 */
  fprintf (f, "\t.long\t0x00008000\n"); /* nop 5 */
  fprintf (f, "\t.long\t0x00000000\n"); /* nop */
  fprintf (f, "\t.long\t0x00000000\n"); /* nop */
}
| |
/* Emit RTL insns to initialize the variable parts of a trampoline at
   TRAMP.  FNADDR is an RTX for the address of the function's pure
   code.  CXT is an RTX for the static chain value for the function.
   Patches the 16-bit immediate fields (bits 7..22) of the four
   mvkl/mvkh words emitted by c6x_asm_trampoline_template.  */

static void
c6x_initialize_trampoline (rtx tramp, tree fndecl, rtx cxt)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx t1 = copy_to_reg (fnaddr);
  rtx t2 = copy_to_reg (cxt);
  rtx mask = gen_reg_rtx (SImode);
  int i;

  emit_block_move (tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);

  /* The immediate field occupies bits 7..22 of each instruction word.  */
  emit_move_insn (mask, GEN_INT (0xffff << 7));

  for (i = 0; i < 4; i++)
    {
      rtx mem = adjust_address (tramp, SImode, i * 4);
      /* Odd words patch the static chain, even words the function
	 address; the first pair gets the low halves (shift left into
	 the immediate field), the second pair the high halves.  */
      rtx t = (i & 1) ? t2 : t1;
      rtx v1 = gen_reg_rtx (SImode);
      rtx v2 = gen_reg_rtx (SImode);
      emit_move_insn (v1, mem);
      if (i < 2)
	emit_insn (gen_ashlsi3 (v2, t, GEN_INT (7)));
      else
	emit_insn (gen_lshrsi3 (v2, t, GEN_INT (9)));
      emit_insn (gen_andsi3 (v2, v2, mask));
      emit_insn (gen_iorsi3 (v2, v2, v1));
      emit_move_insn (mem, v2);
    }
#ifdef CLEAR_INSN_CACHE
  /* Flush the instruction cache over the freshly written trampoline.  */
  tramp = XEXP (tramp, 0);
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__gnu_clear_cache"),
		     LCT_NORMAL, VOIDmode, 2, tramp, Pmode,
		     plus_constant (Pmode, tramp, TRAMPOLINE_SIZE),
		     Pmode);
#endif
}
| |
/* Determine whether c6x_output_mi_thunk can succeed.  The thunk uses a
   direct branch to the target function, which only works without
   -mlong-calls.  */

static bool
c6x_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
			 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
			 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			 const_tree function ATTRIBUTE_UNUSED)
{
  return !TARGET_LONG_CALLS;
}
| |
/* Output the assembler code for a thunk function.  THUNK is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at
   *(*this + vcall_offset) should be added to THIS.

   The code is hand-scheduled: the branch is issued early and the
   adjustment instructions fill its delay slots, with "nop" padding
   chosen per path so the adjustments complete before the branch
   takes effect.  */

static void
c6x_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
		     tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset, tree function)
{
  rtx xops[5];
  /* The this parameter is passed as the first argument.  */
  rtx this_rtx = gen_rtx_REG (Pmode, REG_A4);

  c6x_current_insn = NULL;

  xops[4] = XEXP (DECL_RTL (function), 0);
  if (!vcall_offset)
    {
      /* Branch now; the delta adjustment (if any) executes in the
	 branch delay slots.  */
      output_asm_insn ("b .s2 \t%4", xops);
      if (!delta)
	output_asm_insn ("nop 5", xops);
    }

  /* Adjust the this parameter by a fixed constant.  The instruction
     sequence (and hence the nop padding) depends on how many
     instructions are needed to materialize DELTA.  */
  if (delta)
    {
      xops[0] = GEN_INT (delta);
      xops[1] = this_rtx;
      if (delta >= -16 && delta <= 15)
	{
	  output_asm_insn ("add .s1 %0, %1, %1", xops);
	  if (!vcall_offset)
	    output_asm_insn ("nop 4", xops);
	}
      else if (delta >= 16 && delta < 32)
	{
	  output_asm_insn ("add .d1 %0, %1, %1", xops);
	  if (!vcall_offset)
	    output_asm_insn ("nop 4", xops);
	}
      else if (delta >= -32768 && delta < 32768)
	{
	  output_asm_insn ("mvk .s1 %0, A0", xops);
	  output_asm_insn ("add .d1 %1, A0, %1", xops);
	  if (!vcall_offset)
	    output_asm_insn ("nop 3", xops);
	}
      else
	{
	  output_asm_insn ("mvkl .s1 %0, A0", xops);
	  output_asm_insn ("mvkh .s1 %0, A0", xops);
	  output_asm_insn ("add .d1 %1, A0, %1", xops);
	  if (!vcall_offset)
	    output_asm_insn ("nop 3", xops);
	}
    }

  /* Adjust the this parameter by a value stored in the vtable.  A0
     holds the vtable pointer, A3 the loaded adjustment word.  */
  if (vcall_offset)
    {
      rtx a0tmp = gen_rtx_REG (Pmode, REG_A0);
      rtx a3tmp = gen_rtx_REG (Pmode, REG_A3);

      xops[1] = a3tmp;
      xops[2] = a0tmp;
      xops[3] = gen_rtx_MEM (Pmode, a0tmp);
      output_asm_insn ("mv .s1 a4, %2", xops);
      output_asm_insn ("ldw .d1t1 %3, %2", xops);

      /* Adjust the this parameter.  */
      xops[0] = gen_rtx_MEM (Pmode, plus_constant (Pmode, a0tmp,
						   vcall_offset));
      if (!memory_operand (xops[0], Pmode))
	{
	  /* The offset is too large for an addressing mode; compute
	     the address explicitly in A1.  */
	  rtx tmp2 = gen_rtx_REG (Pmode, REG_A1);
	  xops[0] = GEN_INT (vcall_offset);
	  xops[1] = tmp2;
	  output_asm_insn ("mvkl .s1 %0, %1", xops);
	  output_asm_insn ("mvkh .s1 %0, %1", xops);
	  output_asm_insn ("nop 2", xops);
	  output_asm_insn ("add .d1 %2, %1, %2", xops);
	  xops[0] = gen_rtx_MEM (Pmode, a0tmp);
	}
      else
	output_asm_insn ("nop 4", xops);
      xops[2] = this_rtx;
      /* The final add executes in the branch delay slots, after the
	 load's delay has elapsed.  */
      output_asm_insn ("ldw .d1t1 %0, %1", xops);
      output_asm_insn ("|| b .s2 \t%4", xops);
      output_asm_insn ("nop 4", xops);
      output_asm_insn ("add .d1 %2, %1, %2", xops);
    }
}
| |
/* Return true if EXP goes in small data/bss.  Variables with an
   explicit section attribute qualify only when placed in one of the
   known near/small sections; otherwise defer to PLACE_IN_SDATA_P.  */

static bool
c6x_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never small data.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  /* Weak symbols may be overridden by a non-small definition.  */
  if (TREE_CODE (exp) == VAR_DECL && DECL_WEAK (exp))
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = DECL_SECTION_NAME (exp);

      /* Exact names and their dotted-suffix variants, plus the
	 .gnu.linkonce small-section prefixes.  */
      if (strcmp (section, ".neardata") == 0
	  || strncmp (section, ".neardata.", 10) == 0
	  || strncmp (section, ".gnu.linkonce.s.", 16) == 0
	  || strcmp (section, ".bss") == 0
	  || strncmp (section, ".bss.", 5) == 0
	  || strncmp (section, ".gnu.linkonce.sb.", 17) == 0
	  || strcmp (section, ".rodata") == 0
	  || strncmp (section, ".rodata.", 8) == 0
	  || strncmp (section, ".gnu.linkonce.s2.", 17) == 0)
	return true;
    }
  else
    return PLACE_IN_SDATA_P (exp);

  return false;
}
| |
| /* Return a section for X. The only special thing we do here is to |
| honor small data. We don't have a tree type, so we can't use the |
| PLACE_IN_SDATA_P macro we use everywhere else; we choose to place |
| everything sized 8 bytes or smaller into small data. */ |
| |
| static section * |
| c6x_select_rtx_section (machine_mode mode, rtx x, |
| unsigned HOST_WIDE_INT align) |
| { |
| if (c6x_sdata_mode == C6X_SDATA_ALL |
| || (c6x_sdata_mode != C6X_SDATA_NONE && GET_MODE_SIZE (mode) <= 8)) |
| /* ??? Consider using mergeable sdata sections. */ |
| return sdata_section; |
| else |
| return default_elf_select_rtx_section (mode, x, align); |
| } |
| |
| static section * |
| c6x_elf_select_section (tree decl, int reloc, |
| unsigned HOST_WIDE_INT align) |
| { |
| const char *sname = NULL; |
| unsigned int flags = SECTION_WRITE; |
| if (c6x_in_small_data_p (decl)) |
| { |
| switch (categorize_decl_for_section (decl, reloc)) |
| { |
| case SECCAT_SRODATA: |
| sname = ".rodata"; |
| flags = 0; |
| break; |
| case SECCAT_SDATA: |
| sname = ".neardata"; |
| break; |
| case SECCAT_SBSS: |
| sname = ".bss"; |
| flags |= SECTION_BSS; |
| default: |
| break; |
| } |
| } |
| else |
| { |
| switch (categorize_decl_for_section (decl, reloc)) |
| { |
| case SECCAT_DATA: |
| sname = ".fardata"; |
| break; |
| case SECCAT_DATA_REL: |
| sname = ".fardata.rel"; |
| break; |
| case SECCAT_DATA_REL_LOCAL: |
| sname = ".fardata.rel.local"; |
| break; |
| case SECCAT_DATA_REL_RO: |
| sname = ".fardata.rel.ro"; |
| break; |
| case SECCAT_DATA_REL_RO_LOCAL: |
| sname = ".fardata.rel.ro.local"; |
| break; |
| case SECCAT_BSS: |
| sname = ".far"; |
| flags |= SECTION_BSS; |
| break; |
| case SECCAT_RODATA: |
| sname = ".const"; |
| flags = 0; |
| break; |
| case SECCAT_SRODATA: |
| case SECCAT_SDATA: |
| case SECCAT_SBSS: |
| gcc_unreachable (); |
| default: |
| break; |
| } |
| } |
| if (sname) |
| { |
| /* We might get called with string constants, but get_named_section |
| doesn't like them as they are not DECLs. Also, we need to set |
| flags in that case. */ |
| if (!DECL_P (decl)) |
| return get_section (sname, flags, NULL); |
| return get_named_section (decl, sname, reloc); |
| } |
| |
| return default_elf_select_section (decl, reloc, align); |
| } |
| |
| /* Build up a unique section name, expressed as a |
| STRING_CST node, and assign it to DECL_SECTION_NAME (decl). |
| RELOC indicates whether the initial value of EXP requires |
| link-time relocations. */ |
| |
static void ATTRIBUTE_UNUSED
c6x_elf_unique_section (tree decl, int reloc)
{
  const char *prefix = NULL;
  /* We only need to use .gnu.linkonce if we don't have COMDAT groups.  */
  bool one_only = DECL_COMDAT_GROUP (decl) && !HAVE_COMDAT_GROUP;

  if (c6x_in_small_data_p (decl))
    {
      /* Small-data declarations: use the near section names, or short
	 abbreviations when emitting .gnu.linkonce sections.  */
      switch (categorize_decl_for_section (decl, reloc))
	{
	case SECCAT_SDATA:
	  prefix = one_only ? ".s" : ".neardata";
	  break;
	case SECCAT_SBSS:
	  prefix = one_only ? ".sb" : ".bss";
	  break;
	case SECCAT_SRODATA:
	  prefix = one_only ? ".s2" : ".rodata";
	  break;
	case SECCAT_RODATA_MERGE_STR:
	case SECCAT_RODATA_MERGE_STR_INIT:
	case SECCAT_RODATA_MERGE_CONST:
	case SECCAT_RODATA:
	case SECCAT_DATA:
	case SECCAT_DATA_REL:
	case SECCAT_DATA_REL_LOCAL:
	case SECCAT_DATA_REL_RO:
	case SECCAT_DATA_REL_RO_LOCAL:
	  /* These categories never apply to small data.  */
	  gcc_unreachable ();
	default:
	  /* Everything else we place into default sections and hope for the
	     best.  */
	  break;
	}
    }
  else
    {
      /* Far declarations: initialized data goes into .fardata variants,
	 BSS into .far, read-only data into .const.  */
      switch (categorize_decl_for_section (decl, reloc))
	{
	case SECCAT_DATA:
	case SECCAT_DATA_REL:
	case SECCAT_DATA_REL_LOCAL:
	case SECCAT_DATA_REL_RO:
	case SECCAT_DATA_REL_RO_LOCAL:
	  prefix = one_only ? ".fd" : ".fardata";
	  break;
	case SECCAT_BSS:
	  prefix = one_only ? ".fb" : ".far";
	  break;
	case SECCAT_RODATA:
	case SECCAT_RODATA_MERGE_STR:
	case SECCAT_RODATA_MERGE_STR_INIT:
	case SECCAT_RODATA_MERGE_CONST:
	  prefix = one_only ? ".fr" : ".const";
	  break;
	case SECCAT_SRODATA:
	case SECCAT_SDATA:
	case SECCAT_SBSS:
	  /* Small-data categories cannot occur on this path.  */
	  gcc_unreachable ();
	default:
	  break;
	}
    }

  if (prefix)
    {
      const char *name, *linkonce;
      char *string;

      name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
      name = targetm.strip_name_encoding (name);

      /* If we're using one_only, then there needs to be a .gnu.linkonce
	 prefix to the section name.  */
      linkonce = one_only ? ".gnu.linkonce" : "";

      string = ACONCAT ((linkonce, prefix, ".", name, NULL));

      set_decl_section_name (decl, string);
      return;
    }
  default_unique_section (decl, reloc);
}
| |
| static unsigned int |
| c6x_section_type_flags (tree decl, const char *name, int reloc) |
| { |
| unsigned int flags = 0; |
| |
| if (strcmp (name, ".far") == 0 |
| || strncmp (name, ".far.", 5) == 0) |
| flags |= SECTION_BSS; |
| |
| flags |= default_section_type_flags (decl, name, reloc); |
| |
| return flags; |
| } |
| |
| /* Checks whether the given CALL_EXPR would use a caller saved |
| register. This is used to decide whether sibling call optimization |
| could be performed on the respective function call. */ |
| |
static bool
c6x_call_saved_register_used (tree call_expr)
{
  CUMULATIVE_ARGS cum_v;
  cumulative_args_t cum;
  HARD_REG_SET call_saved_regset;
  tree parameter;
  machine_mode mode;
  tree type;
  rtx parm_rtx;
  int i;

  INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
  cum = pack_cumulative_args (&cum_v);

  /* The call-saved registers are the complement of the call-used set.  */
  COMPL_HARD_REG_SET (call_saved_regset, call_used_reg_set);
  for (i = 0; i < call_expr_nargs (call_expr); i++)
    {
      parameter = CALL_EXPR_ARG (call_expr, i);
      gcc_assert (parameter);

      /* For an undeclared variable passed as parameter we will get
	 an ERROR_MARK node here.  */
      if (TREE_CODE (parameter) == ERROR_MARK)
	return true;

      type = TREE_TYPE (parameter);
      gcc_assert (type);

      mode = TYPE_MODE (type);
      gcc_assert (mode);

      /* An argument passed by reference occupies a pointer instead.  */
      if (pass_by_reference (&cum_v, mode, type, true))
	{
	  mode = Pmode;
	  type = build_pointer_type (type);
	}

      /* Query the register for this argument, then advance CUM for the
	 next iteration — the order of these two calls matters.  */
      parm_rtx = c6x_function_arg (cum, mode, type, 0);

      c6x_function_arg_advance (cum, mode, type, 0);

      /* A stack-passed argument has no register to check.  */
      if (!parm_rtx)
	continue;

      /* Single-register argument overlapping a call-saved register?  */
      if (REG_P (parm_rtx)
	  && overlaps_hard_reg_set_p (call_saved_regset, GET_MODE (parm_rtx),
				      REGNO (parm_rtx)))
	return true;
      /* Or any piece of a multi-register (PARALLEL) argument?  */
      if (GET_CODE (parm_rtx) == PARALLEL)
	{
	  int n = XVECLEN (parm_rtx, 0);
	  while (n-- > 0)
	    {
	      rtx x = XEXP (XVECEXP (parm_rtx, 0, n), 0);
	      if (REG_P (x)
		  && overlaps_hard_reg_set_p (call_saved_regset,
					      GET_MODE (x), REGNO (x)))
		return true;
	    }
	}
    }
  return false;
}
| |
| /* Decide whether we can make a sibling call to a function. DECL is the |
| declaration of the function being targeted by the call and EXP is the |
| CALL_EXPR representing the call. */ |
| |
| static bool |
| c6x_function_ok_for_sibcall (tree decl, tree exp) |
| { |
| /* Registers A10, A12, B10 and B12 are available as arguments |
| register but unfortunately caller saved. This makes functions |
| needing these registers for arguments not suitable for |
| sibcalls. */ |
| if (c6x_call_saved_register_used (exp)) |
| return false; |
| |
| if (!flag_pic) |
| return true; |
| |
| if (TARGET_DSBT) |
| { |
| /* When compiling for DSBT, the calling function must be local, |
| so that when we reload B14 in the sibcall epilogue, it will |
| not change its value. */ |
| struct cgraph_local_info *this_func; |
| |
| if (!decl) |
| /* Not enough information. */ |
| return false; |
| |
| this_func = cgraph_node::local_info (current_function_decl); |
| return this_func->local; |
| } |
| |
| return true; |
| } |
| |
| /* Return true if DECL is known to be linked into section SECTION. */ |
| |
| static bool |
| c6x_function_in_section_p (tree decl, section *section) |
| { |
| /* We can only be certain about functions defined in the same |
| compilation unit. */ |
| if (!TREE_STATIC (decl)) |
| return false; |
| |
| /* Make sure that SYMBOL always binds to the definition in this |
| compilation unit. */ |
| if (!targetm.binds_local_p (decl)) |
| return false; |
| |
| /* If DECL_SECTION_NAME is set, assume it is trustworthy. */ |
| if (!DECL_SECTION_NAME (decl)) |
| { |
| /* Make sure that we will not create a unique section for DECL. */ |
| if (flag_function_sections || DECL_COMDAT_GROUP (decl)) |
| return false; |
| } |
| |
| return function_section (decl) == section; |
| } |
| |
| /* Return true if a call to OP, which is a SYMBOL_REF, must be expanded |
| as a long call. */ |
| bool |
| c6x_long_call_p (rtx op) |
| { |
| tree decl; |
| |
| if (!TARGET_LONG_CALLS) |
| return false; |
| |
| decl = SYMBOL_REF_DECL (op); |
| |
| /* Try to determine whether the symbol is in the same section as the current |
| function. Be conservative, and only cater for cases in which the |
| whole of the current function is placed in the same section. */ |
| if (decl != NULL_TREE |
| && !flag_reorder_blocks_and_partition |
| && TREE_CODE (decl) == FUNCTION_DECL |
| && c6x_function_in_section_p (decl, current_function_section ())) |
| return false; |
| |
| return true; |
| } |
| |
| /* Emit the sequence for a call. */ |
void
c6x_expand_call (rtx retval, rtx address, bool sibcall)
{
  rtx callee = XEXP (address, 0);
  rtx call_insn;

  /* Force addresses we cannot call directly into a register.  */
  if (!c6x_call_operand (callee, Pmode))
    {
      callee = force_reg (Pmode, callee);
      address = change_address (address, Pmode, callee);
    }
  call_insn = gen_rtx_CALL (VOIDmode, address, const0_rtx);
  if (sibcall)
    {
      call_insn = emit_call_insn (call_insn);
      /* Record a use of B3 on the call so it is kept live up to the
	 sibcall (B3 holds the caller's return address).  */
      use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
	       gen_rtx_REG (Pmode, REG_B3));
    }
  else
    {
      /* Wrap the call in a SET when a return value is expected.  */
      if (retval == NULL_RTX)
	call_insn = emit_call_insn (call_insn);
      else
	call_insn = emit_call_insn (gen_rtx_SET (GET_MODE (retval), retval,
						 call_insn));
    }
  /* PIC calls implicitly use the PIC register.  */
  if (flag_pic)
    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), pic_offset_table_rtx);
}
| |
| /* Legitimize PIC addresses. If the address is already position-independent, |
| we return ORIG. Newly generated position-independent addresses go into a |
| reg. This is REG if nonzero, otherwise we allocate register(s) as |
| necessary. PICREG is the register holding the pointer to the PIC offset |
| table. */ |
| |
static rtx
legitimize_pic_address (rtx orig, rtx reg, rtx picreg)
{
  rtx addr = orig;
  rtx new_rtx = orig;

  if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
    {
      int unspec = UNSPEC_LOAD_GOT;
      rtx tmp;

      if (reg == 0)
	{
	  gcc_assert (can_create_pseudo_p ());
	  reg = gen_reg_rtx (Pmode);
	}
      if (flag_pic == 2)
	{
	  /* -fPIC: materialize the GOT offset with a high/lo_sum pair,
	     then load the GOT entry relative to PICREG.  */
	  if (can_create_pseudo_p ())
	    tmp = gen_reg_rtx (Pmode);
	  else
	    tmp = reg;
	  emit_insn (gen_movsi_gotoff_high (tmp, addr));
	  emit_insn (gen_movsi_gotoff_lo_sum (tmp, tmp, addr));
	  emit_insn (gen_load_got_gotoff (reg, picreg, tmp));
	}
      else
	{
	  /* -fpic: a single load from PICREG plus a small GOT offset.  */
	  tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), unspec);
	  new_rtx = gen_const_mem (Pmode, gen_rtx_PLUS (Pmode, picreg, tmp));

	  emit_move_insn (reg, new_rtx);
	}
      if (picreg == pic_offset_table_rtx)
	crtl->uses_pic_offset_table = 1;
      return reg;
    }

  else if (GET_CODE (addr) == CONST || GET_CODE (addr) == PLUS)
    {
      rtx base;

      if (GET_CODE (addr) == CONST)
	{
	  addr = XEXP (addr, 0);
	  gcc_assert (GET_CODE (addr) == PLUS);
	}

      /* Already an offset from the PIC register; nothing to do.  */
      if (XEXP (addr, 0) == picreg)
	return orig;

      if (reg == 0)
	{
	  gcc_assert (can_create_pseudo_p ());
	  reg = gen_reg_rtx (Pmode);
	}

      /* Recursively legitimize both operands of the PLUS.  Reuse REG
	 for the second operand only if the first did not claim it.  */
      base = legitimize_pic_address (XEXP (addr, 0), reg, picreg);
      addr = legitimize_pic_address (XEXP (addr, 1),
				     base == reg ? NULL_RTX : reg,
				     picreg);

      if (GET_CODE (addr) == CONST_INT)
	{
	  gcc_assert (! reload_in_progress && ! reload_completed);
	  addr = force_reg (Pmode, addr);
	}

      /* Reassociate so the constant term ends up outermost.  */
      if (GET_CODE (addr) == PLUS && CONSTANT_P (XEXP (addr, 1)))
	{
	  base = gen_rtx_PLUS (Pmode, base, XEXP (addr, 0));
	  addr = XEXP (addr, 1);
	}

      return gen_rtx_PLUS (Pmode, base, addr);
    }

  return new_rtx;
}
| |
| /* Expand a move operation in mode MODE. The operands are in OPERANDS. |
| Returns true if no further code must be generated, false if the caller |
| should generate an insn to move OPERANDS[1] to OPERANDS[0]. */ |
| |
bool
expand_move (rtx *operands, machine_mode mode)
{
  rtx dest = operands[0];
  rtx op = operands[1];

  /* Stores to memory must come from a register.  */
  if ((reload_in_progress | reload_completed) == 0
      && GET_CODE (dest) == MEM && GET_CODE (op) != REG)
    operands[1] = force_reg (mode, op);
  else if (mode == SImode && symbolic_operand (op, SImode))
    {
      if (flag_pic)
	{
	  if (sdata_symbolic_operand (op, SImode))
	    {
	      /* Small-data symbols are addressed relative to the base
		 register held in pic_offset_table_rtx.  */
	      emit_insn (gen_load_sdata_pic (dest, pic_offset_table_rtx, op));
	      crtl->uses_pic_offset_table = 1;
	      return true;
	    }
	  else
	    {
	      /* Go through the GOT; once pseudos can no longer be
		 created, reuse DEST as the scratch register.  */
	      rtx temp = (reload_completed || reload_in_progress
			  ? dest : gen_reg_rtx (Pmode));

	      operands[1] = legitimize_pic_address (op, temp,
						    pic_offset_table_rtx);
	    }
	}
      else if (reload_completed
	       && !sdata_symbolic_operand (op, SImode))
	{
	  /* Non-PIC: build the symbolic address with a high/lo_sum
	     instruction pair.  */
	  emit_insn (gen_movsi_high (dest, op));
	  emit_insn (gen_movsi_lo_sum (dest, dest, op));
	  return true;
	}
    }
  return false;
}
| |
| /* This function is called when we're about to expand an integer compare |
| operation which performs COMPARISON. It examines the second operand, |
| and if it is an integer constant that cannot be used directly on the |
| current machine in a comparison insn, it returns true. */ |
| bool |
| c6x_force_op_for_comparison_p (enum rtx_code code, rtx op) |
| { |
| if (!CONST_INT_P (op) || satisfies_constraint_Iu4 (op)) |
| return false; |
| |
| if ((code == EQ || code == LT || code == GT) |
| && !satisfies_constraint_Is5 (op)) |
| return true; |
| if ((code == GTU || code == LTU) |
| && (!TARGET_INSNS_64 || !satisfies_constraint_Iu5 (op))) |
| return true; |
| |
| return false; |
| } |
| |
| /* Emit comparison instruction if necessary, returning the expression |
| that holds the compare result in the proper mode. Return the comparison |
| that should be used in the jump insn. */ |
| |
rtx
c6x_expand_compare (rtx comparison, machine_mode mode)
{
  enum rtx_code code = GET_CODE (comparison);
  rtx op0 = XEXP (comparison, 0);
  rtx op1 = XEXP (comparison, 1);
  rtx cmp;
  enum rtx_code jump_code = code;
  machine_mode op_mode = GET_MODE (op0);

  /* DImode (in)equality against zero: OR the two halves together and
     test the SImode result instead.  */
  if (op_mode == DImode && (code == NE || code == EQ) && op1 == const0_rtx)
    {
      rtx t = gen_reg_rtx (SImode);
      emit_insn (gen_iorsi3 (t, gen_lowpart (SImode, op0),
			     gen_highpart (SImode, op0)));
      op_mode = SImode;
      cmp = t;
    }
  else if (op_mode == DImode)
    {
      /* General DImode comparison, combined from SImode compares of the
	 two halves.  */
      rtx lo[2], high[2];
      rtx cmp1, cmp2;

      /* Codes without a direct equivalent are handled by reversing the
	 condition and jumping on EQ instead of NE.  */
      if (code == NE || code == GEU || code == LEU || code == GE || code == LE)
	{
	  code = reverse_condition (code);
	  jump_code = EQ;
	}
      else
	jump_code = NE;

      split_di (&op0, 1, lo, high);
      split_di (&op1, 1, lo + 1, high + 1);

      if (c6x_force_op_for_comparison_p (code, high[1])
	  || c6x_force_op_for_comparison_p (EQ, high[1]))
	high[1] = force_reg (SImode, high[1]);

      cmp1 = gen_reg_rtx (SImode);
      cmp2 = gen_reg_rtx (SImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp1,
			      gen_rtx_fmt_ee (code, SImode, high[0], high[1])));
      if (code == EQ)
	{
	  /* Equality: both halves must be equal.  */
	  if (c6x_force_op_for_comparison_p (code, lo[1]))
	    lo[1] = force_reg (SImode, lo[1]);
	  emit_insn (gen_rtx_SET (VOIDmode, cmp2,
				  gen_rtx_fmt_ee (code, SImode, lo[0], lo[1])));
	  emit_insn (gen_andsi3 (cmp1, cmp1, cmp2));
	}
      else
	{
	  /* Ordering: true if the high parts compare strictly, or if
	     they are equal and the low parts compare unsigned.  */
	  emit_insn (gen_rtx_SET (VOIDmode, cmp2,
				  gen_rtx_EQ (SImode, high[0], high[1])));
	  /* The low-part comparison is always unsigned.  */
	  if (code == GT)
	    code = GTU;
	  else if (code == LT)
	    code = LTU;
	  if (c6x_force_op_for_comparison_p (code, lo[1]))
	    lo[1] = force_reg (SImode, lo[1]);
	  emit_insn (gen_cmpsi_and (cmp2, gen_rtx_fmt_ee (code, SImode,
							  lo[0], lo[1]),
				    lo[0], lo[1], cmp2));
	  emit_insn (gen_iorsi3 (cmp1, cmp1, cmp2));
	}
      cmp = cmp1;
    }
  else if (TARGET_FP && !flag_finite_math_only
	   && (op_mode == DFmode || op_mode == SFmode)
	   && code != EQ && code != NE && code != LT && code != GT
	   && code != UNLE && code != UNGE)
    {
      /* Hardware FP with NaNs: comparisons without a direct insn are
	 built from up to three simpler compares OR'd together.  */
      enum rtx_code code1, code2, code3;
      rtx (*fn) (rtx, rtx, rtx, rtx, rtx);

      jump_code = NE;
      code3 = UNKNOWN;
      switch (code)
	{
	case UNLT:
	case UNGT:
	  jump_code = EQ;
	  /* fall through */
	case LE:
	case GE:
	  code1 = code == LE || code == UNGT ? LT : GT;
	  code2 = EQ;
	  break;

	case UNORDERED:
	  jump_code = EQ;
	  /* fall through */
	case ORDERED:
	  code3 = EQ;
	  /* fall through */
	case LTGT:
	  code1 = LT;
	  code2 = GT;
	  break;

	case UNEQ:
	  code1 = LT;
	  code2 = GT;
	  jump_code = EQ;
	  break;

	default:
	  gcc_unreachable ();
	}

      cmp = gen_reg_rtx (SImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp,
			      gen_rtx_fmt_ee (code1, SImode, op0, op1)));
      fn = op_mode == DFmode ? gen_cmpdf_ior : gen_cmpsf_ior;
      emit_insn (fn (cmp, gen_rtx_fmt_ee (code2, SImode, op0, op1),
		     op0, op1, cmp));
      if (code3 != UNKNOWN)
	emit_insn (fn (cmp, gen_rtx_fmt_ee (code3, SImode, op0, op1),
		       op0, op1, cmp));
    }
  else if (op_mode == SImode && (code == NE || code == EQ) && op1 == const0_rtx)
    /* A plain SImode test against zero can jump on the operand itself.  */
    cmp = op0;
  else
    {
      bool is_fp_libfunc;
      is_fp_libfunc = !TARGET_FP && (op_mode == DFmode || op_mode == SFmode);

      /* Normalize codes that lack a direct machine compare by reversing
	 them (and jumping on EQ); the FP library functions support all
	 six codes directly, so skip this for them.  */
      if ((code == NE || code == GEU || code == LEU || code == GE || code == LE)
	  && !is_fp_libfunc)
	{
	  code = reverse_condition (code);
	  jump_code = EQ;
	}
      else if (code == UNGE)
	{
	  code = LT;
	  jump_code = EQ;
	}
      else if (code == UNLE)
	{
	  code = GT;
	  jump_code = EQ;
	}
      else
	jump_code = NE;

      if (is_fp_libfunc)
	{
	  rtx_insn *insns;
	  rtx libfunc;
	  switch (code)
	    {
	    case EQ:
	      libfunc = op_mode == DFmode ? eqdf_libfunc : eqsf_libfunc;
	      break;
	    case NE:
	      libfunc = op_mode == DFmode ? nedf_libfunc : nesf_libfunc;
	      break;
	    case GT:
	      libfunc = op_mode == DFmode ? gtdf_libfunc : gtsf_libfunc;
	      break;
	    case GE:
	      libfunc = op_mode == DFmode ? gedf_libfunc : gesf_libfunc;
	      break;
	    case LT:
	      libfunc = op_mode == DFmode ? ltdf_libfunc : ltsf_libfunc;
	      break;
	    case LE:
	      libfunc = op_mode == DFmode ? ledf_libfunc : lesf_libfunc;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  start_sequence ();

	  cmp = emit_library_call_value (libfunc, 0, LCT_CONST, SImode, 2,
					 op0, op_mode, op1, op_mode);
	  insns = get_insns ();
	  end_sequence ();

	  /* Emit as a libcall block so the result carries an equivalent
	     of the comparison for later optimization.  */
	  emit_libcall_block (insns, cmp, cmp,
			      gen_rtx_fmt_ee (code, SImode, op0, op1));
	}
      else
	{
	  cmp = gen_reg_rtx (SImode);
	  if (c6x_force_op_for_comparison_p (code, op1))
	    op1 = force_reg (SImode, op1);
	  emit_insn (gen_rtx_SET (VOIDmode, cmp,
				  gen_rtx_fmt_ee (code, SImode, op0, op1)));
	}
    }

  return gen_rtx_fmt_ee (jump_code, mode, cmp, const0_rtx);
}
| |
| /* Return one word of double-word value OP. HIGH_P is true to select the |
| high part, false to select the low part. When encountering auto-increment |
| addressing, we make the assumption that the low part is going to be accessed |
| first. */ |
| |
rtx
c6x_subword (rtx op, bool high_p)
{
  unsigned int byte;
  machine_mode mode;

  mode = GET_MODE (op);
  /* Constants have VOIDmode; treat them as DImode values.  */
  if (mode == VOIDmode)
    mode = DImode;

  /* Byte offset of the requested half, adjusted for endianness.  */
  if (TARGET_BIG_ENDIAN ? !high_p : high_p)
    byte = UNITS_PER_WORD;
  else
    byte = 0;

  if (MEM_P (op))
    {
      rtx addr = XEXP (op, 0);
      if (GET_CODE (addr) == PLUS || REG_P (addr))
	return adjust_address (op, word_mode, byte);
      /* FIXME: should really support autoincrement addressing for
	 multi-word modes.  */
      gcc_unreachable ();
    }

  return simplify_gen_subreg (word_mode, op, mode, byte);
}
| |
| /* Split one or more DImode RTL references into pairs of SImode |
| references. The RTL can be REG, offsettable MEM, integer constant, or |
| CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to |
| split and "num" is its length. lo_half and hi_half are output arrays |
| that parallel "operands". */ |
| |
| void |
| split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[]) |
| { |
| while (num--) |
| { |
| rtx op = operands[num]; |
| |
| lo_half[num] = c6x_subword (op, false); |
| hi_half[num] = c6x_subword (op, true); |
| } |
| } |
| |
| /* Return true if VAL is a mask valid for a clr instruction. */ |
| bool |
| c6x_valid_mask_p (HOST_WIDE_INT val) |
| { |
| int i; |
| for (i = 0; i < 32; i++) |
| if (!(val & ((unsigned HOST_WIDE_INT)1 << i))) |
| break; |
| for (; i < 32; i++) |
| if (val & ((unsigned HOST_WIDE_INT)1 << i)) |
| break; |
| for (; i < 32; i++) |
| if (!(val & ((unsigned HOST_WIDE_INT)1 << i))) |
| return false; |
| return true; |
| } |
| |
| /* Expand a block move for a movmemM pattern. */ |
| |
| bool |
| c6x_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp, |
| rtx expected_align_exp ATTRIBUTE_UNUSED, |
| rtx expected_size_exp ATTRIBUTE_UNUSED) |
| { |
| unsigned HOST_WIDE_INT align = 1; |
| unsigned HOST_WIDE_INT src_mem_align, dst_mem_align, min_mem_align; |
| unsigned HOST_WIDE_INT count = 0, offset = 0; |
| unsigned int biggest_move = TARGET_STDW ? 8 : 4; |
| |
| if (CONST_INT_P (align_exp)) |
| align = INTVAL (align_exp); |
| |
| src_mem_align = MEM_ALIGN (src) / BITS_PER_UNIT; |
| dst_mem_align = MEM_ALIGN (dst) / BITS_PER_UNIT; |
| min_mem_align = MIN (src_mem_align, dst_mem_align); |
| |
| if (min_mem_align > align) |
| align = min_mem_align / BITS_PER_UNIT; |
| if (src_mem_align < align) |
| src_mem_align = align; |
| if (dst_mem_align < align) |
| dst_mem_align = align; |
| |
| if (CONST_INT_P (count_exp)) |
| count = INTVAL (count_exp); |
| else |
| return false; |
| |
| /* Make sure we don't need to care about overflow later on. */ |
| if (count > ((unsigned HOST_WIDE_INT) 1 << 30)) |
| return false; |
| |
| if (count >= 28 && (count & 3) == 0 && align >= 4) |
| { |
| tree dst_expr = MEM_EXPR (dst); |
| tree src_expr = MEM_EXPR (src); |
| rtx fn = TARGET_INSNS_64PLUS ? strasgi64p_libfunc : strasgi_libfunc; |
| rtx srcreg = force_reg (Pmode, XEXP (src, 0)); |
| rtx dstreg = force_reg (Pmode, XEXP (dst, 0)); |
| |
| if (src_expr) |
| mark_addressable (src_expr); |
| if (dst_expr) |
| mark_addressable (dst_expr); |
| emit_library_call (fn, LCT_NORMAL, VOIDmode, 3, |
| dstreg, Pmode, srcreg, Pmode, count_exp, SImode); |
| return true; |
| } |
| |
| if (biggest_move > align && !TARGET_INSNS_64) |
| biggest_move = align; |
| |
| if (count / biggest_move > 7) |
| return false; |
| |
| while (count > 0) |
| { |
| rtx reg, reg_lowpart; |
| machine_mode srcmode, dstmode; |
| unsigned HOST_WIDE_INT src_size, dst_size, src_left; |
| int shift; |
| rtx srcmem, dstmem; |
| |
| while (biggest_move > count) |
| biggest_move /= 2; |
| |
| src_size = dst_size = biggest_move; |
| if (src_size > src_mem_align && src_size == 2) |
| src_size = 1; |
| if (dst_size > dst_mem_align && dst_size == 2) |
| dst_size = 1; |
| |
| if (dst_size > src_size) |
| dst_size = src_size; |
| |
| srcmode = mode_for_size (src_size * BITS_PER_UNIT, MODE_INT, 0); |
| dstmode = mode_for_size (dst_size * BITS_PER_UNIT, MODE_INT, 0); |
| if (src_size >= 4) |
| reg_lowpart = reg = gen_reg_rtx (srcmode); |
| else |
| { |
| reg = gen_reg_rtx (SImode); |
| reg_lowpart = gen_lowpart (srcmode, reg); |
| } |
| |
| srcmem = adjust_address (copy_rtx (src), srcmode, offset); |
| |
| if (src_size > src_mem_align) |
| { |
| enum insn_code icode = (srcmode == SImode ? CODE_FOR_movmisalignsi |
| : CODE_FOR_movmisaligndi); |
| emit_insn (GEN_FCN (icode) (reg_lowpart, srcmem)); |
| } |
| else |
| emit_move_insn (reg_lowpart, srcmem); |
| |
| src_left = src_size; |
| shift = TARGET_BIG_ENDIAN ? (src_size - dst_size) * BITS_PER_UNIT : 0; |
| while (src_left > 0) |
| { |
| rtx dstreg = reg_lowpart; |
| |
| if (src_size > dst_size) |
| { |
| rtx srcword = reg; |
| int shift_amount = shift & (BITS_PER_WORD - 1); |
| if (src_size > 4) |
| srcword = operand_subword_force (srcword, src_left >= 4 ? 0 : 4, |
| SImode); |
| if (shift_amount > 0) |
| { |
| dstreg = gen_reg_rtx (SImode); |
| emit_insn (gen_lshrsi3 (dstreg, srcword, |
| GEN_INT (shift_amount))); |
| } |
| else |
| dstreg = srcword; |
| dstreg = gen_lowpart (dstmode, dstreg); |
| } |
| |
| dstmem = adjust_address (copy_rtx (dst), dstmode, offset); |
| if (dst_size > dst_mem_align) |
| { |
| enum insn_code icode = (dstmode == SImode ? CODE_FOR_movmisalignsi |
| : CODE_FOR_movmisaligndi); |
| emit_insn (GEN_FCN (icode) (dstmem, dstreg)); |
| } |
| else |
| emit_move_insn (dstmem, dstreg); |
| |
| if (TARGET_BIG_ENDIAN) |
| shift -= dst_size * BITS_PER_UNIT; |
| else |
| shift += dst_size * BITS_PER_UNIT; |
| offset += dst_size; |
| src_left -= dst_size; |
| } |
| count -= src_size; |
| } |
| return true; |
| } |
| |
| /* Subroutine of print_address_operand, print a single address offset OFF for |
| a memory access of mode MEM_MODE, choosing between normal form and scaled |
| form depending on the type of the insn. Misaligned memory references must |
| use the scaled form. */ |
| |
static void
print_address_offset (FILE *file, rtx off, machine_mode mem_mode)
{
  rtx pat;

  if (c6x_current_insn != NULL_RTX)
    {
      /* Dig out the insn pattern, looking through predication
	 (COND_EXEC) and PARALLELs, to detect misaligned accesses.  */
      pat = PATTERN (c6x_current_insn);
      if (GET_CODE (pat) == COND_EXEC)
	pat = COND_EXEC_CODE (pat);
      if (GET_CODE (pat) == PARALLEL)
	pat = XVECEXP (pat, 0, 0);

      if (GET_CODE (pat) == SET
	  && GET_CODE (SET_SRC (pat)) == UNSPEC
	  && XINT (SET_SRC (pat), 1) == UNSPEC_MISALIGNED_ACCESS)
	{
	  /* Misaligned accesses use the scaled form "[offset]", with the
	     offset measured in units of the access size; it must divide
	     evenly.  */
	  gcc_assert (CONST_INT_P (off)
		      && (INTVAL (off) & (GET_MODE_SIZE (mem_mode) - 1)) == 0);
	  fprintf (file, "[" HOST_WIDE_INT_PRINT_DEC "]",
		   INTVAL (off) / GET_MODE_SIZE (mem_mode));
	  return;
	}
    }
  /* Normal form: "(offset)" in bytes.  */
  fputs ("(", file);
  output_address (off);
  fputs (")", file);
}
| |
/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P.  The valid punctuation
   codes are '$', '.' and '|'.  */

static bool
c6x_print_operand_punct_valid_p (unsigned char c)
{
  switch (c)
    {
    case '$':
    case '.':
    case '|':
      return true;
    default:
      return false;
    }
}
| |
| static void c6x_print_operand (FILE *, rtx, int); |
| |
| /* Subroutine of c6x_print_operand; used to print a memory reference X to FILE. */ |
| |
static void
c6x_print_address_operand (FILE *file, rtx x, machine_mode mem_mode)
{
  rtx off;
  switch (GET_CODE (x))
    {
    case PRE_MODIFY:
    case POST_MODIFY:
      /* Print "base++(off)" / "base--(off)" (post) or "++base(off)" /
	 "--base(off)" (pre), making the offset positive and folding the
	 sign into the ++/-- marker.  */
      if (GET_CODE (x) == POST_MODIFY)
	output_address (XEXP (x, 0));
      off = XEXP (XEXP (x, 1), 1);
      /* Stack-pointer modifications must keep SP moving downwards:
	 pre-modify only increments, post-modify only decrements.  */
      if (XEXP (x, 0) == stack_pointer_rtx)
	{
	  if (GET_CODE (x) == PRE_MODIFY)
	    gcc_assert (INTVAL (off) > 0);
	  else
	    gcc_assert (INTVAL (off) < 0);
	}
      if (CONST_INT_P (off) && INTVAL (off) < 0)
	{
	  fprintf (file, "--");
	  off = GEN_INT (-INTVAL (off));
	}
      else
	fprintf (file, "++");
      if (GET_CODE (x) == PRE_MODIFY)
	output_address (XEXP (x, 0));
      print_address_offset (file, off, mem_mode);
      break;

    case PLUS:
      /* Print "base+(off)" or "base-(off)".  */
      off = XEXP (x, 1);
      if (CONST_INT_P (off) && INTVAL (off) < 0)
	{
	  fprintf (file, "-");
	  off = GEN_INT (-INTVAL (off));
	}
      else
	fprintf (file, "+");
      output_address (XEXP (x, 0));
      print_address_offset (file, off, mem_mode);
      break;

    /* Simple auto-increment/decrement forms always step by one unit.  */
    case PRE_DEC:
      gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
      fprintf (file, "--");
      output_address (XEXP (x, 0));
      fprintf (file, "[1]");
      break;
    case PRE_INC:
      fprintf (file, "++");
      output_address (XEXP (x, 0));
      fprintf (file, "[1]");
      break;
    case POST_INC:
      gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
      output_address (XEXP (x, 0));
      fprintf (file, "++[1]");
      break;
    case POST_DEC:
      output_address (XEXP (x, 0));
      fprintf (file, "--[1]");
      break;

    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Bare symbolic addresses are only valid for small data, which is
	 addressed relative to B14.  */
      gcc_assert (sdata_symbolic_operand (x, Pmode));
      fprintf (file, "+B14(");
      output_addr_const (file, x);
      fprintf (file, ")");
      break;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_LOAD_GOT:
	  fputs ("$GOT(", file);
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs (")", file);
	  break;
	case UNSPEC_LOAD_SDATA:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_assert (GET_CODE (x) != MEM);
      c6x_print_operand (file, x, 0);
      break;
    }
}
| |
| /* Return a single character, which is either 'l', 's', 'd' or 'm', which |
| specifies the functional unit used by INSN. */ |
| |
| char |
| c6x_get_unit_specifier (rtx_insn *insn) |
| { |
| enum attr_units units; |
| |
| if (insn_info.exists ()) |
| { |
| int unit = INSN_INFO_ENTRY (INSN_UID (insn)).reservation; |
| return c6x_unit_names[unit][0]; |
| } |
| |
| units = get_attr_units (insn); |
| switch (units) |
| { |
| case UNITS_D: |
| case UNITS_DL: |
| case UNITS_DS: |
| case UNITS_DLS: |
| case UNITS_D_ADDR: |
| return 'd'; |
| break; |
| case UNITS_L: |
| case UNITS_LS: |
| return 'l'; |
| break; |
| case UNITS_S: |
| return 's'; |
| break; |
| case UNITS_M: |
| return 'm'; |
| break; |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| /* Prints the unit specifier field. */ |
static void
c6x_print_unit_specifier_field (FILE *file, rtx_insn *insn)
{
  enum attr_units units = get_attr_units (insn);
  enum attr_cross cross = get_attr_cross (insn);
  enum attr_dest_regfile rf = get_attr_dest_regfile (insn);
  int half;
  char unitspec;

  if (units == UNITS_D_ADDR)
    {
      /* Address-generating D-unit insns print as ".d<side>t<side>",
	 giving the address side and the data-path side separately.  */
      enum attr_addr_regfile arf = get_attr_addr_regfile (insn);
      int t_half;
      gcc_assert (arf != ADDR_REGFILE_UNKNOWN);
      half = arf == ADDR_REGFILE_A ? 1 : 2;
      t_half = rf == DEST_REGFILE_A ? 1 : 2;
      fprintf (file, ".d%dt%d", half, t_half);
      return;
    }

  if (insn_info.exists ())
    {
      /* After scheduling, print the exact reserved unit, with an "x"
	 suffix for cross-path operands.  */
      int unit = INSN_INFO_ENTRY (INSN_UID (insn)).reservation;
      fputs (".", file);
      fputs (c6x_unit_names[unit], file);
      if (cross == CROSS_Y)
	fputs ("x", file);
      return;
    }

  /* Otherwise derive the unit letter from the attributes; the side
     number comes from the destination register file.  */
  gcc_assert (rf != DEST_REGFILE_UNKNOWN);
  unitspec = c6x_get_unit_specifier (insn);
  half = rf == DEST_REGFILE_A ? 1 : 2;
  fprintf (file, ".%c%d%s", unitspec, half, cross == CROSS_Y ? "x" : "");
}
| |
| /* Output assembly language output for the address ADDR to FILE. */ |
static void
c6x_print_operand_address (FILE *file, rtx addr)
{
  /* Delegate to the common routine; VOIDmode indicates no particular
     memory access mode is associated with this address.  */
  c6x_print_address_operand (file, addr, VOIDmode);
}
| |
| /* Print an operand, X, to FILE, with an optional modifier in CODE. |
| |
| Meaning of CODE: |
| $ -- print the unit specifier field for the instruction. |
   . -- print the predicate for the instruction or an empty string for an
| unconditional one. |
| | -- print "||" if the insn should be issued in parallel with the previous |
| one. |
| |
| C -- print an opcode suffix for a reversed condition |
| d -- H, W or D as a suffix for ADDA, based on the factor given by the |
| operand |
| D -- print either B, H, W or D as a suffix for ADDA, based on the size of |
| the operand |
| J -- print a predicate |
| j -- like J, but use reverse predicate |
| k -- treat a CONST_INT as a register number and print it as a register |
   K -- like k, but print out a doubleword register
| n -- print an integer operand, negated |
| p -- print the low part of a DImode register |
| P -- print the high part of a DImode register |
| r -- print the absolute value of an integer operand, shifted right by 1 |
| R -- print the absolute value of an integer operand, shifted right by 2 |
| f -- the first clear bit in an integer operand assumed to be a mask for |
| a clr instruction |
| F -- the last clear bit in such a mask |
| s -- the first set bit in an integer operand assumed to be a mask for |
| a set instruction |
| S -- the last set bit in such a mask |
| U -- print either 1 or 2, depending on the side of the machine used by |
| the operand */ |
| |
static void
c6x_print_operand (FILE *file, rtx x, int code)
{
  int i;
  HOST_WIDE_INT v;
  tree t;
  machine_mode mode;

  /* '|' ignores X: print the "||" parallel-execution bars unless this
     insn starts a new execute packet (the first insn of a packet is
     marked with TImode).  */
  if (code == '|')
    {
      if (GET_MODE (c6x_current_insn) != TImode)
	fputs ("||", file);
      return;
    }
  /* '$' ignores X: print the unit specifier field for the current
     insn.  */
  if (code == '$')
    {
      c6x_print_unit_specifier_field (file, c6x_current_insn);
      return;
    }

  /* '.' ignores X: print the predicate of the current insn, e.g.
     "[!A0]", or nothing if the insn is unconditional.  */
  if (code == '.')
    {
      x = current_insn_predicate;
      if (x)
	{
	  unsigned int regno = REGNO (XEXP (x, 0));
	  fputs ("[", file);
	  /* EQ means the insn executes when the predicate register is
	     zero, which the assembler spells with "!".  */
	  if (GET_CODE (x) == EQ)
	    fputs ("!", file);
	  fputs (reg_names [regno], file);
	  fputs ("]", file);
	}
      return;
    }

  mode = GET_MODE (x);

  switch (code)
    {
    case 'C':
    case 'c':
      /* 'c' prints the comparison code of X as-is; 'C' prints it with
	 the operands swapped.  */
      {
	enum rtx_code c = GET_CODE (x);
	if (code == 'C')
	  c = swap_condition (c);
	fputs (GET_RTX_NAME (c), file);
      }
      return;

    case 'J':
    case 'j':
      /* Print the predicate register of comparison X, prefixed with
	 "!" as needed; 'J' inverts the sense of 'j'.  */
      {
	unsigned int regno = REGNO (XEXP (x, 0));
	if ((GET_CODE (x) == EQ) == (code == 'J'))
	  fputs ("!", file);
	fputs (reg_names [regno], file);
      }
      return;

    case 'k':
      /* Print the integer constant X as a register name.  */
      gcc_assert (GET_CODE (x) == CONST_INT);
      v = INTVAL (x);
      fprintf (file, "%s", reg_names[v]);
      return;
    case 'K':
      /* Print the even integer constant X as a register pair
	 "hi:lo".  */
      gcc_assert (GET_CODE (x) == CONST_INT);
      v = INTVAL (x);
      gcc_assert ((v & 1) == 0);
      fprintf (file, "%s:%s", reg_names[v + 1], reg_names[v]);
      return;

    case 's':
    case 'S':
    case 'f':
    case 'F':
      /* For an integer constant X viewed as a bit mask: 's' prints the
	 index of the lowest set bit and 'f' the index of the lowest
	 clear bit; 'S' and 'F' print the index of the last bit of the
	 contiguous set (resp. clear) field that starts there.  */
      gcc_assert (GET_CODE (x) == CONST_INT);
      v = INTVAL (x);
      /* Skip the low-order bits that precede the field.  */
      for (i = 0; i < 32; i++)
	{
	  HOST_WIDE_INT tst = v & 1;
	  if (((code == 'f' || code == 'F') && !tst)
	      || ((code == 's' || code == 'S') && tst))
	    break;
	  v >>= 1;
	}
      if (code == 'f' || code == 's')
	{
	  fprintf (file, "%d", i);
	  return;
	}
      /* For 'F'/'S', continue through the field to find its last
	 bit.  */
      for (;i < 32; i++)
	{
	  HOST_WIDE_INT tst = v & 1;
	  if ((code == 'F' && tst) || (code == 'S' && !tst))
	    break;
	  v >>= 1;
	}
      fprintf (file, "%d", i - 1);
      return;

    case 'n':
      /* Print the negated constant.  */
      gcc_assert (GET_CODE (x) == CONST_INT);
      output_addr_const (file, GEN_INT (-INTVAL (x)));
      return;

    case 'r':
      /* Print the absolute value of the constant, divided by two.  */
      gcc_assert (GET_CODE (x) == CONST_INT);
      v = INTVAL (x);
      if (v < 0)
	v = -v;
      output_addr_const (file, GEN_INT (v >> 1));
      return;

    case 'R':
      /* Print the absolute value of the constant, divided by four.  */
      gcc_assert (GET_CODE (x) == CONST_INT);
      v = INTVAL (x);
      if (v < 0)
	v = -v;
      output_addr_const (file, GEN_INT (v >> 2));
      return;

    case 'd':
      /* Print an access-size mnemonic suffix for constant X:
	 2 -> "h" (halfword), 4 -> "w" (word), otherwise "d"
	 (doubleword).  */
      gcc_assert (GET_CODE (x) == CONST_INT);
      v = INTVAL (x);
      fputs (v == 2 ? "h" : v == 4 ? "w" : "d", file);
      return;

    case 'p':
    case 'P':
      /* 'p' prints register X; 'P' prints its pair partner
	 (REGNO + 1).  */
      gcc_assert (GET_CODE (x) == REG);
      v = REGNO (x);
      if (code == 'P')
	v++;
      fputs (reg_names[v], file);
      return;

    case 'D':
      /* Print an alignment suffix ("b", "h" or "w") for the symbolic
	 address X, derived from the decl/type alignment OR'd with any
	 constant offset: an odd combination forces byte access, a
	 2-mod-4 one halfword, otherwise word.  */
      v = 0;
      if (GET_CODE (x) == CONST)
	{
	  x = XEXP (x, 0);
	  gcc_assert (GET_CODE (x) == PLUS);
	  gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
	  v = INTVAL (XEXP (x, 1));
	  x = XEXP (x, 0);

	}
      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      t = SYMBOL_REF_DECL (x);
      if (DECL_P (t))
	v |= DECL_ALIGN_UNIT (t);
      else
	v |= TYPE_ALIGN_UNIT (TREE_TYPE (t));
      if (v & 1)
	fputs ("b", file);
      else if (v & 2)
	fputs ("h", file);
      else
	fputs ("w", file);
      return;

    case 'U':
      /* Print the side of the machine used by operand X: "1" for the
	 A register file, "2" for B.  A small-data memory reference
	 implicitly uses B14, hence side 2.  */
      if (MEM_P (x))
	{
	  x = XEXP (x, 0);
	  /* Strip a PLUS or autoincrement to reach the base.  */
	  if (GET_CODE (x) == PLUS
	      || GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC)
	    x = XEXP (x, 0);
	  if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF)
	    {
	      gcc_assert (sdata_symbolic_operand (x, Pmode));
	      fputs ("2", file);
	      return;
	    }
	}
      gcc_assert (REG_P (x));
      if (A_REGNO_P (REGNO (x)))
	fputs ("1", file);
      if (B_REGNO_P (REGNO (x)))
	fputs ("2", file);
      return;

    default:
      /* No code: print the operand according to its rtx class.  */
      switch (GET_CODE (x))
	{
	case REG:
	  /* An 8-byte value lives in a register pair, printed
	     "hi:lo".  */
	  if (GET_MODE_SIZE (mode) == 8)
	    fprintf (file, "%s:%s", reg_names[REGNO (x) + 1],
		     reg_names[REGNO (x)]);
	  else
	    fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case MEM:
	  fputc ('*', file);
	  gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
	  c6x_print_address_operand (file, XEXP (x, 0), GET_MODE (x));
	  break;

	case SYMBOL_REF:
	  fputc ('(', file);
	  output_addr_const (file, x);
	  fputc (')', file);
	  break;

	case CONST_INT:
	  output_addr_const (file, x);
	  break;

	case CONST_DOUBLE:
	  output_operand_lossage ("invalid const_double operand");
	  break;

	default:
	  output_addr_const (file, x);
	}
    }
}
| |
| /* Return TRUE if OP is a valid memory address with a base register of |
| class C. If SMALL_OFFSET is true, we disallow memory references which would |
| require a long offset with B14/B15. */ |
| |
bool
c6x_mem_operand (rtx op, enum reg_class c, bool small_offset)
{
  machine_mode mode = GET_MODE (op);
  rtx base = XEXP (op, 0);
  switch (GET_CODE (base))
    {
    case REG:
      break;
    case PLUS:
      /* When SMALL_OFFSET and the base is B14/B15 (SP or the PIC
	 register), additionally require the address to pass the
	 stricter no-large-offset validation.  */
      if (small_offset
	  && (XEXP (base, 0) == stack_pointer_rtx
	      || XEXP (base, 0) == pic_offset_table_rtx))
	{
	  if (!c6x_legitimate_address_p_1 (mode, base, true, true))
	    return false;
	}

      /* fall through */
    case PRE_INC:
    case PRE_DEC:
    case PRE_MODIFY:
    case POST_INC:
    case POST_DEC:
    case POST_MODIFY:
      /* Strip the PLUS or autoincrement wrapper to reach the base
	 register, then check its class below.  */
      base = XEXP (base, 0);
      break;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      /* A symbolic address must be a small-data reference; those
	 implicitly use B14 as the base register.  */
      gcc_assert (sdata_symbolic_operand (base, Pmode));
      return !small_offset && c == B_REGS;

    default:
      return false;
    }
  return TEST_HARD_REG_BIT (reg_class_contents[ (int) (c)], REGNO (base));
}
| |
| /* Returns true if X is a valid address for use in a memory reference |
| of mode MODE. If STRICT is true, we do not allow pseudo registers |
| in the address. NO_LARGE_OFFSET is true if we are examining an |
| address for use in a load or store misaligned instruction, or |
| recursively examining an operand inside a PRE/POST_MODIFY. */ |
| |
bool
c6x_legitimate_address_p_1 (machine_mode mode, rtx x, bool strict,
			    bool no_large_offset)
{
  int size, size1;
  HOST_WIDE_INT off;
  enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case PRE_MODIFY:
    case POST_MODIFY:
      /* We can't split these into word-sized pieces yet.  */
      if (!TARGET_STDW && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
	return false;
      /* The modification must have the shape (plus BASE OFFSET), with
	 BASE identical to the register being modified; validate the
	 inner PLUS recursively with NO_LARGE_OFFSET set.  */
      if (GET_CODE (XEXP (x, 1)) != PLUS)
	return false;
      if (!c6x_legitimate_address_p_1 (mode, XEXP (x, 1), strict, true))
	return false;
      if (!rtx_equal_p (XEXP (x, 0), XEXP (XEXP (x, 1), 0)))
	return false;

      /* fall through */
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
      /* We can't split these into word-sized pieces yet.  */
      if (!TARGET_STDW && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
	return false;
      x = XEXP (x, 0);
      if (!REG_P (x))
	return false;

      /* fall through */
    case REG:
      if (strict)
	return REGNO_OK_FOR_BASE_STRICT_P (REGNO (x));
      else
	return REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x));

    case PLUS:
      if (!REG_P (XEXP (x, 0))
	  || !c6x_legitimate_address_p_1 (mode, XEXP (x, 0), strict, false))
	return false;
      /* We cannot ensure currently that both registers end up in the
	 same register file.  */
      if (REG_P (XEXP (x, 1)))
	return false;

      if (mode == BLKmode)
	size = 4;
      else if (mode == VOIDmode)
	/* ??? This can happen during ivopts.  */
	size = 1;
      else
	size = GET_MODE_SIZE (mode);

      /* With -fpic, a small-data reference wrapped in
	 UNSPEC_LOAD_SDATA and based on the PIC register is valid for
	 accesses up to word size.  */
      if (flag_pic
	  && GET_CODE (XEXP (x, 1)) == UNSPEC
	  && XINT (XEXP (x, 1), 1) == UNSPEC_LOAD_SDATA
	  && XEXP (x, 0) == pic_offset_table_rtx
	  && sdata_symbolic_operand (XVECEXP (XEXP (x, 1), 0, 0), SImode))
	return !no_large_offset && size <= 4;
      /* With -fpic (but not -fPIC), a GOT entry load relative to the
	 PIC register is a valid Pmode address.  */
      if (flag_pic == 1
	  && mode == Pmode
	  && GET_CODE (XEXP (x, 1)) == UNSPEC
	  && XINT (XEXP (x, 1), 1) == UNSPEC_LOAD_GOT
	  && XEXP (x, 0) == pic_offset_table_rtx
	  && (GET_CODE (XVECEXP (XEXP (x, 1), 0, 0)) == SYMBOL_REF
	      || GET_CODE (XVECEXP (XEXP (x, 1), 0, 0)) == LABEL_REF))
	return !no_large_offset;
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	return false;

      off = INTVAL (XEXP (x, 1));

      /* If the machine does not have doubleword load/stores, we'll use
	 word size accesses.  */
      size1 = size;
      if (size == 2 * UNITS_PER_WORD && !TARGET_STDW)
	size = UNITS_PER_WORD;

      /* The offset must be naturally aligned for the original access
	 size; the instructions encode it scaled by the access size.  */
      if (((HOST_WIDE_INT)size1 - 1) & off)
	return false;
      off /= size;
      /* Scaled offsets in (-32, 32) are encodable; only up to 28 when
	 a doubleword access will be split into two word accesses, so
	 the second word's offset stays in range.  */
      if (off > -32 && off < (size1 == size ? 32 : 28))
	return true;
      /* Larger offsets (15-bit unsigned, scaled) are only valid
	 relative to the stack pointer for word-or-smaller accesses.  */
      if (no_large_offset || code != PLUS || XEXP (x, 0) != stack_pointer_rtx
	  || size1 > UNITS_PER_WORD)
	return false;
      return off >= 0 && off < 32768;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      /* A bare symbolic address is only valid as a small-data
	 reference without PIC.  */
      return (!no_large_offset
	      /* With -fpic, we must wrap it in an unspec to show the B14
		 dependency.  */
	      && !flag_pic
	      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	      && sdata_symbolic_operand (x, Pmode));

    default:
      return false;
    }
}
| |
| static bool |
| c6x_legitimate_address_p (machine_mode mode, rtx x, bool strict) |
| { |
| return c6x_legitimate_address_p_1 (mode, x, strict, false); |
| } |
| |
/* Implements TARGET_LEGITIMATE_CONSTANT_P.  Every constant is
   accepted on this target.  */
static bool
c6x_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED,
			   rtx x ATTRIBUTE_UNUSED)
{
  return true;
}
| |
| /* Implements TARGET_PREFERRED_RENAME_CLASS. */ |
| static reg_class_t |
| c6x_preferred_rename_class (reg_class_t cl) |
| { |
| if (cl == A_REGS) |
| return NONPREDICATE_A_REGS; |
| if (cl == B_REGS) |
| return NONPREDICATE_B_REGS; |
| if (cl == ALL_REGS || cl == GENERAL_REGS) |
| return NONPREDICATE_REGS; |
| return NO_REGS; |
| } |
| |
/* Implements FINAL_PRESCAN_INSN.  */
void
c6x_final_prescan_insn (rtx_insn *insn, rtx *opvec ATTRIBUTE_UNUSED,
			int noperands ATTRIBUTE_UNUSED)
{
  /* Record the insn being output so that operand printing
     (e.g. the '|' and '$' codes in c6x_print_operand) can refer to
     it.  */
  c6x_current_insn = insn;
}
| |
| /* A structure to describe the stack layout of a function. The layout is |
| as follows: |
| |
| [saved frame pointer (or possibly padding0)] |
| --> incoming stack pointer, new hard frame pointer |
| [saved call-used regs] |
| [optional padding1] |
| --> soft frame pointer |
| [frame] |
| [outgoing arguments] |
| [optional padding2] |
| |
| The structure members are laid out in this order. */ |
| |
struct c6x_frame
{
  /* Padding below the saved frame pointer slot (see diagram
     above).  */
  int padding0;
  /* Number of registers to save.  */
  int nregs;
  /* Alignment padding between the register save area and the soft
     frame pointer.  */
  int padding1;
  /* Size of the local frame proper.  NOTE(review): not filled in by
     c6x_compute_frame_layout in this file chunk — confirm whether it
     is used elsewhere.  */
  HOST_WIDE_INT frame;
  /* Bytes reserved for outgoing arguments.  */
  int outgoing_arguments_size;
  /* Padding at the bottom of the frame, keeping the total a multiple
     of 8.  */
  int padding2;

  /* Total number of bytes the prologue subtracts from the stack
     pointer.  */
  HOST_WIDE_INT to_allocate;
  /* The offsets relative to the incoming stack pointer (which
     becomes HARD_FRAME_POINTER).  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT b3_offset;

  /* True if we should call push_rts/pop_rts to save and restore
     registers.  */
  bool push_rts;
};
| |
| /* Return true if we need to save and modify the PIC register in the |
| prologue. */ |
| |
| static bool |
| must_reload_pic_reg_p (void) |
| { |
| struct cgraph_local_info *i = NULL; |
| |
| if (!TARGET_DSBT) |
| return false; |
| |
| i = cgraph_node::local_info (current_function_decl); |
| |
| if ((crtl->uses_pic_offset_table || !crtl->is_leaf) && !i->local) |
| return true; |
| return false; |
| } |
| |
| /* Return 1 if we need to save REGNO. */ |
| static int |
| c6x_save_reg (unsigned int regno) |
| { |
| return ((df_regs_ever_live_p (regno) |
| && !call_used_regs[regno] |
| && !fixed_regs[regno]) |
| || (regno == RETURN_ADDR_REGNO |
| && (df_regs_ever_live_p (regno) |
| || !crtl->is_leaf)) |
| || (regno == PIC_OFFSET_TABLE_REGNUM && must_reload_pic_reg_p ())); |
| } |
| |
| /* Examine the number of regs NREGS we've determined we must save. |
| Return true if we should use __c6xabi_push_rts/__c6xabi_pop_rts for |
| prologue and epilogue. */ |
| |
| static bool |
| use_push_rts_p (int nregs) |
| { |
| if (TARGET_INSNS_64PLUS && optimize_function_for_size_p (cfun) |
| && !cfun->machine->contains_sibcall |
| && !cfun->returns_struct |
| && !TARGET_LONG_CALLS |
| && nregs >= 6 && !frame_pointer_needed) |
| return true; |
| return false; |
| } |
| |
| /* Return number of saved general prupose registers. */ |
| |
| int |
| c6x_nsaved_regs (void) |
| { |
| int nregs = 0; |
| int regno; |
| |
| for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) |
| if (c6x_save_reg (regno)) |
| nregs++; |
| return nregs; |
| } |
| |
/* The safe debug order mandated by the ABI: registers are saved and
   restored in this sequence (see c6x_expand_prologue and
   c6x_expand_epilogue).  */
static unsigned reg_save_order[] =
{
  REG_A10, REG_A11, REG_A12, REG_A13,
  REG_A14, REG_B3,
  REG_B10, REG_B11, REG_B12, REG_B13,
  REG_B14, REG_A15
};

/* Number of entries in reg_save_order.  */
#define N_SAVE_ORDER (sizeof reg_save_order / sizeof *reg_save_order)
| |
| /* Compute the layout of the stack frame and store it in FRAME. */ |
| |
static void
c6x_compute_frame_layout (struct c6x_frame *frame)
{
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT offset;
  int nregs;

  /* We use the four bytes which are technically inside the caller's frame,
     usually to save the frame pointer.  */
  offset = -4;
  frame->padding0 = 0;
  nregs = c6x_nsaved_regs ();
  frame->push_rts = false;
  frame->b3_offset = 0;
  if (use_push_rts_p (nregs))
    {
      /* __c6xabi_push_rts stores a fixed block of 14 registers; B3's
	 slot within that block depends on endianness.  */
      frame->push_rts = true;
      frame->b3_offset = (TARGET_BIG_ENDIAN ? -12 : -13) * 4;
      nregs = 14;
    }
  else if (c6x_save_reg (REG_B3))
    {
      /* Locate B3's save slot by counting the saved registers that the
	 ABI-mandated order places after it.  */
      int idx;
      for (idx = N_SAVE_ORDER - 1; reg_save_order[idx] != REG_B3; idx--)
	{
	  if (c6x_save_reg (reg_save_order[idx]))
	    frame->b3_offset -= 4;
	}
    }
  frame->nregs = nregs;

  /* Nothing to save or allocate: only reserve the word below the
     incoming stack pointer.  */
  if (size == 0 && nregs == 0)
    {
      frame->padding0 = 4;
      frame->padding1 = frame->padding2 = 0;
      frame->frame_pointer_offset = frame->to_allocate = 0;
      frame->outgoing_arguments_size = 0;
      return;
    }

  if (!frame->push_rts)
    offset += frame->nregs * 4;

  if (offset == 0 && size == 0 && crtl->outgoing_args_size == 0
      && !crtl->is_leaf)
    /* Don't use the bottom of the caller's frame if we have no
       allocation of our own and call other functions.  */
    frame->padding0 = frame->padding1 = 4;
  else if (offset & 4)
    frame->padding1 = 4;
  else
    frame->padding1 = 0;

  offset += frame->padding0 + frame->padding1;
  frame->frame_pointer_offset = offset;
  offset += size;

  frame->outgoing_arguments_size = crtl->outgoing_args_size;
  offset += frame->outgoing_arguments_size;

  /* Round the total allocation up to a multiple of 8, always adding
     at least four bytes of padding.  */
  if ((offset & 4) == 0)
    frame->padding2 = 8;
  else
    frame->padding2 = 4;
  frame->to_allocate = offset + frame->padding2;
}
| |
| /* Return the offset between two registers, one to be eliminated, and the other |
| its replacement, at the start of a routine. */ |
| |
| HOST_WIDE_INT |
| c6x_initial_elimination_offset (int from, int to) |
| { |
| struct c6x_frame frame; |
| c6x_compute_frame_layout (&frame); |
| |
| if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM) |
| return 0; |
| else if (from == FRAME_POINTER_REGNUM |
| && to == HARD_FRAME_POINTER_REGNUM) |
| return -frame.frame_pointer_offset; |
| else |
| { |
| gcc_assert (to == STACK_POINTER_REGNUM); |
| |
| if (from == ARG_POINTER_REGNUM) |
| return frame.to_allocate + (frame.push_rts ? 56 : 0); |
| |
| gcc_assert (from == FRAME_POINTER_REGNUM); |
| return frame.to_allocate - frame.frame_pointer_offset; |
| } |
| } |
| |
| /* Given FROM and TO register numbers, say whether this elimination is |
| allowed. Frame pointer elimination is automatically handled. */ |
| |
| static bool |
| c6x_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to) |
| { |
| if (to == STACK_POINTER_REGNUM) |
| return !frame_pointer_needed; |
| return true; |
| } |
| |
/* Emit insns to increment the stack pointer by OFFSET.  If
   FRAME_RELATED_P, set the RTX_FRAME_RELATED_P flag on the insns.
   Does nothing if the offset is zero.  */

static void
emit_add_sp_const (HOST_WIDE_INT offset, bool frame_related_p)
{
  rtx to_add = GEN_INT (offset);
  rtx orig_to_add = to_add;
  rtx_insn *insn;

  if (offset == 0)
    return;

  /* A 16-bit signed immediate fits the add directly; otherwise build
     the constant in scratch register A0 with a high/lo_sum pair
     first.  */
  if (offset < -32768 || offset > 32767)
    {
      rtx reg = gen_rtx_REG (SImode, REG_A0);
      rtx low = GEN_INT (trunc_int_for_mode (offset, HImode));

      insn = emit_insn (gen_movsi_high (reg, low));
      if (frame_related_p)
	RTX_FRAME_RELATED_P (insn) = 1;
      insn = emit_insn (gen_movsi_lo_sum (reg, reg, to_add));
      if (frame_related_p)
	RTX_FRAME_RELATED_P (insn) = 1;
      to_add = reg;
    }
  insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
				to_add));
  if (frame_related_p)
    {
      /* When adding a register, the unwinder cannot see the constant;
	 attach a note describing the effective SP adjustment.  */
      if (REG_P (to_add))
	add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		      gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				   gen_rtx_PLUS (Pmode, stack_pointer_rtx,
						 orig_to_add)));

      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
| |
/* Prologue and epilogue.  */

/* Expand the function prologue: save the frame pointer and
   callee-saved registers, allocate the frame, and set up the DSBT
   PIC register if required.  */
void
c6x_expand_prologue (void)
{
  struct c6x_frame frame;
  rtx_insn *insn;
  rtx mem;
  int nsaved = 0;
  HOST_WIDE_INT initial_offset, off, added_already;

  c6x_compute_frame_layout (&frame);

  if (flag_stack_usage_info)
    current_function_static_stack_size = frame.to_allocate;

  initial_offset = -frame.to_allocate;
  if (frame.push_rts)
    {
      /* Let the __c6xabi_push_rts helper save the whole register
	 block.  */
      emit_insn (gen_push_rts ());
      nsaved = frame.nregs;
    }

  /* If the offsets would be too large for the memory references we will
     create to save registers, do the stack allocation in two parts.
     Ensure by subtracting 8 that we don't store to the word pointed to
     by the stack pointer.  */
  if (initial_offset < -32768)
    initial_offset = -frame.frame_pointer_offset - 8;

  if (frame.to_allocate > 0)
    gcc_assert (initial_offset != 0);

  /* OFF is the offset (from the adjusted SP) just above the register
     save area; the save loop below works downwards from it.  */
  off = -initial_offset + 4 - frame.padding0;

  mem = gen_frame_mem (Pmode, stack_pointer_rtx);

  added_already = 0;
  if (frame_pointer_needed)
    {
      rtx fp_reg = gen_rtx_REG (SImode, REG_A15);
      /* We go through some contortions here to both follow the ABI's
	 recommendation that FP == incoming SP, and to avoid writing or
	 reading the word pointed to by the stack pointer.  */
      rtx addr = gen_rtx_POST_MODIFY (Pmode, stack_pointer_rtx,
				      gen_rtx_PLUS (Pmode, stack_pointer_rtx,
						    GEN_INT (-8)));
      insn = emit_move_insn (gen_frame_mem (Pmode, addr), fp_reg);
      RTX_FRAME_RELATED_P (insn) = 1;
      nsaved++;
      insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, stack_pointer_rtx,
				    GEN_INT (8)));
      RTX_FRAME_RELATED_P (insn) = 1;
      off -= 4;
      added_already = -8;
    }

  /* First part of the stack adjustment (possibly all of it).  */
  emit_add_sp_const (initial_offset - added_already, true);

  if (nsaved < frame.nregs)
    {
      unsigned i;

      /* Walk the ABI save order backwards, storing registers at
	 decreasing offsets.  */
      for (i = 0; i < N_SAVE_ORDER; i++)
	{
	  int idx = N_SAVE_ORDER - i - 1;
	  unsigned regno = reg_save_order[idx];
	  rtx reg;
	  machine_mode save_mode = SImode;

	  if (regno == REG_A15 && frame_pointer_needed)
	    /* Already saved.  */
	    continue;
	  if (!c6x_save_reg (regno))
	    continue;

	  /* Merge an odd register with its even neighbour into one
	     doubleword store when alignment, offset range and the save
	     order allow.  */
	  if (TARGET_STDW && (off & 4) == 0 && off <= 256
	      && (regno & 1) == 1
	      && i + 1 < N_SAVE_ORDER
	      && reg_save_order[idx - 1] == regno - 1
	      && c6x_save_reg (regno - 1))
	    {
	      save_mode = DImode;
	      regno--;
	      i++;
	    }
	  reg = gen_rtx_REG (save_mode, regno);
	  off -= GET_MODE_SIZE (save_mode);

	  insn = emit_move_insn (adjust_address (mem, save_mode, off),
				 reg);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  nsaved += HARD_REGNO_NREGS (regno, save_mode);
	}
    }
  gcc_assert (nsaved == frame.nregs);
  /* Allocate whatever part of the frame remains.  */
  emit_add_sp_const (-frame.to_allocate - initial_offset, true);
  if (must_reload_pic_reg_p ())
    {
      /* Lazily create the __c6xabi_DSBT_BASE declaration, then load
	 the DSBT base into the PIC register.  */
      if (dsbt_decl == NULL)
	{
	  tree t;

	  t = build_index_type (integer_one_node);
	  t = build_array_type (integer_type_node, t);
	  t = build_decl (BUILTINS_LOCATION, VAR_DECL,
			  get_identifier ("__c6xabi_DSBT_BASE"), t);
	  DECL_ARTIFICIAL (t) = 1;
	  DECL_IGNORED_P (t) = 1;
	  DECL_EXTERNAL (t) = 1;
	  TREE_STATIC (t) = 1;
	  TREE_PUBLIC (t) = 1;
	  TREE_USED (t) = 1;

	  dsbt_decl = t;
	}
      emit_insn (gen_setup_dsbt (pic_offset_table_rtx,
				 XEXP (DECL_RTL (dsbt_decl), 0)));
    }
}
| |
/* Expand the function epilogue: restore callee-saved registers and the
   frame pointer, deallocate the frame, and emit the return unless
   SIBCALL is true.  */
void
c6x_expand_epilogue (bool sibcall)
{
  unsigned i;
  struct c6x_frame frame;
  rtx mem;
  HOST_WIDE_INT off;
  int nsaved = 0;

  c6x_compute_frame_layout (&frame);

  mem = gen_frame_mem (Pmode, stack_pointer_rtx);

  /* Insert a dummy set/use of the stack pointer.  This creates a
     scheduler barrier between the prologue saves and epilogue restores.  */
  emit_insn (gen_epilogue_barrier (stack_pointer_rtx, stack_pointer_rtx));

  /* If the offsets would be too large for the memory references we will
     create to restore registers, do a preliminary stack adjustment here.  */
  off = frame.to_allocate - frame.frame_pointer_offset + frame.padding1;
  if (frame.push_rts)
    {
      /* The pop_rts insn, emitted as the return below, restores the
	 whole register block.  */
      nsaved = frame.nregs;
    }
  else
    {
      if (frame.to_allocate > 32768)
	{
	  /* Don't add the entire offset so that we leave an unused word
	     above the stack pointer.  */
	  emit_add_sp_const ((off - 16) & ~7, false);
	  off &= 7;
	  off += 16;
	}
      /* Restore registers in ABI order, mirroring the prologue's
	 saves.  */
      for (i = 0; i < N_SAVE_ORDER; i++)
	{
	  unsigned regno = reg_save_order[i];
	  rtx reg;
	  machine_mode save_mode = SImode;

	  if (!c6x_save_reg (regno))
	    continue;
	  /* The frame pointer is restored separately below.  */
	  if (regno == REG_A15 && frame_pointer_needed)
	    continue;

	  /* Merge an even/odd register pair into one doubleword load
	     when alignment and offset range allow.  */
	  if (TARGET_STDW && (off & 4) == 0 && off < 256
	      && (regno & 1) == 0
	      && i + 1 < N_SAVE_ORDER
	      && reg_save_order[i + 1] == regno + 1
	      && c6x_save_reg (regno + 1))
	    {
	      save_mode = DImode;
	      i++;
	    }
	  reg = gen_rtx_REG (save_mode, regno);

	  emit_move_insn (reg, adjust_address (mem, save_mode, off));

	  off += GET_MODE_SIZE (save_mode);
	  nsaved += HARD_REGNO_NREGS (regno, save_mode);
	}
    }
  if (!frame_pointer_needed)
    emit_add_sp_const (off + frame.padding0 - 4, false);
  else
    {
      /* Point the SP just below the FP save slot, then reload the old
	 frame pointer with a pre-modify that also pops the slot.  */
      rtx fp_reg = gen_rtx_REG (SImode, REG_A15);
      rtx addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx,
				     gen_rtx_PLUS (Pmode, stack_pointer_rtx,
						   GEN_INT (8)));
      emit_insn (gen_addsi3 (stack_pointer_rtx, hard_frame_pointer_rtx,
			     GEN_INT (-8)));
      emit_move_insn (fp_reg, gen_frame_mem (Pmode, addr));
      nsaved++;
    }
  gcc_assert (nsaved == frame.nregs);
  if (!sibcall)
    {
      if (frame.push_rts)
	emit_jump_insn (gen_pop_rts ());
      else
	emit_jump_insn (gen_return_internal (gen_rtx_REG (SImode,
							  RETURN_ADDR_REGNO)));
    }
}
| |
| /* Return the value of the return address for the frame COUNT steps up |
| from the current frame, after the prologue. |
| We punt for everything but the current frame by returning const0_rtx. */ |
| |
| rtx |
| c6x_return_addr_rtx (int count) |
| { |
| if (count != 0) |
| return const0_rtx; |
| |
| return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNO); |
| } |
| |
| /* Return true iff TYPE is one of the shadow types. */ |
| static bool |
| shadow_type_p (enum attr_type type) |
| { |
| return (type == TYPE_SHADOW || type == TYPE_LOAD_SHADOW |
| || type == TYPE_MULT_SHADOW); |
| } |
| |
| /* Return true iff INSN is a shadow pattern. */ |
| static bool |
| shadow_p (rtx_insn *insn) |
| { |
| if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0) |
| return false; |
| return shadow_type_p (get_attr_type (insn)); |
| } |
| |
| /* Return true iff INSN is a shadow or blockage pattern. */ |
| static bool |
| shadow_or_blockage_p (rtx_insn *insn) |
| { |
| enum attr_type type; |
| if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0) |
| return false; |
| type = get_attr_type (insn); |
| return shadow_type_p (type) || type == TYPE_BLOCKAGE; |
| } |
| |
/* Translate UNITS into a bitmask of units we can reserve for this
   insn.  Units attributes that allow a choice (LS, DL, DS, DLS) map
   to masks combining the respective units; anything unrecognized maps
   to 0 (no reservation).  */
static int
get_reservation_flags (enum attr_units units)
{
  switch (units)
    {
    case UNITS_D:
    case UNITS_D_ADDR:
      return RESERVATION_FLAG_D;
    case UNITS_L:
      return RESERVATION_FLAG_L;
    case UNITS_S:
      return RESERVATION_FLAG_S;
    case UNITS_M:
      return RESERVATION_FLAG_M;
    case UNITS_LS:
      return RESERVATION_FLAG_LS;
    case UNITS_DL:
      return RESERVATION_FLAG_DL;
    case UNITS_DS:
      return RESERVATION_FLAG_DS;
    case UNITS_DLS:
      return RESERVATION_FLAG_DLS;
    default:
      return 0;
    }
}
| |
| /* Compute the side of the machine used by INSN, which reserves UNITS. |
| This must match the reservations in the scheduling description. */ |
| static int |
| get_insn_side (rtx_insn *insn, enum attr_units units) |
| { |
| if (units == UNITS_D_ADDR) |
| return (get_attr_addr_regfile (insn) == ADDR_REGFILE_A ? 0 : 1); |
| else |
| { |
| enum attr_dest_regfile rf = get_attr_dest_regfile (insn); |
| if (rf == DEST_REGFILE_ANY) |
| return get_attr_type (insn) == TYPE_BRANCH ? 0 : 1; |
| else |
| return rf == DEST_REGFILE_A ? 0 : 1; |
| } |
| } |
| |
| /* After scheduling, walk the insns between HEAD and END and assign unit |
| reservations. */ |
| static void |
| assign_reservations (rtx_insn *head, rtx_insn *end) |
| { |
| rtx_insn *insn; |
| for (insn = head; insn != NEXT_INSN (end); insn = NEXT_INSN (insn)) |
| { |
| unsigned int sched_mask, reserved; |
| rtx_insn *within, *last; |
| int pass; |
| int rsrv[2]; |
| int rsrv_count[2][4]; |
| int i; |
| |
| if (GET_MODE (insn) != TImode) |
| continue; |
| |
| reserved = 0; |
| last = NULL; |
| /* Find the last insn in the packet. It has a state recorded for it, |
| which we can use to determine the units we should be using. */ |
| for (within = insn; |
| (within != NEXT_INSN (end) |
| && (within == insn || GET_MODE (within) != TImode)); |
| within = NEXT_INSN (within)) |
| { |
| int icode; |
| if (!NONDEBUG_INSN_P (within)) |
| continue; |
| icode = recog_memoized (within); |
|
|