| /* Output routines for GCC for ARM/RISCiX. |
| Copyright (C) 1991, 93, 94, 95, 96, 1997 Free Software Foundation, Inc. |
| Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl) |
| and Martin Simmons (@harleqn.co.uk). |
| More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk) |
| |
| This file is part of GNU CC. |
| |
| GNU CC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 2, or (at your option) |
| any later version. |
| |
| GNU CC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GNU CC; see the file COPYING. If not, write to |
| the Free Software Foundation, 59 Temple Place - Suite 330, |
| Boston, MA 02111-1307, USA. */ |
| |
| #include "config.h" |
| #include <stdio.h> |
| #include <string.h> |
| #include "rtl.h" |
| #include "regs.h" |
| #include "hard-reg-set.h" |
| #include "real.h" |
| #include "insn-config.h" |
| #include "conditions.h" |
| #include "insn-flags.h" |
| #include "output.h" |
| #include "insn-attr.h" |
| #include "flags.h" |
| #include "reload.h" |
| #include "tree.h" |
| #include "expr.h" |
| |
| /* The maximum number of insns skipped which will be conditionalised if |
| possible. */ |
| #define MAX_INSNS_SKIPPED 5 |
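/* (For example, a two-insn `if' body can become two conditionally-executed
   insns such as `moveq'/`addeq', with the branch around them deleted by
   final_prescan_insn.)  */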
| |
| /* Some function declarations. */ |
| extern FILE *asm_out_file; |
| |
| static HOST_WIDE_INT int_log2 PROTO ((HOST_WIDE_INT)); |
| static char *output_multi_immediate PROTO ((rtx *, char *, char *, int, |
| HOST_WIDE_INT)); |
| static int arm_gen_constant PROTO ((enum rtx_code, enum machine_mode, |
| HOST_WIDE_INT, rtx, rtx, int, int)); |
| static int arm_naked_function_p PROTO ((tree)); |
| static void init_fpa_table PROTO ((void)); |
| static enum machine_mode select_dominance_cc_mode PROTO ((enum rtx_code, rtx, |
| rtx, HOST_WIDE_INT)); |
| static HOST_WIDE_INT add_constant PROTO ((rtx, enum machine_mode)); |
| static void dump_table PROTO ((rtx)); |
| static int fixit PROTO ((rtx, enum machine_mode, int)); |
| static rtx find_barrier PROTO ((rtx, int)); |
| static int broken_move PROTO ((rtx)); |
| static char *fp_const_from_val PROTO ((REAL_VALUE_TYPE *)); |
| static int eliminate_lr2ip PROTO ((rtx *)); |
| static char *shift_op PROTO ((rtx, HOST_WIDE_INT *)); |
| static int pattern_really_clobbers_lr PROTO ((rtx)); |
| static int function_really_clobbers_lr PROTO ((rtx)); |
| static void emit_multi_reg_push PROTO ((int)); |
| static void emit_sfm PROTO ((int, int)); |
| static enum arm_cond_code get_arm_condition_code PROTO ((rtx)); |
| |
| /* Define the information needed to generate branch insns. This is |
| stored from the compare operation. */ |
| |
| rtx arm_compare_op0, arm_compare_op1; |
| int arm_compare_fp; |
| |
| /* What type of cpu are we compiling for? */ |
| enum processor_type arm_cpu; |
| |
| /* What type of floating point are we tuning for? */ |
| enum floating_point_type arm_fpu; |
| |
| /* What type of floating point instructions are available? */ |
| enum floating_point_type arm_fpu_arch; |
| |
/* What program mode is the cpu running in? 26-bit mode or 32-bit mode.  */
| enum prog_mode_type arm_prgmode; |
| |
| /* Set by the -mfp=... option */ |
| char *target_fp_name = NULL; |
| |
| /* Nonzero if this is an "M" variant of the processor. */ |
| int arm_fast_multiply = 0; |
| |
| /* Nonzero if this chip supports the ARM Architecture 4 extensions */ |
| int arm_arch4 = 0; |
| |
| /* Set to the features we should tune the code for (multiply speed etc). */ |
| int tune_flags = 0; |
| |
| /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we |
| must report the mode of the memory reference from PRINT_OPERAND to |
| PRINT_OPERAND_ADDRESS. */ |
| enum machine_mode output_memory_reference_mode; |
| |
/* Nonzero if the current function has anonymous arguments; if so, the
   prologue must set up `fp'.  */
| int current_function_anonymous_args; |
| |
| /* The register number to be used for the PIC offset register. */ |
| int arm_pic_register = 9; |
| |
| /* Location counter of .text segment. */ |
| int arm_text_location = 0; |
| |
/* Set to one if we think that lr is only saved because of subroutine calls,
   but all of these can be `put after' return insns.  */
| int lr_save_eliminated; |
| |
| /* Set to 1 when a return insn is output, this means that the epilogue |
| is not needed. */ |
| |
| static int return_used_this_function; |
| |
| static int arm_constant_limit = 3; |
| |
| /* For an explanation of these variables, see final_prescan_insn below. */ |
| int arm_ccfsm_state; |
| enum arm_cond_code arm_current_cc; |
| rtx arm_target_insn; |
| int arm_target_label; |
| |
| /* The condition codes of the ARM, and the inverse function. */ |
| char *arm_condition_codes[] = |
| { |
| "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc", |
| "hi", "ls", "ge", "lt", "gt", "le", "al", "nv" |
| }; |
| |
| |
| /* Initialization code */ |
| |
| struct arm_cpu_select arm_select[4] = |
| { |
| /* switch name, tune arch */ |
| { (char *)0, "--with-cpu=", 1, 1 }, |
| { (char *)0, "-mcpu=", 1, 1 }, |
| { (char *)0, "-march=", 0, 1 }, |
| { (char *)0, "-mtune=", 1, 0 }, |
| }; |
| |
| #define FL_CO_PROC 0x01 /* Has external co-processor bus */ |
| #define FL_FAST_MULT 0x02 /* Fast multiply */ |
| #define FL_MODE26 0x04 /* 26-bit mode support */ |
| #define FL_MODE32 0x08 /* 32-bit mode support */ |
| #define FL_ARCH4 0x10 /* Architecture rel 4 */ |
| #define FL_THUMB 0x20 /* Thumb aware */ |
| |
| struct processors |
| { |
| char *name; |
| enum processor_type type; |
| unsigned int flags; |
| }; |
| |
| /* Not all of these give usefully different compilation alternatives, |
| but there is no simple way of generalizing them. */ |
| static struct processors all_procs[] = |
| { |
| {"arm2", PROCESSOR_ARM2, FL_CO_PROC | FL_MODE26}, |
| {"arm250", PROCESSOR_ARM2, FL_CO_PROC | FL_MODE26}, |
| {"arm3", PROCESSOR_ARM2, FL_CO_PROC | FL_MODE26}, |
| {"arm6", PROCESSOR_ARM6, FL_CO_PROC | FL_MODE32 | FL_MODE26}, |
| {"arm600", PROCESSOR_ARM6, FL_CO_PROC | FL_MODE32 | FL_MODE26}, |
| {"arm610", PROCESSOR_ARM6, FL_MODE32 | FL_MODE26}, |
| {"arm7", PROCESSOR_ARM7, FL_CO_PROC | FL_MODE32 | FL_MODE26}, |
  /* The arm7m doesn't exist on its own, only in conjunction with D (and I),
     but those variants don't alter the generated code, so the name arm7m is
     accepted for it here.  */
| {"arm7m", PROCESSOR_ARM7, (FL_CO_PROC | FL_FAST_MULT | FL_MODE32 |
| | FL_MODE26)}, |
| {"arm7dm", PROCESSOR_ARM7, (FL_CO_PROC | FL_FAST_MULT | FL_MODE32 |
| | FL_MODE26)}, |
| {"arm7dmi", PROCESSOR_ARM7, (FL_CO_PROC | FL_FAST_MULT | FL_MODE32 |
| | FL_MODE26)}, |
| {"arm700", PROCESSOR_ARM7, FL_CO_PROC | FL_MODE32 | FL_MODE26}, |
| {"arm710", PROCESSOR_ARM7, FL_MODE32 | FL_MODE26}, |
| {"arm7100", PROCESSOR_ARM7, FL_MODE32 | FL_MODE26}, |
| {"arm7500", PROCESSOR_ARM7, FL_MODE32 | FL_MODE26}, |
  /* Doesn't really have an external co-proc, but does have an embedded FPU.  */
| {"arm7500fe", PROCESSOR_ARM7, FL_CO_PROC | FL_MODE32 | FL_MODE26}, |
| {"arm7tdmi", PROCESSOR_ARM7, (FL_CO_PROC | FL_FAST_MULT | FL_MODE32 |
| | FL_ARCH4 | FL_THUMB)}, |
| {"arm8", PROCESSOR_ARM8, (FL_FAST_MULT | FL_MODE32 | FL_MODE26 |
| | FL_ARCH4)}, |
| {"arm810", PROCESSOR_ARM8, (FL_FAST_MULT | FL_MODE32 | FL_MODE26 |
| | FL_ARCH4)}, |
| {"strongarm", PROCESSOR_STARM, (FL_FAST_MULT | FL_MODE32 | FL_MODE26 |
| | FL_ARCH4)}, |
| {"strongarm110", PROCESSOR_STARM, (FL_FAST_MULT | FL_MODE32 | FL_MODE26 |
| | FL_ARCH4)}, |
| {"armv2", PROCESSOR_NONE, FL_CO_PROC | FL_MODE26}, |
| {"armv2a", PROCESSOR_NONE, FL_CO_PROC | FL_MODE26}, |
| {"armv3", PROCESSOR_NONE, FL_CO_PROC | FL_MODE32 | FL_MODE26}, |
| {"armv3m", PROCESSOR_NONE, (FL_CO_PROC | FL_FAST_MULT | FL_MODE32 |
| | FL_MODE26)}, |
| {"armv4", PROCESSOR_NONE, (FL_CO_PROC | FL_FAST_MULT | FL_MODE32 |
| | FL_MODE26 | FL_ARCH4)}, |
| /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no |
| implementations that support it, so we will leave it out for now. */ |
| {"armv4t", PROCESSOR_NONE, (FL_CO_PROC | FL_FAST_MULT | FL_MODE32 |
| | FL_ARCH4)}, |
| {NULL, 0, 0} |
| }; |
| |
| /* Fix up any incompatible options that the user has specified. |
| This has now turned into a maze. */ |
| void |
| arm_override_options () |
| { |
| int arm_thumb_aware = 0; |
| int flags = 0; |
| int i; |
| struct arm_cpu_select *ptr; |
| static struct cpu_default { |
| int cpu; |
| char *name; |
| } cpu_defaults[] = { |
| { TARGET_CPU_arm2, "arm2" }, |
| { TARGET_CPU_arm6, "arm6" }, |
| { TARGET_CPU_arm610, "arm610" }, |
| { TARGET_CPU_arm7dm, "arm7dm" }, |
| { TARGET_CPU_arm7500fe, "arm7500fe" }, |
| { TARGET_CPU_arm7tdmi, "arm7tdmi" }, |
| { TARGET_CPU_arm8, "arm8" }, |
| { TARGET_CPU_arm810, "arm810" }, |
| { TARGET_CPU_strongarm, "strongarm" }, |
| { 0, 0 } |
| }; |
| struct cpu_default *def; |
| |
| /* Set the default. */ |
| for (def = &cpu_defaults[0]; def->name; ++def) |
| if (def->cpu == TARGET_CPU_DEFAULT) |
| break; |
| if (! def->name) |
| abort (); |
| |
| arm_select[0].string = def->name; |
| |
| for (i = 0; i < sizeof (arm_select) / sizeof (arm_select[0]); i++) |
| { |
| ptr = &arm_select[i]; |
| if (ptr->string != (char *)0 && ptr->string[0] != '\0') |
| { |
| struct processors *sel; |
| |
| for (sel = all_procs; sel->name != NULL; sel++) |
| if (! strcmp (ptr->string, sel->name)) |
| { |
| /* -march= is the only flag that can take an architecture |
| type, so if we match when the tune bit is set, the |
| option was invalid. */ |
| if (ptr->set_tune_p) |
| { |
| if (sel->type == PROCESSOR_NONE) |
		    continue;   /* It's an architecture, not a cpu */
| |
| arm_cpu = sel->type; |
| tune_flags = sel->flags; |
| } |
| |
| if (ptr->set_arch_p) |
| flags = sel->flags; |
| |
| break; |
| } |
| |
| if (sel->name == NULL) |
| error ("bad value (%s) for %s switch", ptr->string, ptr->name); |
| } |
| } |
| |
| if (write_symbols != NO_DEBUG && flag_omit_frame_pointer) |
| warning ("-g with -fomit-frame-pointer may not give sensible debugging"); |
| |
| if (TARGET_POKE_FUNCTION_NAME) |
| target_flags |= ARM_FLAG_APCS_FRAME; |
| |
| if (TARGET_6) |
| warning ("Option '-m6' deprecated. Use: '-mapcs-32' or -mcpu=<proc>"); |
| |
| if (TARGET_3) |
| warning ("Option '-m3' deprecated. Use: '-mapcs-26' or -mcpu=<proc>"); |
| |
| if (TARGET_APCS_REENT && flag_pic) |
| fatal ("-fpic and -mapcs-reent are incompatible"); |
| |
| if (TARGET_APCS_REENT) |
| warning ("APCS reentrant code not supported."); |
| |
| /* If stack checking is disabled, we can use r10 as the PIC register, |
| which keeps r9 available. */ |
| if (flag_pic && ! TARGET_APCS_STACK) |
| arm_pic_register = 10; |
| |
| /* Well, I'm about to have a go, but pic is NOT going to be compatible |
| with APCS reentrancy, since that requires too much support in the |
| assembler and linker, and the ARMASM assembler seems to lack some |
| required directives. */ |
| if (flag_pic) |
| warning ("Position independent code not supported. Ignored"); |
| |
| if (TARGET_APCS_FLOAT) |
| warning ("Passing floating point arguments in fp regs not yet supported"); |
| |
| if (TARGET_APCS_STACK && ! TARGET_APCS) |
| { |
| warning ("-mapcs-stack-check incompatible with -mno-apcs-frame"); |
| target_flags |= ARM_FLAG_APCS_FRAME; |
| } |
| |
| /* Default is to tune for an FPA */ |
| arm_fpu = FP_HARD; |
| |
| /* Default value for floating point code... if no co-processor |
| bus, then schedule for emulated floating point. Otherwise, |
| assume the user has an FPA. |
| Note: this does not prevent use of floating point instructions, |
| -msoft-float does that. */ |
  if ((tune_flags & FL_CO_PROC) == 0)
| arm_fpu = FP_SOFT3; |
| |
| arm_fast_multiply = (flags & FL_FAST_MULT) != 0; |
| arm_arch4 = (flags & FL_ARCH4) != 0; |
| arm_thumb_aware = (flags & FL_THUMB) != 0; |
| |
| if (target_fp_name) |
| { |
| if (strcmp (target_fp_name, "2") == 0) |
| arm_fpu_arch = FP_SOFT2; |
| else if (strcmp (target_fp_name, "3") == 0) |
| arm_fpu_arch = FP_HARD; |
| else |
| fatal ("Invalid floating point emulation option: -mfpe=%s", |
| target_fp_name); |
| } |
| else |
| arm_fpu_arch = FP_DEFAULT; |
| |
| if (TARGET_THUMB_INTERWORK && ! arm_thumb_aware) |
| { |
| warning ("This processor variant does not support Thumb interworking"); |
| target_flags &= ~ARM_FLAG_THUMB; |
| } |
| |
| if (TARGET_FPE && arm_fpu != FP_HARD) |
| arm_fpu = FP_SOFT2; |
| |
| /* For arm2/3 there is no need to do any scheduling if there is only |
| a floating point emulator, or we are doing software floating-point. */ |
| if ((TARGET_SOFT_FLOAT || arm_fpu != FP_HARD) && arm_cpu == PROCESSOR_ARM2) |
| flag_schedule_insns = flag_schedule_insns_after_reload = 0; |
| |
  arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;
| } |
| |
| |
| /* Return 1 if it is possible to return using a single instruction */ |
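/* (That is, whether the entire epilogue can be folded into a single insn,
   such as `mov pc, lr' when nothing needs restoring.)  */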
| |
| int |
| use_return_insn () |
| { |
| int regno; |
| |
  if (!reload_completed || current_function_pretend_args_size
| || current_function_anonymous_args |
| || ((get_frame_size () + current_function_outgoing_args_size != 0) |
| && !(TARGET_APCS || frame_pointer_needed))) |
| return 0; |
| |
| /* Can't be done if interworking with Thumb, and any registers have been |
| stacked */ |
| if (TARGET_THUMB_INTERWORK) |
| for (regno = 0; regno < 16; regno++) |
| if (regs_ever_live[regno] && ! call_used_regs[regno]) |
| return 0; |
| |
| /* Can't be done if any of the FPU regs are pushed, since this also |
| requires an insn */ |
| for (regno = 16; regno < 24; regno++) |
| if (regs_ever_live[regno] && ! call_used_regs[regno]) |
| return 0; |
| |
| /* If a function is naked, don't use the "return" insn. */ |
| if (arm_naked_function_p (current_function_decl)) |
| return 0; |
| |
| return 1; |
| } |
| |
| /* Return TRUE if int I is a valid immediate ARM constant. */ |
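/* An ARM data-processing immediate is an 8-bit value rotated right by an
   even amount within the 32-bit word; the loop below checks this by
   rotating MASK two bits at a time.  For example, 0xFF000000 (0xFF rotated
   right by 8) and 0xF000000F (0xFF rotated right by 4, wrapping around)
   are both valid, while 0x00FFF000 is not, since its set bits span
   twelve bit positions.  */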
| |
| int |
| const_ok_for_arm (i) |
| HOST_WIDE_INT i; |
| { |
| unsigned HOST_WIDE_INT mask = ~0xFF; |
| |
| /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must |
| be all zero, or all one. */ |
| if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0 |
| && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) |
| != (((HOST_WIDE_INT) -1) & ~(unsigned HOST_WIDE_INT) 0xffffffff))) |
| return FALSE; |
| |
| /* Fast return for 0 and powers of 2 */ |
| if ((i & (i - 1)) == 0) |
| return TRUE; |
| |
| do |
| { |
| if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0) |
| return TRUE; |
| mask = |
| (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff) |
| >> (32 - 2)) | ~((unsigned HOST_WIDE_INT) 0xffffffff); |
| } while (mask != ~0xFF); |
| |
| return FALSE; |
| } |
| |
| /* Return true if I is a valid constant for the operation CODE. */ |
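/* (For example, a PLUS of -66 can be output as a SUB of 66, and an AND
   with 0xffffff00 can be output as a BIC of 0xff, so such constants are
   accepted here even though they are not directly encodable.)  */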
| int |
| const_ok_for_op (i, code, mode) |
| HOST_WIDE_INT i; |
| enum rtx_code code; |
| enum machine_mode mode; |
| { |
| if (const_ok_for_arm (i)) |
| return 1; |
| |
| switch (code) |
| { |
| case PLUS: |
| return const_ok_for_arm (ARM_SIGN_EXTEND (-i)); |
| |
| case MINUS: /* Should only occur with (MINUS I reg) => rsb */ |
| case XOR: |
| case IOR: |
| return 0; |
| |
| case AND: |
| return const_ok_for_arm (ARM_SIGN_EXTEND (~i)); |
| |
| default: |
| abort (); |
| } |
| } |
| |
| /* Emit a sequence of insns to handle a large constant. |
| CODE is the code of the operation required, it can be any of SET, PLUS, |
| IOR, AND, XOR, MINUS; |
| MODE is the mode in which the operation is being performed; |
| VAL is the integer to operate on; |
| SOURCE is the other operand (a register, or a null-pointer for SET); |
| SUBTARGETS means it is safe to create scratch registers if that will |
| either produce a simpler sequence, or we will want to cse the values. |
| Return value is the number of insns emitted. */ |
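/* For example, a SET of 0x00FF00FF is not encodable as one immediate, but
   splits into two insns, roughly:

	mov	rD, #0xFF
	add	rD, rD, #0x00FF0000

   (a sketch only; the exact sequence is chosen by arm_gen_constant).  */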
| |
| int |
| arm_split_constant (code, mode, val, target, source, subtargets) |
| enum rtx_code code; |
| enum machine_mode mode; |
| HOST_WIDE_INT val; |
| rtx target; |
| rtx source; |
| int subtargets; |
| { |
| if (subtargets || code == SET |
| || (GET_CODE (target) == REG && GET_CODE (source) == REG |
| && REGNO (target) != REGNO (source))) |
| { |
| if (arm_gen_constant (code, mode, val, target, source, 1, 0) |
| > arm_constant_limit + (code != SET)) |
| { |
| if (code == SET) |
| { |
	      /* Currently SET is the only monadic value for CODE; all
		 the rest are dyadic.  */
| emit_insn (gen_rtx (SET, VOIDmode, target, GEN_INT (val))); |
| return 1; |
| } |
| else |
| { |
| rtx temp = subtargets ? gen_reg_rtx (mode) : target; |
| |
| emit_insn (gen_rtx (SET, VOIDmode, temp, GEN_INT (val))); |
| /* For MINUS, the value is subtracted from, since we never |
| have subtraction of a constant. */ |
| if (code == MINUS) |
| emit_insn (gen_rtx (SET, VOIDmode, target, |
| gen_rtx (code, mode, temp, source))); |
| else |
| emit_insn (gen_rtx (SET, VOIDmode, target, |
| gen_rtx (code, mode, source, temp))); |
| return 2; |
| } |
| } |
| } |
| |
| return arm_gen_constant (code, mode, val, target, source, subtargets, 1); |
| } |
| |
| /* As above, but extra parameter GENERATE which, if clear, suppresses |
| RTL generation. */ |
| int |
| arm_gen_constant (code, mode, val, target, source, subtargets, generate) |
| enum rtx_code code; |
| enum machine_mode mode; |
| HOST_WIDE_INT val; |
| rtx target; |
| rtx source; |
| int subtargets; |
| int generate; |
| { |
| int can_add = 0; |
| int can_invert = 0; |
| int can_negate = 0; |
| int can_negate_initial = 0; |
| int can_shift = 0; |
| int i; |
| int num_bits_set = 0; |
| int set_sign_bit_copies = 0; |
| int clear_sign_bit_copies = 0; |
| int clear_zero_bit_copies = 0; |
| int set_zero_bit_copies = 0; |
| int insns = 0; |
| rtx new_src; |
| unsigned HOST_WIDE_INT temp1, temp2; |
| unsigned HOST_WIDE_INT remainder = val & 0xffffffff; |
| |
  /* Find out which operations are safe for a given CODE.  Also do a quick
| check for degenerate cases; these can occur when DImode operations |
| are split. */ |
| switch (code) |
| { |
| case SET: |
| can_invert = 1; |
| can_shift = 1; |
| can_negate = 1; |
| break; |
| |
| case PLUS: |
| can_negate = 1; |
| can_negate_initial = 1; |
| break; |
| |
| case IOR: |
| if (remainder == 0xffffffff) |
| { |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, target, |
| GEN_INT (ARM_SIGN_EXTEND (val)))); |
| return 1; |
| } |
| if (remainder == 0) |
| { |
| if (reload_completed && rtx_equal_p (target, source)) |
| return 0; |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, target, source)); |
| return 1; |
| } |
| break; |
| |
| case AND: |
| if (remainder == 0) |
| { |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, target, const0_rtx)); |
| return 1; |
| } |
| if (remainder == 0xffffffff) |
| { |
| if (reload_completed && rtx_equal_p (target, source)) |
| return 0; |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, target, source)); |
| return 1; |
| } |
| can_invert = 1; |
| break; |
| |
| case XOR: |
| if (remainder == 0) |
| { |
| if (reload_completed && rtx_equal_p (target, source)) |
| return 0; |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, target, source)); |
| return 1; |
| } |
| if (remainder == 0xffffffff) |
| { |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, target, |
| gen_rtx (NOT, mode, source))); |
| return 1; |
| } |
| |
      /* We don't yet know how to handle any other XOR cases below.  */
| abort (); |
| |
| case MINUS: |
| /* We treat MINUS as (val - source), since (source - val) is always |
| passed as (source + (-val)). */ |
| if (remainder == 0) |
| { |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, target, |
| gen_rtx (NEG, mode, source))); |
| return 1; |
| } |
| if (const_ok_for_arm (val)) |
| { |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, target, |
| gen_rtx (MINUS, mode, GEN_INT (val), source))); |
| return 1; |
| } |
| can_negate = 1; |
| |
| break; |
| |
| default: |
| abort (); |
| } |
| |
  /* If we can do it in one insn, get out quickly.  */
| if (const_ok_for_arm (val) |
| || (can_negate_initial && const_ok_for_arm (-val)) |
| || (can_invert && const_ok_for_arm (~val))) |
| { |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, target, |
| (source ? gen_rtx (code, mode, source, |
| GEN_INT (val)) |
| : GEN_INT (val)))); |
| return 1; |
| } |
| |
| |
| /* Calculate a few attributes that may be useful for specific |
| optimizations. */ |
| |
| for (i = 31; i >= 0; i--) |
| { |
| if ((remainder & (1 << i)) == 0) |
| clear_sign_bit_copies++; |
| else |
| break; |
| } |
| |
| for (i = 31; i >= 0; i--) |
| { |
| if ((remainder & (1 << i)) != 0) |
| set_sign_bit_copies++; |
| else |
| break; |
| } |
| |
| for (i = 0; i <= 31; i++) |
| { |
| if ((remainder & (1 << i)) == 0) |
| clear_zero_bit_copies++; |
| else |
| break; |
| } |
| |
| for (i = 0; i <= 31; i++) |
| { |
| if ((remainder & (1 << i)) != 0) |
| set_zero_bit_copies++; |
| else |
| break; |
| } |
| |
| switch (code) |
| { |
| case SET: |
| /* See if we can do this by sign_extending a constant that is known |
	 to be negative.  This is a good way of doing it, since the shift
| may well merge into a subsequent insn. */ |
| if (set_sign_bit_copies > 1) |
| { |
| if (const_ok_for_arm |
| (temp1 = ARM_SIGN_EXTEND (remainder |
| << (set_sign_bit_copies - 1)))) |
| { |
| if (generate) |
| { |
| new_src = subtargets ? gen_reg_rtx (mode) : target; |
| emit_insn (gen_rtx (SET, VOIDmode, new_src, |
| GEN_INT (temp1))); |
| emit_insn (gen_ashrsi3 (target, new_src, |
| GEN_INT (set_sign_bit_copies - 1))); |
| } |
| return 2; |
| } |
	  /* For an inverted constant, we will need to set the low bits;
	     these will be shifted out of harm's way.  */
| temp1 |= (1 << (set_sign_bit_copies - 1)) - 1; |
| if (const_ok_for_arm (~temp1)) |
| { |
| if (generate) |
| { |
| new_src = subtargets ? gen_reg_rtx (mode) : target; |
| emit_insn (gen_rtx (SET, VOIDmode, new_src, |
| GEN_INT (temp1))); |
| emit_insn (gen_ashrsi3 (target, new_src, |
| GEN_INT (set_sign_bit_copies - 1))); |
| } |
| return 2; |
| } |
| } |
| |
| /* See if we can generate this by setting the bottom (or the top) |
| 16 bits, and then shifting these into the other half of the |
| word. We only look for the simplest cases, to do more would cost |
| too much. Be careful, however, not to generate this when the |
| alternative would take fewer insns. */ |
| if (val & 0xffff0000) |
| { |
| temp1 = remainder & 0xffff0000; |
| temp2 = remainder & 0x0000ffff; |
| |
| /* Overlaps outside this range are best done using other methods. */ |
| for (i = 9; i < 24; i++) |
| { |
| if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder) |
| && ! const_ok_for_arm (temp2)) |
| { |
| insns = arm_gen_constant (code, mode, temp2, |
| new_src = (subtargets |
| ? gen_reg_rtx (mode) |
| : target), |
| source, subtargets, generate); |
| source = new_src; |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, target, |
| gen_rtx (IOR, mode, |
| gen_rtx (ASHIFT, mode, source, |
| GEN_INT (i)), |
| source))); |
| return insns + 1; |
| } |
| } |
| |
| /* Don't duplicate cases already considered. */ |
| for (i = 17; i < 24; i++) |
| { |
| if (((temp1 | (temp1 >> i)) == remainder) |
| && ! const_ok_for_arm (temp1)) |
| { |
| insns = arm_gen_constant (code, mode, temp1, |
| new_src = (subtargets |
| ? gen_reg_rtx (mode) |
| : target), |
| source, subtargets, generate); |
| source = new_src; |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, target, |
| gen_rtx (IOR, mode, |
| gen_rtx (LSHIFTRT, mode, |
| source, GEN_INT (i)), |
| source))); |
| return insns + 1; |
| } |
| } |
| } |
| break; |
| |
| case IOR: |
| case XOR: |
| /* If we have IOR or XOR, and the constant can be loaded in a |
| single instruction, and we can find a temporary to put it in, |
| then this can be done in two instructions instead of 3-4. */ |
| if (subtargets |
| || (reload_completed && ! reg_mentioned_p (target, source))) |
| { |
| if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val))) |
| { |
| if (generate) |
| { |
| rtx sub = subtargets ? gen_reg_rtx (mode) : target; |
| |
| emit_insn (gen_rtx (SET, VOIDmode, sub, GEN_INT (val))); |
| emit_insn (gen_rtx (SET, VOIDmode, target, |
| gen_rtx (code, mode, source, sub))); |
| } |
| return 2; |
| } |
| } |
| |
| if (code == XOR) |
| break; |
| |
| if (set_sign_bit_copies > 8 |
| && (val & (-1 << (32 - set_sign_bit_copies))) == val) |
| { |
| if (generate) |
| { |
| rtx sub = subtargets ? gen_reg_rtx (mode) : target; |
| rtx shift = GEN_INT (set_sign_bit_copies); |
| |
| emit_insn (gen_rtx (SET, VOIDmode, sub, |
| gen_rtx (NOT, mode, |
| gen_rtx (ASHIFT, mode, source, |
| shift)))); |
| emit_insn (gen_rtx (SET, VOIDmode, target, |
| gen_rtx (NOT, mode, |
| gen_rtx (LSHIFTRT, mode, sub, |
| shift)))); |
| } |
| return 2; |
| } |
| |
| if (set_zero_bit_copies > 8 |
| && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder) |
| { |
| if (generate) |
| { |
| rtx sub = subtargets ? gen_reg_rtx (mode) : target; |
| rtx shift = GEN_INT (set_zero_bit_copies); |
| |
| emit_insn (gen_rtx (SET, VOIDmode, sub, |
| gen_rtx (NOT, mode, |
| gen_rtx (LSHIFTRT, mode, source, |
| shift)))); |
| emit_insn (gen_rtx (SET, VOIDmode, target, |
| gen_rtx (NOT, mode, |
| gen_rtx (ASHIFT, mode, sub, |
| shift)))); |
| } |
| return 2; |
| } |
| |
| if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~ val))) |
| { |
| if (generate) |
| { |
| rtx sub = subtargets ? gen_reg_rtx (mode) : target; |
| emit_insn (gen_rtx (SET, VOIDmode, sub, |
| gen_rtx (NOT, mode, source))); |
| source = sub; |
| if (subtargets) |
| sub = gen_reg_rtx (mode); |
| emit_insn (gen_rtx (SET, VOIDmode, sub, |
| gen_rtx (AND, mode, source, |
| GEN_INT (temp1)))); |
| emit_insn (gen_rtx (SET, VOIDmode, target, |
| gen_rtx (NOT, mode, sub))); |
| } |
| return 3; |
| } |
| break; |
| |
| case AND: |
    /* See if two shifts will do 2 or more insns' worth of work.  */
| if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24) |
| { |
| HOST_WIDE_INT shift_mask = ((0xffffffff |
| << (32 - clear_sign_bit_copies)) |
| & 0xffffffff); |
| rtx new_source; |
| rtx shift; |
| |
| if ((remainder | shift_mask) != 0xffffffff) |
| { |
| if (generate) |
| { |
| new_source = subtargets ? gen_reg_rtx (mode) : target; |
| insns = arm_gen_constant (AND, mode, remainder | shift_mask, |
| new_source, source, subtargets, 1); |
| source = new_source; |
| } |
| else |
| insns = arm_gen_constant (AND, mode, remainder | shift_mask, |
| new_source, source, subtargets, 0); |
| } |
| |
| if (generate) |
| { |
| shift = GEN_INT (clear_sign_bit_copies); |
| new_source = subtargets ? gen_reg_rtx (mode) : target; |
| emit_insn (gen_ashlsi3 (new_source, source, shift)); |
| emit_insn (gen_lshrsi3 (target, new_source, shift)); |
| } |
| |
| return insns + 2; |
| } |
| |
| if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24) |
| { |
| HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1; |
| rtx new_source; |
| rtx shift; |
| |
| if ((remainder | shift_mask) != 0xffffffff) |
| { |
| if (generate) |
| { |
| new_source = subtargets ? gen_reg_rtx (mode) : target; |
| insns = arm_gen_constant (AND, mode, remainder | shift_mask, |
| new_source, source, subtargets, 1); |
| source = new_source; |
| } |
| else |
| insns = arm_gen_constant (AND, mode, remainder | shift_mask, |
| new_source, source, subtargets, 0); |
| } |
| |
| if (generate) |
| { |
| shift = GEN_INT (clear_zero_bit_copies); |
| new_source = subtargets ? gen_reg_rtx (mode) : target; |
| emit_insn (gen_lshrsi3 (new_source, source, shift)); |
| emit_insn (gen_ashlsi3 (target, new_source, shift)); |
| } |
| |
| return insns + 2; |
| } |
| |
| break; |
| |
| default: |
| break; |
| } |
| |
| for (i = 0; i < 32; i++) |
| if (remainder & (1 << i)) |
| num_bits_set++; |
| |
| if (code == AND || (can_invert && num_bits_set > 16)) |
| remainder = (~remainder) & 0xffffffff; |
| else if (code == PLUS && num_bits_set > 16) |
| remainder = (-remainder) & 0xffffffff; |
| else |
| { |
| can_invert = 0; |
| can_negate = 0; |
| } |
| |
| /* Now try and find a way of doing the job in either two or three |
| instructions. |
     We start by looking for the largest block of zeros that is aligned on
     a 2-bit boundary; we then fill up the temps, wrapping around to the
| top of the word when we drop off the bottom. |
| In the worst case this code should produce no more than four insns. */ |
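  /* (The 2-bit alignment matches the even rotations available to ARM
     immediates.  For example, 0xF00000FF spans twelve bits and so is not a
     valid immediate, but this loop emits it as a MOV of 0xFF followed by
     an ADD of 0xF0000000, each of which is encodable.)  */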
| { |
| int best_start = 0; |
| int best_consecutive_zeros = 0; |
| |
| for (i = 0; i < 32; i += 2) |
| { |
| int consecutive_zeros = 0; |
| |
| if (! (remainder & (3 << i))) |
| { |
| while ((i < 32) && ! (remainder & (3 << i))) |
| { |
| consecutive_zeros += 2; |
| i += 2; |
| } |
| if (consecutive_zeros > best_consecutive_zeros) |
| { |
| best_consecutive_zeros = consecutive_zeros; |
| best_start = i - consecutive_zeros; |
| } |
| i -= 2; |
| } |
| } |
| |
| /* Now start emitting the insns, starting with the one with the highest |
| bit set: we do this so that the smallest number will be emitted last; |
| this is more likely to be combinable with addressing insns. */ |
| i = best_start; |
| do |
| { |
| int end; |
| |
| if (i <= 0) |
| i += 32; |
| if (remainder & (3 << (i - 2))) |
| { |
| end = i - 8; |
| if (end < 0) |
| end += 32; |
| temp1 = remainder & ((0x0ff << end) |
| | ((i < end) ? (0xff >> (32 - end)) : 0)); |
| remainder &= ~temp1; |
| |
| if (code == SET) |
| { |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, |
| new_src = (subtargets |
| ? gen_reg_rtx (mode) |
| : target), |
| GEN_INT (can_invert ? ~temp1 : temp1))); |
| can_invert = 0; |
| code = PLUS; |
| } |
| else if (code == MINUS) |
| { |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, |
| new_src = (subtargets |
| ? gen_reg_rtx (mode) |
| : target), |
| gen_rtx (code, mode, GEN_INT (temp1), |
| source))); |
| code = PLUS; |
| } |
| else |
| { |
| if (generate) |
| emit_insn (gen_rtx (SET, VOIDmode, |
| new_src = (remainder |
| ? (subtargets |
| ? gen_reg_rtx (mode) |
| : target) |
| : target), |
| gen_rtx (code, mode, source, |
| GEN_INT (can_invert ? ~temp1 |
| : (can_negate |
| ? -temp1 |
| : temp1))))); |
| } |
| |
| insns++; |
| source = new_src; |
| i -= 6; |
| } |
| i -= 2; |
| } while (remainder); |
| } |
| return insns; |
| } |
| |
| /* Canonicalize a comparison so that we are more likely to recognize it. |
| This can be done for a few constant compares, where we can make the |
| immediate value easier to load. */ |
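/* For example, (GT X 0x3ff) would need 0x3ff as an immediate, which is not
   encodable, but rewriting it as (GE X 0x400) needs only the encodable
   constant 0x400.  */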
| enum rtx_code |
| arm_canonicalize_comparison (code, op1) |
| enum rtx_code code; |
| rtx *op1; |
| { |
| HOST_WIDE_INT i = INTVAL (*op1); |
| |
| switch (code) |
| { |
| case EQ: |
| case NE: |
| return code; |
| |
| case GT: |
| case LE: |
      if (i != (((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
| && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1)))) |
| { |
| *op1 = GEN_INT (i+1); |
| return code == GT ? GE : LT; |
| } |
| break; |
| |
| case GE: |
| case LT: |
      if (i != ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
| && (const_ok_for_arm (i-1) || const_ok_for_arm (- (i-1)))) |
| { |
| *op1 = GEN_INT (i-1); |
| return code == GE ? GT : LE; |
| } |
| break; |
| |
| case GTU: |
| case LEU: |
| if (i != ~0 |
| && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1)))) |
| { |
| *op1 = GEN_INT (i + 1); |
| return code == GTU ? GEU : LTU; |
| } |
| break; |
| |
| case GEU: |
| case LTU: |
| if (i != 0 |
| && (const_ok_for_arm (i - 1) || const_ok_for_arm (- (i - 1)))) |
| { |
| *op1 = GEN_INT (i - 1); |
| return code == GEU ? GTU : LEU; |
| } |
| break; |
| |
| default: |
| abort (); |
| } |
| |
| return code; |
| } |
| |
| |
| /* Handle aggregates that are not laid out in a BLKmode element. |
| This is a sub-element of RETURN_IN_MEMORY. */ |
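/* (Under these rules `struct { int a : 8; int b : 24; }' comes back in a
   register, while `struct { char c; }' is returned in memory.)  */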
| int |
| arm_return_in_memory (type) |
| tree type; |
| { |
| if (TREE_CODE (type) == RECORD_TYPE) |
| { |
| tree field; |
| |
      /* For a struct, we can return in a register if every element is a
	 bit-field.  */
| for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) |
| if (TREE_CODE (field) != FIELD_DECL |
| || ! DECL_BIT_FIELD_TYPE (field)) |
| return 1; |
| |
| return 0; |
| } |
| else if (TREE_CODE (type) == UNION_TYPE) |
| { |
| tree field; |
| |
| /* Unions can be returned in registers if every element is |
| integral, or can be returned in an integer register. */ |
| for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) |
| { |
| if (TREE_CODE (field) != FIELD_DECL |
| || (AGGREGATE_TYPE_P (TREE_TYPE (field)) |
| && RETURN_IN_MEMORY (TREE_TYPE (field))) |
| || FLOAT_TYPE_P (TREE_TYPE (field))) |
| return 1; |
| } |
| return 0; |
| } |
| /* XXX Not sure what should be done for other aggregates, so put them in |
| memory. */ |
| return 1; |
| } |
| |
| int |
| legitimate_pic_operand_p (x) |
| rtx x; |
| { |
| if (CONSTANT_P (x) && flag_pic |
| && (GET_CODE (x) == SYMBOL_REF |
| || (GET_CODE (x) == CONST |
| && GET_CODE (XEXP (x, 0)) == PLUS |
| && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))) |
| return 0; |
| |
| return 1; |
| } |
| |
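/* Legitimize ORIG, a SYMBOL_REF, CONST or LABEL_REF address, for PIC code:
   symbols are loaded through an entry in the global offset table, using
   REG (or a fresh pseudo if REG is zero) as an intermediate.  */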
| rtx |
| legitimize_pic_address (orig, mode, reg) |
| rtx orig; |
| enum machine_mode mode; |
| rtx reg; |
| { |
| if (GET_CODE (orig) == SYMBOL_REF) |
| { |
| rtx pic_ref, address; |
| rtx insn; |
| int subregs = 0; |
| |
| if (reg == 0) |
| { |
| if (reload_in_progress || reload_completed) |
| abort (); |
| else |
| reg = gen_reg_rtx (Pmode); |
| |
| subregs = 1; |
| } |
| |
| #ifdef AOF_ASSEMBLER |
      /* The AOF assembler can generate relocations for these directly, and
	 understands that the PIC register has to be added into the
	 offset.  */
| insn = emit_insn (gen_pic_load_addr_based (reg, orig)); |
| #else |
| if (subregs) |
| address = gen_reg_rtx (Pmode); |
| else |
| address = reg; |
| |
| emit_insn (gen_pic_load_addr (address, orig)); |
| |
| pic_ref = gen_rtx (MEM, Pmode, |
| gen_rtx (PLUS, Pmode, pic_offset_table_rtx, address)); |
| RTX_UNCHANGING_P (pic_ref) = 1; |
| insn = emit_move_insn (reg, pic_ref); |
| #endif |
| current_function_uses_pic_offset_table = 1; |
      /* Put a REG_EQUAL note on this insn, so that it can be optimized
	 by the loop optimizer.  */
| REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL, orig, |
| REG_NOTES (insn)); |
| return reg; |
| } |
| else if (GET_CODE (orig) == CONST) |
| { |
| rtx base, offset; |
| |
| if (GET_CODE (XEXP (orig, 0)) == PLUS |
| && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx) |
| return orig; |
| |
| if (reg == 0) |
| { |
| if (reload_in_progress || reload_completed) |
| abort (); |
| else |
| reg = gen_reg_rtx (Pmode); |
| } |
| |
| if (GET_CODE (XEXP (orig, 0)) == PLUS) |
| { |
| base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg); |
| offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode, |
| base == reg ? 0 : reg); |
| } |
| else |
| abort (); |
| |
| if (GET_CODE (offset) == CONST_INT) |
| { |
	  /* The base register doesn't really matter; we only want to
| test the index for the appropriate mode. */ |
| GO_IF_LEGITIMATE_INDEX (mode, 0, offset, win); |
| |
| if (! reload_in_progress && ! reload_completed) |
| offset = force_reg (Pmode, offset); |
| else |
| abort (); |
| |
| win: |
| if (GET_CODE (offset) == CONST_INT) |
| return plus_constant_for_output (base, INTVAL (offset)); |
| } |
| |
| if (GET_MODE_SIZE (mode) > 4 |
| && (GET_MODE_CLASS (mode) == MODE_INT |
| || TARGET_SOFT_FLOAT)) |
| { |
| emit_insn (gen_addsi3 (reg, base, offset)); |
| return reg; |
| } |
| |
| return gen_rtx (PLUS, Pmode, base, offset); |
| } |
| else if (GET_CODE (orig) == LABEL_REF) |
| current_function_uses_pic_offset_table = 1; |
| |
| return orig; |
| } |
| |
| static rtx pic_rtx; |
| |
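/* Return nonzero if X is the special PIC constant built by
   arm_finalize_pic below.  */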
| int |
is_pic (x)
| rtx x; |
| { |
| if (x == pic_rtx) |
| return 1; |
| return 0; |
| } |
| |
| void |
| arm_finalize_pic () |
| { |
| #ifndef AOF_ASSEMBLER |
| rtx l1, pic_tmp, pic_tmp2, seq; |
| rtx global_offset_table; |
| |
| if (current_function_uses_pic_offset_table == 0) |
| return; |
| |
| if (! flag_pic) |
| abort (); |
| |
| start_sequence (); |
| l1 = gen_label_rtx (); |
| |
| global_offset_table = gen_rtx (SYMBOL_REF, Pmode, "_GLOBAL_OFFSET_TABLE_"); |
| /* The PC contains 'dot'+8, but the label L1 is on the next |
| instruction, so the offset is only 'dot'+4. */ |
| pic_tmp = gen_rtx (CONST, VOIDmode, |
| gen_rtx (PLUS, Pmode, |
| gen_rtx (LABEL_REF, VOIDmode, l1), |
| GEN_INT (4))); |
| pic_tmp2 = gen_rtx (CONST, VOIDmode, |
| gen_rtx (PLUS, Pmode, |
| global_offset_table, |
| pc_rtx)); |
| |
| pic_rtx = gen_rtx (CONST, Pmode, |
| gen_rtx (MINUS, Pmode, pic_tmp2, pic_tmp)); |
| |
| emit_insn (gen_pic_load_addr (pic_offset_table_rtx, pic_rtx)); |
  emit_jump_insn (gen_pic_add_dot_plus_eight (l1, pic_offset_table_rtx));
| emit_label (l1); |
| |
| seq = gen_sequence (); |
| end_sequence (); |
| emit_insn_after (seq, get_insns ()); |
| |
| /* Need to emit this whether or not we obey regdecls, |
| since setjmp/longjmp can cause life info to screw up. */ |
| emit_insn (gen_rtx (USE, VOIDmode, pic_offset_table_rtx)); |
| #endif /* AOF_ASSEMBLER */ |
| } |
| |
| #define REG_OR_SUBREG_REG(X) \ |
| (GET_CODE (X) == REG \ |
| || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG)) |
| |
| #define REG_OR_SUBREG_RTX(X) \ |
| (GET_CODE (X) == REG ? (X) : SUBREG_REG (X)) |
| |
| #define ARM_FRAME_RTX(X) \ |
| ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \ |
| || (X) == arg_pointer_rtx) |
| |
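/* Return a crude cost estimate for rtx X, whose code is CODE; OUTER_CODE
   is the code of the containing expression.  Cheap ALU operations return
   around 1, memory references around 10, and division 100; these values
   feed the RTX_COSTS macro.  */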
| int |
| arm_rtx_costs (x, code, outer_code) |
| rtx x; |
| enum rtx_code code, outer_code; |
| { |
| enum machine_mode mode = GET_MODE (x); |
| enum rtx_code subcode; |
| int extra_cost; |
| |
| switch (code) |
| { |
| case MEM: |
| /* Memory costs quite a lot for the first word, but subsequent words |
| load at the equivalent of a single insn each. */ |
| return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD) |
| + (CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0)); |
| |
| case DIV: |
| case MOD: |
| return 100; |
| |
| case ROTATE: |
| if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG) |
| return 4; |
| /* Fall through */ |
| case ROTATERT: |
| if (mode != SImode) |
| return 8; |
| /* Fall through */ |
| case ASHIFT: case LSHIFTRT: case ASHIFTRT: |
| if (mode == DImode) |
| return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8) |
| + ((GET_CODE (XEXP (x, 0)) == REG |
| || (GET_CODE (XEXP (x, 0)) == SUBREG |
| && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG)) |
| ? 0 : 8)); |
| return (1 + ((GET_CODE (XEXP (x, 0)) == REG |
| || (GET_CODE (XEXP (x, 0)) == SUBREG |
| && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG)) |
| ? 0 : 4) |
| + ((GET_CODE (XEXP (x, 1)) == REG |
| || (GET_CODE (XEXP (x, 1)) == SUBREG |
| && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG) |
| || (GET_CODE (XEXP (x, 1)) == CONST_INT)) |
| ? 0 : 4)); |
| |
| case MINUS: |
| if (mode == DImode) |
| return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8) |
| + ((REG_OR_SUBREG_REG (XEXP (x, 0)) |
| || (GET_CODE (XEXP (x, 0)) == CONST_INT |
| && const_ok_for_arm (INTVAL (XEXP (x, 0))))) |
| ? 0 : 8)); |
| |
| if (GET_MODE_CLASS (mode) == MODE_FLOAT) |
| return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1)) |
| || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE |
| && const_double_rtx_ok_for_fpu (XEXP (x, 1)))) |
| ? 0 : 8) |
| + ((REG_OR_SUBREG_REG (XEXP (x, 0)) |
| || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE |
| && const_double_rtx_ok_for_fpu (XEXP (x, 0)))) |
| ? 0 : 8)); |
| |
| if (((GET_CODE (XEXP (x, 0)) == CONST_INT |
| && const_ok_for_arm (INTVAL (XEXP (x, 0))) |
| && REG_OR_SUBREG_REG (XEXP (x, 1)))) |
| || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT |
| || subcode == ASHIFTRT || subcode == LSHIFTRT |
| || subcode == ROTATE || subcode == ROTATERT |
| || (subcode == MULT |
| && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT |
| && ((INTVAL (XEXP (XEXP (x, 1), 1)) & |
| (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0))) |
| && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0)) |
| && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1)) |
| || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT) |
| && REG_OR_SUBREG_REG (XEXP (x, 0)))) |
| return 1; |
| /* Fall through */ |
| |
| case PLUS: |
| if (GET_MODE_CLASS (mode) == MODE_FLOAT) |
| return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8) |
| + ((REG_OR_SUBREG_REG (XEXP (x, 1)) |
| || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE |
| && const_double_rtx_ok_for_fpu (XEXP (x, 1)))) |
| ? 0 : 8)); |
| |
| /* Fall through */ |
| case AND: case XOR: case IOR: |
| extra_cost = 0; |
| |
| /* Normally the frame registers will be spilt into reg+const during |
| reload, so it is a bad idea to combine them with other instructions, |
| since then they might not be moved outside of loops. As a compromise |
| we allow integration with ops that have a constant as their second |
| operand. */ |
| if ((REG_OR_SUBREG_REG (XEXP (x, 0)) |
| && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0))) |
| && GET_CODE (XEXP (x, 1)) != CONST_INT) |
	  || (REG_OR_SUBREG_REG (XEXP (x, 1))
	      && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
| extra_cost = 4; |
| |
| if (mode == DImode) |
| return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8) |
| + ((REG_OR_SUBREG_REG (XEXP (x, 1)) |
| || (GET_CODE (XEXP (x, 1)) == CONST_INT |
| && const_ok_for_op (INTVAL (XEXP (x, 1)), code, mode))) |
| ? 0 : 8)); |
| |
| if (REG_OR_SUBREG_REG (XEXP (x, 0))) |
| return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost) |
| + ((REG_OR_SUBREG_REG (XEXP (x, 1)) |
| || (GET_CODE (XEXP (x, 1)) == CONST_INT |
| && const_ok_for_op (INTVAL (XEXP (x, 1)), code, mode))) |
| ? 0 : 4)); |
| |
| else if (REG_OR_SUBREG_REG (XEXP (x, 1))) |
| return (1 + extra_cost |
| + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT |
| || subcode == LSHIFTRT || subcode == ASHIFTRT |
| || subcode == ROTATE || subcode == ROTATERT |
| || (subcode == MULT |
| && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT |
| && ((INTVAL (XEXP (XEXP (x, 0), 1)) & |
| (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)) |
| && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0))) |
| && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1))) |
| || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))) |
| ? 0 : 4)); |
| |
| return 8; |
| |
| case MULT: |
| /* There is no point basing this on the tuning, since it is always the |
| fast variant if it exists at all */ |
| if (arm_fast_multiply && mode == DImode |
| && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1))) |
| && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND |
| || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)) |
| return 8; |
| |
| if (GET_MODE_CLASS (mode) == MODE_FLOAT |
| || mode == DImode) |
| return 30; |
| |
| if (GET_CODE (XEXP (x, 1)) == CONST_INT) |
| { |
| unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1)) |
| & (unsigned HOST_WIDE_INT) 0xffffffff); |
| int add_cost = const_ok_for_arm (i) ? 4 : 8; |
| int j; |
| /* Tune as appropriate */ |
| int booth_unit_size = ((tune_flags & FL_FAST_MULT) ? 8 : 2); |
| |
| for (j = 0; i && j < 32; j += booth_unit_size) |
| { |
| i >>= booth_unit_size; |
| add_cost += 2; |
| } |
| |
| return add_cost; |
| } |
| |
| return (((tune_flags & FL_FAST_MULT) ? 8 : 30) |
| + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4) |
| + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4)); |
| |
| case TRUNCATE: |
| if (arm_fast_multiply && mode == SImode |
| && GET_CODE (XEXP (x, 0)) == LSHIFTRT |
| && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT |
| && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) |
| == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1))) |
| && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND |
| || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND)) |
| return 8; |
| return 99; |
| |
| case NEG: |
| if (GET_MODE_CLASS (mode) == MODE_FLOAT) |
| return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6); |
| /* Fall through */ |
| case NOT: |
| if (mode == DImode) |
| return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4); |
| |
| return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4); |
| |
| case IF_THEN_ELSE: |
| if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC) |
| return 14; |
| return 2; |
| |
| case COMPARE: |
| return 1; |
| |
| case ABS: |
| return 4 + (mode == DImode ? 4 : 0); |
| |
| case SIGN_EXTEND: |
| if (GET_MODE (XEXP (x, 0)) == QImode) |
| return (4 + (mode == DImode ? 4 : 0) |
| + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0)); |
| /* Fall through */ |
| case ZERO_EXTEND: |
| switch (GET_MODE (XEXP (x, 0))) |
| { |
| case QImode: |
| return (1 + (mode == DImode ? 4 : 0) |
| + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0)); |
| |
| case HImode: |
| return (4 + (mode == DImode ? 4 : 0) |
| + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0)); |
| |
| case SImode: |
| return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0)); |
| } |
| abort (); |
| |
| default: |
| return 99; |
| } |
| } |
| |
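/* Adjust COST, the scheduler's estimate of the cost of the dependence
   LINK between INSN and DEP.  Here the only adjustment is for a load
   that follows a store, using the rough cache model described in the
   body below.  */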
| int |
| arm_adjust_cost (insn, link, dep, cost) |
| rtx insn; |
| rtx link; |
| rtx dep; |
| int cost; |
| { |
| rtx i_pat, d_pat; |
| |
| if ((i_pat = single_set (insn)) != NULL |
| && GET_CODE (SET_SRC (i_pat)) == MEM |
| && (d_pat = single_set (dep)) != NULL |
| && GET_CODE (SET_DEST (d_pat)) == MEM) |
| { |
      /* This is a load after a store; there is no conflict if the load reads
	 from a cached area.  Assume that loads from the stack and from the
	 constant pool are cached, and that others will miss.  This is a
	 hack.  */
| |
| /* debug_rtx (insn); |
| debug_rtx (dep); |
| debug_rtx (link); |
| fprintf (stderr, "costs %d\n", cost); */ |
| |
| if (CONSTANT_POOL_ADDRESS_P (XEXP (SET_SRC (i_pat), 0)) |
| || reg_mentioned_p (stack_pointer_rtx, XEXP (SET_SRC (i_pat), 0)) |
| || reg_mentioned_p (frame_pointer_rtx, XEXP (SET_SRC (i_pat), 0)) |
| || reg_mentioned_p (hard_frame_pointer_rtx, |
| XEXP (SET_SRC (i_pat), 0))) |
| { |
| /* fprintf (stderr, "***** Now 1\n"); */ |
| return 1; |
| } |
| } |
| |
| return cost; |
| } |
| |
| /* This code has been fixed for cross compilation. */ |
| |
| static int fpa_consts_inited = 0; |
| |
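/* The eight constants that the FPA accepts as an immediate operand
   (in instructions such as MVF and ADF).  */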
| char *strings_fpa[8] = { |
| "0", "1", "2", "3", |
| "4", "5", "0.5", "10" |
| }; |
| |
| static REAL_VALUE_TYPE values_fpa[8]; |
| |
| static void |
| init_fpa_table () |
| { |
| int i; |
| REAL_VALUE_TYPE r; |
| |
| for (i = 0; i < 8; i++) |
| { |
| r = REAL_VALUE_ATOF (strings_fpa[i], DFmode); |
| values_fpa[i] = r; |
| } |
| |
| fpa_consts_inited = 1; |
| } |
| |
| /* Return TRUE if rtx X is a valid immediate FPU constant. */ |
| |
| int |
| const_double_rtx_ok_for_fpu (x) |
| rtx x; |
| { |
| REAL_VALUE_TYPE r; |
| int i; |
| |
| if (!fpa_consts_inited) |
| init_fpa_table (); |
| |
| REAL_VALUE_FROM_CONST_DOUBLE (r, x); |
| if (REAL_VALUE_MINUS_ZERO (r)) |
| return 0; |
| |
| for (i = 0; i < 8; i++) |
| if (REAL_VALUES_EQUAL (r, values_fpa[i])) |
| return 1; |
| |
| return 0; |
| } |
| |
/* Return TRUE if rtx X is a valid immediate FPU constant when negated.  */
| |
| int |
| neg_const_double_rtx_ok_for_fpu (x) |
| rtx x; |
| { |
| REAL_VALUE_TYPE r; |
| int i; |
| |
| if (!fpa_consts_inited) |
| init_fpa_table (); |
| |
| REAL_VALUE_FROM_CONST_DOUBLE (r, x); |
| r = REAL_VALUE_NEGATE (r); |
| if (REAL_VALUE_MINUS_ZERO (r)) |
| return 0; |
| |
| for (i = 0; i < 8; i++) |
| if (REAL_VALUES_EQUAL (r, values_fpa[i])) |
| return 1; |
| |
| return 0; |
| } |
| |
| /* Predicates for `match_operand' and `match_operator'. */ |
| |
| /* s_register_operand is the same as register_operand, but it doesn't accept |
| (SUBREG (MEM)...). |
| |
| This function exists because at the time it was put in it led to better |
| code. SUBREG(MEM) always needs a reload in the places where |
| s_register_operand is used, and this seemed to lead to excessive |
| reloading. */ |
| |
| int |
| s_register_operand (op, mode) |
| register rtx op; |
| enum machine_mode mode; |
| { |
| if (GET_MODE (op) != mode && mode != VOIDmode) |
| return 0; |
| |
| if (GET_CODE (op) == SUBREG) |
| op = SUBREG_REG (op); |
| |
| /* We don't consider registers whose class is NO_REGS |
| to be a register operand. */ |
| return (GET_CODE (op) == REG |
| && (REGNO (op) >= FIRST_PSEUDO_REGISTER |
| || REGNO_REG_CLASS (REGNO (op)) != NO_REGS)); |
| } |
| |
| /* Only accept reg, subreg(reg), const_int. */ |
| |
| int |
| reg_or_int_operand (op, mode) |
| register rtx op; |
| enum machine_mode mode; |
| { |
| if (GET_CODE (op) == CONST_INT) |
| return 1; |
| |
| if (GET_MODE (op) != mode && mode != VOIDmode) |
| return 0; |
| |
| if (GET_CODE (op) == SUBREG) |
| op = SUBREG_REG (op); |
| |
| /* We don't consider registers whose class is NO_REGS |
| to be a register operand. */ |
| return (GET_CODE (op) == REG |
| && (REGNO (op) >= FIRST_PSEUDO_REGISTER |
| || REGNO_REG_CLASS (REGNO (op)) != NO_REGS)); |
| } |
| |
| /* Return 1 if OP is an item in memory, given that we are in reload. */ |
| |
| int |
| reload_memory_operand (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| int regno = true_regnum (op); |
| |
| return (! CONSTANT_P (op) |
| && (regno == -1 |
| || (GET_CODE (op) == REG |
| && REGNO (op) >= FIRST_PSEUDO_REGISTER))); |
| } |
| |
| /* Return TRUE for valid operands for the rhs of an ARM instruction. */ |
| |
| int |
| arm_rhs_operand (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| return (s_register_operand (op, mode) |
| || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))); |
| } |
| |
/* Return TRUE for valid operands for the rhs of an ARM instruction,
   or a load.  */
| |
| int |
| arm_rhsm_operand (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| return (s_register_operand (op, mode) |
| || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))) |
| || memory_operand (op, mode)); |
| } |
| |
/* Return TRUE for valid operands for the rhs of an ARM instruction, or a
   constant that is valid when negated.  */
| |
| int |
| arm_add_operand (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| return (s_register_operand (op, mode) |
| || (GET_CODE (op) == CONST_INT |
| && (const_ok_for_arm (INTVAL (op)) |
| || const_ok_for_arm (-INTVAL (op))))); |
| } |
| |
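/* Return TRUE for valid operands for the rhs of an ARM instruction, or a
   constant that is valid when inverted (and so usable via BIC/MVN).  */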
| int |
| arm_not_operand (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| return (s_register_operand (op, mode) |
| || (GET_CODE (op) == CONST_INT |
| && (const_ok_for_arm (INTVAL (op)) |
| || const_ok_for_arm (~INTVAL (op))))); |
| } |
| |
| /* Return TRUE if the operand is a memory reference which contains an |
| offsettable address. */ |
| int |
| offsettable_memory_operand (op, mode) |
| register rtx op; |
| enum machine_mode mode; |
| { |
| if (mode == VOIDmode) |
| mode = GET_MODE (op); |
| |
| return (mode == GET_MODE (op) |
| && GET_CODE (op) == MEM |
| && offsettable_address_p (reload_completed | reload_in_progress, |
| mode, XEXP (op, 0))); |
| } |
| |
| /* Return TRUE if the operand is a memory reference which is, or can be |
| made word aligned by adjusting the offset. */ |
| int |
| alignable_memory_operand (op, mode) |
| register rtx op; |
| enum machine_mode mode; |
| { |
| rtx reg; |
| |
| if (mode == VOIDmode) |
| mode = GET_MODE (op); |
| |
| if (mode != GET_MODE (op) || GET_CODE (op) != MEM) |
| return 0; |
| |
| op = XEXP (op, 0); |
| |
| return ((GET_CODE (reg = op) == REG |
| || (GET_CODE (op) == SUBREG |
| && GET_CODE (reg = SUBREG_REG (op)) == REG) |
| || (GET_CODE (op) == PLUS |
| && GET_CODE (XEXP (op, 1)) == CONST_INT |
| && (GET_CODE (reg = XEXP (op, 0)) == REG |
| || (GET_CODE (XEXP (op, 0)) == SUBREG |
| && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG)))) |
| && REGNO_POINTER_ALIGN (REGNO (reg)) >= 4); |
| } |
| |
| /* Similar to s_register_operand, but does not allow hard integer |
| registers. */ |
| int |
| f_register_operand (op, mode) |
| register rtx op; |
| enum machine_mode mode; |
| { |
| if (GET_MODE (op) != mode && mode != VOIDmode) |
| return 0; |
| |
| if (GET_CODE (op) == SUBREG) |
| op = SUBREG_REG (op); |
| |
| /* We don't consider registers whose class is NO_REGS |
| to be a register operand. */ |
| return (GET_CODE (op) == REG |
| && (REGNO (op) >= FIRST_PSEUDO_REGISTER |
| || REGNO_REG_CLASS (REGNO (op)) == FPU_REGS)); |
| } |
| |
| /* Return TRUE for valid operands for the rhs of an FPU instruction. */ |
| |
| int |
| fpu_rhs_operand (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| if (s_register_operand (op, mode)) |
| return TRUE; |
| else if (GET_CODE (op) == CONST_DOUBLE) |
| return (const_double_rtx_ok_for_fpu (op)); |
| |
| return FALSE; |
| } |
| |
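/* Return TRUE for valid operands for the rhs of an FPU add instruction:
   a register, or a CONST_DOUBLE that is valid either as-is or when
   negated.  */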
| int |
| fpu_add_operand (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| if (s_register_operand (op, mode)) |
| return TRUE; |
| else if (GET_CODE (op) == CONST_DOUBLE) |
| return (const_double_rtx_ok_for_fpu (op) |
| || neg_const_double_rtx_ok_for_fpu (op)); |
| |
| return FALSE; |
| } |
| |
| /* Return nonzero if OP is a constant power of two. */ |
| |
| int |
| power_of_two_operand (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| if (GET_CODE (op) == CONST_INT) |
| { |
      HOST_WIDE_INT value = INTVAL (op);
| return value != 0 && (value & (value - 1)) == 0; |
| } |
| return FALSE; |
| } |
| |
| /* Return TRUE for a valid operand of a DImode operation. |
| Either: REG, CONST_DOUBLE or MEM(DImode_address). |
| Note that this disallows MEM(REG+REG), but allows |
| MEM(PRE/POST_INC/DEC(REG)). */ |
| |
| int |
| di_operand (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| if (s_register_operand (op, mode)) |
| return TRUE; |
| |
| switch (GET_CODE (op)) |
| { |
| case CONST_DOUBLE: |
| case CONST_INT: |
| return TRUE; |
| |
| case MEM: |
| return memory_address_p (DImode, XEXP (op, 0)); |
| |
| default: |
| return FALSE; |
| } |
| } |
| |
| /* Return TRUE for a valid operand of a DFmode operation when -msoft-float. |
| Either: REG, CONST_DOUBLE or MEM(DImode_address). |
| Note that this disallows MEM(REG+REG), but allows |
| MEM(PRE/POST_INC/DEC(REG)). */ |
| |
| int |
| soft_df_operand (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| if (s_register_operand (op, mode)) |
| return TRUE; |
| |
| switch (GET_CODE (op)) |
| { |
| case CONST_DOUBLE: |
| return TRUE; |
| |
| case MEM: |
| return memory_address_p (DFmode, XEXP (op, 0)); |
| |
| default: |
| return FALSE; |
| } |
| } |
| |
| /* Return TRUE for valid index operands. */ |
| |
| int |
| index_operand (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
  return (s_register_operand (op, mode)
| || (immediate_operand (op, mode) |
| && INTVAL (op) < 4096 && INTVAL (op) > -4096)); |
| } |
| |
| /* Return TRUE for valid shifts by a constant. This also accepts any |
| power of two on the (somewhat overly relaxed) assumption that the |
| shift operator in this case was a mult. */ |
| |
| int |
| const_shift_operand (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| return (power_of_two_operand (op, mode) |
| || (immediate_operand (op, mode) |
| && (INTVAL (op) < 32 && INTVAL (op) > 0))); |
| } |
| |
| /* Return TRUE for arithmetic operators which can be combined with a multiply |
| (shift). */ |
| |
| int |
| shiftable_operator (x, mode) |
| rtx x; |
| enum machine_mode mode; |
| { |
| if (GET_MODE (x) != mode) |
| return FALSE; |
| else |
| { |
| enum rtx_code code = GET_CODE (x); |
| |
| return (code == PLUS || code == MINUS |
| || code == IOR || code == XOR || code == AND); |
| } |
| } |
| |
| /* Return TRUE for shift operators. */ |
| |
| int |
| shift_operator (x, mode) |
| rtx x; |
| enum machine_mode mode; |
| { |
| if (GET_MODE (x) != mode) |
| return FALSE; |
| else |
| { |
| enum rtx_code code = GET_CODE (x); |
| |
| if (code == MULT) |
	return power_of_two_operand (XEXP (x, 1), mode);
| |
| return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT |
| || code == ROTATERT); |
| } |
| } |
| |
/* Return TRUE if X is an EQ or NE comparison.  */

int
equality_operator (x, mode)
| rtx x; |
| enum machine_mode mode; |
| { |
| return GET_CODE (x) == EQ || GET_CODE (x) == NE; |
| } |
| |
| /* Return TRUE for SMIN SMAX UMIN UMAX operators. */ |
| |
| int |
| minmax_operator (x, mode) |
| rtx x; |
| enum machine_mode mode; |
| { |
| enum rtx_code code = GET_CODE (x); |
| |
| if (GET_MODE (x) != mode) |
| return FALSE; |
| |
| return code == SMIN || code == SMAX || code == UMIN || code == UMAX; |
| } |
| |
/* Return TRUE if this is the condition code register; if we aren't given
   a mode, accept any class CCmode register.  */
| |
| int |
| cc_register (x, mode) |
| rtx x; |
| enum machine_mode mode; |
| { |
| if (mode == VOIDmode) |
| { |
| mode = GET_MODE (x); |
| if (GET_MODE_CLASS (mode) != MODE_CC) |
| return FALSE; |
| } |
| |
| if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24) |
| return TRUE; |
| |
| return FALSE; |
| } |
| |
/* Return TRUE if this is the condition code register; if we aren't given
   a mode, accept any class CCmode register which indicates a dominance
   expression.  */
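/* (The dominance CC modes, CC_DNEmode and friends, are produced by
   select_dominance_cc_mode when a pair of comparisons is combined into a
   compare followed by a conditional compare.)  */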
| |
| int |
| dominant_cc_register (x, mode) |
| rtx x; |
| enum machine_mode mode; |
| { |
| if (mode == VOIDmode) |
| { |
| mode = GET_MODE (x); |
| if (GET_MODE_CLASS (mode) != MODE_CC) |
| return FALSE; |
| } |
| |
| if (mode != CC_DNEmode && mode != CC_DEQmode |
| && mode != CC_DLEmode && mode != CC_DLTmode |
| && mode != CC_DGEmode && mode != CC_DGTmode |
| && mode != CC_DLEUmode && mode != CC_DLTUmode |
| && mode != CC_DGEUmode && mode != CC_DGTUmode) |
| return FALSE; |
| |
| if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24) |
| return TRUE; |
| |
| return FALSE; |
| } |
| |
| /* Return TRUE if X references a SYMBOL_REF. */ |
| int |
| symbol_mentioned_p (x) |
| rtx x; |
| { |
| register char *fmt; |
| register int i; |
| |
| if (GET_CODE (x) == SYMBOL_REF) |
| return 1; |
| |
| fmt = GET_RTX_FORMAT (GET_CODE (x)); |
| for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) |
| { |
| if (fmt[i] == 'E') |
| { |
| register int j; |
| |
| for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
| if (symbol_mentioned_p (XVECEXP (x, i, j))) |
| return 1; |
| } |
| else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i))) |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
| /* Return TRUE if X references a LABEL_REF. */ |
| int |
| label_mentioned_p (x) |
| rtx x; |
| { |
| register char *fmt; |
| register int i; |
| |
| if (GET_CODE (x) == LABEL_REF) |
| return 1; |
| |
| fmt = GET_RTX_FORMAT (GET_CODE (x)); |
| for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) |
| { |
| if (fmt[i] == 'E') |
| { |
| register int j; |
| |
| for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
| if (label_mentioned_p (XVECEXP (x, i, j))) |
| return 1; |
| } |
| else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i))) |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
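/* Return the comparison code for a min/max rtx X: the code C such that
   X is equivalent to choosing op0 when (C op0 op1) holds, else op1.  */
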
| enum rtx_code |
| minmax_code (x) |
| rtx x; |
| { |
| enum rtx_code code = GET_CODE (x); |
| |
| if (code == SMAX) |
| return GE; |
| else if (code == SMIN) |
| return LE; |
| else if (code == UMIN) |
| return LEU; |
| else if (code == UMAX) |
| return GEU; |
| |
| abort (); |
| } |
| |
/* Return 1 if the two memory locations are adjacent, i.e. exactly
   4 bytes apart (in either order).  */
| |
| int |
| adjacent_mem_locations (a, b) |
| rtx a, b; |
| { |
| int val0 = 0, val1 = 0; |
| int reg0, reg1; |
| |
| if ((GET_CODE (XEXP (a, 0)) == REG |
| || (GET_CODE (XEXP (a, 0)) == PLUS |
| && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT)) |
| && (GET_CODE (XEXP (b, 0)) == REG |
| || (GET_CODE (XEXP (b, 0)) == PLUS |
| && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT))) |
| { |
| if (GET_CODE (XEXP (a, 0)) == PLUS) |
| { |
| reg0 = REGNO (XEXP (XEXP (a, 0), 0)); |
| val0 = INTVAL (XEXP (XEXP (a, 0), 1)); |
| } |
| else |
| reg0 = REGNO (XEXP (a, 0)); |
| if (GET_CODE (XEXP (b, 0)) == PLUS) |
| { |
| reg1 = REGNO (XEXP (XEXP (b, 0), 0)); |
| val1 = INTVAL (XEXP (XEXP (b, 0), 1)); |
| } |
| else |
| reg1 = REGNO (XEXP (b, 0)); |
| return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4); |
| } |
| return 0; |
| } |
| |
/* Return 1 if OP is a load multiple operation.  It is known to be a
   PARALLEL and the first section will be tested.  */
| |
| int |
| load_multiple_operation (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| HOST_WIDE_INT count = XVECLEN (op, 0); |
| int dest_regno; |
| rtx src_addr; |
| HOST_WIDE_INT i = 1, base = 0; |
| rtx elt; |
| |
| if (count <= 1 |
| || GET_CODE (XVECEXP (op, 0, 0)) != SET) |
| return 0; |
| |
| /* Check to see if this might be a write-back */ |
| if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS) |
| { |
| i++; |
| base = 1; |
| |
| /* Now check it more carefully */ |
| if (GET_CODE (SET_DEST (elt)) != REG |
| || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG |
| || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt)) |
| || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT |
| || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4 |
| || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER |
| || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG |
| || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0)) |
| != REGNO (SET_DEST (elt))) |
| return 0; |
| |
| count--; |
| } |
| |
| /* Perform a quick check so we don't blow up below. */ |
| if (count <= i |
| || GET_CODE (XVECEXP (op, 0, i - 1)) != SET |
| || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG |
| || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM) |
| return 0; |
| |
| dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1))); |
| src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0); |
| |
| for (; i < count; i++) |
| { |
| rtx elt = XVECEXP (op, 0, i); |
| |
| if (GET_CODE (elt) != SET |
| || GET_CODE (SET_DEST (elt)) != REG |
| || GET_MODE (SET_DEST (elt)) != SImode |
| || REGNO (SET_DEST (elt)) != dest_regno + i - base |
| || GET_CODE (SET_SRC (elt)) != MEM |
| || GET_MODE (SET_SRC (elt)) != SImode |
| || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS |
| || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr) |
| || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT |
| || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4) |
| return 0; |
| } |
| |
| return 1; |
| } |
| |
/* Return 1 if OP is a store multiple operation.  It is known to be a
   PARALLEL and the first section will be tested.  */
| |
| int |
| store_multiple_operation (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| HOST_WIDE_INT count = XVECLEN (op, 0); |
| int src_regno; |
| rtx dest_addr; |
| HOST_WIDE_INT i = 1, base = 0; |
| rtx elt; |
| |
| if (count <= 1 |
| || GET_CODE (XVECEXP (op, 0, 0)) != SET) |
| return 0; |
| |
| /* Check to see if this might be a write-back */ |
| if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS) |
| { |
| i++; |
| base = 1; |
| |
| /* Now check it more carefully */ |
| if (GET_CODE (SET_DEST (elt)) != REG |
| || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG |
| || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt)) |
| || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT |
| || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4 |
| || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER |
| || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG |
| || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0)) |
| != REGNO (SET_DEST (elt))) |
| return 0; |
| |
| count--; |
| } |
| |
| /* Perform a quick check so we don't blow up below. */ |
| if (count <= i |
| || GET_CODE (XVECEXP (op, 0, i - 1)) != SET |
| || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM |
| || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG) |
| return 0; |
| |
| src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1))); |
| dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0); |
| |
| for (; i < count; i++) |
| { |
| elt = XVECEXP (op, 0, i); |
| |
| if (GET_CODE (elt) != SET |
| || GET_CODE (SET_SRC (elt)) != REG |
| || GET_MODE (SET_SRC (elt)) != SImode |
| || REGNO (SET_SRC (elt)) != src_regno + i - base |
| || GET_CODE (SET_DEST (elt)) != MEM |
| || GET_MODE (SET_DEST (elt)) != SImode |
| || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS |
| || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr) |
| || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT |
| || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4) |
| return 0; |
| } |
| |
| return 1; |
| } |
| |
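/* Examine a set of NOPS register loads to see if they can be combined
   into a load multiple.  Return 1 (ldmia), 2 (ldmib), 3 (ldmda),
   4 (ldmdb), 5 (an add or sub of the base register is needed first), or
   0 if no load multiple is possible.  If BASE is non-null, fill in REGS
   with the sorted register list, BASE with the base register and
   LOAD_OFFSET with the lowest memory offset.  */
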
| int |
| load_multiple_sequence (operands, nops, regs, base, load_offset) |
| rtx *operands; |
| int nops; |
| int *regs; |
| int *base; |
| HOST_WIDE_INT *load_offset; |
| { |
| int unsorted_regs[4]; |
| HOST_WIDE_INT unsorted_offsets[4]; |
| int order[4]; |
| int base_reg; |
| int i; |
| |
| /* Can only handle 2, 3, or 4 insns at present, though could be easily |
| extended if required. */ |
| if (nops < 2 || nops > 4) |
| abort (); |
| |
  /* Loop over the operands and check that the memory references are
     suitable (i.e. immediate offsets from the same base register).  At
     the same time, extract the target registers and the memory
     offsets.  */
| for (i = 0; i < nops; i++) |
| { |
| rtx reg; |
| rtx offset; |
| |
| /* Convert a subreg of a mem into the mem itself. */ |
| if (GET_CODE (operands[nops + i]) == SUBREG) |
	operands[nops + i] = alter_subreg (operands[nops + i]);
| |
| if (GET_CODE (operands[nops + i]) != MEM) |
| abort (); |
| |
| /* Don't reorder volatile memory references; it doesn't seem worth |
| looking for the case where the order is ok anyway. */ |
| if (MEM_VOLATILE_P (operands[nops + i])) |
| return 0; |
| |
| offset = const0_rtx; |
| |
| if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG |
| || (GET_CODE (reg) == SUBREG |
| && GET_CODE (reg = SUBREG_REG (reg)) == REG)) |
| || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS |
| && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0)) |
| == REG) |
| || (GET_CODE (reg) == SUBREG |
| && GET_CODE (reg = SUBREG_REG (reg)) == REG)) |
| && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1)) |
| == CONST_INT))) |
| { |
| if (i == 0) |
| { |
	      base_reg = REGNO (reg);
| unsorted_regs[0] = (GET_CODE (operands[i]) == REG |
| ? REGNO (operands[i]) |
| : REGNO (SUBREG_REG (operands[i]))); |
| order[0] = 0; |
| } |
| else |
| { |
| if (base_reg != REGNO (reg)) |
| /* Not addressed from the same base register. */ |
| return 0; |
| |
| unsorted_regs[i] = (GET_CODE (operands[i]) == REG |
| ? REGNO (operands[i]) |
| : REGNO (SUBREG_REG (operands[i]))); |
| if (unsorted_regs[i] < unsorted_regs[order[0]]) |
| order[0] = i; |
| } |
| |
| /* If it isn't an integer register, or if it overwrites the |
| base register but isn't the last insn in the list, then |
| we can't do this. */ |
| if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14 |
| || (i != nops - 1 && unsorted_regs[i] == base_reg)) |
| return 0; |
| |
| unsorted_offsets[i] = INTVAL (offset); |
| } |
| else |
| /* Not a suitable memory address. */ |
| return 0; |
| } |
| |
| /* All the useful information has now been extracted from the |
| operands into unsorted_regs and unsorted_offsets; additionally, |
| order[0] has been set to the lowest numbered register in the |
| list. Sort the registers into order, and check that the memory |
| offsets are ascending and adjacent. */ |
| |
| for (i = 1; i < nops; i++) |
| { |
| int j; |
| |
| order[i] = order[i - 1]; |
| for (j = 0; j < nops; j++) |
| if (unsorted_regs[j] > unsorted_regs[order[i - 1]] |
| && (order[i] == order[i - 1] |
| || unsorted_regs[j] < unsorted_regs[order[i]])) |
| order[i] = j; |
| |
      /* Have we found a suitable register?  If not, one must be used more
	 than once.  */
      if (order[i] == order[i - 1])
	return 0;

      /* Are the memory offsets adjacent and ascending?  */
      if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
| return 0; |
| } |
| |
| if (base) |
| { |
| *base = base_reg; |
| |
| for (i = 0; i < nops; i++) |
| regs[i] = unsorted_regs[order[i]]; |
| |
| *load_offset = unsorted_offsets[order[0]]; |
| } |
| |
| if (unsorted_offsets[order[0]] == 0) |
| return 1; /* ldmia */ |
| |
| if (unsorted_offsets[order[0]] == 4) |
| return 2; /* ldmib */ |
| |
| if (unsorted_offsets[order[nops - 1]] == 0) |
| return 3; /* ldmda */ |
| |
| if (unsorted_offsets[order[nops - 1]] == -4) |
| return 4; /* ldmdb */ |
| |
  /* Can't do it without setting up the offset; only do this if it takes
     no more than one insn.  */
| return (const_ok_for_arm (unsorted_offsets[order[0]]) |
| || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0; |
| } |
| |
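/* Generate and output the assembler for a combined load multiple; the
   result is something like "ldm%?ib\tr4, {r0, r1, r2}\t%@ phole ldm"
   (illustrative).  For sequence code 5, an add or sub that adjusts the
   base register into regs[0] is output first.  */
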
| char * |
| emit_ldm_seq (operands, nops) |
| rtx *operands; |
| int nops; |
| { |
| int regs[4]; |
| int base_reg; |
| HOST_WIDE_INT offset; |
| char buf[100]; |
| int i; |
| |
| switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset)) |
| { |
| case 1: |
| strcpy (buf, "ldm%?ia\t"); |
| break; |
| |
| case 2: |
| strcpy (buf, "ldm%?ib\t"); |
| break; |
| |
| case 3: |
| strcpy (buf, "ldm%?da\t"); |
| break; |
| |
| case 4: |
| strcpy (buf, "ldm%?db\t"); |
| break; |
| |
| case 5: |
| if (offset >= 0) |
| sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX, |
| reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg], |
| (long) offset); |
| else |
| sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX, |
| reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg], |
| (long) -offset); |
| output_asm_insn (buf, operands); |
| base_reg = regs[0]; |
| strcpy (buf, "ldm%?ia\t"); |
| break; |
| |
| default: |
| abort (); |
| } |
| |
| sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX, |
| reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]); |
| |
| for (i = 1; i < nops; i++) |
| sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX, |
| reg_names[regs[i]]); |
| |
| strcat (buf, "}\t%@ phole ldm"); |
| |
| output_asm_insn (buf, operands); |
| return ""; |
| } |
| |
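/* Examine a set of NOPS register stores to see if they can be combined
   into a store multiple.  Return 1 (stmia), 2 (stmib), 3 (stmda),
   4 (stmdb), or 0 if no store multiple is possible; the out parameters
   are as for load_multiple_sequence.  Unlike the load case there is no
   code 5, since a store has no free destination register to use as a
   scratch when adjusting the base.  */
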
| int |
| store_multiple_sequence (operands, nops, regs, base, load_offset) |
| rtx *operands; |
| int nops; |
| int *regs; |
| int *base; |
| HOST_WIDE_INT *load_offset; |
| { |
| int unsorted_regs[4]; |
| HOST_WIDE_INT unsorted_offsets[4]; |
| int order[4]; |
| int base_reg; |
| int i; |
| |
| /* Can only handle 2, 3, or 4 insns at present, though could be easily |
| extended if required. */ |
| if (nops < 2 || nops > 4) |
| abort (); |
| |
  /* Loop over the operands and check that the memory references are
     suitable (i.e. immediate offsets from the same base register).  At
     the same time, extract the source registers and the memory
     offsets.  */
| for (i = 0; i < nops; i++) |
| { |
| rtx reg; |
| rtx offset; |
| |
| /* Convert a subreg of a mem into the mem itself. */ |
| if (GET_CODE (operands[nops + i]) == SUBREG) |
	operands[nops + i] = alter_subreg (operands[nops + i]);
| |
| if (GET_CODE (operands[nops + i]) != MEM) |
| abort (); |
| |
| /* Don't reorder volatile memory references; it doesn't seem worth |
| looking for the case where the order is ok anyway. */ |
| if (MEM_VOLATILE_P (operands[nops + i])) |
| return 0; |
| |
| offset = const0_rtx; |
| |
| if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG |
| || (GET_CODE (reg) == SUBREG |
| && GET_CODE (reg = SUBREG_REG (reg)) == REG)) |
| || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS |
| && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0)) |
| == REG) |
| || (GET_CODE (reg) == SUBREG |
| && GET_CODE (reg = SUBREG_REG (reg)) == REG)) |
| && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1)) |
| == CONST_INT))) |
| { |
| if (i == 0) |
| { |
	      base_reg = REGNO (reg);
| unsorted_regs[0] = (GET_CODE (operands[i]) == REG |
| ? REGNO (operands[i]) |
| : REGNO (SUBREG_REG (operands[i]))); |
| order[0] = 0; |
| } |
| else |
| { |
| if (base_reg != REGNO (reg)) |
| /* Not addressed from the same base register. */ |
| return 0; |
| |
| unsorted_regs[i] = (GET_CODE (operands[i]) == REG |
| ? REGNO (operands[i]) |
| : REGNO (SUBREG_REG (operands[i]))); |
| if (unsorted_regs[i] < unsorted_regs[order[0]]) |
| order[0] = i; |
| } |
| |
| /* If it isn't an integer register, then we can't do this. */ |
| if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14) |
| return 0; |
| |
| unsorted_offsets[i] = INTVAL (offset); |
| } |
| else |
| /* Not a suitable memory address. */ |
| return 0; |
| } |
| |
| /* All the useful information has now been extracted from the |
| operands into unsorted_regs and unsorted_offsets; additionally, |
| order[0] has been set to the lowest numbered register in the |
| list. Sort the registers into order, and check that the memory |
| offsets are ascending and adjacent. */ |
| |
| for (i = 1; i < nops; i++) |
| { |
| int j; |
| |
| order[i] = order[i - 1]; |
| for (j = 0; j < nops; j++) |
| if (unsorted_regs[j] > unsorted_regs[order[i - 1]] |
| && (order[i] == order[i - 1] |
| || unsorted_regs[j] < unsorted_regs[order[i]])) |
| order[i] = j; |
| |
      /* Have we found a suitable register?  If not, one must be used more
	 than once.  */
      if (order[i] == order[i - 1])
	return 0;

      /* Are the memory offsets adjacent and ascending?  */
      if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
| return 0; |
| } |
| |
| if (base) |
| { |
| *base = base_reg; |
| |
| for (i = 0; i < nops; i++) |
| regs[i] = unsorted_regs[order[i]]; |
| |
| *load_offset = unsorted_offsets[order[0]]; |
| } |
| |
| if (unsorted_offsets[order[0]] == 0) |
| return 1; /* stmia */ |
| |
| if (unsorted_offsets[order[0]] == 4) |
| return 2; /* stmib */ |
| |
| if (unsorted_offsets[order[nops - 1]] == 0) |
| return 3; /* stmda */ |
| |
| if (unsorted_offsets[order[nops - 1]] == -4) |
| return 4; /* stmdb */ |
| |
| return 0; |
| } |
| |
| char * |
| emit_stm_seq (operands, nops) |
| rtx *operands; |
| int nops; |
| { |
| int regs[4]; |
| int base_reg; |
| HOST_WIDE_INT offset; |
| char buf[100]; |
| int i; |
| |
| switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset)) |
| { |
| case 1: |
| strcpy (buf, "stm%?ia\t"); |
| break; |
| |
| case 2: |
| strcpy (buf, "stm%?ib\t"); |
| break; |
| |
| case 3: |
| strcpy (buf, "stm%?da\t"); |
| break; |
| |
| case 4: |
| strcpy (buf, "stm%?db\t"); |
| break; |
| |
| default: |
| abort (); |
| } |
| |
| sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX, |
| reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]); |
| |
| for (i = 1; i < nops; i++) |
| sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX, |
| reg_names[regs[i]]); |
| |
| strcat (buf, "}\t%@ phole stm"); |
| |
| output_asm_insn (buf, operands); |
| return ""; |
| } |
| |
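/* Return nonzero if OP is the PARALLEL form used to push multiple
   registers: its first element must be a SET whose source is an UNSPEC
   with index 2 (see emit_multi_reg_push).  */
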
| int |
| multi_register_push (op, mode) |
| rtx op; |
| enum machine_mode mode; |
| { |
| if (GET_CODE (op) != PARALLEL |
| || (GET_CODE (XVECEXP (op, 0, 0)) != SET) |
| || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC) |
| || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != 2)) |
| return 0; |
| |
| return 1; |
| } |
| |
| |
| /* Routines for use with attributes */ |
| |
| /* Return nonzero if ATTR is a valid attribute for DECL. |
| ATTRIBUTES are any existing attributes and ARGS are the arguments |
| supplied with ATTR. |
| |
| Supported attributes: |
| |
   naked: don't output any prologue or epilogue code; the user is assumed
     to do the right thing.  */
| |
| int |
| arm_valid_machine_decl_attribute (decl, attributes, attr, args) |
| tree decl; |
| tree attributes; |
| tree attr; |
| tree args; |
| { |
| if (args != NULL_TREE) |
| return 0; |
| |
| if (is_attribute_p ("naked", attr)) |
| return TREE_CODE (decl) == FUNCTION_DECL; |
| return 0; |
| } |
| |
| /* Return non-zero if FUNC is a naked function. */ |
| |
| static int |
| arm_naked_function_p (func) |
| tree func; |
| { |
| tree a; |
| |
| if (TREE_CODE (func) != FUNCTION_DECL) |
| abort (); |
| |
| a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func)); |
| return a != NULL_TREE; |
| } |
| |
| /* Routines for use in generating RTL */ |
| |
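/* Generate a PARALLEL that loads COUNT consecutive registers, starting
   at BASE_REGNO, from consecutive words of memory based at FROM.  UP
   selects ascending (or descending) addresses; if WRITE_BACK, the base
   register update and final clobber are included.  UNCHANGING_P and
   IN_STRUCT_P are copied to each memory reference.  */
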
| rtx |
| arm_gen_load_multiple (base_regno, count, from, up, write_back, unchanging_p, |
| in_struct_p) |
| int base_regno; |
| int count; |
| rtx from; |
| int up; |
| int write_back; |
| int unchanging_p; |
| int in_struct_p; |
| { |
| int i = 0, j; |
| rtx result; |
| int sign = up ? 1 : -1; |
| rtx mem; |
| |
| result = gen_rtx (PARALLEL, VOIDmode, |
| rtvec_alloc (count + (write_back ? 2 : 0))); |
| if (write_back) |
| { |
| XVECEXP (result, 0, 0) |
| = gen_rtx (SET, GET_MODE (from), from, |
| plus_constant (from, count * 4 * sign)); |
| i = 1; |
| count++; |
| } |
| |
| for (j = 0; i < count; i++, j++) |
| { |
| mem = gen_rtx (MEM, SImode, plus_constant (from, j * 4 * sign)); |
| RTX_UNCHANGING_P (mem) = unchanging_p; |
| MEM_IN_STRUCT_P (mem) = in_struct_p; |
| |
| XVECEXP (result, 0, i) = gen_rtx (SET, VOIDmode, |
| gen_rtx (REG, SImode, base_regno + j), |
| mem); |
| } |
| |
| if (write_back) |
| XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, from); |
| |
| return result; |
| } |
| |
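/* Generate a PARALLEL that stores COUNT consecutive registers, starting
   at BASE_REGNO, to consecutive words of memory based at TO; the
   remaining arguments are as for arm_gen_load_multiple.  */
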
| rtx |
| arm_gen_store_multiple (base_regno, count, to, up, write_back, unchanging_p, |
| in_struct_p) |
| int base_regno; |
| int count; |
| rtx to; |
| int up; |
| int write_back; |
| int unchanging_p; |
| int in_struct_p; |
| { |
| int i = 0, j; |
| rtx result; |
| int sign = up ? 1 : -1; |
| rtx mem; |
| |
| result = gen_rtx (PARALLEL, VOIDmode, |
| rtvec_alloc (count + (write_back ? 2 : 0))); |
| if (write_back) |
| { |
| XVECEXP (result, 0, 0) |
| = gen_rtx (SET, GET_MODE (to), to, |
| plus_constant (to, count * 4 * sign)); |
| i = 1; |
| count++; |
| } |
| |
| for (j = 0; i < count; i++, j++) |
| { |
| mem = gen_rtx (MEM, SImode, plus_constant (to, j * 4 * sign)); |
| RTX_UNCHANGING_P (mem) = unchanging_p; |
| MEM_IN_STRUCT_P (mem) = in_struct_p; |
| |
| XVECEXP (result, 0, i) = gen_rtx (SET, VOIDmode, mem, |
| gen_rtx (REG, SImode, base_regno + j)); |
| } |
| |
| if (write_back) |
| XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, to); |
| |
| return result; |
| } |
| |
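/* Expand a block copy (movstrqi): operands[0] and operands[1] are the
   destination and source MEMs, operands[2] the byte count and
   operands[3] the alignment.  Only copies of known length (at most 64
   bytes) with word alignment are handled; return 0 if the copy cannot
   be done here, so that the caller can fall back on a library call.  */
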
| int |
| arm_gen_movstrqi (operands) |
| rtx *operands; |
| { |
| HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes; |
| int i, r; |
| rtx src, dst; |
| rtx st_src, st_dst, end_src, end_dst, fin_src, fin_dst; |
| rtx part_bytes_reg = NULL; |
| rtx mem; |
| int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p; |
| extern int optimize; |
| |
| if (GET_CODE (operands[2]) != CONST_INT |
| || GET_CODE (operands[3]) != CONST_INT |
| || INTVAL (operands[2]) > 64 |
| || INTVAL (operands[3]) & 3) |
| return 0; |
| |
| st_dst = XEXP (operands[0], 0); |
| st_src = XEXP (operands[1], 0); |
| |
| dst_unchanging_p = RTX_UNCHANGING_P (operands[0]); |
| dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]); |
| src_unchanging_p = RTX_UNCHANGING_P (operands[1]); |
| src_in_struct_p = MEM_IN_STRUCT_P (operands[1]); |
| |
| fin_dst = dst = copy_to_mode_reg (SImode, st_dst); |
| fin_src = src = copy_to_mode_reg (SImode, st_src); |
| |
| in_words_to_go = (INTVAL (operands[2]) + 3) / 4; |
| out_words_to_go = INTVAL (operands[2]) / 4; |
| last_bytes = INTVAL (operands[2]) & 3; |
| |
| if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0) |
| part_bytes_reg = gen_rtx (REG, SImode, (in_words_to_go - 1) & 3); |
| |
  for (i = 0; in_words_to_go >= 2; i += 4)
| { |
| if (in_words_to_go > 4) |
| emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE, |
| src_unchanging_p, src_in_struct_p)); |
| else |
| emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE, |
| FALSE, src_unchanging_p, |
| src_in_struct_p)); |
| |
| if (out_words_to_go) |
| { |
| if (out_words_to_go > 4) |
| emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE, |
| dst_unchanging_p, |
| dst_in_struct_p)); |
| else if (out_words_to_go != 1) |
| emit_insn (arm_gen_store_multiple (0, out_words_to_go, |
| dst, TRUE, |
| (last_bytes == 0 |
| ? FALSE : TRUE), |
| dst_unchanging_p, |
| dst_in_struct_p)); |
| else |
| { |
| mem = gen_rtx (MEM, SImode, dst); |
| RTX_UNCHANGING_P (mem) = dst_unchanging_p; |
| MEM_IN_STRUCT_P (mem) = dst_in_struct_p; |
| |