| /* Convert tree expression to rtl instructions, for GNU compiler. |
| Copyright (C) 1988, 92, 93, 94, 95, 96, 1997 Free Software Foundation, Inc. |
| |
| This file is part of GNU CC. |
| |
| GNU CC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 2, or (at your option) |
| any later version. |
| |
| GNU CC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GNU CC; see the file COPYING. If not, write to |
| the Free Software Foundation, 59 Temple Place - Suite 330, |
| Boston, MA 02111-1307, USA. */ |
| |
| |
| #include "config.h" |
| #include "machmode.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "obstack.h" |
| #include "flags.h" |
| #include "regs.h" |
| #include "hard-reg-set.h" |
| #include "except.h" |
| #include "function.h" |
| #include "insn-flags.h" |
| #include "insn-codes.h" |
| #include "expr.h" |
| #include "insn-config.h" |
| #include "recog.h" |
| #include "output.h" |
| #include "typeclass.h" |
| |
| #include "bytecode.h" |
| #include "bc-opcode.h" |
| #include "bc-typecd.h" |
| #include "bc-optab.h" |
| #include "bc-emit.h" |
| |
| |
| #define CEIL(x,y) (((x) + (y) - 1) / (y)) |
| |
| /* Decide whether a function's arguments should be processed |
| from first to last or from last to first. |
| |
| They should if the stack and args grow in opposite directions, but |
| only if we have push insns. */ |
| |
| #ifdef PUSH_ROUNDING |
| |
| #if defined (STACK_GROWS_DOWNWARD) != defined (ARGS_GROW_DOWNWARD) |
| #define PUSH_ARGS_REVERSED /* If it's last to first */ |
| #endif |
| |
| #endif |
| |
| #ifndef STACK_PUSH_CODE |
| #ifdef STACK_GROWS_DOWNWARD |
| #define STACK_PUSH_CODE PRE_DEC |
| #else |
| #define STACK_PUSH_CODE PRE_INC |
| #endif |
| #endif |
| |
| /* Like STACK_BOUNDARY but in units of bytes, not bits. */ |
| #define STACK_BYTES (STACK_BOUNDARY / BITS_PER_UNIT) |
| |
| /* If this is nonzero, we do not bother generating VOLATILE |
| around volatile memory references, and we are willing to |
| output indirect addresses. If cse is to follow, we reject |
| indirect addresses so a useful potential cse is generated; |
| if it is used only once, instruction combination will produce |
| the same indirect address eventually. */ |
| int cse_not_expected; |
| |
| /* Nonzero to generate code for all the subroutines within an |
| expression before generating the upper levels of the expression. |
| Nowadays this is never zero. */ |
| int do_preexpand_calls = 1; |
| |
| /* Number of units that we should eventually pop off the stack. |
| These are the arguments to function calls that have already returned. */ |
| int pending_stack_adjust; |
| |
| /* Nonzero means stack pops must not be deferred, and deferred stack |
| pops must not be output. It is nonzero inside a function call, |
| inside a conditional expression, inside a statement expression, |
| and in other cases as well. */ |
| int inhibit_defer_pop; |
| |
| /* When temporaries are created by TARGET_EXPRs, they are created at |
| this level of temp_slot_level, so that they can remain allocated |
| until no longer needed. CLEANUP_POINT_EXPRs define the lifetime |
| of TARGET_EXPRs. */ |
| int target_temp_slot_level; |
| |
| /* Nonzero means __builtin_saveregs has already been done in this function. |
| The value is the pseudoreg containing the value __builtin_saveregs |
| returned. */ |
| static rtx saveregs_value; |
| |
| /* Similarly for __builtin_apply_args. */ |
| static rtx apply_args_value; |
| |
| /* This structure is used by move_by_pieces to describe the move to |
| be performed. */ |
| |
| struct move_by_pieces |
| { |
| rtx to; |
| rtx to_addr; |
| int autinc_to; |
| int explicit_inc_to; |
| int to_struct; |
| rtx from; |
| rtx from_addr; |
| int autinc_from; |
| int explicit_inc_from; |
| int from_struct; |
| int len; |
| int offset; |
| int reverse; |
| }; |
| |
| /* This structure is used by clear_by_pieces to describe the clear to |
| be performed. */ |
| |
| struct clear_by_pieces |
| { |
| rtx to; |
| rtx to_addr; |
| int autinc_to; |
| int explicit_inc_to; |
| int to_struct; |
| int len; |
| int offset; |
| int reverse; |
| }; |
| |
| /* Used to generate bytecodes: keep track of size of local variables, |
| as well as depth of arithmetic stack. (Notice that variables are |
| stored on the machine's stack, not the arithmetic stack.) */ |
| |
| static rtx get_push_address PROTO ((int)); |
| extern int local_vars_size; |
| extern int stack_depth; |
| extern int max_stack_depth; |
| extern struct obstack permanent_obstack; |
| extern rtx arg_pointer_save_area; |
| |
| static rtx enqueue_insn PROTO((rtx, rtx)); |
| static int queued_subexp_p PROTO((rtx)); |
| static void init_queue PROTO((void)); |
| static void move_by_pieces PROTO((rtx, rtx, int, int)); |
| static int move_by_pieces_ninsns PROTO((unsigned int, int)); |
| static void move_by_pieces_1 PROTO((rtx (*) (), enum machine_mode, |
| struct move_by_pieces *)); |
| static void clear_by_pieces PROTO((rtx, int, int)); |
| static void clear_by_pieces_1 PROTO((rtx (*) (), enum machine_mode, |
| struct clear_by_pieces *)); |
| static int is_zeros_p PROTO((tree)); |
| static int mostly_zeros_p PROTO((tree)); |
| static void store_constructor PROTO((tree, rtx, int)); |
| static rtx store_field PROTO((rtx, int, int, enum machine_mode, tree, |
| enum machine_mode, int, int, int)); |
| static tree save_noncopied_parts PROTO((tree, tree)); |
| static tree init_noncopied_parts PROTO((tree, tree)); |
| static int safe_from_p PROTO((rtx, tree)); |
| static int fixed_type_p PROTO((tree)); |
| static rtx var_rtx PROTO((tree)); |
| static int get_pointer_alignment PROTO((tree, unsigned)); |
| static tree string_constant PROTO((tree, tree *)); |
| static tree c_strlen PROTO((tree)); |
| static rtx expand_builtin PROTO((tree, rtx, rtx, |
| enum machine_mode, int)); |
| static int apply_args_size PROTO((void)); |
| static int apply_result_size PROTO((void)); |
| static rtx result_vector PROTO((int, rtx)); |
| static rtx expand_builtin_apply_args PROTO((void)); |
| static rtx expand_builtin_apply PROTO((rtx, rtx, rtx)); |
| static void expand_builtin_return PROTO((rtx)); |
| static rtx expand_increment PROTO((tree, int, int)); |
| void bc_expand_increment PROTO((struct increment_operator *, tree)); |
| rtx bc_allocate_local PROTO((int, int)); |
| void bc_store_memory PROTO((tree, tree)); |
| tree bc_expand_component_address PROTO((tree)); |
| tree bc_expand_address PROTO((tree)); |
| void bc_expand_constructor PROTO((tree)); |
| void bc_adjust_stack PROTO((int)); |
| tree bc_canonicalize_array_ref PROTO((tree)); |
| void bc_load_memory PROTO((tree, tree)); |
| void bc_load_externaddr PROTO((rtx)); |
| void bc_load_externaddr_id PROTO((tree, int)); |
| void bc_load_localaddr PROTO((rtx)); |
| void bc_load_parmaddr PROTO((rtx)); |
| static void preexpand_calls PROTO((tree)); |
| static void do_jump_by_parts_greater PROTO((tree, int, rtx, rtx)); |
| void do_jump_by_parts_greater_rtx PROTO((enum machine_mode, int, rtx, rtx, rtx, rtx)); |
| static void do_jump_by_parts_equality PROTO((tree, rtx, rtx)); |
| static void do_jump_by_parts_equality_rtx PROTO((rtx, rtx, rtx)); |
| static void do_jump_for_compare PROTO((rtx, rtx, rtx)); |
| static rtx compare PROTO((tree, enum rtx_code, enum rtx_code)); |
| static rtx do_store_flag PROTO((tree, rtx, enum machine_mode, int)); |
| extern tree truthvalue_conversion PROTO((tree)); |
| |
| /* Record for each mode whether we can move a register directly to or |
| from an object of that mode in memory. If we can't, we won't try |
| to use that mode directly when accessing a field of that mode. */ |
| |
| static char direct_load[NUM_MACHINE_MODES]; |
| static char direct_store[NUM_MACHINE_MODES]; |
| |
| /* MOVE_RATIO is the number of move instructions that is better than |
| a block move. */ |
| |
| #ifndef MOVE_RATIO |
| #if defined (HAVE_movstrqi) || defined (HAVE_movstrhi) || defined (HAVE_movstrsi) || defined (HAVE_movstrdi) || defined (HAVE_movstrti) |
| #define MOVE_RATIO 2 |
| #else |
| /* A value of around 6 would minimize code size; infinity would minimize |
| execution time. */ |
| #define MOVE_RATIO 15 |
| #endif |
| #endif |
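| |
| /* As an illustration (not used by the code), with the default MOVE_RATIO |
| of 15 a 32-byte copy between word-aligned operands on a machine with |
| 4-byte words typically takes 8 single-word moves; since 8 < 15, |
| emit_block_move expands it with move_by_pieces rather than a block-move |
| insn or a library call. */ |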
| |
| /* This array records the insn_code of insns to perform block moves. */ |
| enum insn_code movstr_optab[NUM_MACHINE_MODES]; |
| |
| /* This array records the insn_code of insns to perform block clears. */ |
| enum insn_code clrstr_optab[NUM_MACHINE_MODES]; |
| |
| /* SLOW_UNALIGNED_ACCESS is non-zero if unaligned accesses are very slow. */ |
| |
| #ifndef SLOW_UNALIGNED_ACCESS |
| #define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT |
| #endif |
| |
| /* Register mappings for target machines without register windows. */ |
| #ifndef INCOMING_REGNO |
| #define INCOMING_REGNO(OUT) (OUT) |
| #endif |
| #ifndef OUTGOING_REGNO |
| #define OUTGOING_REGNO(IN) (IN) |
| #endif |
| |
| /* Maps used to convert modes to const, load, and store bytecodes. */ |
| enum bytecode_opcode mode_to_const_map[MAX_MACHINE_MODE]; |
| enum bytecode_opcode mode_to_load_map[MAX_MACHINE_MODE]; |
| enum bytecode_opcode mode_to_store_map[MAX_MACHINE_MODE]; |
| |
| /* Initialize maps used to convert modes to const, load, and store |
| bytecodes. */ |
| |
| void |
| bc_init_mode_to_opcode_maps () |
| { |
| int mode; |
| |
| for (mode = 0; mode < (int) MAX_MACHINE_MODE; mode++) |
| mode_to_const_map[mode] |
| = mode_to_load_map[mode] |
| = mode_to_store_map[mode] = neverneverland; |
| |
| #define DEF_MODEMAP(SYM, CODE, UCODE, CONST, LOAD, STORE) \ |
| mode_to_const_map[(int) SYM] = CONST; \ |
| mode_to_load_map[(int) SYM] = LOAD; \ |
| mode_to_store_map[(int) SYM] = STORE; |
| |
| #include "modemap.def" |
| #undef DEF_MODEMAP |
| } |
| |
| /* This is run once per compilation to set up which modes can be used |
| directly in memory and to initialize the block move optab. */ |
| |
| void |
| init_expr_once () |
| { |
| rtx insn, pat; |
| enum machine_mode mode; |
| /* Try indexing by frame ptr and try by stack ptr. |
| It is known that on the Convex the stack ptr isn't a valid index. |
| With luck, one or the other is valid on any machine. */ |
| rtx mem = gen_rtx (MEM, VOIDmode, stack_pointer_rtx); |
| rtx mem1 = gen_rtx (MEM, VOIDmode, frame_pointer_rtx); |
| |
| start_sequence (); |
| insn = emit_insn (gen_rtx (SET, 0, 0)); |
| pat = PATTERN (insn); |
| |
| for (mode = VOIDmode; (int) mode < NUM_MACHINE_MODES; |
| mode = (enum machine_mode) ((int) mode + 1)) |
| { |
| int regno; |
| rtx reg; |
| int num_clobbers; |
| |
| direct_load[(int) mode] = direct_store[(int) mode] = 0; |
| PUT_MODE (mem, mode); |
| PUT_MODE (mem1, mode); |
| |
| /* See if there is some register that can be used in this mode and |
| directly loaded or stored from memory. */ |
| |
| if (mode != VOIDmode && mode != BLKmode) |
| for (regno = 0; regno < FIRST_PSEUDO_REGISTER |
| && (direct_load[(int) mode] == 0 || direct_store[(int) mode] == 0); |
| regno++) |
| { |
| if (! HARD_REGNO_MODE_OK (regno, mode)) |
| continue; |
| |
| reg = gen_rtx (REG, mode, regno); |
| |
| SET_SRC (pat) = mem; |
| SET_DEST (pat) = reg; |
| if (recog (pat, insn, &num_clobbers) >= 0) |
| direct_load[(int) mode] = 1; |
| |
| SET_SRC (pat) = mem1; |
| SET_DEST (pat) = reg; |
| if (recog (pat, insn, &num_clobbers) >= 0) |
| direct_load[(int) mode] = 1; |
| |
| SET_SRC (pat) = reg; |
| SET_DEST (pat) = mem; |
| if (recog (pat, insn, &num_clobbers) >= 0) |
| direct_store[(int) mode] = 1; |
| |
| SET_SRC (pat) = reg; |
| SET_DEST (pat) = mem1; |
| if (recog (pat, insn, &num_clobbers) >= 0) |
| direct_store[(int) mode] = 1; |
| } |
| } |
| |
| end_sequence (); |
| } |
| |
| /* This is run at the start of compiling a function. */ |
| |
| void |
| init_expr () |
| { |
| init_queue (); |
| |
| pending_stack_adjust = 0; |
| inhibit_defer_pop = 0; |
| saveregs_value = 0; |
| apply_args_value = 0; |
| forced_labels = 0; |
| } |
| |
| /* Save all variables describing the current status into the structure *P. |
| This is used before starting a nested function. */ |
| |
| void |
| save_expr_status (p) |
| struct function *p; |
| { |
| /* Instead of saving the postincrement queue, empty it. */ |
| emit_queue (); |
| |
| p->pending_stack_adjust = pending_stack_adjust; |
| p->inhibit_defer_pop = inhibit_defer_pop; |
| p->saveregs_value = saveregs_value; |
| p->apply_args_value = apply_args_value; |
| p->forced_labels = forced_labels; |
| |
| pending_stack_adjust = 0; |
| inhibit_defer_pop = 0; |
| saveregs_value = 0; |
| apply_args_value = 0; |
| forced_labels = 0; |
| } |
| |
| /* Restore all variables describing the current status from the structure *P. |
| This is used after a nested function. */ |
| |
| void |
| restore_expr_status (p) |
| struct function *p; |
| { |
| pending_stack_adjust = p->pending_stack_adjust; |
| inhibit_defer_pop = p->inhibit_defer_pop; |
| saveregs_value = p->saveregs_value; |
| apply_args_value = p->apply_args_value; |
| forced_labels = p->forced_labels; |
| } |
| |
| /* Manage the queue of increment instructions to be output |
| for POSTINCREMENT_EXPR expressions, etc. */ |
| |
| static rtx pending_chain; |
| |
| /* Queue up to increment (or change) VAR later. BODY says how: |
| BODY should be the same thing you would pass to emit_insn |
| to increment right away. It will go to emit_insn later on. |
| |
| The value is a QUEUED expression to be used in place of VAR |
| where you want to guarantee the pre-incrementation value of VAR. */ |
| |
| static rtx |
| enqueue_insn (var, body) |
| rtx var, body; |
| { |
| pending_chain = gen_rtx (QUEUED, GET_MODE (var), |
| var, NULL_RTX, NULL_RTX, body, pending_chain); |
| return pending_chain; |
| } |
| |
| /* Use protect_from_queue to convert a QUEUED expression |
| into something that you can put immediately into an instruction. |
| If the queued incrementation has not happened yet, |
| protect_from_queue returns the variable itself. |
| If the incrementation has happened, protect_from_queue returns a temp |
| that contains a copy of the old value of the variable. |
| |
| Any time an rtx which might possibly be a QUEUED is to be put |
| into an instruction, it must be passed through protect_from_queue first. |
| QUEUED expressions are not meaningful in instructions. |
| |
| Do not pass a value through protect_from_queue and then hold |
| on to it for a while before putting it in an instruction! |
| If the queue is flushed in between, incorrect code will result. */ |
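| |
| /* For example (purely illustrative), code that reads one operand and |
| stores into another while increments may be pending would typically do |
| |
| op0 = protect_from_queue (op0, 0); |
| target = protect_from_queue (target, 1); |
| emit_move_insn (target, op0); |
| emit_queue (); |
| |
| where OP0 and TARGET are hypothetical rtx operands; the final call to |
| emit_queue flushes any queued increments. */ |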
| |
| rtx |
| protect_from_queue (x, modify) |
| register rtx x; |
| int modify; |
| { |
| register RTX_CODE code = GET_CODE (x); |
| |
| #if 0 /* A QUEUED can hang around after the queue is forced out. */ |
| /* Shortcut for most common case. */ |
| if (pending_chain == 0) |
| return x; |
| #endif |
| |
| if (code != QUEUED) |
| { |
| /* A special hack for read access to (MEM (QUEUED ...)) to facilitate |
| use of autoincrement. Make a copy of the contents of the memory |
| location rather than a copy of the address, but not if the value is |
| of mode BLKmode. Don't modify X in place since it might be |
| shared. */ |
| if (code == MEM && GET_MODE (x) != BLKmode |
| && GET_CODE (XEXP (x, 0)) == QUEUED && !modify) |
| { |
| register rtx y = XEXP (x, 0); |
| register rtx new = gen_rtx (MEM, GET_MODE (x), QUEUED_VAR (y)); |
| |
| MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x); |
| RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x); |
| MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x); |
| |
| if (QUEUED_INSN (y)) |
| { |
| register rtx temp = gen_reg_rtx (GET_MODE (new)); |
| emit_insn_before (gen_move_insn (temp, new), |
| QUEUED_INSN (y)); |
| return temp; |
| } |
| return new; |
| } |
| /* Otherwise, recursively protect the subexpressions of all |
| the kinds of rtx's that can contain a QUEUED. */ |
| if (code == MEM) |
| { |
| rtx tem = protect_from_queue (XEXP (x, 0), 0); |
| if (tem != XEXP (x, 0)) |
| { |
| x = copy_rtx (x); |
| XEXP (x, 0) = tem; |
| } |
| } |
| else if (code == PLUS || code == MULT) |
| { |
| rtx new0 = protect_from_queue (XEXP (x, 0), 0); |
| rtx new1 = protect_from_queue (XEXP (x, 1), 0); |
| if (new0 != XEXP (x, 0) || new1 != XEXP (x, 1)) |
| { |
| x = copy_rtx (x); |
| XEXP (x, 0) = new0; |
| XEXP (x, 1) = new1; |
| } |
| } |
| return x; |
| } |
| /* If the increment has not happened, use the variable itself. */ |
| if (QUEUED_INSN (x) == 0) |
| return QUEUED_VAR (x); |
| /* If the increment has happened and a pre-increment copy exists, |
| use that copy. */ |
| if (QUEUED_COPY (x) != 0) |
| return QUEUED_COPY (x); |
| /* The increment has happened but we haven't set up a pre-increment copy. |
| Set one up now, and use it. */ |
| QUEUED_COPY (x) = gen_reg_rtx (GET_MODE (QUEUED_VAR (x))); |
| emit_insn_before (gen_move_insn (QUEUED_COPY (x), QUEUED_VAR (x)), |
| QUEUED_INSN (x)); |
| return QUEUED_COPY (x); |
| } |
| |
| /* Return nonzero if X contains a QUEUED expression: |
| if it contains anything that will be altered by a queued increment. |
| We handle only combinations of MEM, PLUS, MINUS and MULT operators |
| since memory addresses generally contain only those. */ |
| |
| static int |
| queued_subexp_p (x) |
| rtx x; |
| { |
| register enum rtx_code code = GET_CODE (x); |
| switch (code) |
| { |
| case QUEUED: |
| return 1; |
| case MEM: |
| return queued_subexp_p (XEXP (x, 0)); |
| case MULT: |
| case PLUS: |
| case MINUS: |
| return queued_subexp_p (XEXP (x, 0)) |
| || queued_subexp_p (XEXP (x, 1)); |
| } |
| return 0; |
| } |
| |
| /* Perform all the pending incrementations. */ |
| |
| void |
| emit_queue () |
| { |
| register rtx p; |
| while (p = pending_chain) |
| { |
| QUEUED_INSN (p) = emit_insn (QUEUED_BODY (p)); |
| pending_chain = QUEUED_NEXT (p); |
| } |
| } |
| |
| static void |
| init_queue () |
| { |
| if (pending_chain) |
| abort (); |
| } |
| |
| /* Copy data from FROM to TO, where the machine modes are not the same. |
| Both modes may be integer, or both may be floating. |
| UNSIGNEDP should be nonzero if FROM is an unsigned type. |
| This causes zero-extension instead of sign-extension. */ |
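| |
| /* For instance (illustrative only), to widen a hypothetical SImode |
| register SI_REG into a DImode register DI_REG with zero-extension one |
| would write |
| |
| convert_move (di_reg, si_reg, 1); |
| |
| while passing 0 for UNSIGNEDP would sign-extend instead. */ |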
| |
| void |
| convert_move (to, from, unsignedp) |
| register rtx to, from; |
| int unsignedp; |
| { |
| enum machine_mode to_mode = GET_MODE (to); |
| enum machine_mode from_mode = GET_MODE (from); |
| int to_real = GET_MODE_CLASS (to_mode) == MODE_FLOAT; |
| int from_real = GET_MODE_CLASS (from_mode) == MODE_FLOAT; |
| enum insn_code code; |
| rtx libcall; |
| |
| /* rtx code for making an equivalent value. */ |
| enum rtx_code equiv_code = (unsignedp ? ZERO_EXTEND : SIGN_EXTEND); |
| |
| to = protect_from_queue (to, 1); |
| from = protect_from_queue (from, 0); |
| |
| if (to_real != from_real) |
| abort (); |
| |
| /* If FROM is a SUBREG that indicates that we have already done at least |
| the required extension, strip it. We don't handle such SUBREGs as |
| TO here. */ |
| |
| if (GET_CODE (from) == SUBREG && SUBREG_PROMOTED_VAR_P (from) |
| && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (from))) |
| >= GET_MODE_SIZE (to_mode)) |
| && SUBREG_PROMOTED_UNSIGNED_P (from) == unsignedp) |
| from = gen_lowpart (to_mode, from), from_mode = to_mode; |
| |
| if (GET_CODE (to) == SUBREG && SUBREG_PROMOTED_VAR_P (to)) |
| abort (); |
| |
| if (to_mode == from_mode |
| || (from_mode == VOIDmode && CONSTANT_P (from))) |
| { |
| emit_move_insn (to, from); |
| return; |
| } |
| |
| if (to_real) |
| { |
| rtx value; |
| |
| if (GET_MODE_BITSIZE (from_mode) < GET_MODE_BITSIZE (to_mode)) |
| { |
| /* Try converting directly if the insn is supported. */ |
| if ((code = can_extend_p (to_mode, from_mode, 0)) |
| != CODE_FOR_nothing) |
| { |
| emit_unop_insn (code, to, from, UNKNOWN); |
| return; |
| } |
| } |
| |
| #ifdef HAVE_trunchfqf2 |
| if (HAVE_trunchfqf2 && from_mode == HFmode && to_mode == QFmode) |
| { |
| emit_unop_insn (CODE_FOR_trunchfqf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_truncsfqf2 |
| if (HAVE_truncsfqf2 && from_mode == SFmode && to_mode == QFmode) |
| { |
| emit_unop_insn (CODE_FOR_truncsfqf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_truncdfqf2 |
| if (HAVE_truncdfqf2 && from_mode == DFmode && to_mode == QFmode) |
| { |
| emit_unop_insn (CODE_FOR_truncdfqf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_truncxfqf2 |
| if (HAVE_truncxfqf2 && from_mode == XFmode && to_mode == QFmode) |
| { |
| emit_unop_insn (CODE_FOR_truncxfqf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_trunctfqf2 |
| if (HAVE_trunctfqf2 && from_mode == TFmode && to_mode == QFmode) |
| { |
| emit_unop_insn (CODE_FOR_trunctfqf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| |
| #ifdef HAVE_trunctqfhf2 |
| if (HAVE_trunctqfhf2 && from_mode == TQFmode && to_mode == HFmode) |
| { |
| emit_unop_insn (CODE_FOR_trunctqfhf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_truncsfhf2 |
| if (HAVE_truncsfhf2 && from_mode == SFmode && to_mode == HFmode) |
| { |
| emit_unop_insn (CODE_FOR_truncsfhf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_truncdfhf2 |
| if (HAVE_truncdfhf2 && from_mode == DFmode && to_mode == HFmode) |
| { |
| emit_unop_insn (CODE_FOR_truncdfhf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_truncxfhf2 |
| if (HAVE_truncxfhf2 && from_mode == XFmode && to_mode == HFmode) |
| { |
| emit_unop_insn (CODE_FOR_truncxfhf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_trunctfhf2 |
| if (HAVE_trunctfhf2 && from_mode == TFmode && to_mode == HFmode) |
| { |
| emit_unop_insn (CODE_FOR_trunctfhf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| |
| #ifdef HAVE_truncsftqf2 |
| if (HAVE_truncsftqf2 && from_mode == SFmode && to_mode == TQFmode) |
| { |
| emit_unop_insn (CODE_FOR_truncsftqf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_truncdftqf2 |
| if (HAVE_truncdftqf2 && from_mode == DFmode && to_mode == TQFmode) |
| { |
| emit_unop_insn (CODE_FOR_truncdftqf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_truncxftqf2 |
| if (HAVE_truncxftqf2 && from_mode == XFmode && to_mode == TQFmode) |
| { |
| emit_unop_insn (CODE_FOR_truncxftqf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_trunctftqf2 |
| if (HAVE_trunctftqf2 && from_mode == TFmode && to_mode == TQFmode) |
| { |
| emit_unop_insn (CODE_FOR_trunctftqf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| |
| #ifdef HAVE_truncdfsf2 |
| if (HAVE_truncdfsf2 && from_mode == DFmode && to_mode == SFmode) |
| { |
| emit_unop_insn (CODE_FOR_truncdfsf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_truncxfsf2 |
| if (HAVE_truncxfsf2 && from_mode == XFmode && to_mode == SFmode) |
| { |
| emit_unop_insn (CODE_FOR_truncxfsf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_trunctfsf2 |
| if (HAVE_trunctfsf2 && from_mode == TFmode && to_mode == SFmode) |
| { |
| emit_unop_insn (CODE_FOR_trunctfsf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_truncxfdf2 |
| if (HAVE_truncxfdf2 && from_mode == XFmode && to_mode == DFmode) |
| { |
| emit_unop_insn (CODE_FOR_truncxfdf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| #ifdef HAVE_trunctfdf2 |
| if (HAVE_trunctfdf2 && from_mode == TFmode && to_mode == DFmode) |
| { |
| emit_unop_insn (CODE_FOR_trunctfdf2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| |
| libcall = (rtx) 0; |
| switch (from_mode) |
| { |
| case SFmode: |
| switch (to_mode) |
| { |
| case DFmode: |
| libcall = extendsfdf2_libfunc; |
| break; |
| |
| case XFmode: |
| libcall = extendsfxf2_libfunc; |
| break; |
| |
| case TFmode: |
| libcall = extendsftf2_libfunc; |
| break; |
| } |
| break; |
| |
| case DFmode: |
| switch (to_mode) |
| { |
| case SFmode: |
| libcall = truncdfsf2_libfunc; |
| break; |
| |
| case XFmode: |
| libcall = extenddfxf2_libfunc; |
| break; |
| |
| case TFmode: |
| libcall = extenddftf2_libfunc; |
| break; |
| } |
| break; |
| |
| case XFmode: |
| switch (to_mode) |
| { |
| case SFmode: |
| libcall = truncxfsf2_libfunc; |
| break; |
| |
| case DFmode: |
| libcall = truncxfdf2_libfunc; |
| break; |
| } |
| break; |
| |
| case TFmode: |
| switch (to_mode) |
| { |
| case SFmode: |
| libcall = trunctfsf2_libfunc; |
| break; |
| |
| case DFmode: |
| libcall = trunctfdf2_libfunc; |
| break; |
| } |
| break; |
| } |
| |
| if (libcall == (rtx) 0) |
| /* This conversion is not implemented yet. */ |
| abort (); |
| |
| value = emit_library_call_value (libcall, NULL_RTX, 1, to_mode, |
| 1, from, from_mode); |
| emit_move_insn (to, value); |
| return; |
| } |
| |
| /* Now both modes are integers. */ |
| |
| /* Handle expanding beyond a word. */ |
| if (GET_MODE_BITSIZE (from_mode) < GET_MODE_BITSIZE (to_mode) |
| && GET_MODE_BITSIZE (to_mode) > BITS_PER_WORD) |
| { |
| rtx insns; |
| rtx lowpart; |
| rtx fill_value; |
| rtx lowfrom; |
| int i; |
| enum machine_mode lowpart_mode; |
| int nwords = CEIL (GET_MODE_SIZE (to_mode), UNITS_PER_WORD); |
| |
| /* Try converting directly if the insn is supported. */ |
| if ((code = can_extend_p (to_mode, from_mode, unsignedp)) |
| != CODE_FOR_nothing) |
| { |
| /* If FROM is a SUBREG, put it into a register. Do this |
| so that we always generate the same set of insns for |
| better cse'ing; if an intermediate assignment occurred, |
| we won't be doing the operation directly on the SUBREG. */ |
| if (optimize > 0 && GET_CODE (from) == SUBREG) |
| from = force_reg (from_mode, from); |
| emit_unop_insn (code, to, from, equiv_code); |
| return; |
| } |
| /* Next, try converting via full word. */ |
| else if (GET_MODE_BITSIZE (from_mode) < BITS_PER_WORD |
| && ((code = can_extend_p (to_mode, word_mode, unsignedp)) |
| != CODE_FOR_nothing)) |
| { |
| if (GET_CODE (to) == REG) |
| emit_insn (gen_rtx (CLOBBER, VOIDmode, to)); |
| convert_move (gen_lowpart (word_mode, to), from, unsignedp); |
| emit_unop_insn (code, to, |
| gen_lowpart (word_mode, to), equiv_code); |
| return; |
| } |
| |
| /* No special multiword conversion insn; do it by hand. */ |
| start_sequence (); |
| |
| /* Since we will turn this into a no conflict block, we must ensure |
| that the source does not overlap the target. */ |
| |
| if (reg_overlap_mentioned_p (to, from)) |
| from = force_reg (from_mode, from); |
| |
| /* Get a copy of FROM widened to a word, if necessary. */ |
| if (GET_MODE_BITSIZE (from_mode) < BITS_PER_WORD) |
| lowpart_mode = word_mode; |
| else |
| lowpart_mode = from_mode; |
| |
| lowfrom = convert_to_mode (lowpart_mode, from, unsignedp); |
| |
| lowpart = gen_lowpart (lowpart_mode, to); |
| emit_move_insn (lowpart, lowfrom); |
| |
| /* Compute the value to put in each remaining word. */ |
| if (unsignedp) |
| fill_value = const0_rtx; |
| else |
| { |
| #ifdef HAVE_slt |
| if (HAVE_slt |
| && insn_operand_mode[(int) CODE_FOR_slt][0] == word_mode |
| && STORE_FLAG_VALUE == -1) |
| { |
| emit_cmp_insn (lowfrom, const0_rtx, NE, NULL_RTX, |
| lowpart_mode, 0, 0); |
| fill_value = gen_reg_rtx (word_mode); |
| emit_insn (gen_slt (fill_value)); |
| } |
| else |
| #endif |
| { |
| fill_value |
| = expand_shift (RSHIFT_EXPR, lowpart_mode, lowfrom, |
| size_int (GET_MODE_BITSIZE (lowpart_mode) - 1), |
| NULL_RTX, 0); |
| fill_value = convert_to_mode (word_mode, fill_value, 1); |
| } |
| } |
| |
| /* Fill the remaining words. */ |
| for (i = GET_MODE_SIZE (lowpart_mode) / UNITS_PER_WORD; i < nwords; i++) |
| { |
| int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i); |
| rtx subword = operand_subword (to, index, 1, to_mode); |
| |
| if (subword == 0) |
| abort (); |
| |
| if (fill_value != subword) |
| emit_move_insn (subword, fill_value); |
| } |
| |
| insns = get_insns (); |
| end_sequence (); |
| |
| emit_no_conflict_block (insns, to, from, NULL_RTX, |
| gen_rtx (equiv_code, to_mode, copy_rtx (from))); |
| return; |
| } |
| |
| /* Truncating multi-word to a word or less. */ |
| if (GET_MODE_BITSIZE (from_mode) > BITS_PER_WORD |
| && GET_MODE_BITSIZE (to_mode) <= BITS_PER_WORD) |
| { |
| if (!((GET_CODE (from) == MEM |
| && ! MEM_VOLATILE_P (from) |
| && direct_load[(int) to_mode] |
| && ! mode_dependent_address_p (XEXP (from, 0))) |
| || GET_CODE (from) == REG |
| || GET_CODE (from) == SUBREG)) |
| from = force_reg (from_mode, from); |
| convert_move (to, gen_lowpart (word_mode, from), 0); |
| return; |
| } |
| |
| /* Handle pointer conversion */ /* SPEE 900220 */ |
| if (to_mode == PSImode) |
| { |
| if (from_mode != SImode) |
| from = convert_to_mode (SImode, from, unsignedp); |
| |
| #ifdef HAVE_truncsipsi2 |
| if (HAVE_truncsipsi2) |
| { |
| emit_unop_insn (CODE_FOR_truncsipsi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif /* HAVE_truncsipsi2 */ |
| abort (); |
| } |
| |
| if (from_mode == PSImode) |
| { |
| if (to_mode != SImode) |
| { |
| from = convert_to_mode (SImode, from, unsignedp); |
| from_mode = SImode; |
| } |
| else |
| { |
| #ifdef HAVE_extendpsisi2 |
| if (HAVE_extendpsisi2) |
| { |
| emit_unop_insn (CODE_FOR_extendpsisi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif /* HAVE_extendpsisi2 */ |
| abort (); |
| } |
| } |
| |
| if (to_mode == PDImode) |
| { |
| if (from_mode != DImode) |
| from = convert_to_mode (DImode, from, unsignedp); |
| |
| #ifdef HAVE_truncdipdi2 |
| if (HAVE_truncdipdi2) |
| { |
| emit_unop_insn (CODE_FOR_truncdipdi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif /* HAVE_truncdipdi2 */ |
| abort (); |
| } |
| |
| if (from_mode == PDImode) |
| { |
| if (to_mode != DImode) |
| { |
| from = convert_to_mode (DImode, from, unsignedp); |
| from_mode = DImode; |
| } |
| else |
| { |
| #ifdef HAVE_extendpdidi2 |
| if (HAVE_extendpdidi2) |
| { |
| emit_unop_insn (CODE_FOR_extendpdidi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif /* HAVE_extendpdidi2 */ |
| abort (); |
| } |
| } |
| |
| /* Now follow all the conversions between integers |
| no more than a word long. */ |
| |
| /* For truncation, usually we can just refer to FROM in a narrower mode. */ |
| if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode) |
| && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode), |
| GET_MODE_BITSIZE (from_mode))) |
| { |
| if (!((GET_CODE (from) == MEM |
| && ! MEM_VOLATILE_P (from) |
| && direct_load[(int) to_mode] |
| && ! mode_dependent_address_p (XEXP (from, 0))) |
| || GET_CODE (from) == REG |
| || GET_CODE (from) == SUBREG)) |
| from = force_reg (from_mode, from); |
| if (GET_CODE (from) == REG && REGNO (from) < FIRST_PSEUDO_REGISTER |
| && ! HARD_REGNO_MODE_OK (REGNO (from), to_mode)) |
| from = copy_to_reg (from); |
| emit_move_insn (to, gen_lowpart (to_mode, from)); |
| return; |
| } |
| |
| /* Handle extension. */ |
| if (GET_MODE_BITSIZE (to_mode) > GET_MODE_BITSIZE (from_mode)) |
| { |
| /* Convert directly if that works. */ |
| if ((code = can_extend_p (to_mode, from_mode, unsignedp)) |
| != CODE_FOR_nothing) |
| { |
| emit_unop_insn (code, to, from, equiv_code); |
| return; |
| } |
| else |
| { |
| enum machine_mode intermediate; |
| |
| /* Search for a mode to convert via. */ |
| for (intermediate = from_mode; intermediate != VOIDmode; |
| intermediate = GET_MODE_WIDER_MODE (intermediate)) |
| if (((can_extend_p (to_mode, intermediate, unsignedp) |
| != CODE_FOR_nothing) |
| || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate) |
| && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode), |
| GET_MODE_BITSIZE (intermediate)))) |
| && (can_extend_p (intermediate, from_mode, unsignedp) |
| != CODE_FOR_nothing)) |
| { |
| convert_move (to, convert_to_mode (intermediate, from, |
| unsignedp), unsignedp); |
| return; |
| } |
| |
| /* No suitable intermediate mode. */ |
| abort (); |
| } |
| } |
| |
| /* Support special truncate insns for certain modes. */ |
| |
| if (from_mode == DImode && to_mode == SImode) |
| { |
| #ifdef HAVE_truncdisi2 |
| if (HAVE_truncdisi2) |
| { |
| emit_unop_insn (CODE_FOR_truncdisi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| convert_move (to, force_reg (from_mode, from), unsignedp); |
| return; |
| } |
| |
| if (from_mode == DImode && to_mode == HImode) |
| { |
| #ifdef HAVE_truncdihi2 |
| if (HAVE_truncdihi2) |
| { |
| emit_unop_insn (CODE_FOR_truncdihi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| convert_move (to, force_reg (from_mode, from), unsignedp); |
| return; |
| } |
| |
| if (from_mode == DImode && to_mode == QImode) |
| { |
| #ifdef HAVE_truncdiqi2 |
| if (HAVE_truncdiqi2) |
| { |
| emit_unop_insn (CODE_FOR_truncdiqi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| convert_move (to, force_reg (from_mode, from), unsignedp); |
| return; |
| } |
| |
| if (from_mode == SImode && to_mode == HImode) |
| { |
| #ifdef HAVE_truncsihi2 |
| if (HAVE_truncsihi2) |
| { |
| emit_unop_insn (CODE_FOR_truncsihi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| convert_move (to, force_reg (from_mode, from), unsignedp); |
| return; |
| } |
| |
| if (from_mode == SImode && to_mode == QImode) |
| { |
| #ifdef HAVE_truncsiqi2 |
| if (HAVE_truncsiqi2) |
| { |
| emit_unop_insn (CODE_FOR_truncsiqi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| convert_move (to, force_reg (from_mode, from), unsignedp); |
| return; |
| } |
| |
| if (from_mode == HImode && to_mode == QImode) |
| { |
| #ifdef HAVE_trunchiqi2 |
| if (HAVE_trunchiqi2) |
| { |
| emit_unop_insn (CODE_FOR_trunchiqi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| convert_move (to, force_reg (from_mode, from), unsignedp); |
| return; |
| } |
| |
| if (from_mode == TImode && to_mode == DImode) |
| { |
| #ifdef HAVE_trunctidi2 |
| if (HAVE_trunctidi2) |
| { |
| emit_unop_insn (CODE_FOR_trunctidi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| convert_move (to, force_reg (from_mode, from), unsignedp); |
| return; |
| } |
| |
| if (from_mode == TImode && to_mode == SImode) |
| { |
| #ifdef HAVE_trunctisi2 |
| if (HAVE_trunctisi2) |
| { |
| emit_unop_insn (CODE_FOR_trunctisi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| convert_move (to, force_reg (from_mode, from), unsignedp); |
| return; |
| } |
| |
| if (from_mode == TImode && to_mode == HImode) |
| { |
| #ifdef HAVE_trunctihi2 |
| if (HAVE_trunctihi2) |
| { |
| emit_unop_insn (CODE_FOR_trunctihi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| convert_move (to, force_reg (from_mode, from), unsignedp); |
| return; |
| } |
| |
| if (from_mode == TImode && to_mode == QImode) |
| { |
| #ifdef HAVE_trunctiqi2 |
| if (HAVE_trunctiqi2) |
| { |
| emit_unop_insn (CODE_FOR_trunctiqi2, to, from, UNKNOWN); |
| return; |
| } |
| #endif |
| convert_move (to, force_reg (from_mode, from), unsignedp); |
| return; |
| } |
| |
| /* Handle truncation of volatile memrefs, and so on; |
| the things that couldn't be truncated directly, |
| and for which there was no special instruction. */ |
| if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode)) |
| { |
| rtx temp = force_reg (to_mode, gen_lowpart (to_mode, from)); |
| emit_move_insn (to, temp); |
| return; |
| } |
| |
| /* Mode combination is not recognized. */ |
| abort (); |
| } |
| |
| /* Return an rtx for a value that would result |
| from converting X to mode MODE. |
| Both X and MODE may be floating, or both integer. |
| UNSIGNEDP is nonzero if X is an unsigned value. |
| This can be done by referring to a part of X in place |
| or by copying to a new temporary with conversion. |
| |
| This function *must not* call protect_from_queue |
| except when putting X into an insn (in which case convert_move does it). */ |
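| |
| /* For example (illustrative; SI_REG is a hypothetical SImode register), |
| |
| rtx wide = convert_to_mode (DImode, si_reg, 1); |
| |
| yields a DImode rtx holding the zero-extension of SI_REG. The result |
| may refer to part of the operand in place or be a new temporary, so it |
| must not be assumed to be a fresh register. */ |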
| |
| rtx |
| convert_to_mode (mode, x, unsignedp) |
| enum machine_mode mode; |
| rtx x; |
| int unsignedp; |
| { |
| return convert_modes (mode, VOIDmode, x, unsignedp); |
| } |
| |
| /* Return an rtx for a value that would result |
| from converting X from mode OLDMODE to mode MODE. |
| Both modes may be floating, or both integer. |
| UNSIGNEDP is nonzero if X is an unsigned value. |
| |
| This can be done by referring to a part of X in place |
| or by copying to a new temporary with conversion. |
| |
| You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode. |
| |
| This function *must not* call protect_from_queue |
| except when putting X into an insn (in which case convert_move does it). */ |
| |
| rtx |
| convert_modes (mode, oldmode, x, unsignedp) |
| enum machine_mode mode, oldmode; |
| rtx x; |
| int unsignedp; |
| { |
| register rtx temp; |
| |
| /* If FROM is a SUBREG that indicates that we have already done at least |
| the required extension, strip it. */ |
| |
| if (GET_CODE (x) == SUBREG && SUBREG_PROMOTED_VAR_P (x) |
| && GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) >= GET_MODE_SIZE (mode) |
| && SUBREG_PROMOTED_UNSIGNED_P (x) == unsignedp) |
| x = gen_lowpart (mode, x); |
| |
| if (GET_MODE (x) != VOIDmode) |
| oldmode = GET_MODE (x); |
| |
| if (mode == oldmode) |
| return x; |
| |
| /* There is one case that we must handle specially: If we are converting |
| a CONST_INT into a mode whose size is twice HOST_BITS_PER_WIDE_INT and |
| we are to interpret the constant as unsigned, gen_lowpart will do |
| the wrong thing if the constant appears negative. What we want to do is |
| make the high-order word of the constant zero, not all ones. */ |
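| |
| /* For instance, with a 32-bit HOST_WIDE_INT, converting (const_int -1) |
| taken as an unsigned SImode value to a 64-bit integer mode should |
| produce the constant 0x00000000ffffffff; gen_lowpart would instead |
| yield a constant whose high-order word is all ones. */ |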
| |
| if (unsignedp && GET_MODE_CLASS (mode) == MODE_INT |
| && GET_MODE_BITSIZE (mode) == 2 * HOST_BITS_PER_WIDE_INT |
| && GET_CODE (x) == CONST_INT && INTVAL (x) < 0) |
| { |
| HOST_WIDE_INT val = INTVAL (x); |
| |
| if (oldmode != VOIDmode |
| && HOST_BITS_PER_WIDE_INT > GET_MODE_BITSIZE (oldmode)) |
| { |
| int width = GET_MODE_BITSIZE (oldmode); |
| |
| /* We need to zero extend VAL. */ |
| val &= ((HOST_WIDE_INT) 1 << width) - 1; |
| } |
| |
| return immed_double_const (val, (HOST_WIDE_INT) 0, mode); |
| } |
| |
| /* We can do this with a gen_lowpart if both desired and current modes |
| are integer, and this is either a constant integer, a register, or a |
| non-volatile MEM. Except for the constant case where MODE is no |
| wider than HOST_BITS_PER_WIDE_INT, we must be narrowing the operand. */ |
| |
| if ((GET_CODE (x) == CONST_INT |
| && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) |
| || (GET_MODE_CLASS (mode) == MODE_INT |
| && GET_MODE_CLASS (oldmode) == MODE_INT |
| && (GET_CODE (x) == CONST_DOUBLE |
| || (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (oldmode) |
| && ((GET_CODE (x) == MEM && ! MEM_VOLATILE_P (x) |
| && direct_load[(int) mode]) |
| || (GET_CODE (x) == REG |
| && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), |
| GET_MODE_BITSIZE (GET_MODE (x))))))))) |
| { |
| /* ?? If we don't know OLDMODE, we have to assume here that |
| X does not need sign- or zero-extension. This may not be |
| the case, but it's the best we can do. */ |
| if (GET_CODE (x) == CONST_INT && oldmode != VOIDmode |
| && GET_MODE_SIZE (mode) > GET_MODE_SIZE (oldmode)) |
| { |
| HOST_WIDE_INT val = INTVAL (x); |
| int width = GET_MODE_BITSIZE (oldmode); |
| |
| /* We must sign or zero-extend in this case. Start by |
| zero-extending, then sign extend if we need to. */ |
| val &= ((HOST_WIDE_INT) 1 << width) - 1; |
| if (! unsignedp |
| && (val & ((HOST_WIDE_INT) 1 << (width - 1)))) |
| val |= (HOST_WIDE_INT) (-1) << width; |
| |
| return GEN_INT (val); |
| } |
| |
| return gen_lowpart (mode, x); |
| } |
| |
| temp = gen_reg_rtx (mode); |
| convert_move (temp, x, unsignedp); |
| return temp; |
| } |
| |
| /* Generate several move instructions to copy LEN bytes |
| from block FROM to block TO. (These are MEM rtx's with BLKmode). |
| The caller must pass FROM and TO |
| through protect_from_queue before calling. |
| ALIGN (in bytes) is the maximum alignment we can assume. */ |
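| |
| /* As an example, moving 7 bytes between word-aligned blocks on a 32-bit |
| machine typically expands to one SImode move, one HImode move and one |
| QImode move, assuming move patterns exist for those modes. */ |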
| |
| static void |
| move_by_pieces (to, from, len, align) |
| rtx to, from; |
| int len, align; |
| { |
| struct move_by_pieces data; |
| rtx to_addr = XEXP (to, 0), from_addr = XEXP (from, 0); |
| int max_size = MOVE_MAX + 1; |
| |
| data.offset = 0; |
| data.to_addr = to_addr; |
| data.from_addr = from_addr; |
| data.to = to; |
| data.from = from; |
| data.autinc_to |
| = (GET_CODE (to_addr) == PRE_INC || GET_CODE (to_addr) == PRE_DEC |
| || GET_CODE (to_addr) == POST_INC || GET_CODE (to_addr) == POST_DEC); |
| data.autinc_from |
| = (GET_CODE (from_addr) == PRE_INC || GET_CODE (from_addr) == PRE_DEC |
| || GET_CODE (from_addr) == POST_INC |
| || GET_CODE (from_addr) == POST_DEC); |
| |
| data.explicit_inc_from = 0; |
| data.explicit_inc_to = 0; |
| data.reverse |
| = (GET_CODE (to_addr) == PRE_DEC || GET_CODE (to_addr) == POST_DEC); |
| if (data.reverse) data.offset = len; |
| data.len = len; |
| |
| data.to_struct = MEM_IN_STRUCT_P (to); |
| data.from_struct = MEM_IN_STRUCT_P (from); |
| |
| /* If copying requires more than two move insns, |
| copy addresses to registers (to make displacements shorter) |
| and use post-increment if available. */ |
| if (!(data.autinc_from && data.autinc_to) |
| && move_by_pieces_ninsns (len, align) > 2) |
| { |
| #ifdef HAVE_PRE_DECREMENT |
| if (data.reverse && ! data.autinc_from) |
| { |
| data.from_addr = copy_addr_to_reg (plus_constant (from_addr, len)); |
| data.autinc_from = 1; |
| data.explicit_inc_from = -1; |
| } |
| #endif |
| #ifdef HAVE_POST_INCREMENT |
| if (! data.autinc_from) |
| { |
| data.from_addr = copy_addr_to_reg (from_addr); |
| data.autinc_from = 1; |
| data.explicit_inc_from = 1; |
| } |
| #endif |
| if (!data.autinc_from && CONSTANT_P (from_addr)) |
| data.from_addr = copy_addr_to_reg (from_addr); |
| #ifdef HAVE_PRE_DECREMENT |
| if (data.reverse && ! data.autinc_to) |
| { |
| data.to_addr = copy_addr_to_reg (plus_constant (to_addr, len)); |
| data.autinc_to = 1; |
| data.explicit_inc_to = -1; |
| } |
| #endif |
| #ifdef HAVE_POST_INCREMENT |
| if (! data.reverse && ! data.autinc_to) |
| { |
| data.to_addr = copy_addr_to_reg (to_addr); |
| data.autinc_to = 1; |
| data.explicit_inc_to = 1; |
| } |
| #endif |
| if (!data.autinc_to && CONSTANT_P (to_addr)) |
| data.to_addr = copy_addr_to_reg (to_addr); |
| } |
| |
| if (! SLOW_UNALIGNED_ACCESS |
| || align > MOVE_MAX || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT) |
| align = MOVE_MAX; |
| |
| /* First move what we can in the largest integer mode, then go to |
| successively smaller modes. */ |
| |
| while (max_size > 1) |
| { |
| enum machine_mode mode = VOIDmode, tmode; |
| enum insn_code icode; |
| |
| for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); |
| tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode)) |
| if (GET_MODE_SIZE (tmode) < max_size) |
| mode = tmode; |
| |
| if (mode == VOIDmode) |
| break; |
| |
| icode = mov_optab->handlers[(int) mode].insn_code; |
| if (icode != CODE_FOR_nothing |
| && align >= MIN (BIGGEST_ALIGNMENT / BITS_PER_UNIT, |
| GET_MODE_SIZE (mode))) |
| move_by_pieces_1 (GEN_FCN (icode), mode, &data); |
| |
| max_size = GET_MODE_SIZE (mode); |
| } |
| |
| /* The code above should have handled everything. */ |
| if (data.len > 0) |
| abort (); |
| } |
| |
| /* Return number of insns required to move L bytes by pieces. |
| ALIGN (in bytes) is the maximum alignment we can assume. */ |
| |
| static int |
| move_by_pieces_ninsns (l, align) |
| unsigned int l; |
| int align; |
| { |
| register int n_insns = 0; |
| int max_size = MOVE_MAX + 1; |
| |
| if (! SLOW_UNALIGNED_ACCESS |
| || align > MOVE_MAX || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT) |
| align = MOVE_MAX; |
| |
| while (max_size > 1) |
| { |
| enum machine_mode mode = VOIDmode, tmode; |
| enum insn_code icode; |
| |
| for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); |
| tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode)) |
| if (GET_MODE_SIZE (tmode) < max_size) |
| mode = tmode; |
| |
| if (mode == VOIDmode) |
| break; |
| |
| icode = mov_optab->handlers[(int) mode].insn_code; |
| if (icode != CODE_FOR_nothing |
| && align >= MIN (BIGGEST_ALIGNMENT / BITS_PER_UNIT, |
| GET_MODE_SIZE (mode))) |
| n_insns += l / GET_MODE_SIZE (mode), l %= GET_MODE_SIZE (mode); |
| |
| max_size = GET_MODE_SIZE (mode); |
| } |
| |
| return n_insns; |
| } |
| |
| /* Subroutine of move_by_pieces. Move as many bytes as appropriate |
| with move instructions for mode MODE. GENFUN is the gen_... function |
| to make a move insn for that mode. DATA has all the other info. */ |
| |
| static void |
| move_by_pieces_1 (genfun, mode, data) |
| rtx (*genfun) (); |
| enum machine_mode mode; |
| struct move_by_pieces *data; |
| { |
| register int size = GET_MODE_SIZE (mode); |
| register rtx to1, from1; |
| |
| while (data->len >= size) |
| { |
| if (data->reverse) data->offset -= size; |
| |
| to1 = (data->autinc_to |
| ? gen_rtx (MEM, mode, data->to_addr) |
| : copy_rtx (change_address (data->to, mode, |
| plus_constant (data->to_addr, |
| data->offset)))); |
| MEM_IN_STRUCT_P (to1) = data->to_struct; |
| |
| from1 |
| = (data->autinc_from |
| ? gen_rtx (MEM, mode, data->from_addr) |
| : copy_rtx (change_address (data->from, mode, |
| plus_constant (data->from_addr, |
| data->offset)))); |
| MEM_IN_STRUCT_P (from1) = data->from_struct; |
| |
| #ifdef HAVE_PRE_DECREMENT |
| if (data->explicit_inc_to < 0) |
| emit_insn (gen_add2_insn (data->to_addr, GEN_INT (-size))); |
| if (data->explicit_inc_from < 0) |
| emit_insn (gen_add2_insn (data->from_addr, GEN_INT (-size))); |
| #endif |
| |
| emit_insn ((*genfun) (to1, from1)); |
| #ifdef HAVE_POST_INCREMENT |
| if (data->explicit_inc_to > 0) |
| emit_insn (gen_add2_insn (data->to_addr, GEN_INT (size))); |
| if (data->explicit_inc_from > 0) |
| emit_insn (gen_add2_insn (data->from_addr, GEN_INT (size))); |
| #endif |
| |
| if (! data->reverse) data->offset += size; |
| |
| data->len -= size; |
| } |
| } |
| |
| /* Emit code to move a block Y to a block X. |
| This may be done with string-move instructions, |
| with multiple scalar move instructions, or with a library call. |
| |
| Both X and Y must be MEM rtx's (perhaps inside VOLATILE) |
| with mode BLKmode. |
| SIZE is an rtx that says how long they are. |
| ALIGN is the maximum alignment we can assume they have, |
| measured in bytes. */ |
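| |
| /* A typical call (illustrative; EXP is a hypothetical tree node for the |
| value being copied, TARGET and TEMP are BLKmode MEM rtx's) looks like |
| |
| emit_block_move (target, temp, expr_size (exp), |
| TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT); |
| |
| so SIZE comes from the tree's size and ALIGN from the type's alignment |
| in bytes. */ |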
| |
| void |
| emit_block_move (x, y, size, align) |
| rtx x, y; |
| rtx size; |
| int align; |
| { |
| if (GET_MODE (x) != BLKmode) |
| abort (); |
| |
| if (GET_MODE (y) != BLKmode) |
| abort (); |
| |
| x = protect_from_queue (x, 1); |
| y = protect_from_queue (y, 0); |
| size = protect_from_queue (size, 0); |
| |
| if (GET_CODE (x) != MEM) |
| abort (); |
| if (GET_CODE (y) != MEM) |
| abort (); |
| if (size == 0) |
| abort (); |
| |
| if (GET_CODE (size) == CONST_INT |
| && (move_by_pieces_ninsns (INTVAL (size), align) < MOVE_RATIO)) |
| move_by_pieces (x, y, INTVAL (size), align); |
| else |
| { |
| /* Try the most limited insn first, because there's no point |
| including more than one in the machine description unless |
| the more limited one has some advantage. */ |
| |
| rtx opalign = GEN_INT (align); |
| enum machine_mode mode; |
| |
| for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; |
| mode = GET_MODE_WIDER_MODE (mode)) |
| { |
| enum insn_code code = movstr_optab[(int) mode]; |
| |
| if (code != CODE_FOR_nothing |
| /* We don't need MODE to be narrower than HOST_BITS_PER_WIDE_INT |
| here because if SIZE is less than the mode mask, as it is |
| returned by the macro, it will definitely be less than the |
| actual mode mask. */ |
| && ((GET_CODE (size) == CONST_INT |
| && ((unsigned HOST_WIDE_INT) INTVAL (size) |
| <= GET_MODE_MASK (mode))) |
| || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD) |
| && (insn_operand_predicate[(int) code][0] == 0 |
| || (*insn_operand_predicate[(int) code][0]) (x, BLKmode)) |
| && (insn_operand_predicate[(int) code][1] == 0 |
| || (*insn_operand_predicate[(int) code][1]) (y, BLKmode)) |
| && (insn_operand_predicate[(int) code][3] == 0 |
| || (*insn_operand_predicate[(int) code][3]) (opalign, |
| VOIDmode))) |
| { |
| rtx op2; |
| rtx last = get_last_insn (); |
| rtx pat; |
| |
| op2 = convert_to_mode (mode, size, 1); |
| if (insn_operand_predicate[(int) code][2] != 0 |
| && ! (*insn_operand_predicate[(int) code][2]) (op2, mode)) |
| op2 = copy_to_mode_reg (mode, op2); |
| |
| pat = GEN_FCN ((int) code) (x, y, op2, opalign); |
| if (pat) |
| { |
| emit_insn (pat); |
| return; |
| } |
| else |
| delete_insns_since (last); |
| } |
| } |
| |
| #ifdef TARGET_MEM_FUNCTIONS |
| emit_library_call (memcpy_libfunc, 0, |
| VOIDmode, 3, XEXP (x, 0), Pmode, |
| XEXP (y, 0), Pmode, |
| convert_to_mode (TYPE_MODE (sizetype), size, |
| TREE_UNSIGNED (sizetype)), |
| TYPE_MODE (sizetype)); |
| #else |
| emit_library_call (bcopy_libfunc, 0, |
| VOIDmode, 3, XEXP (y, 0), Pmode, |
| XEXP (x, 0), Pmode, |
| convert_to_mode (TYPE_MODE (integer_type_node), size, |
| TREE_UNSIGNED (integer_type_node)), |
| TYPE_MODE (integer_type_node)); |
| #endif |
| } |
| } |
| |
| /* Copy all or part of a value X into registers starting at REGNO. |
| The number of registers to be filled is NREGS. */ |
| |
| void |
| move_block_to_reg (regno, x, nregs, mode) |
| int regno; |
| rtx x; |
| int nregs; |
| enum machine_mode mode; |
| { |
| int i; |
| rtx pat, last; |
| |
| if (nregs == 0) |
| return; |
| |
| if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x)) |
| x = validize_mem (force_const_mem (mode, x)); |
| |
| /* See if the machine can do this with a load multiple insn. */ |
| #ifdef HAVE_load_multiple |
| if (HAVE_load_multiple) |
| { |
| last = get_last_insn (); |
| pat = gen_load_multiple (gen_rtx (REG, word_mode, regno), x, |
| GEN_INT (nregs)); |
| if (pat) |
| { |
| emit_insn (pat); |
| return; |
| } |
| else |
| delete_insns_since (last); |
| } |
| #endif |
| |
| for (i = 0; i < nregs; i++) |
| emit_move_insn (gen_rtx (REG, word_mode, regno + i), |
| operand_subword_force (x, i, mode)); |
| } |
| |
| /* Copy all or part of a BLKmode value X out of registers starting at REGNO. |
| The number of registers to be filled is NREGS. SIZE indicates the number |
| of bytes in the object X. */ |
| |
| |
| void |
| move_block_from_reg (regno, x, nregs, size) |
| int regno; |
| rtx x; |
| int nregs; |
| int size; |
| { |
| int i; |
| rtx pat, last; |
| enum machine_mode mode; |
| |
| /* If SIZE is that of a mode no bigger than a word, just use that |
| mode's store operation. */ |
| if (size <= UNITS_PER_WORD |
| && (mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0)) != BLKmode) |
| { |
| emit_move_insn (change_address (x, mode, NULL), |
| gen_rtx (REG, mode, regno)); |
| return; |
| } |
| |
| /* Blocks smaller than a word on a BYTES_BIG_ENDIAN machine must be aligned |
| to the left before storing to memory. Note that the previous test |
| doesn't handle all cases (e.g. SIZE == 3). */ |
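| |
| /* For example, with SIZE == 3 and 4-byte words the value occupies the |
| low-order 3 bytes of the register, so it is shifted left by |
| (4 - 3) * BITS_PER_UNIT = 8 bits so that the three significant bytes |
| come first in the word that is stored. */ |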
| if (size < UNITS_PER_WORD && BYTES_BIG_ENDIAN) |
| { |
| rtx tem = operand_subword (x, 0, 1, BLKmode); |
| rtx shift; |
| |
| if (tem == 0) |
| abort (); |
| |
| shift = expand_shift (LSHIFT_EXPR, word_mode, |
| gen_rtx (REG, word_mode, regno), |
| build_int_2 ((UNITS_PER_WORD - size) |
| * BITS_PER_UNIT, 0), NULL_RTX, 0); |
| emit_move_insn (tem, shift); |
| return; |
| } |
| |
| /* See if the machine can do this with a store multiple insn. */ |
| #ifdef HAVE_store_multiple |
| if (HAVE_store_multiple) |
| { |
| last = get_last_insn (); |
| pat = gen_store_multiple (x, gen_rtx (REG, word_mode, regno), |
| GEN_INT (nregs)); |
| if (pat) |
| { |
| emit_insn (pat); |
| return; |
| } |
| else |
| delete_insns_since (last); |
| } |
| #endif |
| |
| for (i = 0; i < nregs; i++) |
| { |
| rtx tem = operand_subword (x, i, 1, BLKmode); |
| |
| if (tem == 0) |
| abort (); |
| |
| emit_move_insn (tem, gen_rtx (REG, word_mode, regno + i)); |
| } |
| } |
| |
| /* Emit code to move a block Y to a block X, where X is non-consecutive |
| registers represented by a PARALLEL. */ |
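| |
| /* Schematically (register numbers are purely illustrative), such a |
| PARALLEL built by a target's calling-convention code might look like |
| |
| (parallel [(expr_list (reg:DI 32) (const_int 0)) |
| (expr_list (reg:DI 33) (const_int 8))]) |
| |
| each element pairing a hard register with the byte offset of the piece |
| of the value it carries. */ |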
| |
| void |
| emit_group_load (x, y) |
| rtx x, y; |
| { |
| rtx target_reg, source; |
| int i; |
| |
| if (GET_CODE (x) != PARALLEL) |
| abort (); |
| |
| /* Check for a NULL entry, used to indicate that the parameter goes |
| both on the stack and in registers. */ |
| if (XEXP (XVECEXP (x, 0, 0), 0)) |
| i = 0; |
| else |
| i = 1; |
| |
| for (; i < XVECLEN (x, 0); i++) |
| { |
| rtx element = XVECEXP (x, 0, i); |
| |
| target_reg = XEXP (element, 0); |
| |
| if (GET_CODE (y) == MEM) |
| source = change_address (y, GET_MODE (target_reg), |
| plus_constant (XEXP (y, 0), |
| INTVAL (XEXP (element, 1)))); |
| else if (XEXP (element, 1) == const0_rtx) |
| { |
| if (GET_MODE (target_reg) == GET_MODE (y)) |
| source = y; |
| /* Allow for the target_reg to be smaller than the input register |
| to allow for AIX with 4 DF arguments after a single SI arg. The |
| last DF argument will only load 1 word into the integer registers, |
| but load a DF value into the float registers. */ |
| else if ((GET_MODE_SIZE (GET_MODE (target_reg)) |
| <= GET_MODE_SIZE (GET_MODE (y))) |
| && GET_MODE (target_reg) == word_mode) |
| /* This might be a const_double, so we can't just use SUBREG. */ |
| source = operand_subword (y, 0, 0, VOIDmode); |
| else if (GET_MODE_SIZE (GET_MODE (target_reg)) |
| == GET_MODE_SIZE (GET_MODE (y))) |
| source = gen_lowpart (GET_MODE (target_reg), y); |
| else |
| abort (); |
| } |
| else |
| abort (); |
| |
| emit_move_insn (target_reg, source); |
| } |
| } |
| |
| /* Emit code to move a block Y to a block X, where Y is non-consecutive |
| registers represented by a PARALLEL. */ |
| |
| void |
| emit_group_store (x, y) |
| rtx x, y; |
| { |
| rtx source_reg, target; |
| int i; |
| |
| if (GET_CODE (y) != PARALLEL) |
| abort (); |
| |
| /* Check for a NULL entry, used to indicate that the parameter goes |
| both on the stack and in registers. */ |
| if (XEXP (XVECEXP (y, 0, 0), 0)) |
| i = 0; |
| else |
| i = 1; |
| |
| for (; i < XVECLEN (y, 0); i++) |
| { |
| rtx element = XVECEXP (y, 0, i); |
| |
| source_reg = XEXP (element, 0); |
| |
| if (GET_CODE (x) == MEM) |
| target = change_address (x, GET_MODE (source_reg), |
| plus_constant (XEXP (x, 0), |
| INTVAL (XEXP (element, 1)))); |
| else if (XEXP (element, 1) == const0_rtx) |
| { |
| target = x; |
| if (GET_MODE (target) != GET_MODE (source_reg)) |
| target = gen_lowpart (GET_MODE (source_reg), target); |
| } |
| else |
| abort (); |
| |
| emit_move_insn (target, source_reg); |
| } |
| } |
| |
| /* Add a USE expression for REG to the (possibly empty) list pointed |
| to by CALL_FUSAGE. REG must denote a hard register. */ |
| |
| void |
| use_reg (call_fusage, reg) |
| rtx *call_fusage, reg; |
| { |
| if (GET_CODE (reg) != REG |
| || REGNO (reg) >= FIRST_PSEUDO_REGISTER) |
| abort(); |
| |
| *call_fusage |
| = gen_rtx (EXPR_LIST, VOIDmode, |
| gen_rtx (USE, VOIDmode, reg), *call_fusage); |
| } |
| |
| /* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs, |
| starting at REGNO. All of these registers must be hard registers. */ |
| |
| void |
| use_regs (call_fusage, regno, nregs) |
| rtx *call_fusage; |
| int regno; |
| int nregs; |
| { |
| int i; |
| |
| if (regno + nregs > FIRST_PSEUDO_REGISTER) |
| abort (); |
| |
| for (i = 0; i < nregs; i++) |
| use_reg (call_fusage, gen_rtx (REG, reg_raw_mode[regno + i], regno + i)); |
| } |
| |
| /* Add USE expressions to *CALL_FUSAGE for each REG contained in the |
| PARALLEL REGS. This is for calls that pass values in multiple |
| non-contiguous locations. The Irix 6 ABI has examples of this. */ |
| |
| void |
| use_group_regs (call_fusage, regs) |
| rtx *call_fusage; |
| rtx regs; |
| { |
| int i; |
| |
| /* Check for a NULL entry, used to indicate that the parameter goes |
| both on the stack and in registers. */ |
| if (XEXP (XVECEXP (regs, 0, 0), 0)) |
| i = 0; |
| else |
| i = 1; |
| |
| for (; i < XVECLEN (regs, 0); i++) |
| use_reg (call_fusage, XEXP (XVECEXP (regs, 0, i), 0)); |
| } |
| |
| /* Generate several move instructions to clear LEN bytes of block TO. |
| (A MEM rtx with BLKmode). The caller must pass TO through |
| protect_from_queue before calling. ALIGN (in bytes) is the maximum |
| alignment we can assume. */ |
| |
| static void |
| clear_by_pieces (to, len, align) |
| rtx to; |
| int len, align; |
| { |
| struct clear_by_pieces data; |
| rtx to_addr = XEXP (to, 0); |
| int max_size = MOVE_MAX + 1; |
| |
| data.offset = 0; |
| data.to_addr = to_addr; |
| data.to = to; |
| data.autinc_to |
| = (GET_CODE (to_addr) == PRE_INC || GET_CODE (to_addr) == PRE_DEC |
| || GET_CODE (to_addr) == POST_INC || GET_CODE (to_addr) == POST_DEC); |
| |
| data.explicit_inc_to = 0; |
| data.reverse |
| = (GET_CODE (to_addr) == PRE_DEC || GET_CODE (to_addr) == POST_DEC); |
| if (data.reverse) data.offset = len; |
| data.len = len; |
| |
| data.to_struct = MEM_IN_STRUCT_P (to); |
| |
| /* If copying requires more than two move insns, |
| copy addresses to registers (to make displacements shorter) |
| and use post-increment if available. */ |
| if (!data.autinc_to |
| && move_by_pieces_ninsns (len, align) > 2) |
| { |
| #ifdef HAVE_PRE_DECREMENT |
| if (data.reverse && ! data.autinc_to) |
| { |
| data.to_addr = copy_addr_to_reg (plus_constant (to_addr, len)); |
| data.autinc_to = 1; |
| data.explicit_inc_to = -1; |
| } |
| #endif |
| #ifdef HAVE_POST_INCREMENT |
| if (! data.reverse && ! data.autinc_to) |
| { |
| data.to_addr = copy_addr_to_reg (to_addr); |
| data.autinc_to = 1; |
| data.explicit_inc_to = 1; |
| } |
| #endif |
| if (!data.autinc_to && CONSTANT_P (to_addr)) |
| data.to_addr = copy_addr_to_reg (to_addr); |
| } |
| |
| if (! SLOW_UNALIGNED_ACCESS |
| || align > MOVE_MAX || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT) |
| align = MOVE_MAX; |
| |
| /* First move what we can in the largest integer mode, then go to |
| successively smaller modes. */ |
| |
| while (max_size > 1) |
| { |
| enum machine_mode mode = VOIDmode, tmode; |
| enum insn_code icode; |
| |
| for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); |
| tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode)) |
| if (GET_MODE_SIZE (tmode) < max_size) |
| mode = tmode; |
| |
| if (mode == VOIDmode) |
| break; |
| |
| icode = mov_optab->handlers[(int) mode].insn_code; |
| if (icode != CODE_FOR_nothing |
| && align >= MIN (BIGGEST_ALIGNMENT / BITS_PER_UNIT, |
| GET_MODE_SIZE (mode))) |
| clear_by_pieces_1 (GEN_FCN (icode), mode, &data); |
| |
| max_size = GET_MODE_SIZE (mode); |
| } |
| |
| /* The code above should have handled everything. */ |
| if (data.len != 0) |
| abort (); |
| } |
| |
| /* Subroutine of clear_by_pieces. Clear as many bytes as appropriate |
| with move instructions for mode MODE. GENFUN is the gen_... function |
| to make a move insn for that mode. DATA has all the other info. */ |
| |
| static void |
| clear_by_pieces_1 (genfun, mode, data) |
| rtx (*genfun) (); |
| enum machine_mode mode; |
| struct clear_by_pieces *data; |
| { |
| register int size = GET_MODE_SIZE (mode); |
| register rtx to1; |
| |
| while (data->len >= size) |
| { |
| if (data->reverse) data->offset -= size; |
| |
| to1 = (data->autinc_to |
| ? gen_rtx (MEM, mode, data->to_addr) |
| : copy_rtx (change_address (data->to, mode, |
| plus_constant (data->to_addr, |
| data->offset)))); |
| MEM_IN_STRUCT_P (to1) = data->to_struct; |
| |
| #ifdef HAVE_PRE_DECREMENT |
| if (data->explicit_inc_to < 0) |
| emit_insn (gen_add2_insn (data->to_addr, GEN_INT (-size))); |
| #endif |
| |
| emit_insn ((*genfun) (to1, const0_rtx)); |
| #ifdef HAVE_POST_INCREMENT |
| if (data->explicit_inc_to > 0) |
| emit_insn (gen_add2_insn (data->to_addr, GEN_INT (size))); |
| #endif |
| |
| if (! data->reverse) data->offset += size; |
| |
| data->len -= size; |
| } |
| } |
| |
| /* Write zeros through the storage of OBJECT. |
| If OBJECT has BLKmode, SIZE is its length in bytes and ALIGN is |
| the maximum alignment we can assume it has, measured in bytes. */ |
| |
| void |
| clear_storage (object, size, align) |
| rtx object; |
| rtx size; |
| int align; |
| { |
| if (GET_MODE (object) == BLKmode) |
| { |
| object = protect_from_queue (object, 1); |
| size = protect_from_queue (size, 0); |
| |
| if (GET_CODE (size) == CONST_INT |
| && (move_by_pieces_ninsns (INTVAL (size), align) < MOVE_RATIO)) |
| clear_by_pieces (object, INTVAL (size), align); |
| |
| else |
| { |
| /* Try the most limited insn first, because there's no point |
| including more than one in the machine description unless |
| the more limited one has some advantage. */ |
| |
| rtx opalign = GEN_INT (align); |
| enum machine_mode mode; |
| |
| for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; |
| mode = GET_MODE_WIDER_MODE (mode)) |
| { |
| enum insn_code code = clrstr_optab[(int) mode]; |
| |
| if (code != CODE_FOR_nothing |
| /* We don't need MODE to be narrower than |
| BITS_PER_HOST_WIDE_INT here because if SIZE is less than |
| the mode mask, as it is returned by the macro, it will |
| definitely be less than the actual mode mask. */ |
| && ((GET_CODE (size) == CONST_INT |
| && ((unsigned HOST_WIDE_INT) INTVAL (size) |
| <= GET_MODE_MASK (mode))) |
| || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD) |
| && (insn_operand_predicate[(int) code][0] == 0 |
| || (*insn_operand_predicate[(int) code][0]) (object, |
| BLKmode)) |
| && (insn_operand_predicate[(int) code][2] == 0 |
| || (*insn_operand_predicate[(int) code][2]) (opalign, |
| VOIDmode))) |
| { |
| rtx op1; |
| rtx last = get_last_insn (); |
| rtx pat; |
| |
| op1 = convert_to_mode (mode, size, 1); |
| if (insn_operand_predicate[(int) code][1] != 0 |
| && ! (*insn_operand_predicate[(int) code][1]) (op1, |
| mode)) |
| op1 = copy_to_mode_reg (mode, op1); |
| |
| pat = GEN_FCN ((int) code) (object, op1, opalign); |
| if (pat) |
| { |
| emit_insn (pat); |
| return; |
| } |
| else |
| delete_insns_since (last); |
| } |
| } |
| |
| |
| #ifdef TARGET_MEM_FUNCTIONS |
| emit_library_call (memset_libfunc, 0, |
| VOIDmode, 3, |
| XEXP (object, 0), Pmode, |
| const0_rtx, TYPE_MODE (integer_type_node), |
| convert_to_mode (TYPE_MODE (sizetype), |
| size, TREE_UNSIGNED (sizetype)), |
| TYPE_MODE (sizetype)); |
| #else |
| emit_library_call (bzero_libfunc, 0, |
| VOIDmode, 2, |
| XEXP (object, 0), Pmode, |
| convert_to_mode (TYPE_MODE (integer_type_node), |
| size, |
| TREE_UNSIGNED (integer_type_node)), |
| TYPE_MODE (integer_type_node)); |
| #endif |
| } |
| } |
| else |
| emit_move_insn (object, const0_rtx); |
| } |
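| |
| /* Illustrative sketch, assuming OBJECT is a BLKmode MEM whose size in |
| bytes and alignment are known (hypothetical variables SIZE and ALIGN): |
| |
| clear_storage (object, GEN_INT (size), align); |
| |
| For an OBJECT with a non-BLK mode the same call reduces to the move of |
| const0_rtx in the final branch above. */ |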
| |
| /* Generate code to copy Y into X. |
| Both Y and X must have the same mode, except that |
| Y can be a constant with VOIDmode. |
| This mode cannot be BLKmode; use emit_block_move for that. |
| |
| Return the last instruction emitted. */ |
| |
| rtx |
| emit_move_insn (x, y) |
| rtx x, y; |
| { |
| enum machine_mode mode = GET_MODE (x); |
| |
| x = protect_from_queue (x, 1); |
| y = protect_from_queue (y, 0); |
| |
| if (mode == BLKmode || (GET_MODE (y) != mode && GET_MODE (y) != VOIDmode)) |
| abort (); |
| |
| if (CONSTANT_P (y) && ! LEGITIMATE_CONSTANT_P (y)) |
| y = force_const_mem (mode, y); |
| |
| /* If X or Y are memory references, verify that their addresses are valid |
| for the machine. */ |
| if (GET_CODE (x) == MEM |
| && ((! memory_address_p (GET_MODE (x), XEXP (x, 0)) |
| && ! push_operand (x, GET_MODE (x))) |
| || (flag_force_addr |
| && CONSTANT_ADDRESS_P (XEXP (x, 0))))) |
| x = change_address (x, VOIDmode, XEXP (x, 0)); |
| |
| if (GET_CODE (y) == MEM |
| && (! memory_address_p (GET_MODE (y), XEXP (y, 0)) |
| || (flag_force_addr |
| && CONSTANT_ADDRESS_P (XEXP (y, 0))))) |
| y = change_address (y, VOIDmode, XEXP (y, 0)); |
| |
| if (mode == BLKmode) |
| abort (); |
| |
| return emit_move_insn_1 (x, y); |
| } |
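| |
| /* Illustrative sketch: copying SRC into DEST, two rtx'es of the same |
| non-BLK mode (hypothetical variables), is simply |
| |
| emit_move_insn (dest, src); |
| |
| If SRC is a constant that fails LEGITIMATE_CONSTANT_P, the code above |
| first forces it into the constant pool. */ |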
| |
| /* Low level part of emit_move_insn. |
| Called just like emit_move_insn, but assumes X and Y |
| are basically valid. */ |
| |
| rtx |
| emit_move_insn_1 (x, y) |
| rtx x, y; |
| { |
| enum machine_mode mode = GET_MODE (x); |
| enum machine_mode submode; |
| enum mode_class class = GET_MODE_CLASS (mode); |
| int i; |
| |
| if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) |
| return |
| emit_insn (GEN_FCN (mov_optab->handlers[(int) mode].insn_code) (x, y)); |
| |
| /* Expand complex moves by moving real part and imag part, if possible. */ |
| else if ((class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT) |
| && BLKmode != (submode = mode_for_size ((GET_MODE_UNIT_SIZE (mode) |
| * BITS_PER_UNIT), |
| (class == MODE_COMPLEX_INT |
| ? MODE_INT : MODE_FLOAT), |
| 0)) |
| && (mov_optab->handlers[(int) submode].insn_code |
| != CODE_FOR_nothing)) |
| { |
| /* Don't split destination if it is a stack push. */ |
| int stack = push_operand (x, GET_MODE (x)); |
| rtx insns; |
| |
| /* If this is a stack push, push the highpart first, so it |
| will be in the argument order. |
| |
| In that case, change_address is used only to convert |
| the mode, not to change the address. */ |
| if (stack) |
| { |
| /* Note that the real part always precedes the imag part in memory |
| regardless of machine's endianness. */ |
| #ifdef STACK_GROWS_DOWNWARD |
| emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code) |
| (gen_rtx (MEM, submode, (XEXP (x, 0))), |
| gen_imagpart (submode, y))); |
| emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code) |
| (gen_rtx (MEM, submode, (XEXP (x, 0))), |
| gen_realpart (submode, y))); |
| #else |
| emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code) |
| (gen_rtx (MEM, submode, (XEXP (x, 0))), |
| gen_realpart (submode, y))); |
| emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code) |
| (gen_rtx (MEM, submode, (XEXP (x, 0))), |
| gen_imagpart (submode, y))); |
| #endif |
| } |
| else |
| { |
| emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code) |
| (gen_realpart (submode, x), gen_realpart (submode, y))); |
| emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code) |
| (gen_imagpart (submode, x), gen_imagpart (submode, y))); |
| } |
| |
| return get_last_insn (); |
| } |
| |
| /* This will handle any multi-word mode that lacks a move_insn pattern. |
| However, you will get better code if you define such patterns, |
| even if they must turn into multiple assembler instructions. */ |
| else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD) |
| { |
| rtx last_insn = 0; |
| rtx insns; |
| |
| #ifdef PUSH_ROUNDING |
| |
| /* If X is a push on the stack, do the push now and replace |
| X with a reference to the stack pointer. */ |
| if (push_operand (x, GET_MODE (x))) |
| { |
| anti_adjust_stack (GEN_INT (GET_MODE_SIZE (GET_MODE (x)))); |
| x = change_address (x, VOIDmode, stack_pointer_rtx); |
| } |
| #endif |
| |
| /* Show the output dies here. */ |
| if (x != y) |
| emit_insn (gen_rtx (CLOBBER, VOIDmode, x)); |
| |
| for (i = 0; |
| i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD; |
| i++) |
| { |
| rtx xpart = operand_subword (x, i, 1, mode); |
| rtx ypart = operand_subword (y, i, 1, mode); |
| |
| /* If we can't get a part of Y, put Y into memory if it is a |
| constant. Otherwise, force it into a register. If we still |
| can't get a part of Y, abort. */ |
| if (ypart == 0 && CONSTANT_P (y)) |
| { |
| y = force_const_mem (mode, y); |
| ypart = operand_subword (y, i, 1, mode); |
| } |
| else if (ypart == 0) |
| ypart = operand_subword_force (y, i, mode); |
| |
| if (xpart == 0 || ypart == 0) |
| abort (); |
| |
| last_insn = emit_move_insn (xpart, ypart); |
| } |
| |
| return last_insn; |
| } |
| else |
| abort (); |
| } |
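| |
| /* Illustrative sketch of the multi-word fallback above: on a |
| hypothetical 32-bit target with no DImode move pattern, a DImode move |
| is emitted word by word, roughly as |
| |
| for (i = 0; i < 2; i++) |
| emit_move_insn (operand_subword (x, i, 1, DImode), |
| operand_subword_force (y, i, DImode)); |
| |
| preceded by a CLOBBER of X to show that the whole destination dies. */ |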
| |
| /* Pushing data onto the stack. */ |
| |
| /* Push a block of length SIZE (perhaps variable) |
| and return an rtx to address the beginning of the block. |
| Note that it is not possible for the value returned to be a QUEUED. |
| The value may be virtual_outgoing_args_rtx. |
| |
| EXTRA is the number of bytes of padding to push in addition to SIZE. |
| BELOW nonzero means this padding comes at low addresses; |
| otherwise, the padding comes at high addresses. */ |
| |
| rtx |
| push_block (size, extra, below) |
| rtx size; |
| int extra, below; |
| { |
| register rtx temp; |
| |
| size = convert_modes (Pmode, ptr_mode, size, 1); |
| if (CONSTANT_P (size)) |
| anti_adjust_stack (plus_constant (size, extra)); |
| else if (GET_CODE (size) == REG && extra == 0) |
| anti_adjust_stack (size); |
| else |
| { |
| rtx temp = copy_to_mode_reg (Pmode, size); |
| if (extra != 0) |
| temp = expand_binop (Pmode, add_optab, temp, GEN_INT (extra), |
| temp, 0, OPTAB_LIB_WIDEN); |
| anti_adjust_stack (temp); |
| } |
| |
| #ifdef STACK_GROWS_DOWNWARD |
| temp = virtual_outgoing_args_rtx; |
| if (extra != 0 && below) |
| temp = plus_constant (temp, extra); |
| #else |
| if (GET_CODE (size) == CONST_INT) |
| temp = plus_constant (virtual_outgoing_args_rtx, |
| - INTVAL (size) - (below ? 0 : extra)); |
| else if (extra != 0 && !below) |
| temp = gen_rtx (PLUS, Pmode, virtual_outgoing_args_rtx, |
| negate_rtx (Pmode, plus_constant (size, extra))); |
| else |
| temp = gen_rtx (PLUS, Pmode, virtual_outgoing_args_rtx, |
| negate_rtx (Pmode, size)); |
| #endif |
| |
| return memory_address (GET_CLASS_NARROWEST_MODE (MODE_INT), temp); |
| } |
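| |
| /* Illustrative sketch, assuming SIZE is a CONST_INT rtx giving the byte |
| count of a BLKmode argument and no extra padding is wanted |
| (hypothetical values): |
| |
| rtx addr = push_block (size, 0, 0); |
| rtx block = gen_rtx (MEM, BLKmode, addr); |
| |
| BLOCK can then serve as the destination of a block move, much as |
| emit_push_insn does below. */ |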
| |
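| /* Return a PRE_DEC, PRE_INC, POST_DEC or POST_INC of the stack pointer, |
| according to STACK_PUSH_CODE, for use as the address of a MEM that is |
| the destination of a push. */ |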
| rtx |
| gen_push_operand () |
| { |
| return gen_rtx (STACK_PUSH_CODE, Pmode, stack_pointer_rtx); |
| } |
| |
| /* Return an rtx for the address of the beginning of an as-if-it-were-pushed |
| block of SIZE bytes. */ |
| |
| static rtx |
| get_push_address (size) |
| int size; |
| { |
| register rtx temp; |
| |
| if (STACK_PUSH_CODE == POST_DEC) |
| temp = gen_rtx (PLUS, Pmode, stack_pointer_rtx, GEN_INT (size)); |
| else if (STACK_PUSH_CODE == POST_INC) |
| temp = gen_rtx (MINUS, Pmode, stack_pointer_rtx, GEN_INT (size)); |
| else |
| temp = stack_pointer_rtx; |
| |
| return force_operand (temp, NULL_RTX); |
| } |
| |
| /* Generate code to push X onto the stack, assuming it has mode MODE and |
| type TYPE. |
| MODE is redundant except when X is a CONST_INT (since they don't |
| carry mode info). |
| SIZE is an rtx for the size of data to be copied (in bytes), |
| needed only if X is BLKmode. |
| |
| ALIGN (in bytes) is the maximum alignment we can assume. |
| |
| If PARTIAL and REG are both nonzero, then copy that many of the first |
| words of X into registers starting with REG, and push the rest of X. |
| The amount of space pushed is decreased by PARTIAL words, |
| rounded *down* to a multiple of PARM_BOUNDARY. |
| REG must be a hard register in this case. |
| If REG is zero but PARTIAL is not, take all other actions for an |
| argument partially in registers, but do not actually load any |
| registers. |
| |
| EXTRA is the amount in bytes of extra space to leave next to this arg. |
| This is ignored if an argument block has already been allocated. |
| |
| On a machine that lacks real push insns, ARGS_ADDR is the address of |
| the bottom of the argument block for this call. We use indexing off there |
| to store the arg. On machines with push insns, ARGS_ADDR is 0 when an |
| argument block has not been preallocated. |
| |
| ARGS_SO_FAR is the size of args previously pushed for this call. */ |
| |
| void |
| emit_push_insn (x, mode, type, size, align, partial, reg, extra, |
| args_addr, args_so_far) |
| register rtx x; |
| enum machine_mode mode; |
| tree type; |
| rtx size; |
| int align; |
| int partial; |
| rtx reg; |
| int extra; |
| rtx args_addr; |
| rtx args_so_far; |
| { |
| rtx xinner; |
| enum direction stack_direction |
| #ifdef STACK_GROWS_DOWNWARD |
| = downward; |
| #else |
| = upward; |
| #endif |
| |
| /* Decide where to pad the argument: `downward' for below, |
| `upward' for above, or `none' for no padding at all. |
| Default is below for small data on big-endian machines; else above. */ |
| enum direction where_pad = FUNCTION_ARG_PADDING (mode, type); |
| |
| /* Invert direction if stack is post-update. */ |
| if (STACK_PUSH_CODE == POST_INC || STACK_PUSH_CODE == POST_DEC) |
| if (where_pad != none) |
| where_pad = (where_pad == downward ? upward : downward); |
| |
| xinner = x = protect_from_queue (x, 0); |
| |
| if (mode == BLKmode) |
| { |
| /* Copy a block into the stack, entirely or partially. */ |
| |
| register rtx temp; |
| int used = partial * UNITS_PER_WORD; |
| int offset = used % (PARM_BOUNDARY / BITS_PER_UNIT); |
| int skip; |
| |
| if (size == 0) |
| abort (); |
| |
| used -= offset; |
| |
| /* USED is now the # of bytes we need not copy to the stack |
| because registers will take care of them. */ |
| |
| if (partial != 0) |
| xinner = change_address (xinner, BLKmode, |
| plus_constant (XEXP (xinner, 0), used)); |
| |
| /* If the partial register-part of the arg counts in its stack size, |
| skip the part of stack space corresponding to the registers. |
| Otherwise, start copying to the beginning of the stack space, |
| by setting SKIP to 0. */ |
| #ifndef REG_PARM_STACK_SPACE |
| skip = 0; |
| #else |
| skip = used; |
| #endif |
| |
| #ifdef PUSH_ROUNDING |
| /* Do it with several push insns if that doesn't take lots of insns |
| and if there is no difficulty with push insns that skip bytes |
| on the stack for alignment purposes. */ |
| if (args_addr == 0 |
| && GET_CODE (size) == CONST_INT |
| && skip == 0 |
| && (move_by_pieces_ninsns ((unsigned) INTVAL (size) - used, align) |
| < MOVE_RATIO) |
| /* Here we avoid the case of a structure whose weak alignment |
| forces many pushes of a small amount of data, |
| and such small pushes do rounding that causes trouble. */ |
| && ((! SLOW_UNALIGNED_ACCESS) |
| || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT |
| || PUSH_ROUNDING (align) == align) |
| && PUSH_ROUNDING (INTVAL (size)) == INTVAL (size)) |
| { |
| /* Push padding now if padding above and stack grows down, |
| or if padding below and stack grows up. |
| But if space already allocated, this has already been done. */ |
| if (extra && args_addr == 0 |
| && where_pad != none && where_pad != stack_direction) |
| anti_adjust_stack (GEN_INT (extra)); |
| |
| move_by_pieces (gen_rtx (MEM, BLKmode, gen_push_operand ()), xinner, |
| INTVAL (size) - used, align); |
| |
| if (flag_check_memory_usage) |
| { |
| rtx temp; |
| |
| temp = get_push_address (INTVAL(size) - used); |
| if (GET_CODE (x) == MEM && AGGREGATE_TYPE_P (type)) |
| emit_library_call (chkr_copy_bitmap_libfunc, 1, VOIDmode, 3, |
| temp, ptr_mode, |
| XEXP (xinner, 0), ptr_mode, |
| GEN_INT (INTVAL(size) - used), |
| TYPE_MODE (sizetype)); |
| else |
| emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3, |
| temp, ptr_mode, |
| GEN_INT (INTVAL(size) - used), |
| TYPE_MODE (sizetype), |
| GEN_INT (MEMORY_USE_RW), QImode); |
| } |
| } |
| else |
| #endif /* PUSH_ROUNDING */ |
| { |
| /* Otherwise make space on the stack and copy the data |
| to the address of that space. */ |
| |
| /* Deduct words put into registers from the size we must copy. */ |
| if (partial != 0) |
| { |
| if (GET_CODE (size) == CONST_INT) |
| size = GEN_INT (INTVAL (size) - used); |
| else |
| size = expand_binop (GET_MODE (size), sub_optab, size, |
| GEN_INT (used), NULL_RTX, 0, |
| OPTAB_LIB_WIDEN); |
| } |
| |
| /* Get the address of the stack space. |
| In this case, we do not deal with EXTRA separately. |
| A single stack adjust will do. */ |
| if (! args_addr) |
| { |
| temp = push_block (size, extra, where_pad == downward); |
| extra = 0; |
| } |
| else if (GET_CODE (args_so_far) == CONST_INT) |
| temp = memory_address (BLKmode, |
| plus_constant (args_addr, |
| skip + INTVAL (args_so_far))); |
| else |
| temp = memory_address (BLKmode, |
| plus_constant (gen_rtx (PLUS, Pmode, |
| args_addr, args_so_far), |
| skip)); |
| if (flag_check_memory_usage) |
| { |
| rtx target; |
| |
| target = copy_to_reg (temp); |
| if (GET_CODE (x) == MEM && AGGREGATE_TYPE_P (type)) |
| emit_library_call (chkr_copy_bitmap_libfunc, 1, VOIDmode, 3, |
| target, ptr_mode, |
| XEXP (xinner, 0), ptr_mode, |
| size, TYPE_MODE (sizetype)); |
| else |
| emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3, |
| target, ptr_mode, |
| size, TYPE_MODE (sizetype), |
| GEN_INT (MEMORY_USE_RW), QImode); |
| } |
| |
| /* TEMP is the address of the block. Copy the data there. */ |
| if (GET_CODE (size) == CONST_INT |
| && (move_by_pieces_ninsns ((unsigned) INTVAL (size), align) |
| < MOVE_RATIO)) |
| { |
| move_by_pieces (gen_rtx (MEM, BLKmode, temp), xinner, |
| INTVAL (size), align); |
| goto ret; |
| } |
| /* Try the most limited insn first, because there's no point |
| including more than one in the machine description unless |
| the more limited one has some advantage. */ |
| #ifdef HAVE_movstrqi |
| if (HAVE_movstrqi |
| && GET_CODE (size) == CONST_INT |
| && ((unsigned) INTVAL (size) |
| < (1 << (GET_MODE_BITSIZE (QImode) - 1)))) |
| { |
| rtx pat = gen_movstrqi (gen_rtx (MEM, BLKmode, temp), |
| xinner, size, GEN_INT (align)); |
| if (pat != 0) |
| { |
| emit_insn (pat); |
| goto ret; |
| } |
| } |
| #endif |
| #ifdef HAVE_movstrhi |
| if (HAVE_movstrhi |
| && GET_CODE (size) == CONST_INT |
| && ((unsigned) INTVAL (size) |
| < (1 << (GET_MODE_BITSIZE (HImode) - 1)))) |
| { |
| rtx pat = gen_movstrhi (gen_rtx (MEM, BLKmode, temp), |
| xinner, size, GEN_INT (align)); |
| if (pat != 0) |
| { |
| emit_insn (pat); |
| goto ret; |
| } |
| } |
| #endif |
| #ifdef HAVE_movstrsi |
| if (HAVE_movstrsi) |
| { |
| rtx pat = gen_movstrsi (gen_rtx (MEM, BLKmode, temp), |
| xinner, size, GEN_INT (align)); |
| if (pat != 0) |
| { |
| emit_insn (pat); |
| goto ret; |
| } |
| } |
| #endif |
| #ifdef HAVE_movstrdi |
| if (HAVE_movstrdi) |
| { |
| rtx pat = gen_movstrdi (gen_rtx (MEM, BLKmode, temp), |
| xinner, size, GEN_INT (align)); |
| if (pat != 0) |
| { |
| emit_insn (pat); |
| goto ret; |
| } |
| } |
| #endif |
| |
| #ifndef ACCUMULATE_OUTGOING_ARGS |
| /* If the source is referenced relative to the stack pointer, |
| copy it to another register to stabilize it. We do not need |
| to do this if we know that we won't be changing sp. */ |
| |
| if (reg_mentioned_p (virtual_stack_dynamic_rtx, temp) |
| || reg_mentioned_p (virtual_outgoing_args_rtx, temp)) |
| temp = copy_to_reg (temp); |
| #endif |
| |
| /* Make inhibit_defer_pop nonzero around the library call |
| to force it to pop the bcopy-arguments right away. */ |
| NO_DEFER_POP; |
| #ifdef TARGET_MEM_FUNCTIONS |
| emit_library_call (memcpy_libfunc, 0, |
| VOIDmode, 3, temp, Pmode, XEXP (xinner, 0), Pmode, |
| convert_to_mode (TYPE_MODE (sizetype), |
| size, TREE_UNSIGNED (sizetype)), |
| TYPE_MODE (sizetype)); |
| #else |
| emit_library_call (bcopy_libfunc, 0, |
| VOIDmode, 3, XEXP (xinner, 0), Pmode, temp, Pmode, |
| convert_to_mode (TYPE_MODE (integer_type_node), |
| size, |
| TREE_UNSIGNED (integer_type_node)), |
| TYPE_MODE (integer_type_node)); |
| #endif |
| OK_DEFER_POP; |
| } |
| } |
| else if (partial > 0) |
| { |
| /* Scalar partly in registers. */ |
| |
| int size = GET_MODE_SIZE (mode) / UNITS_PER_WORD; |
| int i; |
| int not_stack; |
| /* # words of start of argument |
| that we must make space for but need not store. */ |
| int offset = partial % (PARM_BOUNDARY / BITS_PER_WORD); |
| int args_offset = INTVAL (args_so_far); |
| int skip; |
| |
| /* Push padding now if padding above and stack grows down, |
| or if padding below and stack grows up. |
| But if space already allocated, this has already been done. */ |
| if (extra && args_addr == 0 |
| && where_pad != none && where_pad != stack_direction) |
| anti_adjust_stack (GEN_INT (extra)); |
| |
| /* If we make space by pushing it, we might as well push |
| the real data. Otherwise, we can leave OFFSET nonzero |
| and leave the space uninitialized. */ |
| if (args_addr == 0) |
| offset = 0; |
| |
| /* Now NOT_STACK gets the number of words that we don't need to |
| allocate on the stack. */ |
| not_stack = partial - offset; |
| |
| /* If the partial register-part of the arg counts in its stack size, |
| skip the part of stack space corresponding to the registers. |
| Otherwise, start copying to the beginning of the stack space, |
| by setting SKIP to 0. */ |
| #ifndef REG_PARM_STACK_SPACE |
| skip = 0; |
| #else |
| skip = not_stack; |
| #endif |
| |
| if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x)) |
| x = validize_mem (force_const_mem (mode, x)); |
| |
| /* If X is a hard register in a non-integer mode, copy it into a pseudo; |
| SUBREGs of such registers are not allowed. */ |
| if ((GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER |
| && GET_MODE_CLASS (GET_MODE (x)) != MODE_INT)) |
| x = copy_to_reg (x); |
| |
| /* Loop over all the words allocated on the stack for this arg. */ |
| /* We can do it by words, because any scalar bigger than a word |
| has a size a multiple of a word. */ |
| #ifndef PUSH_ARGS_REVERSED |
| for (i = not_stack; i < size; i++) |
| #else |
| for (i = size - 1; i >= not_stack; i--) |
| #endif |
| if (i >= not_stack + offset) |
| emit_push_insn (operand_subword_force (x, i, mode), |
| word_mode, NULL_TREE, NULL_RTX, align, 0, NULL_RTX, |
| 0, args_addr, |
| GEN_INT (args_offset + ((i - not_stack + skip) |
| * UNITS_PER_WORD))); |
| } |
| else |
| { |
| rtx addr; |
| rtx target = NULL_RTX; |
| |
| /* Push padding now if padding above and stack grows down, |
| or if padding below and stack grows up. |
| But if space already allocated, this has already been done. */ |
| if (extra && args_addr == 0 |
| && where_pad != none && where_pad != stack_direction) |
| anti_adjust_stack (GEN_INT (extra)); |
| |
| #ifdef PUSH_ROUNDING |
| if (args_addr == 0) |
| addr = gen_push_operand (); |
| else |
| #endif |
| { |
| if (GET_CODE (args_so_far) == CONST_INT) |
| addr |
| = memory_address (mode, |
| plus_constant (args_addr, |
| INTVAL (args_so_far))); |
| else |
| addr = memory_address (mode, gen_rtx (PLUS, Pmode, args_addr, |
| args_so_far)); |
| target = addr; |
| } |
| |
| emit_move_insn (gen_rtx (MEM, mode, addr), x); |
| |
| if (flag_check_memory_usage) |
| { |
| if (target == 0) |
| target = get_push_address (GET_MODE_SIZE (mode)); |
| |
| if (GET_CODE (x) == MEM && AGGREGATE_TYPE_P (type)) |
| emit_library_call (chkr_copy_bitmap_libfunc, 1, VOIDmode, 3, |
| target, ptr_mode, |
| XEXP (x, 0), ptr_mode, |
| GEN_INT (GET_MODE_SIZE (mode)), |
| TYPE_MODE (sizetype)); |
| else |
| emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3, |
| target, ptr_mode, |
| GEN_INT (GET_MODE_SIZE (mode)), |
| TYPE_MODE (sizetype), |
| GEN_INT (MEMORY_USE_RW), QImode); |
| } |
| } |
| |
| ret: |
| /* If part should go in registers, copy that part |
| into the appropriate registers. Do this now, at the end, |
| since mem-to-mem copies above may do function calls. */ |
| if (partial > 0 && reg != 0) |
| { |
| /* Handle calls that pass values in multiple non-contiguous locations. |
| The Irix 6 ABI has examples of this. */ |
| if (GET_CODE (reg) == PARALLEL) |
| emit_group_load (reg, x); |
| else |
| move_block_to_reg (REGNO (reg), x, partial, mode); |
| } |
| |
| if (extra && args_addr == 0 && where_pad == stack_direction) |
| anti_adjust_stack (GEN_INT (extra)); |
| } |
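| |
| /* Illustrative sketch of a call to emit_push_insn, of the kind the |
| argument-pushing code in calls.c might make for a scalar argument ARG |
| of mode MODE and type TYPE that goes entirely on the stack, on a |
| target with push insns and no preallocated argument block (all the |
| lowercase names are hypothetical): |
| |
| emit_push_insn (arg, mode, type, NULL_RTX, |
| PARM_BOUNDARY / BITS_PER_UNIT, |
| 0, NULL_RTX, 0, NULL_RTX, const0_rtx); |
| |
| The alignment and ARGS_SO_FAR values here are placeholders chosen only |
| for illustration. */ |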
| |
| /* Expand an assignment that stores the value of FROM into TO. |
| If WANT_VALUE is nonzero, return an rtx for the value of TO. |
| (This may contain a QUEUED rtx; |
| if the value is constant, this rtx is a constant.) |
| Otherwise, the returned value is NULL_RTX. |
| |
| SUGGEST_REG is no longer actually used. |
| It used to mean, copy the value through a register |
| and return that register, if that is possible. |
| We now use WANT_VALUE to decide whether to do this. */ |
| |
| rtx |
| expand_assignment (to, from, want_value, suggest_reg) |
| tree to, from; |
| int want_value; |
| int suggest_reg; |
| { |
| register rtx to_rtx = 0; |
| rtx result; |
| |
| /* Don't crash if the lhs of the assignment was erroneous. */ |
| |
| if (TREE_CODE (to) == ERROR_MARK) |
| { |
| result = expand_expr (from, NULL_RTX, VOIDmode, 0); |
| return want_value ? result : NULL_RTX; |
| } |
| |
| if (output_bytecode) |
| { |
| tree dest_innermost; |
| |
| bc_expand_expr (from); |
| bc_emit_instruction (duplicate); |
| |
| dest_innermost = bc_expand_address (to); |
| |
| /* Can't deduce from TYPE that we're dealing with a bitfield, so |
| take care of it here. */ |
| |
| bc_store_memory (TREE_TYPE (to), dest_innermost); |
| return NULL; |
| } |
| |
| /* Assignment of a structure component needs special treatment |
| if the structure component's rtx is not simply a MEM. |
| Assignment of an array element at a constant index, and assignment of |
| an array element in an unaligned packed structure field, have the same |
| problem. */ |
| |
| if (TREE_CODE (to) == COMPONENT_REF || TREE_CODE (to) == BIT_FIELD_REF |
| || TREE_CODE (to) == ARRAY_REF) |
| { |
| enum machine_mode mode1; |
| int bitsize; |
| int bitpos; |
| tree offset; |
| int unsignedp; |
| int volatilep = 0; |
| tree tem; |
| int alignment; |
| |
| push_temp_slots (); |
| tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1, |
| &unsignedp, &volatilep, &alignment); |
| |
| /* If we are going to use store_bit_field and extract_bit_field, |
| make sure to_rtx will be safe for multiple use. */ |
| |
| if (mode1 == VOIDmode && want_value) |
| tem = stabilize_reference (tem); |
| |
| to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_MEMORY_USE_DONT); |
| if (offset != 0) |
| { |
| rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0); |
| |
| if (GET_CODE (to_rtx) != MEM) |
| abort (); |
| to_rtx = change_address (to_rtx, VOIDmode, |
| gen_rtx (PLUS, ptr_mode, XEXP (to_rtx, 0), |
| force_reg (ptr_mode, offset_rtx))); |
| } |
| if (volatilep) |
| { |
| if (GET_CODE (to_rtx) == MEM) |
| { |
| /* When the offset is zero, to_rtx is the MEM rtx for the |
| structure we are storing into, and hence may be shared. |
| We must make a new MEM before setting the volatile bit. */ |
| if (offset == 0) |
| to_rtx = copy_rtx (to_rtx); |
| |
| MEM_VOLATILE_P (to_rtx) = 1; |
| } |
| #if 0 /* This was turned off because, when a field is volatile |
| in an object which is not volatile, the object may be in a register, |
| and then we would abort over here. */ |
| else |
| abort (); |
| #endif |
| } |
| |
| /* Check the access. */ |
| if (flag_check_memory_usage && GET_CODE (to_rtx) == MEM) |
| { |
| rtx to_addr; |
| int size; |
| int best_mode_size; |
| enum machine_mode best_mode; |
| |
| best_mode = get_best_mode (bitsize, bitpos, |
| TYPE_ALIGN (TREE_TYPE (tem)), |
| mode1, volatilep); |
| if (best_mode == VOIDmode) |
| best_mode = QImode; |
| |
| best_mode_size = GET_MODE_BITSIZE (best_mode); |
| to_addr = plus_constant (XEXP (to_rtx, 0), (bitpos / BITS_PER_UNIT)); |
| size = CEIL ((bitpos % best_mode_size) + bitsize, best_mode_size); |
| size *= GET_MODE_SIZE (best_mode); |
| |
| /* Check the access right of the pointer. */ |
| emit_library_call (chkr_check_addr_libfunc, 1, VOIDmode, 3, to_addr, |
| ptr_mode, GEN_INT (size), TYPE_MODE (sizetype), |
| GEN_INT (MEMORY_USE_WO), QImode); |
| } |
| |
| result = store_field (to_rtx, bitsize, bitpos, mode1, from, |
| (want_value |
| /* Spurious cast makes HPUX compiler happy. */ |
| ? (enum machine_mode) TYPE_MODE (TREE_TYPE (to)) |
| : VOIDmode), |
| unsignedp, |
| /* Required alignment of containing datum. */ |
| alignment, |
| int_size_in_bytes (TREE_TYPE (tem))); |
| preserve_temp_slots (result); |
| free_temp_slots (); |
| pop_temp_slots (); |
| |
| /* If the value is meaningful, convert RESULT to the proper mode. |
| Otherwise, return nothing. */ |
| return (want_value ? convert_modes (TYPE_MODE (TREE_TYPE (to)), |
| TYPE_MODE (TREE_TYPE (from)), |
| result, |
| TREE_UNSIGNED (TREE_TYPE (to))) |
| : NULL_RTX); |
| } |
| |
| /* If the rhs is a function call and its value is not an aggregate, |
| call the function before we start to compute the lhs. |
| This is needed for correct code for cases such as |
| val = setjmp (buf) on machines where reference to val |
| requires loading up part of an address in a separate insn. |
| |
| Don't do this if TO is a VAR_DECL whose DECL_RTL is REG since it might be |
| a promoted variable where the zero- or sign- extension needs to be done. |
| Handling this in the normal way is safe because no computation is done |
| before the call. */ |
| if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from) |
| && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) == INTEGER_CST |
| && ! (TREE_CODE (to) == VAR_DECL && GET_CODE (DECL_RTL (to)) == REG)) |
| { |
| rtx value; |
| |
| push_temp_slots (); |
| value = expand_expr (from, NULL_RTX, VOIDmode, 0); |
| if (to_rtx == 0) |
| to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_MEMORY_USE_WO); |
| |
| /* Handle calls that return values in multiple non-contiguous locations. |
| The Irix 6 ABI has examples of this. */ |
| if (GET_CODE (to_rtx) == PARALLEL) |
| emit_group_load (to_rtx, value); |
| else if (GET_MODE (to_rtx) == BLKmode) |
| emit_block_move (to_rtx, value, expr_size (from), |
| TYPE_ALIGN (TREE_TYPE (from)) / BITS_PER_UNIT); |
| else |
| emit_move_insn (to_rtx, value); |
| preserve_temp_slots (to_rtx); |
| free_temp_slots (); |
| pop_temp_slots (); |
| return want_value ? to_rtx : NULL_RTX; |
| } |
| |
| /* Ordinary treatment. Expand TO to get a REG or MEM rtx. |
| Don't re-expand if it was expanded already (in COMPONENT_REF case). */ |
| |
| if (to_rtx == 0) |
| to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_MEMORY_USE_WO); |
| |
| /* Don't move directly into a return register. */ |
| if (TREE_CODE (to) == RESULT_DECL && GET_CODE (to_rtx) == REG) |
| { |
| rtx temp; |
| |
| push_temp_slots (); |
| temp = expand_expr (from, 0, GET_MODE (to_rtx), 0); |
| emit_move_insn (to_rtx, temp); |
| preserve_temp_slots (to_rtx); |
| free_temp_slots (); |
| pop_temp_slots (); |
| return want_value ? to_rtx : NULL_RTX; |
| } |
| |
| /* In case we are returning the contents of an object which overlaps |
| the place the value is being stored, use a safe function when copying |
| a value through a pointer into a structure value return block. */ |
| if (TREE_CODE (to) == RESULT_DECL && TREE_CODE (from) == INDIRECT_REF |
| && current_function_returns_struct |
| && !current_function_returns_pcc_struct) |
| { |
| rtx from_rtx, size; |
| |
| push_temp_slots (); |
| size = expr_size (from); |
| from_rtx = expand_expr (from, NULL_RTX, VOIDmode, |
| EXPAND_MEMORY_USE_DONT); |
| |
| /* Copy the rights of the bitmap. */ |
| if (flag_check_memory_usage) |
| emit_library_call (chkr_copy_bitmap_libfunc, 1, VOIDmode, 3, |
| XEXP (to_rtx, 0), ptr_mode, |
| XEXP (from_rtx, 0), ptr_mode, |
| convert_to_mode (TYPE_MODE (sizetype), |
| size, TREE_UNSIGNED (sizetype)), |
| TYPE_MODE (sizetype)); |
| |
| #ifdef TARGET_MEM_FUNCTIONS |
| emit_library_call (memcpy_libfunc, 0, |
| VOIDmode, 3, XEXP (to_rtx, 0), Pmode, |
| XEXP (from_rtx, 0), Pmode, |
| convert_to_mode (TYPE_MODE (sizetype), |
| size, TREE_UNSIGNED (sizetype)), |
| TYPE_MODE (sizetype)); |
| #else |
| emit_library_call (bcopy_libfunc, 0, |
| VOIDmode, 3, XEXP (from_rtx, 0), Pmode, |
| XEXP (to_rtx, 0), Pmode, |
| convert_to_mode (TYPE_MODE (integer_type_node), |
| size, TREE_UNSIGNED (integer_type_node)), |
| TYPE_MODE (integer_type_node)); |
| #endif |
| |
| preserve_temp_slots (to_rtx); |
| free_temp_slots (); |
| pop_temp_slots (); |
| return want_value ? to_rtx : NULL_RTX; |
| } |
| |
| /* Compute FROM and store the value in the rtx we got. */ |
| |
| push_temp_slots (); |
| result = store_expr (from, to_rtx, want_value); |
| preserve_temp_slots (result); |
| free_temp_slots (); |
| pop_temp_slots (); |
| return want_value ? result : NULL_RTX; |
| } |
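| |
| /* Illustrative sketch: a front end expanding the statement `x = y' for |
| trees LHS and RHS (hypothetical variables), with the value of the |
| assignment unused, would call |
| |
| expand_assignment (lhs, rhs, 0, 0); |
| |
| and would pass WANT_VALUE nonzero instead when the assignment appears |
| as a subexpression whose value is needed. */ |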
| |
| /* Generate code for computing expression EXP, |
| and storing the value into TARGET. |
| TARGET may contain a QUEUED rtx. |
| |
| If WANT_VALUE is nonzero, return a copy of the value |
| not in TARGET, so that we can be sure to use the proper |
| value in a containing expression even if TARGET has something |
| else stored in it. If possible, we copy the value through a pseudo |
| and return that pseudo. Or, if the value is constant, we try to |
| return the constant. In some cases, we return a pseudo |
| copied *from* TARGET. |
| |
| If the mode is BLKmode then we may return TARGET itself. |
| It turns out that in BLKmode it doesn't cause a problem, |
| because C has no operators that could combine two different |
| assignments into the same BLKmode object with different values |
| with no sequence point. Will other languages need this to |
| be more thorough? |
| |
| If WANT_VALUE is 0, we return NULL, to make sure |
| to catch quickly any cases where the caller uses the value |
| and fails to set WANT_VALUE. */ |
| |
| rtx |
| store_expr (exp, target, want_value) |
| register tree exp; |
| register rtx target; |
| int want_value; |
| { |
| register rtx temp; |
| int dont_return_target = 0; |
| |
| if (TREE_CODE (exp) == COMPOUND_EXPR) |
| { |
| /* Perform first part of compound expression, then assign from second |
| part. */ |
| expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0); |
| emit_queue (); |
| return store_expr (TREE_OPERAND (exp, 1), target, want_value); |
| } |
| else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode) |
| { |
| /* For conditional expression, get safe form of the target. Then |
| test the condition, doing the appropriate assignment on either |
| side. This avoids the creation of unnecessary temporaries. |
| For non-BLKmode, it is more efficient not to do this. */ |
| |
| rtx lab1 = gen_label_rtx (), lab2 = gen_label_rtx (); |
| |
| emit_queue (); |
| target = protect_from_queue (target, 1); |
| |
| do_pending_stack_adjust (); |
| NO_DEFER_POP; |
| jumpifnot (TREE_OPERAND (exp, 0), lab1); |
| start_cleanup_deferal (); |
| store_expr (TREE_OPERAND (exp, 1), target, 0); |
| end_cleanup_deferal (); |
| emit_queue (); |
| emit_jump_insn (gen_jump (lab2)); |
| emit_barrier (); |
| emit_label (lab1); |
| start_cleanup_deferal (); |
| store_expr (TREE_OPERAND (exp, 2), target, 0); |
| end_cleanup_deferal (); |
| emit_queue (); |
| emit_label (lab2); |
| OK_DEFER_POP; |
| |
| return want_value ? target : NULL_RTX; |
| } |
|