;; Machine description of the M32R/D cpu for GNU C compiler ;; Copyright (C) 1996, 1997 Free Software Foundation, Inc.

;; This file is part of GNU CC.

;; GNU CC is free software; you can redistribute it and/or modify ;; it under the terms of the GNU General Public License as published by ;; the Free Software Foundation; either version 2, or (at your option) ;; any later version.

;; GNU CC is distributed in the hope that it will be useful, ;; but WITHOUT ANY WARRANTY; without even the implied warranty of ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ;; GNU General Public License for more details.

;; You should have received a copy of the GNU General Public License ;; along with GNU CC; see the file COPYING. If not, write to ;; the Free Software Foundation, 59 Temple Place - Suite 330, ;; Boston, MA 02111-1307, USA.

;; See file "rtl.def" for documentation on define_insn, match_*, et. al.

;; unspec usage
;; 0 - blockage
;; 1 - flush_icache
;; 2 - load_sda_base

;; Insn type.  Used to default other attribute values.
;; move4 = 4 byte move
(define_attr "type" "move,move4,load,store,unary,binary,compare,shift,mul,div,uncond_branch,branch,call,multi,misc"
  (const_string "misc"))

;; Length in bytes.
;; Short (2 byte) encodings are used when both operands are registers;
;; otherwise the long (4 byte) 3-operand/immediate forms are needed.
(define_attr "length" ""
  (cond [(eq_attr "type" "move,unary,shift,mul,div")
	 (const_int 2)

	 (eq_attr "type" "binary")
	 (if_then_else (match_operand 2 "register_operand" "")
		       (const_int 2) (const_int 4))

	 (eq_attr "type" "compare")
	 (if_then_else (match_operand 1 "register_operand" "")
		       (const_int 2) (const_int 4))

	 (eq_attr "type" "load")
	 (if_then_else (match_operand 1 "memreg_operand" "")
		       (const_int 2) (const_int 4))

	 (eq_attr "type" "store")
	 (if_then_else (match_operand 0 "memreg_operand" "")
		       (const_int 2) (const_int 4))

	 (eq_attr "type" "multi")
	 (const_int 8)

	 (eq_attr "type" "uncond_branch,branch,call")
	 (const_int 4)]

	(const_int 4)))

;; The length here is the length of a single asm.  Unfortunately it might be
;; 2 or 4 so we must allow for 4.  That's ok though.
(define_asm_attributes
  [(set_attr "length" "4")
   (set_attr "type" "multi")])

;; Function units of the M32R
;; Units that take one cycle do not need to be specified.

;; (define_function_unit {name} {num-units} {n-users} {test}
;;                       {ready-delay} {issue-delay} [{conflict-list}])

;; References to loaded registers should wait a cycle.
;; Memory with load-delay of 1 (i.e. 2 cycle load).
(define_function_unit "memory" 1 1 (eq_attr "type" "load") 2 0)

;; Hack to get GCC to better pack the instructions.
;; We pretend there is a separate long function unit that conflicts with
;; both the left and right 16 bit insn slots.

(define_function_unit "left" 1 1
  (eq_attr "length" "2")
  1 0
  [(not (eq_attr "length" "2"))])

;; The "right" 16 bit slot, same condition as "left": the length attribute
;; only ever takes the values 2/4/6/8, so the original test for "1" could
;; never match and made this unit a no-op.
(define_function_unit "right" 1 1
  (eq_attr "length" "2")
  1 0
  [(not (eq_attr "length" "2"))])

(define_function_unit "long" 1 1
  (not (eq_attr "length" "2"))
  1 0
  [(eq_attr "length" "2")])

;; Expand prologue as RTL
;; ??? Unfinished.

;(define_expand "prologue"
;  [(const_int 1)]
;  ""
;  "
;{
;}")

;; Move instructions.
;;
;; For QI and HI moves, the register must contain the full properly
;; sign-extended value.  nonzero_bits assumes this [otherwise
;; SHORT_IMMEDIATES_SIGN_EXTEND must be used, but the comment for it
;; says it's a kludge and the .md files should be fixed instead].

(define_expand "movqi"
  [(set (match_operand:QI 0 "general_operand" "")
	(match_operand:QI 1 "general_operand" ""))]
  ""
  "
{
  /* Everything except mem = const or mem = mem can be done easily.
     Objects in the small data area are handled too.  */

  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (QImode, operands[1]);
}")

(define_insn "*movqi_insn"
  [(set (match_operand:QI 0 "move_dest_operand" "=r,r,r,r,m")
	(match_operand:QI 1 "move_src_operand" "r,I,JQR,m,r"))]
  "register_operand (operands[0], QImode) || register_operand (operands[1], QImode)"
  "@
   mv %0,%1
   ldi %0,%#%1
   ldi %0,%#%1
   ldub %0,%1
   stb %1,%0"
  [(set_attr "type" "move,move,move4,load,store")])

(define_expand "movhi"
  [(set (match_operand:HI 0 "general_operand" "")
	(match_operand:HI 1 "general_operand" ""))]
  ""
  "
{
  /* Everything except mem = const or mem = mem can be done easily.  */

  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (HImode, operands[1]);
}")

(define_insn "*movhi_insn"
  [(set (match_operand:HI 0 "move_dest_operand" "=r,r,r,r,r,m")
	(match_operand:HI 1 "move_src_operand" "r,I,JQR,K,m,r"))]
  "register_operand (operands[0], HImode) || register_operand (operands[1], HImode)"
  "@
   mv %0,%1
   ldi %0,%#%1
   ldi %0,%#%1
   ld24 %0,%#%1
   lduh %0,%1
   sth %1,%0"
  [(set_attr "type" "move,move,move4,move4,load,store")])

(define_expand "movsi"
  [(set (match_operand:SI 0 "general_operand" "")
	(match_operand:SI 1 "general_operand" ""))]
  ""
  "
{
  /* Everything except mem = const or mem = mem can be done easily.  */

  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (SImode, operands[1]);

  /* Small Data Area reference?  */
  if (small_data_operand (operands[1], SImode))
    {
      emit_insn (gen_movsi_sda (operands[0], operands[1]));
      DONE;
    }

  /* If medium or large code model, symbols have to be loaded with
     seth/add3.  */
  if (addr32_operand (operands[1], SImode))
    {
      emit_insn (gen_movsi_addr32 (operands[0], operands[1]));
      DONE;
    }
}")

(define_insn "*movsi_insn"
  [(set (match_operand:SI 0 "move_dest_operand" "=r,r,r,r,r,r,r,m")
	;; FIXME: Do we need a const_double constraint here for large unsigned values?
	(match_operand:SI 1 "move_src_operand" "r,I,J,MQ,L,N,m,r"))]
  "register_operand (operands[0], SImode) || register_operand (operands[1], SImode)"
  "@
   mv %0,%1
   ldi %0,%#%1 ; %X1
   ldi %0,%#%1 ; %X1
   ld24 %0,%#%1 ; %X1
   seth %0,%#%T1
   seth %0,%#%T1\\;or3 %0,%0,%#%B1
   ld %0,%1
   st %1,%0"
  [(set_attr "type" "move,move,move4,move4,move4,multi,load,store")])

; Try to use a four byte / two byte pair for constants not loadable with
; ldi, ld24, seth.

(define_split
  [(set (match_operand:SI 0 "register_operand" "")
	(match_operand:SI 1 "two_insn_const_operand" ""))]
  ""
  [(set (match_dup 0) (match_dup 2))
   (set (match_dup 0) (ior:SI (match_dup 0) (match_dup 3)))]
  "
{
  unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
  unsigned HOST_WIDE_INT tmp;
  int shift;

  /* In all cases we will emit two instructions.  However we try to
     use 2 byte instructions wherever possible.  We can assume the
     constant isn't loadable with any of ldi, ld24, or seth.  */

  /* See if we can load a 24 bit unsigned value and invert it.  */
  if (UINT24_P (~ val))
    {
      emit_insn (gen_movsi (operands[0], GEN_INT (~ val)));
      emit_insn (gen_one_cmplsi2 (operands[0], operands[0]));
      DONE;
    }

  /* See if we can load a 24 bit unsigned value and shift it into place.
     0x01fffffe is just beyond ld24's range.  */
  for (shift = 1, tmp = 0x01fffffe;
       shift < 8;
       ++shift, tmp <<= 1)
    {
      if ((val & ~tmp) == 0)
	{
	  emit_insn (gen_movsi (operands[0], GEN_INT (val >> shift)));
	  emit_insn (gen_ashlsi3 (operands[0], operands[0], GEN_INT (shift)));
	  DONE;
	}
    }

  /* Can't use any two byte insn, fall back to seth/or3.  */
  operands[2] = GEN_INT ((val) & 0xffff0000);
  operands[3] = GEN_INT ((val) & 0xffff);
}")

;; Small data area support.
;; The address of SDA_BASE is loaded into a register and all objects in
;; the small data area are indexed off that.  This is done for each reference
;; but cse will clean things up for us.  We let the compiler choose the
;; register to use so we needn't allocate (and maybe even fix) a special
;; register to use.  Since the load and store insns have a 16 bit offset the
;; total size of the data area can be 64K.  However, if the data area lives
;; above 16M (24 bits), SDA_BASE will have to be loaded with seth/add3 which
;; would then yield 3 instructions to reference an object [though there would
;; be no net loss if two or more objects were referenced].  The 3 insns can be
;; reduced back to 2 if the size of the small data area were reduced to 32K
;; [then seth + ld/st would work for any object in the area].  Doing this
;; would require special handling of SDA_BASE (its value would be
;; (.sdata + 32K) & 0xffff0000) and reloc computations would be different
;; [I think].  What to do about this is deferred until later and for now we
;; require .sdata to be in the first 16M.

(define_expand "movsi_sda"
  [(set (match_dup 2)
	(unspec [(const_int 0)] 2))
   (set (match_operand:SI 0 "register_operand" "")
	(lo_sum:SI (match_dup 2)
		   (match_operand:SI 1 "small_data_operand" "")))]
  ""
  "
{
  if (reload_in_progress || reload_completed)
    operands[2] = operands[0];
  else
    operands[2] = gen_reg_rtx (SImode);
}")

(define_insn "*load_sda_base"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(unspec [(const_int 0)] 2))]
  ""
  "ld24 %0,#SDA_BASE"
  [(set_attr "type" "move4")])

;; 32 bit address support.

(define_expand "movsi_addr32"
  [(set (match_dup 2)
	; addr32_operand isn't used because it's too restrictive,
	; seth_add3_operand is more general and thus safer.
	(high:SI (match_operand:SI 1 "seth_add3_operand" "")))
   (set (match_operand:SI 0 "register_operand" "")
	(lo_sum:SI (match_dup 2) (match_dup 1)))]
  ""
  "
{
  if (reload_in_progress || reload_completed)
    operands[2] = operands[0];
  else
    operands[2] = gen_reg_rtx (SImode);
}")

(define_insn "set_hi_si"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(high:SI (match_operand 1 "symbolic_operand" "")))]
  ""
  "seth %0,%#shigh(%1)"
  [(set_attr "type" "move4")])

(define_insn "lo_sum_si"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(lo_sum:SI (match_operand:SI 1 "register_operand" "r")
		   (match_operand:SI 2 "immediate_operand" "in")))]
  ""
  "add3 %0,%1,%#%B2"
  [(set_attr "length" "4")])

(define_expand "movdi"
  [(set (match_operand:DI 0 "general_operand" "")
	(match_operand:DI 1 "general_operand" ""))]
  ""
  "
{
  /* Everything except mem = const or mem = mem can be done easily.  */

  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (DImode, operands[1]);

  /* Hard-to-build DI constants go through the constant pool; during
     reload we must reuse operand 0's register for the address.  */
  if (CONSTANT_P (operands[1]) && ! easy_di_const (operands[1]))
    {
      rtx mem = force_const_mem (DImode, operands[1]);
      rtx reg = ((reload_in_progress || reload_completed)
		 ? copy_to_suggested_reg (XEXP (mem, 0),
					  gen_rtx (REG, Pmode, REGNO (operands[0])),
					  Pmode)
		 : force_reg (Pmode, XEXP (mem, 0)));
      operands[1] = change_address (mem, DImode, reg);
    }
}")

(define_insn "*movdi_insn"
  [(set (match_operand:DI 0 "move_dest_operand" "=r,r,r,m")
	(match_operand:DI 1 "move_double_src_operand" "r,nG,m,r"))]
  "register_operand (operands[0], DImode) || register_operand (operands[1], DImode)"
  "*
{
  switch (which_alternative)
    {
    case 0 :
      /* We normally copy the low-numbered register first.  However, if
	 the first register operand 0 is the same as the second register of
	 operand 1, we must copy in the opposite order.  */
      if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
	return \"mv %R0,%R1\\;mv %0,%1\";
      else
	return \"mv %0,%1\\;mv %R0,%R1\";
    case 1 :
      return \"#\";
    case 2 :
      /* If the low-address word is used in the address, we must load it
	 last.  Otherwise, load it first.  Note that we cannot have
	 auto-increment in that case since the address register is known to be
	 dead.  */
      if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
			     operands[1], 0))
	{
	  return \"ld %R0,%R1\\;ld %0,%1\";
	}
      else
	{
	  /* Try to use auto-inc addressing if we can.  */
	  if (GET_CODE (XEXP (operands[1], 0)) == REG
	      && dead_or_set_p (insn, XEXP (operands[1], 0)))
	    {
	      operands[1] = XEXP (operands[1], 0);
	      return \"ld %0,@%1+\\;ld %R0,@%1\";
	    }
	  return \"ld %0,%1\\;ld %R0,%R1\";
	}
    case 3 :
      /* Try to use auto-inc addressing if we can.  */
      if (GET_CODE (XEXP (operands[0], 0)) == REG
	  && dead_or_set_p (insn, XEXP (operands[0], 0)))
	{
	  operands[0] = XEXP (operands[0], 0);
	  return \"st %1,@%0\\;st %R1,@+%0\";
	}
      return \"st %1,%0\\;st %R1,%R0\";
    }
}"
  [(set_attr "type" "multi,multi,multi,multi")
   (set_attr "length" "4,4,6,6")])

; Split a DI-mode const_double into two SI word moves after reload.
(define_split
  [(set (match_operand:DI 0 "register_operand" "")
	(match_operand:DI 1 "const_double_operand" ""))]
  "reload_completed"
  [(set (match_dup 2) (match_dup 4))
   (set (match_dup 3) (match_dup 5))]
  "
{
  operands[2] = gen_rtx (SUBREG, SImode, operands[0], WORDS_BIG_ENDIAN == 0);
  operands[3] = gen_rtx (SUBREG, SImode, operands[0], WORDS_BIG_ENDIAN != 0);
  split_double (operands[1], operands + 4, operands + 5);
}")

;; Floating point move insns.

(define_expand "movsf"
  [(set (match_operand:SF 0 "general_operand" "")
	(match_operand:SF 1 "general_operand" ""))]
  ""
  "
{
  /* Everything except mem = const or mem = mem can be done easily.  */

  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (SFmode, operands[1]);
}")

(define_insn "*movsf_insn"
  [(set (match_operand:SF 0 "move_dest_operand" "=r,r,r,m")
	(match_operand:SF 1 "move_src_operand" "r,F,m,r"))]
  "register_operand (operands[0], SFmode) || register_operand (operands[1], SFmode)"
  "*
{
  switch (which_alternative)
    {
    case 0 :
      return \"mv %0,%1\";
    case 1 :
      {
	REAL_VALUE_TYPE r;
	long l;
	REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
	REAL_VALUE_TO_TARGET_SINGLE (r, l);
	operands[1] = GEN_INT (l);
	if (l == 0)
	  return \"ldi %0,%#0\";
	if ((l & 0xffff) == 0)
	  return \"seth %0,%#%T1\";
	else
	  return \"seth %0,%#%T1\\;or3 %0,%0,%#%B1\";
      }
    case 2 :
      return \"ld %0,%1\";
    case 3 :
      return \"st %1,%0\";
    }
}"
  ;; ??? Length of alternative 1 is either 2, 4 or 8.
  [(set_attr "type" "move,multi,load,store")])

(define_expand "movdf"
  [(set (match_operand:DF 0 "general_operand" "")
	(match_operand:DF 1 "general_operand" ""))]
  ""
  "
{
  /* Everything except mem = const or mem = mem can be done easily.  */

  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (DFmode, operands[1]);

  /* Hard-to-build DF constants go through the constant pool; during
     reload we must reuse operand 0's register for the address.  */
  if (GET_CODE (operands[1]) == CONST_DOUBLE
      && ! easy_df_const (operands[1]))
    {
      rtx mem = force_const_mem (DFmode, operands[1]);
      rtx reg = ((reload_in_progress || reload_completed)
		 ? copy_to_suggested_reg (XEXP (mem, 0),
					  gen_rtx (REG, Pmode, REGNO (operands[0])),
					  Pmode)
		 : force_reg (Pmode, XEXP (mem, 0)));
      operands[1] = change_address (mem, DFmode, reg);
    }
}")

(define_insn "*movdf_insn"
  [(set (match_operand:DF 0 "move_dest_operand" "=r,r,r,m")
	(match_operand:DF 1 "move_double_src_operand" "r,H,m,r"))]
  "register_operand (operands[0], DFmode) || register_operand (operands[1], DFmode)"
  "*
{
  switch (which_alternative)
    {
    case 0 :
      /* We normally copy the low-numbered register first.  However, if
	 the first register operand 0 is the same as the second register of
	 operand 1, we must copy in the opposite order.  */
      if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
	return \"mv %R0,%R1\\;mv %0,%1\";
      else
	return \"mv %0,%1\\;mv %R0,%R1\";
    case 1 :
      {
	REAL_VALUE_TYPE r;
	long l[2];
	REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
	REAL_VALUE_TO_TARGET_DOUBLE (r, l);
	operands[1] = GEN_INT (l[0]);
	if (l[0] == 0 && l[1] == 0)
	  return \"ldi %0,%#0\\;ldi %R0,%#0\";
	else if (l[1] != 0)
	  abort ();
	else if ((l[0] & 0xffff) == 0)
	  return \"seth %0,%#%T1\\;ldi %R0,%#0\";
	else
	  abort ();
      }
    case 2 :
      /* If the low-address word is used in the address, we must load it
	 last.  Otherwise, load it first.  Note that we cannot have
	 auto-increment in that case since the address register is known to be
	 dead.  */
      if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
			     operands[1], 0))
	{
	  return \"ld %R0,%R1\\;ld %0,%1\";
	}
      else
	{
	  /* Try to use auto-inc addressing if we can.  */
	  if (GET_CODE (XEXP (operands[1], 0)) == REG
	      && dead_or_set_p (insn, XEXP (operands[1], 0)))
	    {
	      operands[1] = XEXP (operands[1], 0);
	      return \"ld %0,@%1+\\;ld %R0,@%1\";
	    }
	  return \"ld %0,%1\\;ld %R0,%R1\";
	}
    case 3 :
      /* Try to use auto-inc addressing if we can.  */
      if (GET_CODE (XEXP (operands[0], 0)) == REG
	  && dead_or_set_p (insn, XEXP (operands[0], 0)))
	{
	  operands[0] = XEXP (operands[0], 0);
	  return \"st %1,@%0\\;st %R1,@+%0\";
	}
      return \"st %1,%0\\;st %R1,%R0\";
    }
}"
  [(set_attr "type" "multi,multi,multi,multi")
   (set_attr "length" "4,6,6,6")])

;; Zero extension instructions.

(define_insn "zero_extendqihi2"
  [(set (match_operand:HI 0 "register_operand" "=r,r")
	(zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
  ""
  "@
   and3 %0,%1,%#255
   ldub %0,%1"
  [(set_attr "type" "unary,load")
   (set_attr "length" "4,*")])

(define_insn "zero_extendqisi2"
  [(set (match_operand:SI 0 "register_operand" "=r,r")
	(zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
  ""
  "@
   and3 %0,%1,%#255
   ldub %0,%1"
  [(set_attr "type" "unary,load")
   (set_attr "length" "4,*")])

(define_insn "zero_extendhisi2"
  [(set (match_operand:SI 0 "register_operand" "=r,r")
	(zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
  ""
  "@
   and3 %0,%1,%#65535
   lduh %0,%1"
  [(set_attr "type" "unary,load")
   (set_attr "length" "4,*")])

;; Sign extension instructions.
;; FIXME: See v850.md.

;; These patterns originally accepted general_operands, however, slightly ;; better code is generated by only accepting register_operands, and then ;; letting combine generate the lds[hb] insns. ;; [This comment copied from sparc.md, I think.]

;; Sign extend by shifting the value left 24 bits, then arithmetic
;; shifting it back right 24 bits, all in SImode.
(define_expand "extendqihi2"
  [(set (match_operand:HI 0 "register_operand" "")
	(sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
  ""
  "
{
  rtx temp = gen_reg_rtx (SImode);
  rtx shift_24 = gen_rtx (CONST_INT, VOIDmode, 24);
  int op1_subword = 0;
  int op0_subword = 0;

  if (GET_CODE (operand1) == SUBREG)
    {
      op1_subword = SUBREG_WORD (operand1);
      operand1 = XEXP (operand1, 0);
    }
  if (GET_CODE (operand0) == SUBREG)
    {
      op0_subword = SUBREG_WORD (operand0);
      operand0 = XEXP (operand0, 0);
    }
  emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1, op1_subword),
			  shift_24));
  if (GET_MODE (operand0) != SImode)
    operand0 = gen_rtx (SUBREG, SImode, operand0, op0_subword);
  emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
  DONE;
}")

(define_insn "*sign_extendqihi2_insn"
  [(set (match_operand:HI 0 "register_operand" "=r")
	(sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
  ""
  "ldb %0,%1"
  [(set_attr "type" "load")])

(define_expand "extendqisi2"
  [(set (match_operand:SI 0 "register_operand" "")
	(sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
  ""
  "
{
  rtx temp = gen_reg_rtx (SImode);
  rtx shift_24 = gen_rtx (CONST_INT, VOIDmode, 24);
  int op1_subword = 0;

  if (GET_CODE (operand1) == SUBREG)
    {
      op1_subword = SUBREG_WORD (operand1);
      operand1 = XEXP (operand1, 0);
    }

  emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1, op1_subword),
			  shift_24));
  emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
  DONE;
}")

(define_insn "*sign_extendqisi2_insn"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
  ""
  "ldb %0,%1"
  [(set_attr "type" "load")])

(define_expand "extendhisi2"
  [(set (match_operand:SI 0 "register_operand" "")
	(sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
  ""
  "
{
  rtx temp = gen_reg_rtx (SImode);
  rtx shift_16 = gen_rtx (CONST_INT, VOIDmode, 16);
  int op1_subword = 0;

  if (GET_CODE (operand1) == SUBREG)
    {
      op1_subword = SUBREG_WORD (operand1);
      operand1 = XEXP (operand1, 0);
    }

  emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1, op1_subword),
			  shift_16));
  emit_insn (gen_ashrsi3 (operand0, temp, shift_16));
  DONE;
}")

(define_insn "*sign_extendhisi2_insn"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
  ""
  "ldh %0,%1"
  [(set_attr "type" "load")])

;; Arithmetic instructions.

; ??? Adding an alternative to split add3 of small constants into two ; insns yields better instruction packing but slower code. Adds of small ; values is done a lot.

(define_insn "addsi3"
  [(set (match_operand:SI 0 "register_operand" "=r,r,r")
	(plus:SI (match_operand:SI 1 "register_operand" "%0,0,r")
		 (match_operand:SI 2 "nonmemory_operand" "r,I,J")))]
  ""
  "@
   add %0,%2
   addi %0,%#%2
   add3 %0,%1,%#%2"
  [(set_attr "type" "binary")
   (set_attr "length" "2,2,4")])

;(define_split
;  [(set (match_operand:SI 0 "register_operand" "")
;	(plus:SI (match_operand:SI 1 "register_operand" "")
;		 (match_operand:SI 2 "int8_operand" "")))]
;  "reload_completed
;   && REGNO (operands[0]) != REGNO (operands[1])
;   && INT8_P (INTVAL (operands[2]))
;   && INTVAL (operands[2]) != 0"
;  [(set (match_dup 0) (match_dup 1))
;   (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 2)))]
;  "")

(define_insn "adddi3"
  [(set (match_operand:DI 0 "register_operand" "=r")
	(plus:DI (match_operand:DI 1 "register_operand" "%0")
		 (match_operand:DI 2 "register_operand" "r")))
   (clobber (reg:CC 17))]
  ""
  "*
{
  /* ??? The cmp clears the condition bit.  Can we speed up somehow?  */
  return \"cmp %L0,%L0\\;addx %L0,%L2\\;addx %H0,%H2\";
}"
  [(set_attr "type" "binary")
   (set_attr "length" "6")])

(define_insn "subsi3"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(minus:SI (match_operand:SI 1 "register_operand" "0")
		  (match_operand:SI 2 "register_operand" "r")))]
  ""
  "sub %0,%2"
  [(set_attr "type" "binary")])

(define_insn "subdi3"
  [(set (match_operand:DI 0 "register_operand" "=r")
	(minus:DI (match_operand:DI 1 "register_operand" "0")
		  (match_operand:DI 2 "register_operand" "r")))
   (clobber (reg:CC 17))]
  ""
  "*
{
  /* ??? The cmp clears the condition bit.  Can we speed up somehow?  */
  return \"cmp %L0,%L0\\;subx %L0,%L2\\;subx %H0,%H2\";
}"
  [(set_attr "type" "binary")
   (set_attr "length" "6")])

; Multiply/Divide instructions.

(define_insn "mulhisi3"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "r"))
		 (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
  ""
  "mullo %1,%2\\;mvfacmi %0"
  [(set_attr "type" "mul")
   (set_attr "length" "4")])

(define_insn "mulsi3"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(mult:SI (match_operand:SI 1 "register_operand" "%0")
		 (match_operand:SI 2 "register_operand" "r")))]
  ""
  "mul %0,%2"
  [(set_attr "type" "mul")])

(define_insn "divsi3"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(div:SI (match_operand:SI 1 "register_operand" "0")
		(match_operand:SI 2 "register_operand" "r")))]
  ""
  "div %0,%2"
  [(set_attr "type" "div")])

(define_insn "udivsi3"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(udiv:SI (match_operand:SI 1 "register_operand" "0")
		 (match_operand:SI 2 "register_operand" "r")))]
  ""
  "divu %0,%2"
  [(set_attr "type" "div")])

(define_insn "modsi3"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(mod:SI (match_operand:SI 1 "register_operand" "0")
		(match_operand:SI 2 "register_operand" "r")))]
  ""
  "rem %0,%2"
  [(set_attr "type" "div")])

(define_insn "umodsi3"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(umod:SI (match_operand:SI 1 "register_operand" "0")
		 (match_operand:SI 2 "register_operand" "r")))]
  ""
  "remu %0,%2"
  [(set_attr "type" "div")])

;; Boolean instructions.
;;
;; We don't define the DImode versions as expand_binop does a good enough job.
;; And if it doesn't it should be fixed.

(define_insn "andsi3"
  [(set (match_operand:SI 0 "register_operand" "=r,r")
	(and:SI (match_operand:SI 1 "register_operand" "%0,r")
		(match_operand:SI 2 "nonmemory_operand" "r,K")))]
  ""
  "@
   and %0,%2
   and3 %0,%1,%#%2 ; %X2"
  [(set_attr "type" "binary")])

(define_insn "iorsi3"
  [(set (match_operand:SI 0 "register_operand" "=r,r")
	(ior:SI (match_operand:SI 1 "register_operand" "%0,r")
		(match_operand:SI 2 "nonmemory_operand" "r,K")))]
  ""
  "@
   or %0,%2
   or3 %0,%1,%#%2 ; %X2"
  [(set_attr "type" "binary")])

(define_insn "xorsi3"
  [(set (match_operand:SI 0 "register_operand" "=r,r")
	(xor:SI (match_operand:SI 1 "register_operand" "%0,r")
		(match_operand:SI 2 "nonmemory_operand" "r,K")))]
  ""
  "@
   xor %0,%2
   xor3 %0,%1,%#%2 ; %X2"
  [(set_attr "type" "binary")])

(define_insn "negsi2"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(neg:SI (match_operand:SI 1 "register_operand" "r")))]
  ""
  "neg %0,%1"
  [(set_attr "type" "unary")])

(define_insn "one_cmplsi2"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(not:SI (match_operand:SI 1 "register_operand" "r")))]
  ""
  "not %0,%1"
  [(set_attr "type" "unary")])

;; Shift instructions.

(define_insn "ashlsi3"
  [(set (match_operand:SI 0 "register_operand" "=r,r,r")
	(ashift:SI (match_operand:SI 1 "register_operand" "0,0,r")
		   (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
  ""
  "@
   sll %0,%2
   slli %0,%#%2
   sll3 %0,%1,%#%2"
  [(set_attr "type" "shift")
   (set_attr "length" "2,2,4")])

(define_insn "ashrsi3"
  [(set (match_operand:SI 0 "register_operand" "=r,r,r")
	(ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
		     (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
  ""
  "@
   sra %0,%2
   srai %0,%#%2
   sra3 %0,%1,%#%2"
  [(set_attr "type" "shift")
   (set_attr "length" "2,2,4")])

(define_insn "lshrsi3"
  [(set (match_operand:SI 0 "register_operand" "=r,r,r")
	(lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
		     (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
  ""
  "@
   srl %0,%2
   srli %0,%#%2
   srl3 %0,%1,%#%2"
  [(set_attr "type" "shift")
   (set_attr "length" "2,2,4")])

;; Compare instructions.
;; This controls RTL generation and register allocation.

;; We generate RTL for comparisons and branches by having the cmpxx
;; patterns store away the operands.  Then the bcc patterns
;; emit RTL for both the compare and the branch.
;;
;; On the m32r it is more efficient to use the bxxz instructions and
;; thus merge the compare and branch into one instruction, so they are
;; preferred.

(define_expand "cmpsi"
  [(set (reg:CC 17)
	(compare:CC (match_operand:SI 0 "register_operand" "")
		    (match_operand:SI 1 "nonmemory_operand" "")))]
  ""
  "
{
  m32r_compare_op0 = operands[0];
  m32r_compare_op1 = operands[1];
  DONE;
}")

;; The cmp_xxx_insn patterns set the condition bit to the result of the
;; comparison.  There isn't a "compare equal" instruction so cmp_eqsi_insn
;; is quite inefficient.  However, it is rarely used.

(define_insn "cmp_eqsi_insn"
  [(set (reg:CC 17)
	(eq:CC (match_operand:SI 0 "register_operand" "r,r")
	       (match_operand:SI 1 "reg_or_cmp_int16_operand" "r,P")))
   (clobber (match_scratch:SI 2 "=&r,&r"))]
  "TARGET_OLD_COMPARE"
  "@
   mv %2,%0\\;sub %2,%1\\;cmpui %2,#1
   add3 %2,%0,%#%N1\\;cmpui %2,#1"
  [(set_attr "type" "compare,compare")
   (set_attr "length" "8,8")])

(define_insn "cmp_ltsi_insn"
  [(set (reg:CC 17)
	(lt:CC (match_operand:SI 0 "register_operand" "r,r")
	       (match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
  ""
  "@
   cmp %0,%1
   cmpi %0,%#%1"
  [(set_attr "type" "compare")])

(define_insn "cmp_ltusi_insn"
  [(set (reg:CC 17)
	(ltu:CC (match_operand:SI 0 "register_operand" "r,r")
		(match_operand:SI 1 "reg_or_uint16_operand" "r,K")))]
  ""
  "@
   cmpu %0,%1
   cmpui %0,%#%1"
  [(set_attr "type" "compare")])

;; reg == small constant comparisons are best handled by putting the result ;; of the comparison in a tmp reg and then using beqz/bnez. ;; ??? The result register doesn't contain 0/STORE_FLAG_VALUE, ;; it contains 0/non-zero.

(define_insn "cmp_ne_small_const_insn"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(ne:SI (match_operand:SI 1 "register_operand" "r")
	       (match_operand:SI 2 "cmp_int16_operand" "P")))]
  ""
  "add3 %0,%1,%#%N2"
  [(set_attr "type" "compare")
   (set_attr "length" "4")])

;; These control RTL generation for conditional jump insns.

(define_expand "beq"
  [(set (pc)
	(if_then_else (match_dup 1)
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "
{
  operands[1] = gen_compare (EQ, m32r_compare_op0, m32r_compare_op1);
}")

(define_expand "bne"
  [(set (pc)
	(if_then_else (match_dup 1)
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "
{
  operands[1] = gen_compare (NE, m32r_compare_op0, m32r_compare_op1);
}")

(define_expand "bgt"
  [(set (pc)
	(if_then_else (match_dup 1)
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "
{
  operands[1] = gen_compare (GT, m32r_compare_op0, m32r_compare_op1);
}")

(define_expand "ble"
  [(set (pc)
	(if_then_else (match_dup 1)
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "
{
  operands[1] = gen_compare (LE, m32r_compare_op0, m32r_compare_op1);
}")

(define_expand "bge"
  [(set (pc)
	(if_then_else (match_dup 1)
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "
{
  operands[1] = gen_compare (GE, m32r_compare_op0, m32r_compare_op1);
}")

(define_expand "blt"
  [(set (pc)
	(if_then_else (match_dup 1)
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "
{
  operands[1] = gen_compare (LT, m32r_compare_op0, m32r_compare_op1);
}")

(define_expand "bgtu"
  [(set (pc)
	(if_then_else (match_dup 1)
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "
{
  operands[1] = gen_compare (GTU, m32r_compare_op0, m32r_compare_op1);
}")

(define_expand "bleu"
  [(set (pc)
	(if_then_else (match_dup 1)
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "
{
  operands[1] = gen_compare (LEU, m32r_compare_op0, m32r_compare_op1);
}")

(define_expand "bgeu"
  [(set (pc)
	(if_then_else (match_dup 1)
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "
{
  operands[1] = gen_compare (GEU, m32r_compare_op0, m32r_compare_op1);
}")

(define_expand "bltu"
  [(set (pc)
	(if_then_else (match_dup 1)
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "
{
  operands[1] = gen_compare (LTU, m32r_compare_op0, m32r_compare_op1);
}")

;; Now match both normal and inverted jump.

(define_insn "*branch_insn"
  [(set (pc)
	(if_then_else (match_operator 1 "eqne_comparison_operator"
				      [(reg 17) (const_int 0)])
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "*
{
  if (GET_CODE (operands[1]) == NE)
    return \"bc %l0\";
  else
    return \"bnc %l0\";
}"
  [(set_attr "type" "branch")
   ; We use 400/800 instead of 512,1024 to account for inaccurate insn
   ; lengths and insn alignments that are complex to track.
   ; It's not important that we be hyper-precise here.  It may be more
   ; important blah blah blah when the chip supports parallel execution
   ; blah blah blah but until then blah blah blah this is simple and
   ; suffices.
   (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
						 (const_int 400))
					   (const_int 800))
				      (const_int 2)
				      (const_int 4)))])

(define_insn "*rev_branch_insn"
  [(set (pc)
	(if_then_else (match_operator 1 "eqne_comparison_operator"
				      [(reg 17) (const_int 0)])
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ;"REVERSIBLE_CC_MODE (GET_MODE (XEXP (operands[1], 0)))"
  ""
  "*
{
  if (GET_CODE (operands[1]) == EQ)
    return \"bc %l0\";
  else
    return \"bnc %l0\";
}"
  [(set_attr "type" "branch")
   ; We use 400/800 instead of 512,1024 to account for inaccurate insn
   ; lengths and insn alignments that are complex to track.
   ; It's not important that we be hyper-precise here.  It may be more
   ; important blah blah blah when the chip supports parallel execution
   ; blah blah blah but until then blah blah blah this is simple and
   ; suffices.
   (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
						 (const_int 400))
					   (const_int 800))
				      (const_int 2)
				      (const_int 4)))])

; reg/reg compare and branch insns

(define_insn "*reg_branch_insn"
  [(set (pc)
	(if_then_else (match_operator 1 "eqne_comparison_operator"
				      [(match_operand:SI 2 "register_operand" "r")
				       (match_operand:SI 3 "register_operand" "r")])
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "*
{
  /* Is branch target reachable with beq/bne?  */
  if (get_attr_length (insn) == 4)
    {
      if (GET_CODE (operands[1]) == EQ)
	return \"beq %2,%3,%l0\";
      else
	return \"bne %2,%3,%l0\";
    }
  else
    {
      if (GET_CODE (operands[1]) == EQ)
	return \"bne %2,%3,1f\\;bra %l0\\;1:\";
      else
	return \"beq %2,%3,1f\\;bra %l0\\;1:\";
    }
}"
  [(set_attr "type" "branch")
   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
   ; which is complex to track and inaccurate length specs.
   (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
						 (const_int 25000))
					   (const_int 50000))
				      (const_int 4)
				      (const_int 8)))])

(define_insn "*rev_reg_branch_insn"
  [(set (pc)
	(if_then_else (match_operator 1 "eqne_comparison_operator"
				      [(match_operand:SI 2 "register_operand" "r")
				       (match_operand:SI 3 "register_operand" "r")])
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ""
  "*
{
  /* Is branch target reachable with beq/bne?  */
  if (get_attr_length (insn) == 4)
    {
      if (GET_CODE (operands[1]) == NE)
	return \"beq %2,%3,%l0\";
      else
	return \"bne %2,%3,%l0\";
    }
  else
    {
      if (GET_CODE (operands[1]) == NE)
	return \"bne %2,%3,1f\\;bra %l0\\;1:\";
      else
	return \"beq %2,%3,1f\\;bra %l0\\;1:\";
    }
}"
  [(set_attr "type" "branch")
   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
   ; which is complex to track and inaccurate length specs.
   (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
						 (const_int 25000))
					   (const_int 50000))
				      (const_int 4)
				      (const_int 8)))])

; reg/zero compare and branch insns

(define_insn "*zero_branch_insn"
  [(set (pc)
	(if_then_else (match_operator 1 "signed_comparison_operator"
				      [(match_operand:SI 2 "register_operand" "r")
				       (const_int 0)])
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "*
{
  char *br,*invbr;
  char asmtext[40];

  switch (GET_CODE (operands[1]))
    {
    case EQ : br = \"eq\"; invbr = \"ne\"; break;
    case NE : br = \"ne\"; invbr = \"eq\"; break;
    case LE : br = \"le\"; invbr = \"gt\"; break;
    case GT : br = \"gt\"; invbr = \"le\"; break;
    case LT : br = \"lt\"; invbr = \"ge\"; break;
    case GE : br = \"ge\"; invbr = \"lt\"; break;
    }

  /* Is branch target reachable with bxxz?  */
  if (get_attr_length (insn) == 4)
    {
      sprintf (asmtext, \"b%sz %%2,%%l0\", br);
      output_asm_insn (asmtext, operands);
    }
  else
    {
      sprintf (asmtext, \"b%sz %%2,1f\\;bra %%l0\\;1:\", invbr);
      output_asm_insn (asmtext, operands);
    }
  return \"\";
}"
  [(set_attr "type" "branch")
   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
   ; which is complex to track and inaccurate length specs.
   (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
						 (const_int 25000))
					   (const_int 50000))
				      (const_int 4)
				      (const_int 8)))])

;; Reversed compare-with-zero branch: branch when the condition is false,
;; so the short form uses the inverted suffix and the long form the direct one.
(define_insn "*rev_zero_branch_insn"
  [(set (pc)
	(if_then_else (match_operator 1 "eqne_comparison_operator"
				      [(match_operand:SI 2 "register_operand" "r")
				       (const_int 0)])
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ""
  "*
{
  char *br,*invbr;
  char asmtext[40];

  /* Pick the branch suffix and its inverse for the matched comparison.  */
  switch (GET_CODE (operands[1]))
    {
      case EQ : br = \"eq\"; invbr = \"ne\"; break;
      case NE : br = \"ne\"; invbr = \"eq\"; break;
      case LE : br = \"le\"; invbr = \"gt\"; break;
      case GT : br = \"gt\"; invbr = \"le\"; break;
      case LT : br = \"lt\"; invbr = \"ge\"; break;
      case GE : br = \"ge\"; invbr = \"lt\"; break;
    }

  /* Is branch target reachable with bxxz?  */
  if (get_attr_length (insn) == 4)
    {
      sprintf (asmtext, \"b%sz %%2,%%l0\", invbr);
      output_asm_insn (asmtext, operands);
    }
  else
    {
      sprintf (asmtext, \"b%sz %%2,1f;bra %%l0;1:\", br);
      output_asm_insn (asmtext, operands);
    }
  return \"\";
}"
  [(set_attr "type" "branch")
   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
   ; which is complex to track and inaccurate length specs.
   (set (attr "length")
	(if_then_else (ltu (plus (minus (match_dup 0) (pc))
				 (const_int 25000))
			   (const_int 50000))
		      (const_int 4)
		      (const_int 8)))])

;; Unconditional and other jump instructions.

;; Unconditional branch.  Short (2 byte) bra.s reaches +-~400 bytes
;; (slot-filling slack included), otherwise the 4 byte form is used.
(define_insn "jump"
  [(set (pc) (label_ref (match_operand 0 "" "")))]
  ""
  "bra %l0"
  [(set_attr "type" "uncond_branch")
   (set (attr "length")
	(if_then_else (ltu (plus (minus (match_dup 0) (pc))
				 (const_int 400))
			   (const_int 800))
		      (const_int 2)
		      (const_int 4)))])

;; Jump through a register.
(define_insn "indirect_jump"
  [(set (pc) (match_operand:SI 0 "address_operand" "p"))]
  ""
  "jmp %a0"
  [(set_attr "type" "uncond_branch")
   (set_attr "length" "2")])

;; Jump table dispatch; operand 1 is the table label, kept live via USE.
(define_insn "tablejump"
  [(set (pc) (match_operand:SI 0 "address_operand" "p"))
   (use (label_ref (match_operand 1 "" "")))]
  ""
  "jmp %a0"
  [(set_attr "type" "uncond_branch")
   (set_attr "length" "2")])

;; Calls.  r14 is the link register and is clobbered by every call.
(define_expand "call"
  ;; operands[1] is stack_size_rtx
  ;; operands[2] is next_arg_register
  [(parallel [(call (match_operand:SI 0 "call_operand" "")
		    (match_operand 1 "" ""))
	      (clobber (reg:SI 14))])]
  ""
  "")

;; Call through a register: single 2 byte jl.
(define_insn "*call_via_reg"
  [(call (mem:SI (match_operand:SI 0 "register_operand" "r"))
	 (match_operand 1 "" ""))
   (clobber (reg:SI 14))]
  ""
  "jl %0"
  [(set_attr "type" "call")
   (set_attr "length" "2")])

;; Call to a label.  Uses bl when the 26 bit displacement suffices,
;; otherwise builds the address in r14 and calls through it.
(define_insn "*call_via_label"
  [(call (mem:SI (match_operand:SI 0 "call_address_operand" ""))
	 (match_operand 1 "" ""))
   (clobber (reg:SI 14))]
  ""
  "*
{
  int call26_p = call26_operand (operands[0], FUNCTION_MODE);

  if (! call26_p)
    {
      /* We may not be able to reach with a `bl' insn so punt and leave it to
	 the linker.  We do this here, rather than doing a force_reg in the
	 define_expand so these insns won't be separated, say by scheduling,
	 thus simplifying the linker.  */
      return \"seth r14,%T0;add3 r14,r14,%B0;jl r14\";
    }
  else
    return \"bl %0\";
}"
  [(set_attr "type" "call")
   (set (attr "length")
	(if_then_else (eq (symbol_ref "call26_operand (operands[0], FUNCTION_MODE)")
			  (const_int 0))
		      (const_int 12) ; 10 + 2 for nop filler
		      ; The return address must be on a 4 byte boundary so
		      ; there's no point in using a value of 2 here.  A 2 byte
		      ; insn may go in the left slot but we currently can't
		      ; use such knowledge.
		      (const_int 4)))])

;; Call returning a value in operand 0; r14 (link register) is clobbered.
(define_expand "call_value"
  ;; operand 2 is stack_size_rtx
  ;; operand 3 is next_arg_register
  [(parallel [(set (match_operand 0 "register_operand" "=r")
		   (call (match_operand:SI 1 "call_operand" "")
			 (match_operand 2 "" "")))
	      (clobber (reg:SI 14))])]
  ""
  "")

;; Value-returning call through a register: single 2 byte jl.
(define_insn "*call_value_via_reg"
  [(set (match_operand 0 "register_operand" "=r")
	(call (mem:SI (match_operand:SI 1 "register_operand" "r"))
	      (match_operand 2 "" "")))
   (clobber (reg:SI 14))]
  ""
  "jl %1"
  [(set_attr "type" "call")
   (set_attr "length" "2")])

;; Value-returning call to a label; same bl-vs-r14 strategy as
;; *call_via_label above.
(define_insn "*call_value_via_label"
  [(set (match_operand 0 "register_operand" "=r")
	(call (mem:SI (match_operand:SI 1 "call_address_operand" ""))
	      (match_operand 2 "" "")))
   (clobber (reg:SI 14))]
  ""
  "*
{
  int call26_p = call26_operand (operands[1], FUNCTION_MODE);

  if (! call26_p)
    {
      /* We may not be able to reach with a `bl' insn so punt and leave it to
	 the linker.  We do this here, rather than doing a force_reg in the
	 define_expand so these insns won't be separated, say by scheduling,
	 thus simplifying the linker.  */
      return \"seth r14,%T1;add3 r14,r14,%B1;jl r14\";
    }
  else
    return \"bl %1\";
}"
  [(set_attr "type" "call")
   (set (attr "length")
	(if_then_else (eq (symbol_ref "call26_operand (operands[1], FUNCTION_MODE)")
			  (const_int 0))
		      (const_int 12) ; 10 + 2 for nop filler
		      ; The return address must be on a 4 byte boundary so
		      ; there's no point in using a value of 2 here.  A 2 byte
		      ; insn may go in the left slot but we currently can't
		      ; use such knowledge.
		      (const_int 4)))])

(define_insn "nop"
  [(const_int 0)]
  ""
  "nop"
  [(set_attr "type" "misc")
   (set_attr "length" "2")])

;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and ;; all of memory. This blocks insns from being moved across this point.

;; Scheduling barrier; emits no code (unspec usage 0 per the file header).
(define_insn "blockage"
  [(unspec_volatile [(const_int 0)] 0)]
  ""
  "")

;; Special pattern to flush the icache.

;; Unspec number 1 per the file header's unspec usage table
;; ("1 - flush_icache"); 0 would collide with blockage.
(define_insn "flush_icache"
  [(unspec_volatile [(match_operand 0 "memory_operand" "m")] 1)]
  ""
  "* return \"nop ; flush-icache\";"
  [(set_attr "type" "misc")])

;; Split up troublesome insns for better scheduling.

;; Peepholes go at the end.

;; ??? Setting the type attribute may not be useful, but for completeness ;; we do it.

;; Combine a store at reg+4 into a 2 byte pre-increment store when the base
;; register dies here (so clobbering it with @+ is safe).
(define_peephole
  [(set (mem:SI (plus:SI (match_operand:SI 0 "register_operand" "r")
			 (const_int 4)))
	(match_operand:SI 1 "register_operand" "r"))]
  "dead_or_set_p (insn, operands[0])"
  "st %1,@+%0"
  [(set_attr "type" "store")
   (set_attr "length" "2")])