/* aarch64-tbl.h -- AArch64 opcode description table and instruction
operand description table.
Copyright (C) 2012-2021 Free Software Foundation, Inc.
This file is part of the GNU opcodes library.
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
It is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
You should have received a copy of the GNU General Public License
along with this file; see the file COPYING. If not, write to the
Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston,
MA 02110-1301, USA. */
#include "aarch64-opc.h"
/* The includer must define VERIFIER (presumably expanded by the opcode
   table entries later in this file) before including this header;
   fail loudly at preprocessing time otherwise.  */
#ifndef VERIFIER
#error VERIFIER must be defined.
#endif
/* Operand type.  */
#define OPND(x) AARCH64_OPND_##x
/* Build an operand-type list initializer with 0 to 5 entries.  */
#define OP0() {}
#define OP1(a) {OPND(a)}
#define OP2(a,b) {OPND(a), OPND(b)}
#define OP3(a,b,c) {OPND(a), OPND(b), OPND(c)}
#define OP4(a,b,c,d) {OPND(a), OPND(b), OPND(c), OPND(d)}
#define OP5(a,b,c,d,e) {OPND(a), OPND(b), OPND(c), OPND(d), OPND(e)}
/* Operand qualifier.  */
#define QLF(x) AARCH64_OPND_QLF_##x
/* Build one row of a qualifier-sequence list with 1 to 5 entries,
   one qualifier per operand.  */
#define QLF1(a) {QLF(a)}
#define QLF2(a,b) {QLF(a), QLF(b)}
#define QLF3(a,b,c) {QLF(a), QLF(b), QLF(c)}
#define QLF4(a,b,c,d) {QLF(a), QLF(b), QLF(c), QLF(d)}
#define QLF5(a,b,c,d,e) {QLF(a), QLF(b), QLF(c), QLF(d), QLF(e)}
/* Qualifiers list. */
/* e.g. MSR <systemreg>, <Xt>. */
#define QL_SRC_X \
{ \
QLF2(NIL,X), \
}
/* e.g. MRS <Xt>, <systemreg>. */
#define QL_DST_X \
{ \
QLF2(X,NIL), \
}
/* e.g. SYS #<op1>, <Cn>, <Cm>, #<op2>{, <Xt>}. */
#define QL_SYS \
{ \
QLF5(NIL,CR,CR,NIL,X), \
}
/* e.g. SYSL <Xt>, #<op1>, <Cn>, <Cm>, #<op2>. */
#define QL_SYSL \
{ \
QLF5(X,NIL,CR,CR,NIL), \
}
/* e.g. ADRP <Xd>, <label>. */
#define QL_ADRP \
{ \
QLF2(X,NIL), \
}
/* e.g. TCANCEL #<imm>. */
#define QL_IMM_NIL \
{ \
QLF1(NIL), \
}
/* e.g. B.<cond> <label>. */
#define QL_PCREL_NIL \
{ \
QLF1(NIL), \
}
/* e.g. TBZ <Xt>, #<imm>, <label>. */
#define QL_PCREL_14 \
{ \
QLF3(X,imm_0_63,NIL), \
}
/* e.g. BL <label>. */
#define QL_PCREL_26 \
{ \
QLF1(NIL), \
}
/* e.g. LDRSW <Xt>, <label>. */
#define QL_X_PCREL \
{ \
QLF2(X,NIL), \
}
/* e.g. LDR <Wt>, <label>. */
#define QL_R_PCREL \
{ \
QLF2(W,NIL), \
QLF2(X,NIL), \
}
/* e.g. LDR <Dt>, <label>. */
#define QL_FP_PCREL \
{ \
QLF2(S_S,NIL), \
QLF2(S_D,NIL), \
QLF2(S_Q,NIL), \
}
/* e.g. PRFM <prfop>, <label>. */
#define QL_PRFM_PCREL \
{ \
QLF2(NIL,NIL), \
}
/* e.g. BR <Xn>. */
#define QL_I1X \
{ \
QLF1(X), \
}
/* e.g. STG <Xt|SP>, [<Xn|SP>, #<imm9>]. */
#define QL_LDST_AT \
{ \
QLF2(X, imm_tag), \
QLF2(SP, imm_tag), \
}
/* e.g. RBIT <Wd>, <Wn>. */
#define QL_I2SAME \
{ \
QLF2(W,W), \
QLF2(X,X), \
}
/* e.g. CMN <Wn|WSP>, <Wm>{, <extend> {#<amount>}}. */
#define QL_I2_EXT \
{ \
QLF2(W,W), \
QLF2(X,W), \
QLF2(X,X), \
}
/* e.g. MOV <Wd|WSP>, <Wn|WSP>, at least one SP. */
#define QL_I2SP \
{ \
QLF2(WSP,W), \
QLF2(W,WSP), \
QLF2(SP,X), \
QLF2(X,SP), \
}
/* e.g. REV <Wd>, <Wn>. */
#define QL_I2SAMEW \
{ \
QLF2(W,W), \
}
/* e.g. REV32 <Xd>, <Xn>. */
#define QL_I2SAMEX \
{ \
QLF2(X,X), \
}
#define QL_I2SAMER \
{ \
QLF2(W,W), \
QLF2(X,X), \
}
/* e.g. CRC32B <Wd>, <Wn>, <Wm>. */
#define QL_I3SAMEW \
{ \
QLF3(W,W,W), \
}
/* e.g. SMULH <Xd>, <Xn>, <Xm>. */
#define QL_I3SAMEX \
{ \
QLF3(X,X,X), \
}
/* e.g. CRC32X <Wd>, <Wn>, <Xm>. */
#define QL_I3WWX \
{ \
QLF3(W,W,X), \
}
/* e.g. UDIV <Xd>, <Xn>, <Xm>. */
#define QL_I3SAMER \
{ \
QLF3(W,W,W), \
QLF3(X,X,X), \
}
/* e.g. ADDS <Xd>, <Xn|SP>, <R><m>{, <extend> {#<amount>}}. */
#define QL_I3_EXT \
{ \
QLF3(W,W,W), \
QLF3(X,X,W), \
QLF3(X,X,X), \
}
/* e.g. MADD <Xd>, <Xn>, <Xm>, <Xa>. */
#define QL_I4SAMER \
{ \
QLF4(W,W,W,W), \
QLF4(X,X,X,X), \
}
/* e.g. SMADDL <Xd>, <Wn>, <Wm>, <Xa>. */
#define QL_I3SAMEL \
{ \
QLF3(X,W,W), \
}
/* e.g. SMADDL <Xd>, <Wn>, <Wm>, <Xa>. */
#define QL_I4SAMEL \
{ \
QLF4(X,W,W,X), \
}
/* e.g. CSINC <Xd>, <Xn>, <Xm>, <cond>. */
#define QL_CSEL \
{ \
QLF4(W, W, W, NIL), \
QLF4(X, X, X, NIL), \
}
/* e.g. CSET <Wd>, <cond>. */
#define QL_DST_R \
{ \
QLF2(W, NIL), \
QLF2(X, NIL), \
}
/* e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>. */
#define QL_BF \
{ \
QLF4(W,W,imm_0_31,imm_0_31), \
QLF4(X,X,imm_0_63,imm_0_63), \
}
/* e.g. ADDG <Xd>, <Xn>, #<uimm10>, #<uimm4>. */
/* Note: no line-continuation backslash after the closing brace -- a stray
   one here would splice the next source line into this macro's
   replacement list.  */
#define QL_ADDG \
{ \
QLF4(X,X,NIL,imm_0_15), \
}
/* e.g. BFC <Wd>, #<immr>, #<imms>. */
#define QL_BF1 \
{ \
QLF3 (W, imm_0_31, imm_1_32), \
QLF3 (X, imm_0_63, imm_1_64), \
}
/* e.g. UBFIZ <Wd>, <Wn>, #<lsb>, #<width>. */
#define QL_BF2 \
{ \
QLF4(W,W,imm_0_31,imm_1_32), \
QLF4(X,X,imm_0_63,imm_1_64), \
}
/* e.g. SCVTF <Sd>, <Xn>, #<fbits>. */
#define QL_FIX2FP \
{ \
QLF3(S_D,W,imm_1_32), \
QLF3(S_S,W,imm_1_32), \
QLF3(S_D,X,imm_1_64), \
QLF3(S_S,X,imm_1_64), \
}
/* e.g. SCVTF <Hd>, <Xn>, #<fbits>. */
#define QL_FIX2FP_H \
{ \
QLF3 (S_H, W, imm_1_32), \
QLF3 (S_H, X, imm_1_64), \
}
/* e.g. FCVTZS <Wd>, <Dn>, #<fbits>. */
#define QL_FP2FIX \
{ \
QLF3(W,S_D,imm_1_32), \
QLF3(W,S_S,imm_1_32), \
QLF3(X,S_D,imm_1_64), \
QLF3(X,S_S,imm_1_64), \
}
/* e.g. FCVTZS <Wd>, <Hn>, #<fbits>. */
#define QL_FP2FIX_H \
{ \
QLF3 (W, S_H, imm_1_32), \
QLF3 (X, S_H, imm_1_64), \
}
/* e.g. SCVTF <Dd>, <Wn>. */
#define QL_INT2FP \
{ \
QLF2(S_D,W), \
QLF2(S_S,W), \
QLF2(S_D,X), \
QLF2(S_S,X), \
}
/* e.g. FMOV <Dd>, <Xn>. */
#define QL_INT2FP_FMOV \
{ \
QLF2(S_S,W), \
QLF2(S_D,X), \
}
/* e.g. SCVTF <Hd>, <Wn>. */
#define QL_INT2FP_H \
{ \
QLF2 (S_H, W), \
QLF2 (S_H, X), \
}
/* e.g. FCVTNS <Xd>, <Dn>. */
#define QL_FP2INT \
{ \
QLF2(W,S_D), \
QLF2(W,S_S), \
QLF2(X,S_D), \
QLF2(X,S_S), \
}
/* e.g. FMOV <Xd>, <Dn>. */
#define QL_FP2INT_FMOV \
{ \
QLF2(W,S_S), \
QLF2(X,S_D), \
}
/* e.g. FCVTNS <Hd>, <Wn>. */
#define QL_FP2INT_H \
{ \
QLF2 (W, S_H), \
QLF2 (X, S_H), \
}
/* e.g. FJCVTZS <Wd>, <Dn>. */
#define QL_FP2INT_W_D \
{ \
QLF2 (W, S_D), \
}
/* e.g. FMOV <Xd>, <Vn>.D[1]. */
#define QL_XVD1 \
{ \
QLF2(X,S_D), \
}
/* e.g. FMOV <Vd>.D[1], <Xn>. */
#define QL_VD1X \
{ \
QLF2(S_D,X), \
}
/* e.g. EXTR <Xd>, <Xn>, <Xm>, #<lsb>. */
#define QL_EXTR \
{ \
QLF4(W,W,W,imm_0_31), \
QLF4(X,X,X,imm_0_63), \
}
/* e.g. LSL <Wd>, <Wn>, #<uimm>. */
#define QL_SHIFT \
{ \
QLF3(W,W,imm_0_31), \
QLF3(X,X,imm_0_63), \
}
/* e.g. UXTH <Xd>, <Wn>. */
#define QL_EXT \
{ \
QLF2(W,W), \
QLF2(X,W), \
}
/* e.g. UXTW <Xd>, <Wn>. */
#define QL_EXT_W \
{ \
QLF2(X,W), \
}
/* e.g. SQSHL <V><d>, <V><n>, #<shift>. */
#define QL_SSHIFT \
{ \
QLF3(S_B , S_B , S_B ), \
QLF3(S_H , S_H , S_H ), \
QLF3(S_S , S_S , S_S ), \
QLF3(S_D , S_D , S_D ) \
}
/* e.g. SSHR <V><d>, <V><n>, #<shift>. */
#define QL_SSHIFT_D \
{ \
QLF3(S_D , S_D , S_D ) \
}
/* e.g. UCVTF <Vd>.<T>, <Vn>.<T>, #<fbits>. */
#define QL_SSHIFT_SD \
{ \
QLF3(S_S , S_S , S_S ), \
QLF3(S_D , S_D , S_D ) \
}
/* e.g. UCVTF <Vd>.<T>, <Vn>.<T>, #<fbits>. */
#define QL_SSHIFT_H \
{ \
QLF3 (S_H, S_H, S_H) \
}
/* e.g. SQSHRUN <Vb><d>, <Va><n>, #<shift>. */
#define QL_SSHIFTN \
{ \
QLF3(S_B , S_H , S_B ), \
QLF3(S_H , S_S , S_H ), \
QLF3(S_S , S_D , S_S ), \
}
/* e.g. SSHR <Vd>.<T>, <Vn>.<T>, #<shift>.
The register operand variant qualifiers are deliberately used for the
immediate operand to ease the operand encoding/decoding and qualifier
sequence matching. */
#define QL_VSHIFT \
{ \
QLF3(V_8B , V_8B , V_8B ), \
QLF3(V_16B, V_16B, V_16B), \
QLF3(V_4H , V_4H , V_4H ), \
QLF3(V_8H , V_8H , V_8H ), \
QLF3(V_2S , V_2S , V_2S ), \
QLF3(V_4S , V_4S , V_4S ), \
QLF3(V_2D , V_2D , V_2D ) \
}
/* e.g. SCVTF <Vd>.<T>, <Vn>.<T>, #<fbits>. */
#define QL_VSHIFT_SD \
{ \
QLF3(V_2S , V_2S , V_2S ), \
QLF3(V_4S , V_4S , V_4S ), \
QLF3(V_2D , V_2D , V_2D ) \
}
/* e.g. SCVTF <Vd>.<T>, <Vn>.<T>, #<fbits>. */
#define QL_VSHIFT_H \
{ \
QLF3 (V_4H, V_4H, V_4H), \
QLF3 (V_8H, V_8H, V_8H) \
}
/* e.g. SHRN<Q> <Vd>.<Tb>, <Vn>.<Ta>, #<shift>. */
#define QL_VSHIFTN \
{ \
QLF3(V_8B , V_8H , V_8B ), \
QLF3(V_4H , V_4S , V_4H ), \
QLF3(V_2S , V_2D , V_2S ), \
}
/* e.g. SHRN<Q> <Vd>.<Tb>, <Vn>.<Ta>, #<shift>. */
#define QL_VSHIFTN2 \
{ \
QLF3(V_16B, V_8H, V_16B), \
QLF3(V_8H , V_4S , V_8H ), \
QLF3(V_4S , V_2D , V_4S ), \
}
/* e.g. SSHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #<shift>.
the 3rd qualifier is used to help the encoding. */
#define QL_VSHIFTL \
{ \
QLF3(V_8H , V_8B , V_8B ), \
QLF3(V_4S , V_4H , V_4H ), \
QLF3(V_2D , V_2S , V_2S ), \
}
/* e.g. SSHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
#define QL_VSHIFTL2 \
{ \
QLF3(V_8H , V_16B, V_16B), \
QLF3(V_4S , V_8H , V_8H ), \
QLF3(V_2D , V_4S , V_4S ), \
}
/* e.g. TBL. */
#define QL_TABLE \
{ \
QLF3(V_8B , V_16B, V_8B ), \
QLF3(V_16B, V_16B, V_16B), \
}
/* e.g. SHA1H. */
#define QL_2SAMES \
{ \
QLF2(S_S, S_S), \
}
/* e.g. ABS <V><d>, <V><n>. */
#define QL_2SAMED \
{ \
QLF2(S_D, S_D), \
}
/* e.g. CMGT <V><d>, <V><n>, #0. */
#define QL_SISD_CMP_0 \
{ \
QLF3(S_D, S_D, NIL), \
}
/* e.g. FCMEQ <V><d>, <V><n>, #0. */
#define QL_SISD_FCMP_0 \
{ \
QLF3(S_S, S_S, NIL), \
QLF3(S_D, S_D, NIL), \
}
/* e.g. FCMEQ <V><d>, <V><n>, #0. */
#define QL_SISD_FCMP_H_0 \
{ \
QLF3 (S_H, S_H, NIL), \
}
/* e.g. FMAXNMP <V><d>, <Vn>.<T>. */
#define QL_SISD_PAIR \
{ \
QLF2(S_S, V_2S), \
QLF2(S_D, V_2D), \
}
/* e.g. FMAXNMP <V><d>, <Vn>.<T>. */
#define QL_SISD_PAIR_H \
{ \
QLF2 (S_H, V_2H), \
}
/* e.g. ADDP <V><d>, <Vn>.<T>. */
#define QL_SISD_PAIR_D \
{ \
QLF2(S_D, V_2D), \
}
/* e.g. DUP <V><d>, <Vn>.<T>[<index>]. */
#define QL_S_2SAME \
{ \
QLF2(S_B, S_B), \
QLF2(S_H, S_H), \
QLF2(S_S, S_S), \
QLF2(S_D, S_D), \
}
/* e.g. FCVTNS <V><d>, <V><n>. */
#define QL_S_2SAMESD \
{ \
QLF2(S_S, S_S), \
QLF2(S_D, S_D), \
}
/* e.g. FCVTNS <V><d>, <V><n>. */
#define QL_S_2SAMEH \
{ \
QLF2 (S_H, S_H), \
}
/* e.g. SQXTN <Vb><d>, <Va><n>. */
#define QL_SISD_NARROW \
{ \
QLF2(S_B, S_H), \
QLF2(S_H, S_S), \
QLF2(S_S, S_D), \
}
/* e.g. FCVTXN <Vb><d>, <Va><n>. */
#define QL_SISD_NARROW_S \
{ \
QLF2(S_S, S_D), \
}
/* e.g. FCVT. */
#define QL_FCVT \
{ \
QLF2(S_S, S_H), \
QLF2(S_S, S_D), \
QLF2(S_D, S_H), \
QLF2(S_D, S_S), \
QLF2(S_H, S_S), \
QLF2(S_H, S_D), \
}
/* FMOV <Dd>, <Dn>. */
#define QL_FP2 \
{ \
QLF2(S_S, S_S), \
QLF2(S_D, S_D), \
}
/* FMOV <Hd>, <Hn>. */
#define QL_FP2_H \
{ \
QLF2 (S_H, S_H), \
}
/* e.g. SQADD <V><d>, <V><n>, <V><m>. */
#define QL_S_3SAME \
{ \
QLF3(S_B, S_B, S_B), \
QLF3(S_H, S_H, S_H), \
QLF3(S_S, S_S, S_S), \
QLF3(S_D, S_D, S_D), \
}
/* e.g. CMGE <V><d>, <V><n>, <V><m>. */
#define QL_S_3SAMED \
{ \
QLF3(S_D, S_D, S_D), \
}
/* e.g. SQDMULH <V><d>, <V><n>, <V><m>. */
#define QL_SISD_HS \
{ \
QLF3(S_H, S_H, S_H), \
QLF3(S_S, S_S, S_S), \
}
/* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vb><m>. */
#define QL_SISDL_HS \
{ \
QLF3(S_S, S_H, S_H), \
QLF3(S_D, S_S, S_S), \
}
/* FMUL <Sd>, <Sn>, <Sm>. */
#define QL_FP3 \
{ \
QLF3(S_S, S_S, S_S), \
QLF3(S_D, S_D, S_D), \
}
/* FMUL <Hd>, <Hn>, <Hm>. */
#define QL_FP3_H \
{ \
QLF3 (S_H, S_H, S_H), \
}
/* FMADD <Dd>, <Dn>, <Dm>, <Da>. */
#define QL_FP4 \
{ \
QLF4(S_S, S_S, S_S, S_S), \
QLF4(S_D, S_D, S_D, S_D), \
}
/* FMADD <Hd>, <Hn>, <Hm>, <Ha>. */
#define QL_FP4_H \
{ \
QLF4 (S_H, S_H, S_H, S_H), \
}
/* e.g. FCMP <Dn>, #0.0. */
#define QL_DST_SD \
{ \
QLF2(S_S, NIL), \
QLF2(S_D, NIL), \
}
/* e.g. FCMP <Hn>, #0.0. */
#define QL_DST_H \
{ \
QLF2 (S_H, NIL), \
}
/* FCSEL <Sd>, <Sn>, <Sm>, <cond>. */
#define QL_FP_COND \
{ \
QLF4(S_S, S_S, S_S, NIL), \
QLF4(S_D, S_D, S_D, NIL), \
}
/* FCSEL <Hd>, <Hn>, <Hm>, <cond>. */
#define QL_FP_COND_H \
{ \
QLF4 (S_H, S_H, S_H, NIL), \
}
/* e.g. CCMN <Xn>, <Xm>, #<nzcv>, <cond>. */
#define QL_CCMP \
{ \
QLF4(W, W, NIL, NIL), \
QLF4(X, X, NIL, NIL), \
}
/* e.g. CCMN <Xn>, #<imm>, #<nzcv>, <cond>. */
#define QL_CCMP_IMM \
{ \
QLF4(W, NIL, NIL, NIL), \
QLF4(X, NIL, NIL, NIL), \
}
/* e.g. FCCMP <Sn>, <Sm>, #<nzcv>, <cond>. */
#define QL_FCCMP \
{ \
QLF4(S_S, S_S, NIL, NIL), \
QLF4(S_D, S_D, NIL, NIL), \
}
/* e.g. FCCMP <Sn>, <Sm>, #<nzcv>, <cond>. */
#define QL_FCCMP_H \
{ \
QLF4 (S_H, S_H, NIL, NIL), \
}
/* e.g. DUP <Vd>.<T>, <Vn>.<Ts>[<index>]. */
#define QL_DUP_VX \
{ \
QLF2(V_8B , S_B ), \
QLF2(V_16B, S_B ), \
QLF2(V_4H , S_H ), \
QLF2(V_8H , S_H ), \
QLF2(V_2S , S_S ), \
QLF2(V_4S , S_S ), \
QLF2(V_2D , S_D ), \
}
/* e.g. DUP <Vd>.<T>, <Wn>. */
#define QL_DUP_VR \
{ \
QLF2(V_8B , W ), \
QLF2(V_16B, W ), \
QLF2(V_4H , W ), \
QLF2(V_8H , W ), \
QLF2(V_2S , W ), \
QLF2(V_4S , W ), \
QLF2(V_2D , X ), \
}
/* e.g. INS <Vd>.<Ts>[<index>], <Wn>. */
#define QL_INS_XR \
{ \
QLF2(S_H , W ), \
QLF2(S_S , W ), \
QLF2(S_D , X ), \
QLF2(S_B , W ), \
}
/* e.g. SMOV <Wd>, <Vn>.<Ts>[<index>]. */
#define QL_SMOV \
{ \
QLF2(W , S_H), \
QLF2(X , S_H), \
QLF2(X , S_S), \
QLF2(W , S_B), \
QLF2(X , S_B), \
}
/* e.g. UMOV <Wd>, <Vn>.<Ts>[<index>]. */
#define QL_UMOV \
{ \
QLF2(W , S_H), \
QLF2(W , S_S), \
QLF2(X , S_D), \
QLF2(W , S_B), \
}
/* e.g. MOV <Wd>, <Vn>.<Ts>[<index>]. */
#define QL_MOV \
{ \
QLF2(W , S_S), \
QLF2(X , S_D), \
}
/* e.g. SUQADD <Vd>.<T>, <Vn>.<T>. */
#define QL_V2SAME \
{ \
QLF2(V_8B , V_8B ), \
QLF2(V_16B, V_16B), \
QLF2(V_4H , V_4H ), \
QLF2(V_8H , V_8H ), \
QLF2(V_2S , V_2S ), \
QLF2(V_4S , V_4S ), \
QLF2(V_2D , V_2D ), \
}
/* e.g. URSQRTE <Vd>.<T>, <Vn>.<T>. */
#define QL_V2SAMES \
{ \
QLF2(V_2S , V_2S ), \
QLF2(V_4S , V_4S ), \
}
/* e.g. REV32 <Vd>.<T>, <Vn>.<T>. */
#define QL_V2SAMEBH \
{ \
QLF2(V_8B , V_8B ), \
QLF2(V_16B, V_16B), \
QLF2(V_4H , V_4H ), \
QLF2(V_8H , V_8H ), \
}
/* e.g. FRINTN <Vd>.<T>, <Vn>.<T>. */
#define QL_V2SAMESD \
{ \
QLF2(V_2S , V_2S ), \
QLF2(V_4S , V_4S ), \
QLF2(V_2D , V_2D ), \
}
/* e.g. REV64 <Vd>.<T>, <Vn>.<T>. */
#define QL_V2SAMEBHS \
{ \
QLF2(V_8B , V_8B ), \
QLF2(V_16B, V_16B), \
QLF2(V_4H , V_4H ), \
QLF2(V_8H , V_8H ), \
QLF2(V_2S , V_2S ), \
QLF2(V_4S , V_4S ), \
}
/* e.g. FCMGT <Vd>.<T>, <Vn>.<T>, #0.0. */
#define QL_V2SAMEH \
{ \
QLF2 (V_4H, V_4H), \
QLF2 (V_8H, V_8H), \
}
/* e.g. REV16 <Vd>.<T>, <Vn>.<T>. */
#define QL_V2SAMEB \
{ \
QLF2(V_8B , V_8B ), \
QLF2(V_16B, V_16B), \
}
/* e.g. SADDLP <Vd>.<Ta>, <Vn>.<Tb>. */
#define QL_V2PAIRWISELONGBHS \
{ \
QLF2(V_4H , V_8B ), \
QLF2(V_8H , V_16B), \
QLF2(V_2S , V_4H ), \
QLF2(V_4S , V_8H ), \
QLF2(V_1D , V_2S ), \
QLF2(V_2D , V_4S ), \
}
/* e.g. SHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
#define QL_V2LONGBHS \
{ \
QLF2(V_8H , V_8B ), \
QLF2(V_4S , V_4H ), \
QLF2(V_2D , V_2S ), \
}
/* e.g. SHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
#define QL_V2LONGBHS2 \
{ \
QLF2(V_8H , V_16B), \
QLF2(V_4S , V_8H ), \
QLF2(V_2D , V_4S ), \
}
/* e.g. ADD <Vd>.<T>, <Vn>.<T>, <Vm>.<T>. */
#define QL_V3SAME \
{ \
QLF3(V_8B , V_8B , V_8B ), \
QLF3(V_16B, V_16B, V_16B), \
QLF3(V_4H , V_4H , V_4H ), \
QLF3(V_8H , V_8H , V_8H ), \
QLF3(V_2S , V_2S , V_2S ), \
QLF3(V_4S , V_4S , V_4S ), \
QLF3(V_2D , V_2D , V_2D ) \
}
/* e.g. SHADD. */
#define QL_V3SAMEBHS \
{ \
QLF3(V_8B , V_8B , V_8B ), \
QLF3(V_16B, V_16B, V_16B), \
QLF3(V_4H , V_4H , V_4H ), \
QLF3(V_8H , V_8H , V_8H ), \
QLF3(V_2S , V_2S , V_2S ), \
QLF3(V_4S , V_4S , V_4S ), \
}
/* e.g. FCVTXN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
#define QL_V2NARRS \
{ \
QLF2(V_2S , V_2D ), \
}
/* e.g. FCVTXN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
#define QL_V2NARRS2 \
{ \
QLF2(V_4S , V_2D ), \
}
/* e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
#define QL_V2NARRHS \
{ \
QLF2(V_4H , V_4S ), \
QLF2(V_2S , V_2D ), \
}
/* e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
#define QL_V2NARRHS2 \
{ \
QLF2(V_8H , V_4S ), \
QLF2(V_4S , V_2D ), \
}
/* e.g. FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
#define QL_V2LONGHS \
{ \
QLF2(V_4S , V_4H ), \
QLF2(V_2D , V_2S ), \
}
/* e.g. FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
#define QL_V2LONGHS2 \
{ \
QLF2(V_4S , V_8H ), \
QLF2(V_2D , V_4S ), \
}
/* e.g. XTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
#define QL_V2NARRBHS \
{ \
QLF2(V_8B , V_8H ), \
QLF2(V_4H , V_4S ), \
QLF2(V_2S , V_2D ), \
}
/* e.g. XTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
#define QL_V2NARRBHS2 \
{ \
QLF2(V_16B, V_8H ), \
QLF2(V_8H , V_4S ), \
QLF2(V_4S , V_2D ), \
}
/* e.g. ORR.  Uses QL_V2SAMEB, defined earlier (near REV16); a redundant
   byte-identical redefinition of QL_V2SAMEB that used to sit here has
   been removed.  */
/* e.g. AESE. */
#define QL_V2SAME16B \
{ \
QLF2(V_16B, V_16B), \
}
/* e.g. SHA1SU1. */
#define QL_V2SAME4S \
{ \
QLF2(V_4S, V_4S), \
}
/* e.g. SHA1SU0. */
#define QL_V3SAME4S \
{ \
QLF3(V_4S, V_4S, V_4S), \
}
/* e.g. SHADD. */
#define QL_V3SAMEB \
{ \
QLF3(V_8B , V_8B , V_8B ), \
QLF3(V_16B, V_16B, V_16B), \
}
/* e.g. EXT <Vd>.<T>, <Vn>.<T>, <Vm>.<T>, #<index>. */
#define QL_VEXT \
{ \
QLF4(V_8B , V_8B , V_8B , imm_0_7), \
QLF4(V_16B, V_16B, V_16B, imm_0_15), \
}
/* e.g. SQDMULH <Vd>.<T>, <Vn>.<T>, <Vm>.<T>. */
#define QL_V3SAMEHS \
{ \
QLF3(V_4H , V_4H , V_4H ), \
QLF3(V_8H , V_8H , V_8H ), \
QLF3(V_2S , V_2S , V_2S ), \
QLF3(V_4S , V_4S , V_4S ), \
}
/* e.g. FADD <Vd>.<T>, <Vn>.<T>, <Vm>.<T>. */
#define QL_V3SAMESD \
{ \
QLF3(V_2S , V_2S , V_2S ), \
QLF3(V_4S , V_4S , V_4S ), \
QLF3(V_2D , V_2D , V_2D ) \
}
/* e.g. FCMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<T>, #<rotate>. */
#define QL_V3SAMEHSD_ROT \
{ \
QLF4 (V_4H, V_4H, V_4H, NIL), \
QLF4 (V_8H, V_8H, V_8H, NIL), \
QLF4 (V_2S, V_2S, V_2S, NIL), \
QLF4 (V_4S, V_4S, V_4S, NIL), \
QLF4 (V_2D, V_2D, V_2D, NIL), \
}
/* e.g. FMAXNM <Vd>.<T>, <Vn>.<T>, <Vm>.<T>. */
#define QL_V3SAMEH \
{ \
QLF3 (V_4H , V_4H , V_4H ), \
QLF3 (V_8H , V_8H , V_8H ), \
}
/* e.g. SQDMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Tb>. */
#define QL_V3LONGHS \
{ \
QLF3(V_4S , V_4H , V_4H ), \
QLF3(V_2D , V_2S , V_2S ), \
}
/* e.g. SQDMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Tb>. */
#define QL_V3LONGHS2 \
{ \
QLF3(V_4S , V_8H , V_8H ), \
QLF3(V_2D , V_4S , V_4S ), \
}
/* e.g. SADDL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Tb>. */
#define QL_V3LONGBHS \
{ \
QLF3(V_8H , V_8B , V_8B ), \
QLF3(V_4S , V_4H , V_4H ), \
QLF3(V_2D , V_2S , V_2S ), \
}
/* e.g. SADDL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Tb>. */
#define QL_V3LONGBHS2 \
{ \
QLF3(V_8H , V_16B , V_16B ), \
QLF3(V_4S , V_8H , V_8H ), \
QLF3(V_2D , V_4S , V_4S ), \
}
/* e.g. SADDW<Q> <Vd>.<Ta>, <Vn>.<Ta>, <Vm>.<Tb>. */
#define QL_V3WIDEBHS \
{ \
QLF3(V_8H , V_8H , V_8B ), \
QLF3(V_4S , V_4S , V_4H ), \
QLF3(V_2D , V_2D , V_2S ), \
}
/* e.g. SADDW<Q> <Vd>.<Ta>, <Vn>.<Ta>, <Vm>.<Tb>. */
#define QL_V3WIDEBHS2 \
{ \
QLF3(V_8H , V_8H , V_16B ), \
QLF3(V_4S , V_4S , V_8H ), \
QLF3(V_2D , V_2D , V_4S ), \
}
/* e.g. ADDHN<Q> <Vd>.<Tb>, <Vn>.<Ta>, <Vm>.<Ta>. */
#define QL_V3NARRBHS \
{ \
QLF3(V_8B , V_8H , V_8H ), \
QLF3(V_4H , V_4S , V_4S ), \
QLF3(V_2S , V_2D , V_2D ), \
}
/* e.g. ADDHN<Q> <Vd>.<Tb>, <Vn>.<Ta>, <Vm>.<Ta>. */
#define QL_V3NARRBHS2 \
{ \
QLF3(V_16B , V_8H , V_8H ), \
QLF3(V_8H , V_4S , V_4S ), \
QLF3(V_4S , V_2D , V_2D ), \
}
/* e.g. PMULL. */
#define QL_V3LONGB \
{ \
QLF3(V_8H , V_8B , V_8B ), \
}
/* e.g. PMULL crypto. */
#define QL_V3LONGD \
{ \
QLF3(V_1Q , V_1D , V_1D ), \
}
/* e.g. PMULL2. */
#define QL_V3LONGB2 \
{ \
QLF3(V_8H , V_16B, V_16B), \
}
/* e.g. PMULL2 crypto. */
#define QL_V3LONGD2 \
{ \
QLF3(V_1Q , V_2D , V_2D ), \
}
/* e.g. SHA1C. */
#define QL_SHAUPT \
{ \
QLF3(S_Q, S_S, V_4S), \
}
/* e.g. SHA256H2. */
#define QL_SHA256UPT \
{ \
QLF3(S_Q, S_Q, V_4S), \
}
/* e.g. LDXRB <Wt>, [<Xn|SP>{,#0}]. */
#define QL_W1_LDST_EXC \
{ \
QLF2(W, NIL), \
}
/* e.g. LDXR <Xt>, [<Xn|SP>{,#0}]. */
#define QL_R1NIL \
{ \
QLF2(W, NIL), \
QLF2(X, NIL), \
}
/* e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
#define QL_W2_LDST_EXC \
{ \
QLF3(W, W, NIL), \
}
/* e.g. STXR <Ws>, <Xt>, [<Xn|SP>{,#0}]. */
#define QL_R2_LDST_EXC \
{ \
QLF3(W, W, NIL), \
QLF3(W, X, NIL), \
}
/* e.g. ST64B <Xs>, <Xt>, [<Xn|SP>]. */
#define QL_X2NIL \
{ \
QLF3(X, X, NIL), \
}
/* e.g. LDRAA <Xt>, [<Xn|SP>{,#imm}]. */
#define QL_X1NIL \
{ \
QLF2(X, NIL), \
}
/* e.g. LDXP <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
#define QL_R2NIL \
{ \
QLF3(W, W, NIL), \
QLF3(X, X, NIL), \
}
/* e.g. CASP <Xt1>, <Xt1+1>, <Xt2>, <Xt2+1>, [<Xn|SP>{,#0}]. */
#define QL_R4NIL \
{ \
QLF5(W, W, W, W, NIL), \
QLF5(X, X, X, X, NIL), \
}
/* e.g. STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
#define QL_R3_LDST_EXC \
{ \
QLF4(W, W, W, NIL), \
QLF4(W, X, X, NIL), \
}
/* e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
#define QL_LDST_FP \
{ \
QLF2(S_B, S_B), \
QLF2(S_H, S_H), \
QLF2(S_S, S_S), \
QLF2(S_D, S_D), \
QLF2(S_Q, S_Q), \
}
/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
#define QL_LDST_R \
{ \
QLF2(W, S_S), \
QLF2(X, S_D), \
}
/* e.g. STRB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
#define QL_LDST_W8 \
{ \
QLF2(W, S_B), \
}
/* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
#define QL_LDST_R8 \
{ \
QLF2(W, S_B), \
QLF2(X, S_B), \
}
/* e.g. STRH <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
#define QL_LDST_W16 \
{ \
QLF2(W, S_H), \
}
/* e.g. LDRSW <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
#define QL_LDST_X32 \
{ \
QLF2(X, S_S), \
}
/* e.g. LDRSH <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
#define QL_LDST_R16 \
{ \
QLF2(W, S_H), \
QLF2(X, S_H), \
}
/* e.g. PRFM <prfop>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
#define QL_LDST_PRFM \
{ \
QLF2(NIL, S_D), \
}
/* e.g. LDG <Xt>, [<Xn|SP>{, #<simm>}]. */
#define QL_LDG \
{ \
QLF2(X, imm_tag), \
}
/* e.g. LDPSW <Xt1>, <Xt2>, [<Xn|SP>{, #<imm>}]. */
#define QL_LDST_PAIR_X32 \
{ \
QLF3(X, X, S_S), \
}
/* e.g. STGP <Xt1>, <Xt2>, [<Xn|SP>{, #<imm>}]. */
#define QL_STGP \
{ \
QLF3(X, X, imm_tag), \
}
/* e.g. STP <Wt1>, <Wt2>, [<Xn|SP>, #<imm>]!. */
#define QL_LDST_PAIR_R \
{ \
QLF3(W, W, S_S), \
QLF3(X, X, S_D), \
}
/* e.g. STNP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
#define QL_LDST_PAIR_FP \
{ \
QLF3(S_S, S_S, S_S), \
QLF3(S_D, S_D, S_D), \
QLF3(S_Q, S_Q, S_Q), \
}
/* e.g. LD3 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>]. */
#define QL_SIMD_LDST \
{ \
QLF2(V_8B, NIL), \
QLF2(V_16B, NIL), \
QLF2(V_4H, NIL), \
QLF2(V_8H, NIL), \
QLF2(V_2S, NIL), \
QLF2(V_4S, NIL), \
QLF2(V_2D, NIL), \
}
/* e.g. LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>]. */
#define QL_SIMD_LDST_ANY \
{ \
QLF2(V_8B, NIL), \
QLF2(V_16B, NIL), \
QLF2(V_4H, NIL), \
QLF2(V_8H, NIL), \
QLF2(V_2S, NIL), \
QLF2(V_4S, NIL), \
QLF2(V_1D, NIL), \
QLF2(V_2D, NIL), \
}
/* e.g. LD4 {<Vt>.<T>, <Vt2a>.<T>, <Vt3a>.<T>, <Vt4a>.<T>}[<index>], [<Xn|SP>]. */
#define QL_SIMD_LDSTONE \
{ \
QLF2(S_B, NIL), \
QLF2(S_H, NIL), \
QLF2(S_S, NIL), \
QLF2(S_D, NIL), \
}
/* e.g. ADDV <V><d>, <Vn>.<T>. */
#define QL_XLANES \
{ \
QLF2(S_B, V_8B), \
QLF2(S_B, V_16B), \
QLF2(S_H, V_4H), \
QLF2(S_H, V_8H), \
QLF2(S_S, V_4S), \
}
/* e.g. FMINV <V><d>, <Vn>.<T>. */
#define QL_XLANES_FP \
{ \
QLF2(S_S, V_4S), \
}
/* e.g. FMINV <V><d>, <Vn>.<T>. */
#define QL_XLANES_FP_H \
{ \
QLF2 (S_H, V_4H), \
QLF2 (S_H, V_8H), \
}
/* e.g. SADDLV <V><d>, <Vn>.<T>. */
#define QL_XLANES_L \
{ \
QLF2(S_H, V_8B), \
QLF2(S_H, V_16B), \
QLF2(S_S, V_4H), \
QLF2(S_S, V_8H), \
QLF2(S_D, V_4S), \
}
/* e.g. MUL <Vd>.<T>, <Vn>.<T>, <Vm>.<Ts>[<index>]. */
#define QL_ELEMENT \
{ \
QLF3(V_4H, V_4H, S_H), \
QLF3(V_8H, V_8H, S_H), \
QLF3(V_2S, V_2S, S_S), \
QLF3(V_4S, V_4S, S_S), \
}
/* e.g. SMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */
#define QL_ELEMENT_L \
{ \
QLF3(V_4S, V_4H, S_H), \
QLF3(V_2D, V_2S, S_S), \
}
/* e.g. SMLAL2 <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */
#define QL_ELEMENT_L2 \
{ \
QLF3(V_4S, V_8H, S_H), \
QLF3(V_2D, V_4S, S_S), \
}
/* e.g. FMLA <V><d>, <V><n>, <Vm>.<Ts>[<index>]. */
#define QL_ELEMENT_FP \
{ \
QLF3(V_2S, V_2S, S_S), \
QLF3(V_4S, V_4S, S_S), \
QLF3(V_2D, V_2D, S_D), \
}
/* e.g. FMLA <V><d>, <V><n>, <Vm>.<Ts>[<index>]. */
#define QL_ELEMENT_FP_H \
{ \
QLF3 (V_4H, V_4H, S_H), \
QLF3 (V_8H, V_8H, S_H), \
}
/* e.g. FCMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<Ts>[<index>], #<rotate>. */
#define QL_ELEMENT_ROT \
{ \
QLF4 (V_4H, V_4H, S_H, NIL), \
QLF4 (V_8H, V_8H, S_H, NIL), \
QLF4 (V_4S, V_4S, S_S, NIL), \
}
/* e.g. MOVI <Vd>.4S, #<imm8> {, LSL #<amount>}. */
#define QL_SIMD_IMM_S0W \
{ \
QLF2(V_2S, LSL), \
QLF2(V_4S, LSL), \
}
/* e.g. MOVI <Vd>.4S, #<imm8>, MSL #<amount>. */
#define QL_SIMD_IMM_S1W \
{ \
QLF2(V_2S, MSL), \
QLF2(V_4S, MSL), \
}
/* e.g. MOVI <Vd>.4H, #<imm8> {, LSL #<amount>}. */
#define QL_SIMD_IMM_S0H \
{ \
QLF2(V_4H, LSL), \
QLF2(V_8H, LSL), \
}
/* e.g. FMOV <Vd>.<T>, #<imm>. */
#define QL_SIMD_IMM_S \
{ \
QLF2(V_2S, NIL), \
QLF2(V_4S, NIL), \
}
/* e.g. MOVI <Vd>.8B, #<imm8> {, LSL #<amount>}. */
#define QL_SIMD_IMM_B \
{ \
QLF2(V_8B, LSL), \
QLF2(V_16B, LSL), \
}
/* e.g. MOVI <Dd>, #<imm>. */
#define QL_SIMD_IMM_D \
{ \
QLF2(S_D, NIL), \
}
/* e.g. FMOV <Vd>.<T>, #<imm>. */
#define QL_SIMD_IMM_H \
{ \
QLF2 (V_4H, NIL), \
QLF2 (V_8H, NIL), \
}
/* e.g. MOVI <Vd>.2D, #<imm>. */
#define QL_SIMD_IMM_V2D \
{ \
QLF2(V_2D, NIL), \
}
/* The naming convention for SVE macros is:
OP_SVE_<operands>[_<sizes>]*
<operands> contains one character per operand, using the following scheme:
- U: the operand is unqualified (NIL).
- [BHSD]: the operand has a S_[BHSD] qualifier and the choice of
qualifier is the same for all variants. This is used for both
.[BHSD] suffixes on an SVE predicate or vector register and
scalar FPRs of the form [BHSD]<number>.
- [WX]: the operand has a [WX] qualifier and the choice of qualifier
is the same for all variants.
- [ZM]: the operand has a /[ZM] suffix and the choice of suffix
is the same for all variants.
- V: the operand has a S_[BHSD] qualifier and the choice of qualifier
is not the same for all variants.
- R: the operand has a [WX] qualifier and the choice of qualifier is
not the same for all variants.
- P: the operand has a /[ZM] suffix and the choice of suffix is not
the same for all variants.
The _<sizes>, if present, give the subset of [BHSD] that are accepted
by the V entries in <operands>. */
#define OP_SVE_B \
{ \
QLF1(S_B), \
}
#define OP_SVE_BB \
{ \
QLF2(S_B,S_B), \
}
#define OP_SVE_BBU \
{ \
QLF3(S_B,S_B,NIL), \
}
#define OP_SVE_BBB \
{ \
QLF3(S_B,S_B,S_B), \
}
#define OP_SVE_BBBU \
{ \
QLF4(S_B,S_B,S_B,NIL), \
}
#define OP_SVE_BMB \
{ \
QLF3(S_B,P_M,S_B), \
}
#define OP_SVE_BPB \
{ \
QLF3(S_B,P_Z,S_B), \
QLF3(S_B,P_M,S_B), \
}
#define OP_SVE_BUB \
{ \
QLF3(S_B,NIL,S_B), \
}
#define OP_SVE_BUBB \
{ \
QLF4(S_B,NIL,S_B,S_B), \
}
#define OP_SVE_BUU \
{ \
QLF3(S_B,NIL,NIL), \
}
#define OP_SVE_BZ \
{ \
QLF2(S_B,P_Z), \
}
#define OP_SVE_BZB \
{ \
QLF3(S_B,P_Z,S_B), \
}
#define OP_SVE_BZBB \
{ \
QLF4(S_B,P_Z,S_B,S_B), \
}
#define OP_SVE_BZU \
{ \
QLF3(S_B,P_Z,NIL), \
}
#define OP_SVE_DD \
{ \
QLF2(S_D,S_D), \
}
#define OP_SVE_DDD \
{ \
QLF3(S_D,S_D,S_D), \
}
#define OP_SVE_QQQ \
{ \
QLF3(S_Q,S_Q,S_Q), \
}
#define OP_SVE_DDDD \
{ \
QLF4(S_D,S_D,S_D,S_D), \
}
#define OP_SVE_DMD \
{ \
QLF3(S_D,P_M,S_D), \
}
#define OP_SVE_DMH \
{ \
QLF3(S_D,P_M,S_H), \
}
#define OP_SVE_DMS \
{ \
QLF3(S_D,P_M,S_S), \
}
#define OP_SVE_DU \
{ \
QLF2(S_D,NIL), \
}
#define OP_SVE_DUD \
{ \
QLF3(S_D,NIL,S_D), \
}
#define OP_SVE_DUU \
{ \
QLF3(S_D,NIL,NIL), \
}
#define OP_SVE_DUV_BHS \
{ \
QLF3(S_D,NIL,S_B), \
QLF3(S_D,NIL,S_H), \
QLF3(S_D,NIL,S_S), \
}
#define OP_SVE_DUV_BHSD \
{ \
QLF3(S_D,NIL,S_B), \
QLF3(S_D,NIL,S_H), \
QLF3(S_D,NIL,S_S), \
QLF3(S_D,NIL,S_D), \
}
#define OP_SVE_DZD \
{ \
QLF3(S_D,P_Z,S_D), \
}
#define OP_SVE_DZU \
{ \
QLF3(S_D,P_Z,NIL), \
}
#define OP_SVE_HB \
{ \
QLF2(S_H,S_B), \
}
#define OP_SVE_HHH \
{ \
QLF3(S_H,S_H,S_H), \
}
#define OP_SVE_HHHU \
{ \
QLF4(S_H,S_H,S_H,NIL), \
}
#define OP_SVE_HMH \
{ \
QLF3(S_H,P_M,S_H), \
}
#define OP_SVE_HMD \
{ \
QLF3(S_H,P_M,S_D), \
}
#define OP_SVE_HMS \
{ \
QLF3(S_H,P_M,S_S), \
}
#define OP_SVE_HU \
{ \
QLF2(S_H,NIL), \
}
#define OP_SVE_HUU \
{ \
QLF3(S_H,NIL,NIL), \
}
#define OP_SVE_HZU \
{ \
QLF3(S_H,P_Z,NIL), \
}
#define OP_SVE_RR \
{ \
QLF2(W,W), \
QLF2(X,X), \
}
#define OP_SVE_RURV_BHSD \
{ \
QLF4(W,NIL,W,S_B), \
QLF4(W,NIL,W,S_H), \
QLF4(W,NIL,W,S_S), \
QLF4(X,NIL,X,S_D), \
}
#define OP_SVE_RUV_BHSD \
{ \
QLF3(W,NIL,S_B), \
QLF3(W,NIL,S_H), \
QLF3(W,NIL,S_S), \
QLF3(X,NIL,S_D), \
}
#define OP_SVE_SMD \
{ \
QLF3(S_S,P_M,S_D), \
}
#define OP_SVE_SSS \
{ \
QLF3(S_S,S_S,S_S), \
}
#define OP_SVE_SSSU \
{ \
QLF4(S_S,S_S,S_S,NIL), \
}
#define OP_SVE_SMH \
{ \
QLF3(S_S,P_M,S_H), \
}
#define OP_SVE_SHH \
{ \
QLF3(S_S,S_H,S_H), \
}
#define OP_SVE_SMS \
{ \
QLF3(S_S,P_M,S_S), \
}
#define OP_SVE_SU \
{ \
QLF2(S_S,NIL), \
}
#define OP_SVE_SUS \
{ \
QLF3(S_S,NIL,S_S), \
}
#define OP_SVE_SUU \
{ \
QLF3(S_S,NIL,NIL), \
}
#define OP_SVE_SZS \
{ \
QLF3(S_S,P_Z,S_S), \
}
#define OP_SVE_SBB \
{ \
QLF3(S_S,S_B,S_B), \
}
#define OP_SVE_SBBU \
{ \
QLF4(S_S,S_B,S_B,NIL), \
}
#define OP_SVE_DSS \
{ \
QLF3(S_D,S_S,S_S), \
}
#define OP_SVE_DHHU \
{ \
QLF4(S_D,S_H,S_H,NIL), \
}
#define OP_SVE_SZU \
{ \
QLF3(S_S,P_Z,NIL), \
}
#define OP_SVE_UB \
{ \
QLF2(NIL,S_B), \
}
#define OP_SVE_UUD \
{ \
QLF3(NIL,NIL,S_D), \
}
#define OP_SVE_UUS \
{ \
QLF3(NIL,NIL,S_S), \
}
#define OP_SVE_VMR_BHSD \
{ \
QLF3(S_B,P_M,W), \
QLF3(S_H,P_M,W), \
QLF3(S_S,P_M,W), \
QLF3(S_D,P_M,X), \
}
#define OP_SVE_VMU_HSD \
{ \
QLF3(S_H,P_M,NIL), \
QLF3(S_S,P_M,NIL), \
QLF3(S_D,P_M,NIL), \
}
#define OP_SVE_VMVD_BHS \
{ \
QLF4(S_B,P_M,S_B,S_D), \
QLF4(S_H,P_M,S_H,S_D), \
QLF4(S_S,P_M,S_S,S_D), \
}
#define OP_SVE_VMVU_BHSD \
{ \
QLF4(S_B,P_M,S_B,NIL), \
QLF4(S_H,P_M,S_H,NIL), \
QLF4(S_S,P_M,S_S,NIL), \
QLF4(S_D,P_M,S_D,NIL), \
}
#define OP_SVE_VMVU_HSD \
{ \
QLF4(S_H,P_M,S_H,NIL), \
QLF4(S_S,P_M,S_S,NIL), \
QLF4(S_D,P_M,S_D,NIL), \
}
#define OP_SVE_VMVV_BHSD \
{ \
QLF4(S_B,P_M,S_B,S_B), \
QLF4(S_H,P_M,S_H,S_H), \
QLF4(S_S,P_M,S_S,S_S), \
QLF4(S_D,P_M,S_D,S_D), \
}
#define OP_SVE_VMVV_HSD \
{ \
QLF4(S_H,P_M,S_H,S_H), \
QLF4(S_S,P_M,S_S,S_S), \
QLF4(S_D,P_M,S_D,S_D), \
}
#define OP_SVE_VMVV_SD \
{ \
QLF4(S_S,P_M,S_S,S_S), \
QLF4(S_D,P_M,S_D,S_D), \
}
#define OP_SVE_VMVVU_HSD \
{ \
QLF5(S_H,P_M,S_H,S_H,NIL), \
QLF5(S_S,P_M,S_S,S_S,NIL), \
QLF5(S_D,P_M,S_D,S_D,NIL), \
}
#define OP_SVE_VMV_BHSD \
{ \
QLF3(S_B,P_M,S_B), \
QLF3(S_H,P_M,S_H), \
QLF3(S_S,P_M,S_S), \
QLF3(S_D,P_M,S_D), \
}
#define OP_SVE_VMV_HSD \
{ \
QLF3(S_H,P_M,S_H), \
QLF3(S_S,P_M,S_S), \
QLF3(S_D,P_M,S_D), \
}
#define OP_SVE_VMV_HSD_BHS \
{ \
QLF3(S_H,P_M,S_B), \
QLF3(S_S,P_M,S_H), \
QLF3(S_D,P_M,S_S), \
}
#define OP_SVE_VVU_HSD_BHS \
{ \
QLF3(S_H,S_B,NIL), \
QLF3(S_S,S_H,NIL), \
QLF3(S_D,S_S,NIL), \
}
#define OP_SVE_VMV_SD \
{ \
QLF3(S_S,P_M,S_S), \
QLF3(S_D,P_M,S_D), \
}
#define OP_SVE_VM_HSD \
{ \
QLF2(S_H,P_M), \
QLF2(S_S,P_M), \
QLF2(S_D,P_M), \
}
#define OP_SVE_VPU_BHSD \
{ \
QLF3(S_B,P_Z,NIL), \
QLF3(S_B,P_M,NIL), \
QLF3(S_H,P_Z,NIL), \
QLF3(S_H,P_M,NIL), \
QLF3(S_S,P_Z,NIL), \
QLF3(S_S,P_M,NIL), \
QLF3(S_D,P_Z,NIL), \
QLF3(S_D,P_M,NIL), \
}
#define OP_SVE_VPV_BHSD \
{ \
QLF3(S_B,P_Z,S_B), \
QLF3(S_B,P_M,S_B), \
QLF3(S_H,P_Z,S_H), \
QLF3(S_H,P_M,S_H), \
QLF3(S_S,P_Z,S_S), \
QLF3(S_S,P_M,S_S), \
QLF3(S_D,P_Z,S_D), \
QLF3(S_D,P_M,S_D), \
}
#define OP_SVE_VRR_BHSD \
{ \
QLF3(S_B,W,W), \
QLF3(S_H,W,W), \
QLF3(S_S,W,W), \
QLF3(S_D,X,X), \
}
#define OP_SVE_VRU_BHSD \
{ \
QLF3(S_B,W,NIL), \
QLF3(S_H,W,NIL), \
QLF3(S_S,W,NIL), \
QLF3(S_D,X,NIL), \
}
#define OP_SVE_VR_BHSD \
{ \
QLF2(S_B,W), \
QLF2(S_H,W), \
QLF2(S_S,W), \
QLF2(S_D,X), \
}
#define OP_SVE_VUR_BHSD \
{ \
QLF3(S_B,NIL,W), \
QLF3(S_H,NIL,W), \
QLF3(S_S,NIL,W), \
QLF3(S_D,NIL,X), \
}
#define OP_SVE_VUU_BHSD \
{ \
QLF3(S_B,NIL,NIL), \
QLF3(S_H,NIL,NIL), \
QLF3(S_S,NIL,NIL), \
QLF3(S_D,NIL,NIL), \
}
#define OP_SVE_VUVV_BHSD \
{ \
QLF4(S_B,NIL,S_B,S_B), \
QLF4(S_H,NIL,S_H,S_H), \
QLF4(S_S,NIL,S_S,S_S), \
QLF4(S_D,NIL,S_D,S_D), \
}
#define OP_SVE_VUVV_HSD \
{ \
QLF4(S_H,NIL,S_H,S_H), \
QLF4(S_S,NIL,S_S,S_S), \
QLF4(S_D,NIL,S_D,S_D), \
}
#define OP_SVE_VUV_BHSD \
{ \
QLF3(S_B,NIL,S_B), \
QLF3(S_H,NIL,S_H), \
QLF3(S_S,NIL,S_S), \
QLF3(S_D,NIL,S_D), \
}
#define OP_SVE_VUV_HSD \
{ \
QLF3(S_H,NIL,S_H), \
QLF3(S_S,NIL,S_S), \
QLF3(S_D,NIL,S_D), \
}
#define OP_SVE_VUV_SD \
{ \
QLF3(S_S,NIL,S_S), \
QLF3(S_D,NIL,S_D), \
}
#define OP_SVE_VU_BHSD \
{ \
QLF2(S_B,NIL), \
QLF2(S_H,NIL), \
QLF2(S_S,NIL), \
QLF2(S_D,NIL), \
}
/* One entry per operand: H/S/D vector or FPR plus one unqualified
   operand.  (This macro was accidentally defined twice with identical
   bodies; the duplicate has been consolidated into one definition.)  */
#define OP_SVE_VU_HSD \
{ \
QLF2(S_H,NIL), \
QLF2(S_S,NIL), \
QLF2(S_D,NIL), \
}
/* SVE qualifier lists for vector-only forms.  "V" = SVE vector
   register, "U" = unqualified operand (NIL).  Where the macro name
   carries two size lists (e.g. _SD_BH), the first applies to the
   destination and the second to the remaining source operands.  */
/* The trailing operand is always a .D vector.  */
#define OP_SVE_VVD_BHS \
{ \
QLF3(S_B,S_B,S_D), \
QLF3(S_H,S_H,S_D), \
QLF3(S_S,S_S,S_D), \
}
#define OP_SVE_VVU_BHSD \
{ \
QLF3(S_B,S_B,NIL), \
QLF3(S_H,S_H,NIL), \
QLF3(S_S,S_S,NIL), \
QLF3(S_D,S_D,NIL), \
}
#define OP_SVE_VVVU_H \
{ \
QLF4(S_H,S_H,S_H,NIL), \
}
#define OP_SVE_VVVU_S \
{ \
QLF4(S_S,S_S,S_S,NIL), \
}
/* Long forms: destination two sizes wider than the sources.  */
#define OP_SVE_VVVU_SD_BH \
{ \
QLF4(S_S,S_B,S_B,NIL), \
QLF4(S_D,S_H,S_H,NIL), \
}
#define OP_SVE_VVVU_HSD \
{ \
QLF4(S_H,S_H,S_H,NIL), \
QLF4(S_S,S_S,S_S,NIL), \
QLF4(S_D,S_D,S_D,NIL), \
}
#define OP_SVE_VVVU_BHSD \
{ \
QLF4(S_B,S_B,S_B,NIL), \
QLF4(S_H,S_H,S_H,NIL), \
QLF4(S_S,S_S,S_S,NIL), \
QLF4(S_D,S_D,S_D,NIL), \
}
#define OP_SVE_VVV_BHSD \
{ \
QLF3(S_B,S_B,S_B), \
QLF3(S_H,S_H,S_H), \
QLF3(S_S,S_S,S_S), \
QLF3(S_D,S_D,S_D), \
}
#define OP_SVE_VVV_D \
{ \
QLF3(S_D,S_D,S_D), \
}
/* .D destination from .H sources.  */
#define OP_SVE_VVV_D_H \
{ \
QLF3(S_D,S_H,S_H), \
}
#define OP_SVE_VVV_H \
{ \
QLF3(S_H,S_H,S_H), \
}
#define OP_SVE_VVV_HSD \
{ \
QLF3(S_H,S_H,S_H), \
QLF3(S_S,S_S,S_S), \
QLF3(S_D,S_D,S_D), \
}
#define OP_SVE_VVV_S \
{ \
QLF3(S_S,S_S,S_S), \
}
/* Destination twice the source width: .H from .B, .D from .S.  */
#define OP_SVE_VVV_HD_BS \
{ \
QLF3(S_H,S_B,S_B), \
QLF3(S_D,S_S,S_S), \
}
/* .S destination from .B sources.  */
#define OP_SVE_VVV_S_B \
{ \
QLF3(S_S,S_B,S_B), \
}
/* .Q destination from .D sources.  */
#define OP_SVE_VVV_Q_D \
{ \
QLF3(S_Q,S_D,S_D), \
}
/* Long forms: destination one size wider than both sources.  */
#define OP_SVE_VVV_HSD_BHS \
{ \
QLF3(S_H,S_B,S_B), \
QLF3(S_S,S_H,S_H), \
QLF3(S_D,S_S,S_S), \
}
/* Wide forms: only the second source is one size narrower.  */
#define OP_SVE_VVV_HSD_BHS2 \
{ \
QLF3(S_H,S_H,S_B), \
QLF3(S_S,S_S,S_H), \
QLF3(S_D,S_D,S_S), \
}
/* Narrowing forms: destination one size narrower than the sources.  */
#define OP_SVE_VVV_BHS_HSD \
{ \
QLF3(S_B,S_H,S_H), \
QLF3(S_H,S_S,S_S), \
QLF3(S_S,S_D,S_D), \
}
/* Two-operand narrowing form.  */
#define OP_SVE_VV_BHS_HSD \
{ \
QLF2(S_B,S_H), \
QLF2(S_H,S_S), \
QLF2(S_S,S_D), \
}
/* Long forms: .S from .B, .D from .H.  */
#define OP_SVE_VVV_SD_BH \
{ \
QLF3(S_S,S_B,S_B), \
QLF3(S_D,S_H,S_H), \
}
#define OP_SVE_VVV_SD \
{ \
QLF3(S_S,S_S,S_S), \
QLF3(S_D,S_D,S_D), \
}
/* Two-operand SVE vector qualifier lists ("V" = vector register,
   "U" = unqualified/NIL operand); a second size list in the name
   gives the source element sizes when they differ from the
   destination's.  */
#define OP_SVE_VV_BHSD \
{ \
QLF2(S_B,S_B), \
QLF2(S_H,S_H), \
QLF2(S_S,S_S), \
QLF2(S_D,S_D), \
}
#define OP_SVE_VV_BHSDQ \
{ \
QLF2(S_B,S_B), \
QLF2(S_H,S_H), \
QLF2(S_S,S_S), \
QLF2(S_D,S_D), \
QLF2(S_Q,S_Q), \
}
#define OP_SVE_VV_HSD \
{ \
QLF2(S_H,S_H), \
QLF2(S_S,S_S), \
QLF2(S_D,S_D), \
}
/* Source one size wider than the destination, trailing NIL operand.  */
#define OP_SVE_VVU_BHS_HSD \
{ \
QLF3(S_B,S_H,NIL), \
QLF3(S_H,S_S,NIL), \
QLF3(S_S,S_D,NIL), \
}
/* Destination one size wider than the source.  */
#define OP_SVE_VV_HSD_BHS \
{ \
QLF2(S_H,S_B), \
QLF2(S_S,S_H), \
QLF2(S_D,S_S), \
}
#define OP_SVE_VV_SD \
{ \
QLF2(S_S,S_S), \
QLF2(S_D,S_D), \
}
/* Qualifier lists mixing SVE vectors with general registers and
   predicates: V = SVE vector register (element size from the trailing
   size list), W = 32-bit general register, X = 64-bit general
   register, R = general register sized to the element width,
   Z = zeroing-only governing predicate (P_Z), U = unqualified
   operand (NIL).  */
#define OP_SVE_VWW_BHSD \
{ \
QLF3(S_B,W,W), \
QLF3(S_H,W,W), \
QLF3(S_S,W,W), \
QLF3(S_D,W,W), \
}
#define OP_SVE_VXX_BHSD \
{ \
QLF3(S_B,X,X), \
QLF3(S_H,X,X), \
QLF3(S_S,X,X), \
QLF3(S_D,X,X), \
}
/* The trailing operand is always a .D vector.  */
#define OP_SVE_VZVD_BHS \
{ \
QLF4(S_B,P_Z,S_B,S_D), \
QLF4(S_H,P_Z,S_H,S_D), \
QLF4(S_S,P_Z,S_S,S_D), \
}
#define OP_SVE_VZVU_BHSD \
{ \
QLF4(S_B,P_Z,S_B,NIL), \
QLF4(S_H,P_Z,S_H,NIL), \
QLF4(S_S,P_Z,S_S,NIL), \
QLF4(S_D,P_Z,S_D,NIL), \
}
#define OP_SVE_VZVV_BHSD \
{ \
QLF4(S_B,P_Z,S_B,S_B), \
QLF4(S_H,P_Z,S_H,S_H), \
QLF4(S_S,P_Z,S_S,S_S), \
QLF4(S_D,P_Z,S_D,S_D), \
}
#define OP_SVE_VZVV_HSD \
{ \
QLF4(S_H,P_Z,S_H,S_H), \
QLF4(S_S,P_Z,S_S,S_S), \
QLF4(S_D,P_Z,S_D,S_D), \
}
#define OP_SVE_VZVV_SD \
{ \
QLF4(S_S,P_Z,S_S,S_S), \
QLF4(S_D,P_Z,S_D,S_D), \
}
#define OP_SVE_VZVV_BH \
{ \
QLF4(S_B,P_Z,S_B,S_B), \
QLF4(S_H,P_Z,S_H,S_H), \
}
#define OP_SVE_VZV_SD \
{ \
QLF3(S_S,P_Z,S_S), \
QLF3(S_D,P_Z,S_D), \
}
#define OP_SVE_VZV_HSD \
{ \
QLF3(S_H,P_Z,S_H), \
QLF3(S_S,P_Z,S_S), \
QLF3(S_D,P_Z,S_D), \
}
#define OP_SVE_V_HSD \
{ \
QLF1(S_H), \
QLF1(S_S), \
QLF1(S_D), \
}
#define OP_SVE_WU \
{ \
QLF2(W,NIL), \
}
#define OP_SVE_WV_BHSD \
{ \
QLF2(W,S_B), \
QLF2(W,S_H), \
QLF2(W,S_S), \
QLF2(W,S_D), \
}
#define OP_SVE_XU \
{ \
QLF2(X,NIL), \
}
#define OP_SVE_XUV_BHSD \
{ \
QLF3(X,NIL,S_B), \
QLF3(X,NIL,S_H), \
QLF3(X,NIL,S_S), \
QLF3(X,NIL,S_D), \
}
#define OP_SVE_XVW_BHSD \
{ \
QLF3(X,S_B,W), \
QLF3(X,S_H,W), \
QLF3(X,S_S,W), \
QLF3(X,S_D,W), \
}
#define OP_SVE_XV_BHSD \
{ \
QLF2(X,S_B), \
QLF2(X,S_H), \
QLF2(X,S_S), \
QLF2(X,S_D), \
}
#define OP_SVE_XWU \
{ \
QLF3(X,W,NIL), \
}
#define OP_SVE_XXU \
{ \
QLF3(X,X,NIL), \
}
/* e.g. UDOT <Vd>.2S, <Vn>.8B, <Vm>.8B. */
#define QL_V3DOT \
{ \
QLF3(V_2S, V_8B, V_8B), \
QLF3(V_4S, V_16B, V_16B),\
}
/* e.g. UDOT <Vd>.2S, <Vn>.8B, <Vm>.4B[<index>]. */
#define QL_V2DOT \
{ \
QLF3(V_2S, V_8B, S_4B),\
QLF3(V_4S, V_16B, S_4B),\
}
/* e.g. SHA512H <Qd>, <Qn>, <Vm>.2D. */
#define QL_SHA512UPT \
{ \
QLF3(S_Q, S_Q, V_2D), \
}
/* e.g. SHA512SU0 <Vd>.2D, <Vn>.2D. */
#define QL_V2SAME2D \
{ \
QLF2(V_2D, V_2D), \
}
/* e.g. SHA512SU1 <Vd>.2D, <Vn>.2D, <Vm>.2D. */
#define QL_V3SAME2D \
{ \
QLF3(V_2D, V_2D, V_2D), \
}
/* e.g. EOR3 <Vd>.16B, <Vn>.16B, <Vm>.16B, <Va>.16B. */
#define QL_V4SAME16B \
{ \
QLF4(V_16B, V_16B, V_16B, V_16B), \
}
/* e.g. SM3SS1 <Vd>.4S, <Vn>.4S, <Vm>.4S, <Va>.4S. */
#define QL_V4SAME4S \
{ \
QLF4(V_4S, V_4S, V_4S, V_4S), \
}
/* e.g. XAR <Vd>.2D, <Vn>.2D, <Vm>.2D, #<imm6>. */
#define QL_XAR \
{ \
QLF4(V_2D, V_2D, V_2D, imm_0_63), \
}
/* e.g. SM3TT1A <Vd>.4S, <Vn>.4S, <Vm>.S[<imm2>]. */
#define QL_SM3TT \
{ \
QLF3(V_4S, V_4S, S_S),\
}
/* e.g. FMLAL <Vd>.2S, <Vn>.2H, <Vm>.2H. */
#define QL_V3FML2S \
{ \
QLF3(V_2S, V_2H, V_2H),\
}
/* e.g. FMLAL <Vd>.4S, <Vn>.4H, <Vm>.4H. */
#define QL_V3FML4S \
{ \
QLF3(V_4S, V_4H, V_4H),\
}
/* e.g. FMLAL <Vd>.2S, <Vn>.2H, <Vm>.H[<index>]. */
#define QL_V2FML2S \
{ \
QLF3(V_2S, V_2H, S_H),\
}
/* e.g. FMLAL <Vd>.4S, <Vn>.4H, <Vm>.H[<index>]. */
#define QL_V2FML4S \
{ \
QLF3(V_4S, V_4H, S_H),\
}
/* e.g. RMIF <Xn>, #<shift>, #<mask>. */
#define QL_RMIF \
{ \
QLF3(X, imm_0_63, imm_0_15),\
}
/* e.g. SETF8 <Wn>. */
#define QL_SETF \
{ \
QLF1(W), \
}
/* e.g. STLURB <Wt>, [<Xn|SP>{,#<simm>}]. */
#define QL_STLW \
{ \
QLF2(W, NIL), \
}
/* e.g. STLURB <Xt>, [<Xn|SP>{,#<simm>}]. */
#define QL_STLX \
{ \
QLF2(X, NIL), \
}
/* e.g. BFDOT <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Tb>. */
#define QL_BFDOT64 \
{ \
QLF3(V_2S, V_4H, V_4H),\
QLF3(V_4S, V_8H, V_8H),\
}
/* e.g. BFDOT <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.2H[<index>]. */
#define QL_BFDOT64I \
{ \
QLF3(V_2S, V_4H, S_2H),\
QLF3(V_4S, V_8H, S_2H),\
}
/* e.g. SMMLA <Vd>.4S, <Vn>.16B, <Vm>.16B. */
#define QL_MMLA64 \
{ \
QLF3(V_4S, V_16B, V_16B),\
}
/* e.g. BFMMLA <Vd>.4S, <Vn>.8H, <Vm>.8H. */
#define QL_BFMMLA \
{ \
QLF3(V_4S, V_8H, V_8H),\
}
/* e.g. BFCVT <Hd>, <Sn>. */
#define QL_BFCVT64 \
{ \
QLF2(S_H,S_S), \
}
/* e.g. BFCVTN <Vd>.4H, <Vn>.4S. */
#define QL_BFCVTN64 \
{ \
QLF2(V_4H,V_4S), \
}
/* e.g. BFCVTN2 <Vd>.8H, <Vn>.4S. */
#define QL_BFCVTN2_64 \
{ \
QLF2(V_8H,V_4S), \
}
/* e.g. BFMLAL2 <Vd>.4S, <Vn>.8H, <Vm>.H[<index>]. */
#define QL_V3BFML4S \
{ \
QLF3(V_4S, V_8H, S_H), \
}
/* Opcode table.

   The feature sets below name the architecture extensions that the
   instructions in the table require; entries reference them via the
   short pointer aliases defined after this list.  */
static const aarch64_feature_set aarch64_feature_v8 =
AARCH64_FEATURE (AARCH64_FEATURE_V8, 0);
static const aarch64_feature_set aarch64_feature_fp =
AARCH64_FEATURE (AARCH64_FEATURE_FP, 0);
static const aarch64_feature_set aarch64_feature_simd =
AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0);
static const aarch64_feature_set aarch64_feature_crc =
AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0);
static const aarch64_feature_set aarch64_feature_lse =
AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0);
static const aarch64_feature_set aarch64_feature_lor =
AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0);
static const aarch64_feature_set aarch64_feature_rdma =
AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0);
static const aarch64_feature_set aarch64_feature_v8_2 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_2, 0);
static const aarch64_feature_set aarch64_feature_fp_f16 =
AARCH64_FEATURE (AARCH64_FEATURE_F16 | AARCH64_FEATURE_FP, 0);
static const aarch64_feature_set aarch64_feature_simd_f16 =
AARCH64_FEATURE (AARCH64_FEATURE_F16 | AARCH64_FEATURE_SIMD, 0);
static const aarch64_feature_set aarch64_feature_sve =
AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0);
static const aarch64_feature_set aarch64_feature_v8_3 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_3, 0);
static const aarch64_feature_set aarch64_feature_fp_v8_3 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_3 | AARCH64_FEATURE_FP, 0);
static const aarch64_feature_set aarch64_feature_pac =
AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0);
static const aarch64_feature_set aarch64_feature_compnum =
AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0);
static const aarch64_feature_set aarch64_feature_rcpc =
AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0);
static const aarch64_feature_set aarch64_feature_dotprod =
AARCH64_FEATURE (AARCH64_FEATURE_V8_2 | AARCH64_FEATURE_DOTPROD, 0);
static const aarch64_feature_set aarch64_feature_sha2 =
AARCH64_FEATURE (AARCH64_FEATURE_V8 | AARCH64_FEATURE_SHA2, 0);
static const aarch64_feature_set aarch64_feature_aes =
AARCH64_FEATURE (AARCH64_FEATURE_V8 | AARCH64_FEATURE_AES, 0);
static const aarch64_feature_set aarch64_feature_v8_4 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_4, 0);
static const aarch64_feature_set aarch64_feature_sm4 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_2 | AARCH64_FEATURE_SM4
| AARCH64_FEATURE_SIMD | AARCH64_FEATURE_FP, 0);
static const aarch64_feature_set aarch64_feature_sha3 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_2 | AARCH64_FEATURE_SHA2
| AARCH64_FEATURE_SHA3 | AARCH64_FEATURE_SIMD | AARCH64_FEATURE_FP, 0);
static const aarch64_feature_set aarch64_feature_fp_16_v8_2 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_2 | AARCH64_FEATURE_F16_FML
| AARCH64_FEATURE_F16 | AARCH64_FEATURE_FP, 0);
static const aarch64_feature_set aarch64_feature_v8_5 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_5, 0);
static const aarch64_feature_set aarch64_feature_flagmanip =
AARCH64_FEATURE (AARCH64_FEATURE_FLAGMANIP, 0);
static const aarch64_feature_set aarch64_feature_frintts =
AARCH64_FEATURE (AARCH64_FEATURE_FRINTTS, 0);
static const aarch64_feature_set aarch64_feature_sb =
AARCH64_FEATURE (AARCH64_FEATURE_SB, 0);
static const aarch64_feature_set aarch64_feature_predres =
AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0);
static const aarch64_feature_set aarch64_feature_memtag =
AARCH64_FEATURE (AARCH64_FEATURE_V8_5 | AARCH64_FEATURE_MEMTAG, 0);
static const aarch64_feature_set aarch64_feature_bfloat16 =
AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0);
static const aarch64_feature_set aarch64_feature_bfloat16_sve =
AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16 | AARCH64_FEATURE_SVE, 0);
static const aarch64_feature_set aarch64_feature_tme =
AARCH64_FEATURE (AARCH64_FEATURE_TME, 0);
static const aarch64_feature_set aarch64_feature_sve2 =
AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0);
static const aarch64_feature_set aarch64_feature_sve2aes =
AARCH64_FEATURE (AARCH64_FEATURE_SVE2 | AARCH64_FEATURE_SVE2_AES, 0);
static const aarch64_feature_set aarch64_feature_sve2sha3 =
AARCH64_FEATURE (AARCH64_FEATURE_SVE2 | AARCH64_FEATURE_SVE2_SHA3, 0);
static const aarch64_feature_set aarch64_feature_sve2sm4 =
AARCH64_FEATURE (AARCH64_FEATURE_SVE2 | AARCH64_FEATURE_SVE2_SM4, 0);
static const aarch64_feature_set aarch64_feature_sve2bitperm =
AARCH64_FEATURE (AARCH64_FEATURE_SVE2 | AARCH64_FEATURE_SVE2_BITPERM, 0);
static const aarch64_feature_set aarch64_feature_v8_6 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_6, 0);
static const aarch64_feature_set aarch64_feature_v8_7 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_7, 0);
static const aarch64_feature_set aarch64_feature_i8mm =
AARCH64_FEATURE (AARCH64_FEATURE_V8_2 | AARCH64_FEATURE_I8MM, 0);
static const aarch64_feature_set aarch64_feature_i8mm_sve =
AARCH64_FEATURE (AARCH64_FEATURE_V8_2 | AARCH64_FEATURE_I8MM
| AARCH64_FEATURE_SVE, 0);
static const aarch64_feature_set aarch64_feature_f32mm_sve =
AARCH64_FEATURE (AARCH64_FEATURE_V8_2 | AARCH64_FEATURE_F32MM
| AARCH64_FEATURE_SVE, 0);
static const aarch64_feature_set aarch64_feature_f64mm_sve =
AARCH64_FEATURE (AARCH64_FEATURE_V8_2 | AARCH64_FEATURE_F64MM
| AARCH64_FEATURE_SVE, 0);
static const aarch64_feature_set aarch64_feature_v8_r =
AARCH64_FEATURE (AARCH64_FEATURE_V8_R, 0);
static const aarch64_feature_set aarch64_feature_ls64 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_6 | AARCH64_FEATURE_LS64, 0);
static const aarch64_feature_set aarch64_feature_flagm =
AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0);
/* Short aliases for pointers to the feature sets defined above, as
   used in the instruction description macros that follow.  */
#define CORE &aarch64_feature_v8
#define FP &aarch64_feature_fp
#define SIMD &aarch64_feature_simd
#define CRC &aarch64_feature_crc
#define LSE &aarch64_feature_lse
#define LOR &aarch64_feature_lor
#define RDMA &aarch64_feature_rdma
#define FP_F16 &aarch64_feature_fp_f16
#define SIMD_F16 &aarch64_feature_simd_f16
#define ARMV8_2 &aarch64_feature_v8_2
#define SVE &aarch64_feature_sve
#define ARMV8_3 &aarch64_feature_v8_3
#define FP_V8_3 &aarch64_feature_fp_v8_3
#define PAC &aarch64_feature_pac
#define COMPNUM &aarch64_feature_compnum
#define RCPC &aarch64_feature_rcpc
#define SHA2 &aarch64_feature_sha2
#define AES &aarch64_feature_aes
#define ARMV8_4 &aarch64_feature_v8_4
#define SHA3 &aarch64_feature_sha3
#define SM4 &aarch64_feature_sm4
#define FP_F16_V8_2 &aarch64_feature_fp_16_v8_2
#define DOTPROD &aarch64_feature_dotprod
#define ARMV8_5 &aarch64_feature_v8_5
#define FLAGMANIP &aarch64_feature_flagmanip
#define FRINTTS &aarch64_feature_frintts
#define SB &aarch64_feature_sb
#define PREDRES &aarch64_feature_predres
#define MEMTAG &aarch64_feature_memtag
#define TME &aarch64_feature_tme
#define SVE2 &aarch64_feature_sve2
#define SVE2_AES &aarch64_feature_sve2aes
#define SVE2_SHA3 &aarch64_feature_sve2sha3
#define SVE2_SM4 &aarch64_feature_sve2sm4
#define SVE2_BITPERM &aarch64_feature_sve2bitperm
#define ARMV8_6 &aarch64_feature_v8_6
#define ARMV8_6_SVE &aarch64_feature_v8_6
#define BFLOAT16_SVE &aarch64_feature_bfloat16_sve
#define BFLOAT16 &aarch64_feature_bfloat16
#define I8MM_SVE &aarch64_feature_i8mm_sve
#define F32MM_SVE &aarch64_feature_f32mm_sve
#define F64MM_SVE &aarch64_feature_f64mm_sve
#define I8MM &aarch64_feature_i8mm
#define ARMV8_R &aarch64_feature_v8_r
#define ARMV8_7 &aarch64_feature_v8_7
#define LS64 &aarch64_feature_ls64
#define FLAGM &aarch64_feature_flagm
/* Instruction entry constructors.  Each macro expands to one
   aarch64_opcode initializer whose fields are, in order: mnemonic,
   opcode value, decode mask, instruction class, op enum (0 when
   unused), required feature set, operand list, qualifier-list table,
   flags, operand constraints, tied-operand index, and verifier hook
   (NULL unless supplied).  The SVE/SVE2 constructors additionally OR
   F_STRICT into the flags to require exact qualifier matching.  */
#define CORE_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, OP, CORE, OPS, QUALS, FLAGS, 0, 0, NULL }
#define __FP_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, OP, FP, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SIMD_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, OP, SIMD, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _SIMD_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,VERIFIER) \
{ NAME, OPCODE, MASK, CLASS, OP, SIMD, OPS, QUALS, FLAGS, 0, 0, VERIFIER }
#define _CRC_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, CRC, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _LSE_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, LSE, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _LOR_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, LOR, OPS, QUALS, FLAGS, 0, 0, NULL }
#define RDMA_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, RDMA, OPS, QUALS, FLAGS, 0, 0, NULL }
#define FF16_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, FP_F16, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SF16_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, SIMD_F16, OPS, QUALS, FLAGS, 0, 0, NULL }
#define V8_2_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, OP, ARMV8_2, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _SVE_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
{ NAME, OPCODE, MASK, CLASS, OP, SVE, OPS, QUALS, \
FLAGS | F_STRICT, 0, TIED, NULL }
#define _SVE_INSNC(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,CONSTRAINTS,TIED) \
{ NAME, OPCODE, MASK, CLASS, OP, SVE, OPS, QUALS, \
FLAGS | F_STRICT, CONSTRAINTS, TIED, NULL }
#define V8_3_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, ARMV8_3, OPS, QUALS, FLAGS, 0, 0, NULL }
#define PAC_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, PAC, OPS, QUALS, FLAGS, 0, 0, NULL }
#define CNUM_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, OP, COMPNUM, OPS, QUALS, FLAGS, 0, 0, NULL }
#define RCPC_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, RCPC, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SHA2_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, SHA2, OPS, QUALS, FLAGS, 0, 0, NULL }
#define AES_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, AES, OPS, QUALS, FLAGS, 0, 0, NULL }
#define V8_4_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, ARMV8_4, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SHA3_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, SHA3, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SM4_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, SM4, OPS, QUALS, FLAGS, 0, 0, NULL }
#define FP16_V8_2_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, FP_F16_V8_2, OPS, QUALS, FLAGS, 0, 0, NULL }
#define DOT_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, DOTPROD, OPS, QUALS, FLAGS, 0, 0, NULL }
#define V8_5_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, ARMV8_5, OPS, QUALS, FLAGS, 0, 0, NULL }
#define FLAGMANIP_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, FLAGMANIP, OPS, QUALS, FLAGS, 0, 0, NULL }
#define FRINTTS_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, FRINTTS, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SB_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, SB, OPS, QUALS, FLAGS, 0, 0, NULL }
#define PREDRES_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, PREDRES, OPS, QUALS, FLAGS, 0, 0, NULL }
#define MEMTAG_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, MEMTAG, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _TME_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, OP, TME, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SVE2_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
{ NAME, OPCODE, MASK, CLASS, OP, SVE2, OPS, QUALS, \
FLAGS | F_STRICT, 0, TIED, NULL }
#define SVE2_INSNC(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,CONSTRAINTS,TIED) \
{ NAME, OPCODE, MASK, CLASS, OP, SVE2, OPS, QUALS, \
FLAGS | F_STRICT, CONSTRAINTS, TIED, NULL }
#define SVE2AES_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
{ NAME, OPCODE, MASK, CLASS, OP, SVE2_AES, OPS, QUALS, \
FLAGS | F_STRICT, 0, TIED, NULL }
#define SVE2SHA3_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
{ NAME, OPCODE, MASK, CLASS, OP, SVE2_SHA3, OPS, QUALS, \
FLAGS | F_STRICT, 0, TIED, NULL }
#define SVE2SM4_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
{ NAME, OPCODE, MASK, CLASS, OP, SVE2_SM4, OPS, QUALS, \
FLAGS | F_STRICT, 0, TIED, NULL }
#define SVE2SM4_INSNC(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,CONSTRAINTS,TIED) \
{ NAME, OPCODE, MASK, CLASS, OP, SVE2_SM4, OPS, QUALS, \
FLAGS | F_STRICT, CONSTRAINTS, TIED, NULL }
#define SVE2BITPERM_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
{ NAME, OPCODE, MASK, CLASS, OP, SVE2_BITPERM, OPS, QUALS, \
FLAGS | F_STRICT, 0, TIED, NULL }
#define V8_6_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, ARMV8_6, OPS, QUALS, FLAGS, 0, 0, NULL }
#define BFLOAT16_SVE_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, BFLOAT16_SVE, OPS, QUALS, FLAGS, 0, 0, NULL }
#define BFLOAT16_SVE_INSNC(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS, CONSTRAINTS, TIED) \
{ NAME, OPCODE, MASK, CLASS, 0, BFLOAT16_SVE, OPS, QUALS, FLAGS | F_STRICT, \
CONSTRAINTS, TIED, NULL }
#define BFLOAT16_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, BFLOAT16, OPS, QUALS, FLAGS, 0, 0, NULL }
/* NOTE(review): unlike the other SVE constructors, the *MATMUL_SVE
   macros below do not OR F_STRICT into FLAGS -- confirm this is
   intentional.  */
#define INT8MATMUL_SVE_INSNC(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS, CONSTRAINTS, TIED) \
{ NAME, OPCODE, MASK, CLASS, 0, I8MM_SVE, OPS, QUALS, FLAGS, CONSTRAINTS, TIED, NULL }
#define INT8MATMUL_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, I8MM, OPS, QUALS, FLAGS, 0, 0, NULL }
#define F64MATMUL_SVE_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS,TIED) \
{ NAME, OPCODE, MASK, CLASS, 0, F64MM_SVE, OPS, QUALS, FLAGS, 0, TIED, NULL }
#define F64MATMUL_SVE_INSNC(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS, CONSTRAINTS, TIED) \
{ NAME, OPCODE, MASK, CLASS, 0, F64MM_SVE, OPS, QUALS, FLAGS, CONSTRAINTS, TIED, NULL }
#define F32MATMUL_SVE_INSNC(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS, CONSTRAINTS, TIED) \
{ NAME, OPCODE, MASK, CLASS, 0, F32MM_SVE, OPS, QUALS, FLAGS, CONSTRAINTS, TIED, NULL }
#define V8_R_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, ARMV8_R, OPS, QUALS, FLAGS, 0, 0, NULL }
#define V8_7_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, ARMV8_7, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _LS64_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, LS64, OPS, QUALS, FLAGS, 0, 0, NULL }
#define FLAGM_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, 0, FLAGM, OPS, QUALS, FLAGS, 0, 0, NULL }
const struct aarch64_opcode aarch64_opcode_table[] =
{
/* Add/subtract (with carry). */
CORE_INSN ("adc", 0x1a000000, 0x7fe0fc00, addsub_carry, 0, OP3 (Rd, Rn, Rm), QL_I3SAMER, F_SF),
CORE_INSN ("adcs", 0x3a000000, 0x7fe0fc00, addsub_carry, 0, OP3 (Rd, Rn, Rm), QL_I3SAMER, F_SF),
CORE_INSN ("sbc", 0x5a000000, 0x7fe0fc00, addsub_carry, 0, OP3 (Rd, Rn, Rm), QL_I3SAMER, F_HAS_ALIAS | F_SF),
CORE_INSN ("ngc", 0x5a0003e0, 0x7fe0ffe0, addsub_carry, 0, OP2 (Rd, Rm), QL_I2SAME, F_ALIAS | F_SF),
CORE_INSN ("sbcs", 0x7a000000, 0x7fe0fc00, addsub_carry, 0, OP3 (Rd, Rn, Rm), QL_I3SAMER, F_HAS_ALIAS | F_SF),
CORE_INSN ("ngcs", 0x7a0003e0, 0x7fe0ffe0, addsub_carry, 0, OP2 (Rd, Rm), QL_I2SAME, F_ALIAS | F_SF),
/* Add/subtract (extended register). */
CORE_INSN ("add", 0x0b200000, 0x7fe00000, addsub_ext, 0, OP3 (Rd_SP, Rn_SP, Rm_EXT), QL_I3_EXT, F_SF),
CORE_INSN ("adds", 0x2b200000, 0x7fe00000, addsub_ext, 0, OP3 (Rd, Rn_SP, Rm_EXT), QL_I3_EXT, F_HAS_ALIAS | F_SF),
CORE_INSN ("cmn", 0x2b20001f, 0x7fe0001f, addsub_ext, 0, OP2 (Rn_SP, Rm_EXT), QL_I2_EXT, F_ALIAS | F_SF),
CORE_INSN ("sub", 0x4b200000, 0x7fe00000, addsub_ext, 0, OP3 (Rd_SP, Rn_SP, Rm_EXT), QL_I3_EXT, F_SF),
CORE_INSN ("subs", 0x6b200000, 0x7fe00000, addsub_ext, 0, OP3 (Rd, Rn_SP, Rm_EXT), QL_I3_EXT, F_HAS_ALIAS | F_SF),
CORE_INSN ("cmp", 0x6b20001f, 0x7fe0001f, addsub_ext, 0, OP2 (Rn_SP, Rm_EXT), QL_I2_EXT, F_ALIAS | F_SF),
/* Add/subtract (immediate). */
CORE_INSN ("add", 0x11000000, 0x7f000000, addsub_imm, OP_ADD, OP3 (Rd_SP, Rn_SP, AIMM), QL_R2NIL, F_HAS_ALIAS | F_SF),
CORE_INSN ("mov", 0x11000000, 0x7ffffc00, addsub_imm, 0, OP2 (Rd_SP, Rn_SP), QL_I2SP, F_ALIAS | F_SF),
CORE_INSN ("adds", 0x31000000, 0x7f000000, addsub_imm, 0, OP3 (Rd, Rn_SP, AIMM), QL_R2NIL, F_HAS_ALIAS | F_SF),
CORE_INSN ("cmn", 0x3100001f, 0x7f00001f, addsub_imm, 0, OP2 (Rn_SP, AIMM), QL_R1NIL, F_ALIAS | F_SF),
CORE_INSN ("sub", 0x51000000, 0x7f000000, addsub_imm, 0, OP3 (Rd_SP, Rn_SP, AIMM), QL_R2NIL, F_SF),
CORE_INSN ("subs", 0x71000000, 0x7f000000, addsub_imm, 0, OP3 (Rd, Rn_SP, AIMM), QL_R2NIL, F_HAS_ALIAS | F_SF),
CORE_INSN ("cmp", 0x7100001f, 0x7f00001f, addsub_imm, 0, OP2 (Rn_SP, AIMM), QL_R1NIL, F_ALIAS | F_SF),
MEMTAG_INSN ("addg", 0x91800000, 0xffc0c000, addsub_imm, OP4 (Rd_SP, Rn_SP, UIMM10, UIMM4_ADDG), QL_ADDG, 0),
MEMTAG_INSN ("subg", 0xd1800000, 0xffc0c000, addsub_imm, OP4 (Rd_SP, Rn_SP, UIMM10, UIMM4_ADDG), QL_ADDG, 0),
/* Add/subtract (shifted register). */
CORE_INSN ("add", 0x0b000000, 0x7f200000, addsub_shift, 0, OP3 (Rd, Rn, Rm_SFT), QL_I3SAMER, F_SF),
CORE_INSN ("adds", 0x2b000000, 0x7f200000, addsub_shift, 0, OP3 (Rd, Rn, Rm_SFT), QL_I3SAMER, F_HAS_ALIAS | F_SF),
CORE_INSN ("cmn", 0x2b00001f, 0x7f20001f, addsub_shift, 0, OP2 (Rn, Rm_SFT), QL_I2SAME, F_ALIAS | F_SF),
CORE_INSN ("sub", 0x4b000000, 0x7f200000, addsub_shift, 0, OP3 (Rd, Rn, Rm_SFT), QL_I3SAMER, F_HAS_ALIAS | F_SF),
CORE_INSN ("neg", 0x4b0003e0, 0x7f2003e0, addsub_shift, 0, OP2 (Rd, Rm_SFT), QL_I2SAME, F_ALIAS | F_SF),
CORE_INSN ("subs", 0x6b000000, 0x7f200000, addsub_shift, 0, OP3 (Rd, Rn, Rm_SFT), QL_I3SAMER, F_HAS_ALIAS | F_SF),
CORE_INSN ("cmp", 0x6b00001f, 0x7f20001f, addsub_shift, 0, OP2 (Rn, Rm_SFT), QL_I2SAME, F_ALIAS | F_SF | F_P1),
CORE_INSN ("negs", 0x6b0003e0, 0x7f2003e0, addsub_shift, 0, OP2 (Rd, Rm_SFT), QL_I2SAME, F_ALIAS | F_SF),
/* AdvSIMD across lanes. */
SIMD_INSN ("saddlv", 0x0e303800, 0xbf3ffc00, asimdall, 0, OP2 (Fd, Vn), QL_XLANES_L, F_SIZEQ),
SIMD_INSN ("smaxv", 0x0e30a800, 0xbf3ffc00, asimdall, 0, OP2 (Fd, Vn), QL_XLANES, F_SIZEQ),
SIMD_INSN ("sminv", 0x0e31a800, 0xbf3ffc00, asimdall, 0, OP2 (Fd, Vn), QL_XLANES, F_SIZEQ),
SIMD_INSN ("addv", 0x0e31b800, 0xbf3ffc00, asimdall, 0, OP2 (Fd, Vn), QL_XLANES, F_SIZEQ),
SIMD_INSN ("uaddlv", 0x2e303800, 0xbf3ffc00, asimdall, 0, OP2 (Fd, Vn), QL_XLANES_L, F_SIZEQ),
SIMD_INSN ("umaxv", 0x2e30a800, 0xbf3ffc00, asimdall, 0, OP2 (Fd, Vn), QL_XLANES, F_SIZEQ),
SIMD_INSN ("uminv", 0x2e31a800, 0xbf3ffc00, asimdall, 0, OP2 (Fd, Vn), QL_XLANES, F_SIZEQ),
SIMD_INSN ("fmaxnmv",0x2e30c800, 0xbfbffc00, asimdall, 0, OP2 (Fd, Vn), QL_XLANES_FP, F_SIZEQ),
SF16_INSN ("fmaxnmv",0x0e30c800, 0xbffffc00, asimdall, OP2 (Fd, Vn), QL_XLANES_FP_H, F_SIZEQ),
SIMD_INSN ("fmaxv", 0x2e30f800, 0xbfbffc00, asimdall, 0, OP2 (Fd, Vn), QL_XLANES_FP, F_SIZEQ),
SF16_INSN ("fmaxv", 0x0e30f800, 0xbffffc00, asimdall, OP2 (Fd, Vn), QL_XLANES_FP_H, F_SIZEQ),
SIMD_INSN ("fminnmv",0x2eb0c800, 0xbfbffc00, asimdall, 0, OP2 (Fd, Vn), QL_XLANES_FP, F_SIZEQ),
SF16_INSN ("fminnmv",0x0eb0c800, 0xbffffc00, asimdall, OP2 (Fd, Vn), QL_XLANES_FP_H, F_SIZEQ),
SIMD_INSN ("fminv", 0x2eb0f800, 0xbfbffc00, asimdall, 0, OP2 (Fd, Vn), QL_XLANES_FP, F_SIZEQ),
SF16_INSN ("fminv", 0x0eb0f800, 0xbffffc00, asimdall, OP2 (Fd, Vn), QL_XLANES_FP_H, F_SIZEQ),
/* AdvSIMD three different. */
SIMD_INSN ("saddl", 0x0e200000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS, F_SIZEQ),
SIMD_INSN ("saddl2", 0x4e200000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS2, F_SIZEQ),
SIMD_INSN ("saddw", 0x0e201000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3WIDEBHS, F_SIZEQ),
SIMD_INSN ("saddw2", 0x4e201000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3WIDEBHS2, F_SIZEQ),
SIMD_INSN ("ssubl", 0x0e202000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS, F_SIZEQ),
SIMD_INSN ("ssubl2", 0x4e202000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS2, F_SIZEQ),
SIMD_INSN ("ssubw", 0x0e203000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3WIDEBHS, F_SIZEQ),
SIMD_INSN ("ssubw2", 0x4e203000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3WIDEBHS2, F_SIZEQ),
SIMD_INSN ("addhn", 0x0e204000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3NARRBHS, F_SIZEQ),
SIMD_INSN ("addhn2", 0x4e204000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3NARRBHS2, F_SIZEQ),
SIMD_INSN ("sabal", 0x0e205000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS, F_SIZEQ),
SIMD_INSN ("sabal2", 0x4e205000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS2, F_SIZEQ),
SIMD_INSN ("subhn", 0x0e206000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3NARRBHS, F_SIZEQ),
SIMD_INSN ("subhn2", 0x4e206000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3NARRBHS2, F_SIZEQ),
SIMD_INSN ("sabdl", 0x0e207000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS, F_SIZEQ),
SIMD_INSN ("sabdl2", 0x4e207000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS2, F_SIZEQ),
SIMD_INSN ("smlal", 0x0e208000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS, F_SIZEQ),
SIMD_INSN ("smlal2", 0x4e208000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS2, F_SIZEQ),
SIMD_INSN ("sqdmlal", 0x0e209000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGHS, F_SIZEQ),
SIMD_INSN ("sqdmlal2",0x4e209000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGHS2, F_SIZEQ),
SIMD_INSN ("smlsl", 0x0e20a000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS, F_SIZEQ),
SIMD_INSN ("smlsl2", 0x4e20a000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS2, F_SIZEQ),
SIMD_INSN ("sqdmlsl", 0x0e20b000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGHS, F_SIZEQ),
SIMD_INSN ("sqdmlsl2",0x4e20b000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGHS2, F_SIZEQ),
SIMD_INSN ("smull", 0x0e20c000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS, F_SIZEQ),
SIMD_INSN ("smull2", 0x4e20c000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGBHS2, F_SIZEQ),
SIMD_INSN ("sqdmull", 0x0e20d000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGHS, F_SIZEQ),
SIMD_INSN ("sqdmull2",0x4e20d000, 0xff20fc00, asimddiff, 0, OP3 (Vd, Vn, Vm), QL_V3LONGHS2, F_SIZEQ),