| /* aarch64-tbl.h -- AArch64 opcode description table and instruction |
| operand description table. |
| Copyright (C) 2012-2024 Free Software Foundation, Inc. |
| |
| This file is part of the GNU opcodes library. |
| |
| This library is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| It is distributed in the hope that it will be useful, but WITHOUT |
| ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
| or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public |
| License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this file; see the file COPYING. If not, write to the |
| Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, |
| MA 02110-1301, USA. */ |
| |
| #include "aarch64-opc.h" |
| |
| #ifndef VERIFIER |
| #error VERIFIER must be defined. |
| #endif |
| |
| /* Operand type. */ |
| |
| #define OPND(x) AARCH64_OPND_##x |
| #define OP0() {} |
| #define OP1(a) {OPND(a)} |
| #define OP2(a,b) {OPND(a), OPND(b)} |
| #define OP3(a,b,c) {OPND(a), OPND(b), OPND(c)} |
| #define OP4(a,b,c,d) {OPND(a), OPND(b), OPND(c), OPND(d)} |
| #define OP5(a,b,c,d,e) {OPND(a), OPND(b), OPND(c), OPND(d), OPND(e)} |
| #define OP6(a,b,c,d,e,f) {OPND(a), OPND(b), OPND(c), OPND(d), OPND(e), OPND(f)} |
| |
| #define QLF(x) AARCH64_OPND_QLF_##x |
| #define QLF1(a) {QLF(a)} |
| #define QLF2(a,b) {QLF(a), QLF(b)} |
| #define QLF3(a,b,c) {QLF(a), QLF(b), QLF(c)} |
| #define QLF4(a,b,c,d) {QLF(a), QLF(b), QLF(c), QLF(d)} |
| #define QLF5(a,b,c,d,e) {QLF(a), QLF(b), QLF(c), QLF(d), QLF(e)} |
| #define QLF6(a,b,c,d,e,f) {QLF(a), QLF(b), QLF(c), QLF(d), QLF(e), QLF(f)} |
| |
| /* Qualifiers list. */ |
| |
| /* e.g. MSR <systemreg>, <Xt>. */ |
| #define QL_SRC_X \ |
| { \ |
| QLF2(NIL,X), \ |
| } |
| |
| /* e.g. MSRR <systemreg>, <Xt>, <Xt2>. */ |
| #define QL_SRC_X2 \ |
| { \ |
| QLF3(NIL,X,X), \ |
| } |
| |
| /* e.g. MRS <Xt>, <systemreg>. */ |
| #define QL_DST_X \ |
| { \ |
| QLF2(X,NIL), \ |
| } |
| |
| /* e.g. MRRS <Xt>, <Xt2>, <systemreg>. */ |
| #define QL_DST_X2 \ |
| { \ |
| QLF3(X,X,NIL), \ |
| } |
| |
| /* e.g. SYS #<op1>, <Cn>, <Cm>, #<op2>{, <Xt>}. */ |
| #define QL_SYS \ |
| { \ |
| QLF5(NIL,CR,CR,NIL,X), \ |
| } |
| |
| /* e.g. SYSL <Xt>, #<op1>, <Cn>, <Cm>, #<op2>. */ |
| #define QL_SYSL \ |
| { \ |
| QLF5(X,NIL,CR,CR,NIL), \ |
| } |
| |
| /* e.g. SYSP #<op1>, <Cn>, <Cm>, #<op2>{, <Xt>, <Xt+1>}. */ |
| #define QL_SYSP \ |
| { \ |
| QLF6(NIL,CR,CR,NIL,X,X), \ |
| } |
| |
| /* e.g. ADRP <Xd>, <label>. */ |
| #define QL_ADRP \ |
| { \ |
| QLF2(X,NIL), \ |
| } |
| |
| /* e.g. TCANCEL #<imm>. */ |
| #define QL_IMM_NIL \ |
| { \ |
| QLF1(NIL), \ |
| } |
| |
| #define QL_IMM_NIL_NIL \ |
| { \ |
| QLF2(NIL, NIL), \ |
| } |
| |
| /* e.g. B.<cond> <label>. */ |
| #define QL_PCREL_NIL \ |
| { \ |
| QLF1(NIL), \ |
| } |
| |
| /* e.g. TBZ <Xt>, #<imm>, <label>. */ |
| #define QL_PCREL_14 \ |
| { \ |
| QLF3(X,imm_0_63,NIL), \ |
| } |
| |
| /* e.g. BL <label>. */ |
| #define QL_PCREL_26 \ |
| { \ |
| QLF1(NIL), \ |
| } |
| |
| /* e.g. LDRSW <Xt>, <label>. */ |
| #define QL_X_PCREL \ |
| { \ |
| QLF2(X,NIL), \ |
| } |
| |
| /* e.g. LDR <Wt>, <label>. */ |
| #define QL_R_PCREL \ |
| { \ |
| QLF2(W,NIL), \ |
| QLF2(X,NIL), \ |
| } |
| |
| /* e.g. LDR <Dt>, <label>. */ |
| #define QL_FP_PCREL \ |
| { \ |
| QLF2(S_S,NIL), \ |
| QLF2(S_D,NIL), \ |
| QLF2(S_Q,NIL), \ |
| } |
| |
| /* e.g. PRFM <prfop>, <label>. */ |
| #define QL_PRFM_PCREL \ |
| { \ |
| QLF2(NIL,NIL), \ |
| } |
| |
| /* e.g. BR <Xn>. */ |
| #define QL_I1X \ |
| { \ |
| QLF1(X), \ |
| } |
| |
| /* e.g. STG <Xt|SP>, [<Xn|SP>, #<imm9>]. */ |
| #define QL_LDST_AT \ |
| { \ |
| QLF2(X, imm_tag), \ |
| QLF2(SP, imm_tag), \ |
| } |
| |
| /* e.g. RBIT <Wd>, <Wn>. */ |
| #define QL_I2SAME \ |
| { \ |
| QLF2(W,W), \ |
| QLF2(X,X), \ |
| } |
| |
| /* e.g. CMN <Wn|WSP>, <Wm>{, <extend> {#<amount>}}. */ |
| #define QL_I2_EXT \ |
| { \ |
| QLF2(W,W), \ |
| QLF2(X,W), \ |
| QLF2(X,X), \ |
| } |
| |
| /* e.g. MOV <Wd|WSP>, <Wn|WSP>, at least one SP. */ |
| #define QL_I2SP \ |
| { \ |
| QLF2(WSP,W), \ |
| QLF2(W,WSP), \ |
| QLF2(SP,X), \ |
| QLF2(X,SP), \ |
| } |
| |
| /* e.g. REV <Wd>, <Wn>. */ |
| #define QL_I2SAMEW \ |
| { \ |
| QLF2(W,W), \ |
| } |
| |
| /* e.g. REV32 <Xd>, <Xn>. */ |
| #define QL_I2SAMEX \ |
| { \ |
| QLF2(X,X), \ |
| } |
| |
| #define QL_I2SAMER \ |
| { \ |
| QLF2(W,W), \ |
| QLF2(X,X), \ |
| } |
| |
| /* e.g. CRC32B <Wd>, <Wn>, <Wm>. */ |
| #define QL_I3SAMEW \ |
| { \ |
| QLF3(W,W,W), \ |
| } |
| |
| /* e.g. SMULH <Xd>, <Xn>, <Xm>. */ |
| #define QL_I3SAMEX \ |
| { \ |
| QLF3(X,X,X), \ |
| } |
| |
| /* e.g. CRC32X <Wd>, <Wn>, <Xm>. */ |
| #define QL_I3WWX \ |
| { \ |
| QLF3(W,W,X), \ |
| } |
| |
| /* e.g. UDIV <Xd>, <Xn>, <Xm>. */ |
| #define QL_I3SAMER \ |
| { \ |
| QLF3(W,W,W), \ |
| QLF3(X,X,X), \ |
| } |
| |
| /* e.g. ADDS <Xd>, <Xn|SP>, <R><m>{, <extend> {#<amount>}}. */ |
| #define QL_I3_EXT \ |
| { \ |
| QLF3(W,W,W), \ |
| QLF3(X,X,W), \ |
| QLF3(X,X,X), \ |
| } |
| |
| /* e.g. MADD <Xd>, <Xn>, <Xm>, <Xa>. */ |
| #define QL_I4SAMER \ |
| { \ |
| QLF4(W,W,W,W), \ |
| QLF4(X,X,X,X), \ |
| } |
| |
| /* e.g. MADDPT <Xd>, <Xn>, <Xm>, <Xa>. */ |
| #define QL_I4SAMEX \ |
| { \ |
| QLF4(X,X,X,X), \ |
| } |
| |
| /* e.g. SMADDL <Xd>, <Wn>, <Wm>, <Xa>. */ |
| #define QL_I3SAMEL \ |
| { \ |
| QLF3(X,W,W), \ |
| } |
| |
| /* e.g. SMADDL <Xd>, <Wn>, <Wm>, <Xa>. */ |
| #define QL_I4SAMEL \ |
| { \ |
| QLF4(X,W,W,X), \ |
| } |
| |
| /* e.g. CSINC <Xd>, <Xn>, <Xm>, <cond>. */ |
| #define QL_CSEL \ |
| { \ |
| QLF4(W, W, W, NIL), \ |
| QLF4(X, X, X, NIL), \ |
| } |
| |
| /* e.g. CSET <Wd>, <cond>. */ |
| #define QL_DST_R \ |
| { \ |
| QLF2(W, NIL), \ |
| QLF2(X, NIL), \ |
| } |
| |
| /* e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>. */ |
| #define QL_BF \ |
| { \ |
| QLF4(W,W,imm_0_31,imm_0_31), \ |
| QLF4(X,X,imm_0_63,imm_0_63), \ |
| } |
| |
/* e.g. ADDG <Xd>, <Xn>, #<uimm10>, #<uimm4>. */
/* Fix: the closing brace previously carried a trailing backslash, which
   silently spliced the following source line into this macro definition.
   It was harmless only because the next line happened to be blank. */
#define QL_ADDG \
{ \
QLF4(X,X,NIL,imm_0_15), \
}
| |
| /* e.g. BFC <Wd>, #<immr>, #<imms>. */ |
| #define QL_BF1 \ |
| { \ |
| QLF3 (W, imm_0_31, imm_1_32), \ |
| QLF3 (X, imm_0_63, imm_1_64), \ |
| } |
| |
| /* e.g. UBFIZ <Wd>, <Wn>, #<lsb>, #<width>. */ |
| #define QL_BF2 \ |
| { \ |
| QLF4(W,W,imm_0_31,imm_1_32), \ |
| QLF4(X,X,imm_0_63,imm_1_64), \ |
| } |
| |
| /* e.g. SCVTF <Sd>, <Xn>, #<fbits>. */ |
| #define QL_FIX2FP \ |
| { \ |
| QLF3(S_D,W,imm_1_32), \ |
| QLF3(S_S,W,imm_1_32), \ |
| QLF3(S_D,X,imm_1_64), \ |
| QLF3(S_S,X,imm_1_64), \ |
| } |
| |
| /* e.g. SCVTF <Hd>, <Xn>, #<fbits>. */ |
| #define QL_FIX2FP_H \ |
| { \ |
| QLF3 (S_H, W, imm_1_32), \ |
| QLF3 (S_H, X, imm_1_64), \ |
| } |
| |
| /* e.g. FCVTZS <Wd>, <Dn>, #<fbits>. */ |
| #define QL_FP2FIX \ |
| { \ |
| QLF3(W,S_D,imm_1_32), \ |
| QLF3(W,S_S,imm_1_32), \ |
| QLF3(X,S_D,imm_1_64), \ |
| QLF3(X,S_S,imm_1_64), \ |
| } |
| |
| /* e.g. FCVTZS <Wd>, <Hn>, #<fbits>. */ |
| #define QL_FP2FIX_H \ |
| { \ |
| QLF3 (W, S_H, imm_1_32), \ |
| QLF3 (X, S_H, imm_1_64), \ |
| } |
| |
| /* e.g. SCVTF <Dd>, <Wn>. */ |
| #define QL_INT2FP \ |
| { \ |
| QLF2(S_D,W), \ |
| QLF2(S_S,W), \ |
| QLF2(S_D,X), \ |
| QLF2(S_S,X), \ |
| } |
| |
| /* e.g. FMOV <Dd>, <Xn>. */ |
| #define QL_INT2FP_FMOV \ |
| { \ |
| QLF2(S_S,W), \ |
| QLF2(S_D,X), \ |
| } |
| |
| /* e.g. SCVTF <Hd>, <Wn>. */ |
| #define QL_INT2FP_H \ |
| { \ |
| QLF2 (S_H, W), \ |
| QLF2 (S_H, X), \ |
| } |
| |
| /* e.g. FCVTNS <Xd>, <Dn>. */ |
| #define QL_FP2INT \ |
| { \ |
| QLF2(W,S_D), \ |
| QLF2(W,S_S), \ |
| QLF2(X,S_D), \ |
| QLF2(X,S_S), \ |
| } |
| |
| /* e.g. FMOV <Xd>, <Dn>. */ |
| #define QL_FP2INT_FMOV \ |
| { \ |
| QLF2(W,S_S), \ |
| QLF2(X,S_D), \ |
| } |
| |
| /* e.g. FCVTNS <Hd>, <Wn>. */ |
| #define QL_FP2INT_H \ |
| { \ |
| QLF2 (W, S_H), \ |
| QLF2 (X, S_H), \ |
| } |
| |
| /* e.g. FJCVTZS <Wd>, <Dn>. */ |
| #define QL_FP2INT_W_D \ |
| { \ |
| QLF2 (W, S_D), \ |
| } |
| |
| /* e.g. FMOV <Xd>, <Vn>.D[1]. */ |
| #define QL_XVD1 \ |
| { \ |
| QLF2(X,S_D), \ |
| } |
| |
| /* e.g. FMOV <Vd>.D[1], <Xn>. */ |
| #define QL_VD1X \ |
| { \ |
| QLF2(S_D,X), \ |
| } |
| |
| /* e.g. EXTR <Xd>, <Xn>, <Xm>, #<lsb>. */ |
| #define QL_EXTR \ |
| { \ |
| QLF4(W,W,W,imm_0_31), \ |
| QLF4(X,X,X,imm_0_63), \ |
| } |
| |
| /* e.g. LSL <Wd>, <Wn>, #<uimm>. */ |
| #define QL_SHIFT \ |
| { \ |
| QLF3(W,W,imm_0_31), \ |
| QLF3(X,X,imm_0_63), \ |
| } |
| |
| /* e.g. UXTH <Xd>, <Wn>. */ |
| #define QL_EXT \ |
| { \ |
| QLF2(W,W), \ |
| QLF2(X,W), \ |
| } |
| |
| /* e.g. UXTW <Xd>, <Wn>. */ |
| #define QL_EXT_W \ |
| { \ |
| QLF2(X,W), \ |
| } |
| |
| /* e.g. SQSHL <V><d>, <V><n>, #<shift>. */ |
| #define QL_SSHIFT \ |
| { \ |
| QLF3(S_B , S_B , S_B ), \ |
| QLF3(S_H , S_H , S_H ), \ |
| QLF3(S_S , S_S , S_S ), \ |
| QLF3(S_D , S_D , S_D ) \ |
| } |
| |
| /* e.g. SSHR <V><d>, <V><n>, #<shift>. */ |
| #define QL_SSHIFT_D \ |
| { \ |
| QLF3(S_D , S_D , S_D ) \ |
| } |
| |
| /* e.g. UCVTF <Vd>.<T>, <Vn>.<T>, #<fbits>. */ |
| #define QL_SSHIFT_SD \ |
| { \ |
| QLF3(S_S , S_S , S_S ), \ |
| QLF3(S_D , S_D , S_D ) \ |
| } |
| |
| /* e.g. UCVTF <Vd>.<T>, <Vn>.<T>, #<fbits>. */ |
| #define QL_SSHIFT_H \ |
| { \ |
| QLF3 (S_H, S_H, S_H) \ |
| } |
| |
| /* e.g. SQSHRUN <Vb><d>, <Va><n>, #<shift>. */ |
| #define QL_SSHIFTN \ |
| { \ |
| QLF3(S_B , S_H , S_B ), \ |
| QLF3(S_H , S_S , S_H ), \ |
| QLF3(S_S , S_D , S_S ), \ |
| } |
| |
| /* e.g. SSHR <Vd>.<T>, <Vn>.<T>, #<shift>. |
| The register operand variant qualifiers are deliberately used for the |
| immediate operand to ease the operand encoding/decoding and qualifier |
| sequence matching. */ |
| #define QL_VSHIFT \ |
| { \ |
| QLF3(V_8B , V_8B , V_8B ), \ |
| QLF3(V_16B, V_16B, V_16B), \ |
| QLF3(V_4H , V_4H , V_4H ), \ |
| QLF3(V_8H , V_8H , V_8H ), \ |
| QLF3(V_2S , V_2S , V_2S ), \ |
| QLF3(V_4S , V_4S , V_4S ), \ |
| QLF3(V_2D , V_2D , V_2D ) \ |
| } |
| |
| /* e.g. SCVTF <Vd>.<T>, <Vn>.<T>, #<fbits>. */ |
| #define QL_VSHIFT_SD \ |
| { \ |
| QLF3(V_2S , V_2S , V_2S ), \ |
| QLF3(V_4S , V_4S , V_4S ), \ |
| QLF3(V_2D , V_2D , V_2D ) \ |
| } |
| |
| /* e.g. SCVTF <Vd>.<T>, <Vn>.<T>, #<fbits>. */ |
| #define QL_VSHIFT_H \ |
| { \ |
| QLF3 (V_4H, V_4H, V_4H), \ |
| QLF3 (V_8H, V_8H, V_8H) \ |
| } |
| |
| /* e.g. SHRN<Q> <Vd>.<Tb>, <Vn>.<Ta>, #<shift>. */ |
| #define QL_VSHIFTN \ |
| { \ |
| QLF3(V_8B , V_8H , V_8B ), \ |
| QLF3(V_4H , V_4S , V_4H ), \ |
| QLF3(V_2S , V_2D , V_2S ), \ |
| } |
| |
| /* e.g. SHRN<Q> <Vd>.<Tb>, <Vn>.<Ta>, #<shift>. */ |
| #define QL_VSHIFTN2 \ |
| { \ |
| QLF3(V_16B, V_8H, V_16B), \ |
| QLF3(V_8H , V_4S , V_8H ), \ |
| QLF3(V_4S , V_2D , V_4S ), \ |
| } |
| |
| /* e.g. SSHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. |
| the 3rd qualifier is used to help the encoding. */ |
| #define QL_VSHIFTL \ |
| { \ |
| QLF3(V_8H , V_8B , V_8B ), \ |
| QLF3(V_4S , V_4H , V_4H ), \ |
| QLF3(V_2D , V_2S , V_2S ), \ |
| } |
| |
| /* e.g. SSHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */ |
| #define QL_VSHIFTL2 \ |
| { \ |
| QLF3(V_8H , V_16B, V_16B), \ |
| QLF3(V_4S , V_8H , V_8H ), \ |
| QLF3(V_2D , V_4S , V_4S ), \ |
| } |
| |
| /* e.g. TBL. */ |
| #define QL_TABLE \ |
| { \ |
| QLF3(V_8B , V_16B, V_8B ), \ |
| QLF3(V_16B, V_16B, V_16B), \ |
| } |
| |
| /* e.g. SHA1H. */ |
| #define QL_2SAMES \ |
| { \ |
| QLF2(S_S, S_S), \ |
| } |
| |
| /* e.g. ABS <V><d>, <V><n>. */ |
| #define QL_2SAMED \ |
| { \ |
| QLF2(S_D, S_D), \ |
| } |
| |
| /* e.g. CMGT <V><d>, <V><n>, #0. */ |
| #define QL_SISD_CMP_0 \ |
| { \ |
| QLF3(S_D, S_D, NIL), \ |
| } |
| |
| /* e.g. FCMEQ <V><d>, <V><n>, #0. */ |
| #define QL_SISD_FCMP_0 \ |
| { \ |
| QLF3(S_S, S_S, NIL), \ |
| QLF3(S_D, S_D, NIL), \ |
| } |
| |
| /* e.g. FCMEQ <V><d>, <V><n>, #0. */ |
| #define QL_SISD_FCMP_H_0 \ |
| { \ |
| QLF3 (S_H, S_H, NIL), \ |
| } |
| |
| /* e.g. FMAXNMP <V><d>, <Vn>.<T>. */ |
| #define QL_SISD_PAIR \ |
| { \ |
| QLF2(S_S, V_2S), \ |
| QLF2(S_D, V_2D), \ |
| } |
| |
| /* e.g. FMAXNMP <V><d>, <Vn>.<T>. */ |
| #define QL_SISD_PAIR_H \ |
| { \ |
| QLF2 (S_H, V_2H), \ |
| } |
| |
| /* e.g. ADDP <V><d>, <Vn>.<T>. */ |
| #define QL_SISD_PAIR_D \ |
| { \ |
| QLF2(S_D, V_2D), \ |
| } |
| |
| /* e.g. DUP <V><d>, <Vn>.<T>[<index>]. */ |
| #define QL_S_2SAME \ |
| { \ |
| QLF2(S_B, S_B), \ |
| QLF2(S_H, S_H), \ |
| QLF2(S_S, S_S), \ |
| QLF2(S_D, S_D), \ |
| } |
| |
| /* e.g. FCVTNS <V><d>, <V><n>. */ |
| #define QL_S_2SAMESD \ |
| { \ |
| QLF2(S_S, S_S), \ |
| QLF2(S_D, S_D), \ |
| } |
| |
| /* e.g. FCVTNS <V><d>, <V><n>. */ |
| #define QL_S_2SAMEH \ |
| { \ |
| QLF2 (S_H, S_H), \ |
| } |
| |
| /* e.g. SQXTN <Vb><d>, <Va><n>. */ |
| #define QL_SISD_NARROW \ |
| { \ |
| QLF2(S_B, S_H), \ |
| QLF2(S_H, S_S), \ |
| QLF2(S_S, S_D), \ |
| } |
| |
| /* e.g. FCVTXN <Vb><d>, <Va><n>. */ |
| #define QL_SISD_NARROW_S \ |
| { \ |
| QLF2(S_S, S_D), \ |
| } |
| |
| /* e.g. FCVT. */ |
| #define QL_FCVT \ |
| { \ |
| QLF2(S_S, S_H), \ |
| QLF2(S_S, S_D), \ |
| QLF2(S_D, S_H), \ |
| QLF2(S_D, S_S), \ |
| QLF2(S_H, S_S), \ |
| QLF2(S_H, S_D), \ |
| } |
| |
| /* FMOV <Dd>, <Dn>. */ |
| #define QL_FP2 \ |
| { \ |
| QLF2(S_S, S_S), \ |
| QLF2(S_D, S_D), \ |
| } |
| |
| /* FMOV <Hd>, <Hn>. */ |
| #define QL_FP2_H \ |
| { \ |
| QLF2 (S_H, S_H), \ |
| } |
| |
| /* e.g. SQADD <V><d>, <V><n>, <V><m>. */ |
| #define QL_S_3SAME \ |
| { \ |
| QLF3(S_B, S_B, S_B), \ |
| QLF3(S_H, S_H, S_H), \ |
| QLF3(S_S, S_S, S_S), \ |
| QLF3(S_D, S_D, S_D), \ |
| } |
| |
| /* e.g. CMGE <V><d>, <V><n>, <V><m>. */ |
| #define QL_S_3SAMED \ |
| { \ |
| QLF3(S_D, S_D, S_D), \ |
| } |
| |
| /* e.g. SQDMULH <V><d>, <V><n>, <V><m>. */ |
| #define QL_SISD_HS \ |
| { \ |
| QLF3(S_H, S_H, S_H), \ |
| QLF3(S_S, S_S, S_S), \ |
| } |
| |
| /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vb><m>. */ |
| #define QL_SISDL_HS \ |
| { \ |
| QLF3(S_S, S_H, S_H), \ |
| QLF3(S_D, S_S, S_S), \ |
| } |
| |
| /* FMUL <Sd>, <Sn>, <Sm>. */ |
| #define QL_FP3 \ |
| { \ |
| QLF3(S_S, S_S, S_S), \ |
| QLF3(S_D, S_D, S_D), \ |
| } |
| |
| /* FMUL <Hd>, <Hn>, <Hm>. */ |
| #define QL_FP3_H \ |
| { \ |
| QLF3 (S_H, S_H, S_H), \ |
| } |
| |
| /* FMADD <Dd>, <Dn>, <Dm>, <Da>. */ |
| #define QL_FP4 \ |
| { \ |
| QLF4(S_S, S_S, S_S, S_S), \ |
| QLF4(S_D, S_D, S_D, S_D), \ |
| } |
| |
| /* FMADD <Hd>, <Hn>, <Hm>, <Ha>. */ |
| #define QL_FP4_H \ |
| { \ |
| QLF4 (S_H, S_H, S_H, S_H), \ |
| } |
| |
| /* e.g. FCMP <Dn>, #0.0. */ |
| #define QL_DST_SD \ |
| { \ |
| QLF2(S_S, NIL), \ |
| QLF2(S_D, NIL), \ |
| } |
| |
| /* e.g. FCMP <Hn>, #0.0. */ |
| #define QL_DST_H \ |
| { \ |
| QLF2 (S_H, NIL), \ |
| } |
| |
| /* FCSEL <Sd>, <Sn>, <Sm>, <cond>. */ |
| #define QL_FP_COND \ |
| { \ |
| QLF4(S_S, S_S, S_S, NIL), \ |
| QLF4(S_D, S_D, S_D, NIL), \ |
| } |
| |
| /* FCSEL <Hd>, <Hn>, <Hm>, <cond>. */ |
| #define QL_FP_COND_H \ |
| { \ |
| QLF4 (S_H, S_H, S_H, NIL), \ |
| } |
| |
| /* e.g. CCMN <Xn>, <Xm>, #<nzcv>, <cond>. */ |
| #define QL_CCMP \ |
| { \ |
| QLF4(W, W, NIL, NIL), \ |
| QLF4(X, X, NIL, NIL), \ |
| } |
| |
/* e.g. CCMN <Xn>, #<imm>, #<nzcv>, <cond>. */
| #define QL_CCMP_IMM \ |
| { \ |
| QLF4(W, NIL, NIL, NIL), \ |
| QLF4(X, NIL, NIL, NIL), \ |
| } |
| |
| /* e.g. FCCMP <Sn>, <Sm>, #<nzcv>, <cond>. */ |
| #define QL_FCCMP \ |
| { \ |
| QLF4(S_S, S_S, NIL, NIL), \ |
| QLF4(S_D, S_D, NIL, NIL), \ |
| } |
| |
| /* e.g. FCCMP <Sn>, <Sm>, #<nzcv>, <cond>. */ |
| #define QL_FCCMP_H \ |
| { \ |
| QLF4 (S_H, S_H, NIL, NIL), \ |
| } |
| |
| /* e.g. DUP <Vd>.<T>, <Vn>.<Ts>[<index>]. */ |
| #define QL_DUP_VX \ |
| { \ |
| QLF2(V_8B , S_B ), \ |
| QLF2(V_16B, S_B ), \ |
| QLF2(V_4H , S_H ), \ |
| QLF2(V_8H , S_H ), \ |
| QLF2(V_2S , S_S ), \ |
| QLF2(V_4S , S_S ), \ |
| QLF2(V_2D , S_D ), \ |
| } |
| |
| /* e.g. DUP <Vd>.<T>, <Wn>. */ |
| #define QL_DUP_VR \ |
| { \ |
| QLF2(V_8B , W ), \ |
| QLF2(V_16B, W ), \ |
| QLF2(V_4H , W ), \ |
| QLF2(V_8H , W ), \ |
| QLF2(V_2S , W ), \ |
| QLF2(V_4S , W ), \ |
| QLF2(V_2D , X ), \ |
| } |
| |
| /* e.g. INS <Vd>.<Ts>[<index>], <Wn>. */ |
| #define QL_INS_XR \ |
| { \ |
| QLF2(S_H , W ), \ |
| QLF2(S_S , W ), \ |
| QLF2(S_D , X ), \ |
| QLF2(S_B , W ), \ |
| } |
| |
| /* e.g. SMOV <Wd>, <Vn>.<Ts>[<index>]. */ |
| #define QL_SMOV \ |
| { \ |
| QLF2(W , S_H), \ |
| QLF2(X , S_H), \ |
| QLF2(X , S_S), \ |
| QLF2(W , S_B), \ |
| QLF2(X , S_B), \ |
| } |
| |
| /* e.g. UMOV <Wd>, <Vn>.<Ts>[<index>]. */ |
| #define QL_UMOV \ |
| { \ |
| QLF2(W , S_H), \ |
| QLF2(W , S_S), \ |
| QLF2(X , S_D), \ |
| QLF2(W , S_B), \ |
| } |
| |
| /* e.g. MOV <Wd>, <Vn>.<Ts>[<index>]. */ |
| #define QL_MOV \ |
| { \ |
| QLF2(W , S_S), \ |
| QLF2(X , S_D), \ |
| } |
| |
| /* e.g. SUQADD <Vd>.<T>, <Vn>.<T>. */ |
| #define QL_V2SAME \ |
| { \ |
| QLF2(V_8B , V_8B ), \ |
| QLF2(V_16B, V_16B), \ |
| QLF2(V_4H , V_4H ), \ |
| QLF2(V_8H , V_8H ), \ |
| QLF2(V_2S , V_2S ), \ |
| QLF2(V_4S , V_4S ), \ |
| QLF2(V_2D , V_2D ), \ |
| } |
| |
| /* e.g. URSQRTE <Vd>.<T>, <Vn>.<T>. */ |
| #define QL_V2SAMES \ |
| { \ |
| QLF2(V_2S , V_2S ), \ |
| QLF2(V_4S , V_4S ), \ |
| } |
| |
| /* e.g. REV32 <Vd>.<T>, <Vn>.<T>. */ |
| #define QL_V2SAMEBH \ |
| { \ |
| QLF2(V_8B , V_8B ), \ |
| QLF2(V_16B, V_16B), \ |
| QLF2(V_4H , V_4H ), \ |
| QLF2(V_8H , V_8H ), \ |
| } |
| |
| /* e.g. FRINTN <Vd>.<T>, <Vn>.<T>. */ |
| #define QL_V2SAMESD \ |
| { \ |
| QLF2(V_2S , V_2S ), \ |
| QLF2(V_4S , V_4S ), \ |
| QLF2(V_2D , V_2D ), \ |
| } |
| |
| /* e.g. REV64 <Vd>.<T>, <Vn>.<T>. */ |
| #define QL_V2SAMEBHS \ |
| { \ |
| QLF2(V_8B , V_8B ), \ |
| QLF2(V_16B, V_16B), \ |
| QLF2(V_4H , V_4H ), \ |
| QLF2(V_8H , V_8H ), \ |
| QLF2(V_2S , V_2S ), \ |
| QLF2(V_4S , V_4S ), \ |
| } |
| |
/* e.g. FCMGT <Vd>.<T>, <Vn>.<T>, #0.0. */
| #define QL_V2SAMEH \ |
| { \ |
| QLF2 (V_4H, V_4H), \ |
| QLF2 (V_8H, V_8H), \ |
| } |
| |
| /* e.g. REV16 <Vd>.<T>, <Vn>.<T>. */ |
| #define QL_V2SAMEB \ |
| { \ |
| QLF2(V_8B , V_8B ), \ |
| QLF2(V_16B, V_16B), \ |
| } |
| |
| /* e.g. SADDLP <Vd>.<Ta>, <Vn>.<Tb>. */ |
| #define QL_V2PAIRWISELONGBHS \ |
| { \ |
| QLF2(V_4H , V_8B ), \ |
| QLF2(V_8H , V_16B), \ |
| QLF2(V_2S , V_4H ), \ |
| QLF2(V_4S , V_8H ), \ |
| QLF2(V_1D , V_2S ), \ |
| QLF2(V_2D , V_4S ), \ |
| } |
| |
| /* e.g. SHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */ |
| #define QL_V2LONGBHS \ |
| { \ |
| QLF2(V_8H , V_8B ), \ |
| QLF2(V_4S , V_4H ), \ |
| QLF2(V_2D , V_2S ), \ |
| } |
| |
| /* e.g. SHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */ |
| #define QL_V2LONGBHS2 \ |
| { \ |
| QLF2(V_8H , V_16B), \ |
| QLF2(V_4S , V_8H ), \ |
| QLF2(V_2D , V_4S ), \ |
| } |
| |
| /* */ |
| #define QL_V3SAME \ |
| { \ |
| QLF3(V_8B , V_8B , V_8B ), \ |
| QLF3(V_16B, V_16B, V_16B), \ |
| QLF3(V_4H , V_4H , V_4H ), \ |
| QLF3(V_8H , V_8H , V_8H ), \ |
| QLF3(V_2S , V_2S , V_2S ), \ |
| QLF3(V_4S , V_4S , V_4S ), \ |
| QLF3(V_2D , V_2D , V_2D ) \ |
| } |
| |
| /* e.g. SHADD. */ |
| #define QL_V3SAMEBHS \ |
| { \ |
| QLF3(V_8B , V_8B , V_8B ), \ |
| QLF3(V_16B, V_16B, V_16B), \ |
| QLF3(V_4H , V_4H , V_4H ), \ |
| QLF3(V_8H , V_8H , V_8H ), \ |
| QLF3(V_2S , V_2S , V_2S ), \ |
| QLF3(V_4S , V_4S , V_4S ), \ |
| } |
| |
| /* e.g. FCVTXN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */ |
| #define QL_V2NARRS \ |
| { \ |
| QLF2(V_2S , V_2D ), \ |
| } |
| |
| /* e.g. FCVTXN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */ |
| #define QL_V2NARRS2 \ |
| { \ |
| QLF2(V_4S , V_2D ), \ |
| } |
| |
| /* e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */ |
| #define QL_V2NARRHS \ |
| { \ |
| QLF2(V_4H , V_4S ), \ |
| QLF2(V_2S , V_2D ), \ |
| } |
| |
| /* e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */ |
| #define QL_V2NARRHS2 \ |
| { \ |
| QLF2(V_8H , V_4S ), \ |
| QLF2(V_4S , V_2D ), \ |
| } |
| |
| /* e.g. FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */ |
| #define QL_V2LONGHS \ |
| { \ |
| QLF2(V_4S , V_4H ), \ |
| QLF2(V_2D , V_2S ), \ |
| } |
| |
| /* e.g. FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */ |
| #define QL_V2LONGHS2 \ |
| { \ |
| QLF2(V_4S , V_8H ), \ |
| QLF2(V_2D , V_4S ), \ |
| } |
| |
| /* e.g. XTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */ |
| #define QL_V2NARRBHS \ |
| { \ |
| QLF2(V_8B , V_8H ), \ |
| QLF2(V_4H , V_4S ), \ |
| QLF2(V_2S , V_2D ), \ |
| } |
| |
| /* e.g. XTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */ |
| #define QL_V2NARRBHS2 \ |
| { \ |
| QLF2(V_16B, V_8H ), \ |
| QLF2(V_8H , V_4S ), \ |
| QLF2(V_4S , V_2D ), \ |
| } |
| |
/* e.g. ORR. */
/* NOTE(review): this is a byte-identical (and therefore legal, benign)
   redefinition of QL_V2SAMEB, which is already defined earlier in this
   file for REV16.  One of the two copies could be dropped; if either is
   ever edited, the other must be kept token-identical or the compiler
   will reject the redefinition. */
#define QL_V2SAMEB \
{ \
QLF2(V_8B , V_8B ), \
QLF2(V_16B, V_16B), \
}
| |
| /* e.g. AESE. */ |
| #define QL_V2SAME16B \ |
| { \ |
| QLF2(V_16B, V_16B), \ |
| } |
| |
| /* e.g. SHA1SU1. */ |
| #define QL_V2SAME4S \ |
| { \ |
| QLF2(V_4S, V_4S), \ |
| } |
| |
| /* e.g. SHA1SU0. */ |
| #define QL_V3SAME4S \ |
| { \ |
| QLF3(V_4S, V_4S, V_4S), \ |
| } |
| |
| /* e.g. SHADD. */ |
| #define QL_V3SAMEB \ |
| { \ |
| QLF3(V_8B , V_8B , V_8B ), \ |
| QLF3(V_16B, V_16B, V_16B), \ |
| } |
| |
| /* e.g. luti2 <Vd>.16B, { <Vn>.16B }, <Vm>[index]. */ |
| /* The third operand is an AdvSIMD vector with a bit index |
| and without a type qualifier and is checked separately |
| based on operand enum. */ |
| #define QL_VVUB \ |
| { \ |
| QLF3(V_16B , V_16B , NIL), \ |
| } |
| |
| /* e.g. luti2 <Vd>.8H, { <Vn>.8H }, <Vm>[index]. */ |
| /* The third operand is an AdvSIMD vector with a bit index |
| and without a type qualifier and is checked separately |
| based on operand enum. */ |
| #define QL_VVUH \ |
| { \ |
| QLF3(V_8H , V_8H , NIL), \ |
| } |
| |
| /* e.g. EXT <Vd>.<T>, <Vn>.<T>, <Vm>.<T>, #<index>. */ |
| #define QL_VEXT \ |
| { \ |
| QLF4(V_8B , V_8B , V_8B , imm_0_7), \ |
| QLF4(V_16B, V_16B, V_16B, imm_0_15), \ |
| } |
| |
/* e.g. SQDMULH <Vd>.<T>, <Vn>.<T>, <Vm>.<T> (H and S arrangements only). */
| #define QL_V3SAMEHS \ |
| { \ |
| QLF3(V_4H , V_4H , V_4H ), \ |
| QLF3(V_8H , V_8H , V_8H ), \ |
| QLF3(V_2S , V_2S , V_2S ), \ |
| QLF3(V_4S , V_4S , V_4S ), \ |
| } |
| |
| /* */ |
| #define QL_V3SAMESD \ |
| { \ |
| QLF3(V_2S , V_2S , V_2S ), \ |
| QLF3(V_4S , V_4S , V_4S ), \ |
| QLF3(V_2D , V_2D , V_2D ) \ |
| } |
| |
| /* e.g. FCMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<T>, #<rotate>. */ |
| #define QL_V3SAMEHSD_ROT \ |
| { \ |
| QLF4 (V_4H, V_4H, V_4H, NIL), \ |
| QLF4 (V_8H, V_8H, V_8H, NIL), \ |
| QLF4 (V_2S, V_2S, V_2S, NIL), \ |
| QLF4 (V_4S, V_4S, V_4S, NIL), \ |
| QLF4 (V_2D, V_2D, V_2D, NIL), \ |
| } |
| |
| /* e.g. FMAXNM <Vd>.<T>, <Vn>.<T>, <Vm>.<T>. */ |
| #define QL_V3SAMEH \ |
| { \ |
| QLF3 (V_4H , V_4H , V_4H ), \ |
| QLF3 (V_8H , V_8H , V_8H ), \ |
| } |
| |
| /* e.g. SQDMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Tb>. */ |
| #define QL_V3LONGHS \ |
| { \ |
| QLF3(V_4S , V_4H , V_4H ), \ |
| QLF3(V_2D , V_2S , V_2S ), \ |
| } |
| |
| /* e.g. SQDMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Tb>. */ |
| #define QL_V3LONGHS2 \ |
| { \ |
| QLF3(V_4S , V_8H , V_8H ), \ |
| QLF3(V_2D , V_4S , V_4S ), \ |
| } |
| |
| /* e.g. SADDL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Tb>. */ |
| #define QL_V3LONGBHS \ |
| { \ |
| QLF3(V_8H , V_8B , V_8B ), \ |
| QLF3(V_4S , V_4H , V_4H ), \ |
| QLF3(V_2D , V_2S , V_2S ), \ |
| } |
| |
| /* e.g. SADDL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Tb>. */ |
| #define QL_V3LONGBHS2 \ |
| { \ |
| QLF3(V_8H , V_16B , V_16B ), \ |
| QLF3(V_4S , V_8H , V_8H ), \ |
| QLF3(V_2D , V_4S , V_4S ), \ |
| } |
| |
| /* e.g. SADDW<Q> <Vd>.<Ta>, <Vn>.<Ta>, <Vm>.<Tb>. */ |
| #define QL_V3WIDEBHS \ |
| { \ |
| QLF3(V_8H , V_8H , V_8B ), \ |
| QLF3(V_4S , V_4S , V_4H ), \ |
| QLF3(V_2D , V_2D , V_2S ), \ |
| } |
| |
| /* e.g. SADDW<Q> <Vd>.<Ta>, <Vn>.<Ta>, <Vm>.<Tb>. */ |
| #define QL_V3WIDEBHS2 \ |
| { \ |
| QLF3(V_8H , V_8H , V_16B ), \ |
| QLF3(V_4S , V_4S , V_8H ), \ |
| QLF3(V_2D , V_2D , V_4S ), \ |
| } |
| |
| /* e.g. ADDHN<Q> <Vd>.<Tb>, <Vn>.<Ta>, <Vm>.<Ta>. */ |
| #define QL_V3NARRBHS \ |
| { \ |
| QLF3(V_8B , V_8H , V_8H ), \ |
| QLF3(V_4H , V_4S , V_4S ), \ |
| QLF3(V_2S , V_2D , V_2D ), \ |
| } |
| |
| /* e.g. ADDHN<Q> <Vd>.<Tb>, <Vn>.<Ta>, <Vm>.<Ta>. */ |
| #define QL_V3NARRBHS2 \ |
| { \ |
| QLF3(V_16B , V_8H , V_8H ), \ |
| QLF3(V_8H , V_4S , V_4S ), \ |
| QLF3(V_4S , V_2D , V_2D ), \ |
| } |
| |
| /* e.g. PMULL. */ |
| #define QL_V3LONGB \ |
| { \ |
| QLF3(V_8H , V_8B , V_8B ), \ |
| } |
| |
| /* e.g. PMULL crypto. */ |
| #define QL_V3LONGD \ |
| { \ |
| QLF3(V_1Q , V_1D , V_1D ), \ |
| } |
| |
| /* e.g. PMULL2. */ |
| #define QL_V3LONGB2 \ |
| { \ |
| QLF3(V_8H , V_16B, V_16B), \ |
| } |
| |
| /* e.g. PMULL2 crypto. */ |
| #define QL_V3LONGD2 \ |
| { \ |
| QLF3(V_1Q , V_2D , V_2D ), \ |
| } |
| |
| /* e.g. SHA1C. */ |
| #define QL_SHAUPT \ |
| { \ |
| QLF3(S_Q, S_S, V_4S), \ |
| } |
| |
| /* e.g. SHA256H2. */ |
| #define QL_SHA256UPT \ |
| { \ |
| QLF3(S_Q, S_Q, V_4S), \ |
| } |
| |
| /* e.g. LDXRB <Wt>, [<Xn|SP>{,#0}]. */ |
| #define QL_W1_LDST_EXC \ |
| { \ |
| QLF2(W, NIL), \ |
| } |
| |
| /* e.g. LDXR <Xt>, [<Xn|SP>{,#0}]. */ |
| #define QL_R1NIL \ |
| { \ |
| QLF2(W, NIL), \ |
| QLF2(X, NIL), \ |
| } |
| |
| /* e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */ |
| #define QL_W2_LDST_EXC \ |
| { \ |
| QLF3(W, W, NIL), \ |
| } |
| |
| /* e.g. STXR <Ws>, <Xt>, [<Xn|SP>{,#0}]. */ |
| #define QL_R2_LDST_EXC \ |
| { \ |
| QLF3(W, W, NIL), \ |
| QLF3(W, X, NIL), \ |
| } |
| |
| /* e.g. ST64B <Xs>, <Xt>, [<Xn|SP>]. */ |
| #define QL_X2NIL \ |
| { \ |
| QLF3(X, X, NIL), \ |
| } |
| |
| /* e.g. LDRAA <Xt>, [<Xn|SP>{,#imm}]. */ |
| #define QL_X1NIL \ |
| { \ |
| QLF2(X, NIL), \ |
| } |
| |
| /* e.g. LDXP <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */ |
| #define QL_R2NIL \ |
| { \ |
| QLF3(W, W, NIL), \ |
| QLF3(X, X, NIL), \ |
| } |
| |
| /* e.g. CASP <Xt1>, <Xt1+1>, <Xt2>, <Xt2+1>, [<Xn|SP>{,#0}]. */ |
| #define QL_R4NIL \ |
| { \ |
| QLF5(W, W, W, W, NIL), \ |
| QLF5(X, X, X, X, NIL), \ |
| } |
| |
| /* e.g. RCWCASP <Xt1>, <Xt1+1>, <Xt2>, <Xt2+1>, [<Xn|SP>{,#0}]. */ |
| #define QL_X4NIL \ |
| { \ |
| QLF5(X, X, X, X, NIL), \ |
| } |
| |
| /* e.g. STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */ |
| #define QL_R3_LDST_EXC \ |
| { \ |
| QLF4(W, W, W, NIL), \ |
| QLF4(W, X, X, NIL), \ |
| } |
| |
| /* e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */ |
| #define QL_LDST_FP \ |
| { \ |
| QLF2(S_B, S_B), \ |
| QLF2(S_H, S_H), \ |
| QLF2(S_S, S_S), \ |
| QLF2(S_D, S_D), \ |
| QLF2(S_Q, S_Q), \ |
| } |
| |
| /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */ |
| #define QL_LDST_R \ |
| { \ |
| QLF2(W, S_S), \ |
| QLF2(X, S_D), \ |
| } |
| |
| /* e.g. STRB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */ |
| #define QL_LDST_W8 \ |
| { \ |
| QLF2(W, S_B), \ |
| } |
| |
| /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */ |
| #define QL_LDST_R8 \ |
| { \ |
| QLF2(W, S_B), \ |
| QLF2(X, S_B), \ |
| } |
| |
| /* e.g. STRH <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */ |
| #define QL_LDST_W16 \ |
| { \ |
| QLF2(W, S_H), \ |
| } |
| |
| /* e.g. LDRSW <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */ |
| #define QL_LDST_X32 \ |
| { \ |
| QLF2(X, S_S), \ |
| } |
| |
| /* e.g. LDRSH <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */ |
| #define QL_LDST_R16 \ |
| { \ |
| QLF2(W, S_H), \ |
| QLF2(X, S_H), \ |
| } |
| |
| /* e.g. PRFM <prfop>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */ |
| #define QL_LDST_PRFM \ |
| { \ |
| QLF2(NIL, S_D), \ |
| } |
| |
| /* e.g. LDG <Xt>, [<Xn|SP>{, #<simm>}]. */ |
| #define QL_LDG \ |
| { \ |
| QLF2(X, imm_tag), \ |
| } |
| |
| /* e.g. LDPSW <Xt1>, <Xt2>, [<Xn|SP>{, #<imm>}]. */ |
| #define QL_LDST_PAIR_X32 \ |
| { \ |
| QLF3(X, X, S_S), \ |
| } |
| |
| /* e.g. STGP <Xt1>, <Xt2>, [<Xn|SP>{, #<imm>}]. */ |
| #define QL_STGP \ |
| { \ |
| QLF3(X, X, imm_tag), \ |
| } |
| |
| /* e.g. STP <Wt1>, <Wt2>, [<Xn|SP>, #<imm>]!. */ |
| #define QL_LDST_PAIR_R \ |
| { \ |
| QLF3(W, W, S_S), \ |
| QLF3(X, X, S_D), \ |
| } |
| |
| /* e.g. STNP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */ |
| #define QL_LDST_PAIR_FP \ |
| { \ |
| QLF3(S_S, S_S, S_S), \ |
| QLF3(S_D, S_D, S_D), \ |
| QLF3(S_Q, S_Q, S_Q), \ |
| } |
| |
| /* e.g. LD3 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>]. */ |
| #define QL_SIMD_LDST \ |
| { \ |
| QLF2(V_8B, NIL), \ |
| QLF2(V_16B, NIL), \ |
| QLF2(V_4H, NIL), \ |
| QLF2(V_8H, NIL), \ |
| QLF2(V_2S, NIL), \ |
| QLF2(V_4S, NIL), \ |
| QLF2(V_2D, NIL), \ |
| } |
| |
| /* e.g. LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>]. */ |
| #define QL_SIMD_LDST_ANY \ |
| { \ |
| QLF2(V_8B, NIL), \ |
| QLF2(V_16B, NIL), \ |
| QLF2(V_4H, NIL), \ |
| QLF2(V_8H, NIL), \ |
| QLF2(V_2S, NIL), \ |
| QLF2(V_4S, NIL), \ |
| QLF2(V_1D, NIL), \ |
| QLF2(V_2D, NIL), \ |
| } |
| |
| /* e.g. LD4 {<Vt>.<T>, <Vt2a>.<T>, <Vt3a>.<T>, <Vt4a>.<T>}[<index>], [<Xn|SP>]. */ |
| #define QL_SIMD_LDSTONE \ |
| { \ |
| QLF2(S_B, NIL), \ |
| QLF2(S_H, NIL), \ |
| QLF2(S_S, NIL), \ |
| QLF2(S_D, NIL), \ |
| } |
| |
| /* e.g. ADDV <V><d>, <Vn>.<T>. */ |
| #define QL_XLANES \ |
| { \ |
| QLF2(S_B, V_8B), \ |
| QLF2(S_B, V_16B), \ |
| QLF2(S_H, V_4H), \ |
| QLF2(S_H, V_8H), \ |
| QLF2(S_S, V_4S), \ |
| } |
| |
| /* e.g. FMINV <V><d>, <Vn>.<T>. */ |
| #define QL_XLANES_FP \ |
| { \ |
| QLF2(S_S, V_4S), \ |
| } |
| |
| /* e.g. FMINV <V><d>, <Vn>.<T>. */ |
| #define QL_XLANES_FP_H \ |
| { \ |
| QLF2 (S_H, V_4H), \ |
| QLF2 (S_H, V_8H), \ |
| } |
| |
| /* e.g. SADDLV <V><d>, <Vn>.<T>. */ |
| #define QL_XLANES_L \ |
| { \ |
| QLF2(S_H, V_8B), \ |
| QLF2(S_H, V_16B), \ |
| QLF2(S_S, V_4H), \ |
| QLF2(S_S, V_8H), \ |
| QLF2(S_D, V_4S), \ |
| } |
| |
| /* e.g. MUL <Vd>.<T>, <Vn>.<T>, <Vm>.<Ts>[<index>]. */ |
| #define QL_ELEMENT \ |
| { \ |
| QLF3(V_4H, V_4H, S_H), \ |
| QLF3(V_8H, V_8H, S_H), \ |
| QLF3(V_2S, V_2S, S_S), \ |
| QLF3(V_4S, V_4S, S_S), \ |
| } |
| |
| /* e.g. SMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */ |
| #define QL_ELEMENT_L \ |
| { \ |
| QLF3(V_4S, V_4H, S_H), \ |
| QLF3(V_2D, V_2S, S_S), \ |
| } |
| |
| /* e.g. SMLAL2 <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */ |
| #define QL_ELEMENT_L2 \ |
| { \ |
| QLF3(V_4S, V_8H, S_H), \ |
| QLF3(V_2D, V_4S, S_S), \ |
| } |
| |
| /* e.g. FMLA <V><d>, <V><n>, <Vm>.<Ts>[<index>]. */ |
| #define QL_ELEMENT_FP \ |
| { \ |
| QLF3(V_2S, V_2S, S_S), \ |
| QLF3(V_4S, V_4S, S_S), \ |
| QLF3(V_2D, V_2D, S_D), \ |
| } |
| |
| /* e.g. FMLA <V><d>, <V><n>, <Vm>.<Ts>[<index>]. */ |
| #define QL_ELEMENT_FP_H \ |
| { \ |
| QLF3 (V_4H, V_4H, S_H), \ |
| QLF3 (V_8H, V_8H, S_H), \ |
| } |
| |
| /* e.g. FCMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<Ts>[<index>], #<rotate>. */ |
| #define QL_ELEMENT_ROT \ |
| { \ |
| QLF4 (V_4H, V_4H, S_H, NIL), \ |
| QLF4 (V_8H, V_8H, S_H, NIL), \ |
| QLF4 (V_4S, V_4S, S_S, NIL), \ |
| } |
| |
| /* e.g. MOVI <Vd>.4S, #<imm8> {, LSL #<amount>}. */ |
| #define QL_SIMD_IMM_S0W \ |
| { \ |
| QLF2(V_2S, LSL), \ |
| QLF2(V_4S, LSL), \ |
| } |
| |
| /* e.g. MOVI <Vd>.4S, #<imm8>, MSL #<amount>. */ |
| #define QL_SIMD_IMM_S1W \ |
| { \ |
| QLF2(V_2S, MSL), \ |
| QLF2(V_4S, MSL), \ |
| } |
| |
| /* e.g. MOVI <Vd>.4H, #<imm8> {, LSL #<amount>}. */ |
| #define QL_SIMD_IMM_S0H \ |
| { \ |
| QLF2(V_4H, LSL), \ |
| QLF2(V_8H, LSL), \ |
| } |
| |
| /* e.g. FMOV <Vd>.<T>, #<imm>. */ |
| #define QL_SIMD_IMM_S \ |
| { \ |
| QLF2(V_2S, NIL), \ |
| QLF2(V_4S, NIL), \ |
| } |
| |
| /* e.g. MOVI <Vd>.8B, #<imm8> {, LSL #<amount>}. */ |
| #define QL_SIMD_IMM_B \ |
| { \ |
| QLF2(V_8B, LSL), \ |
| QLF2(V_16B, LSL), \ |
| } |
| /* e.g. MOVI <Dd>, #<imm>. */ |
| #define QL_SIMD_IMM_D \ |
| { \ |
| QLF2(S_D, NIL), \ |
| } |
| |
| /* e.g. FMOV <Vd>.<T>, #<imm>. */ |
| #define QL_SIMD_IMM_H \ |
| { \ |
| QLF2 (V_4H, NIL), \ |
| QLF2 (V_8H, NIL), \ |
| } |
| |
| /* e.g. MOVI <Vd>.2D, #<imm>. */ |
| #define QL_SIMD_IMM_V2D \ |
| { \ |
| QLF2(V_2D, NIL), \ |
| } |
| |
| /* The naming convention for SVE macros is: |
| |
| OP_SVE_<operands>[_<sizes>]* |
| |
| <operands> contains one character per operand, using the following scheme: |
| |
| - U: the operand is unqualified (NIL). |
| |
| - [BHSDQ]: the operand has a S_[BHSDQ] qualifier and the choice of |
| qualifier is the same for all variants. This is used for: |
| |
| - .[BHSDQ] suffixes on an SVE vector or predicate |
| - .[BHSDQ] suffixes on an SME ZA operand |
| - vector registers and scalar FPRs of the form [BHSDQ]<number> |
| |
| - [WX]: the operand has a [WX] qualifier and the choice of qualifier |
| is the same for all variants. |
| |
| - [ZM]: the operand has a /[ZM] suffix and the choice of suffix |
| is the same for all variants. |
| |
| - V: the operand has a S_[BHSD] qualifier and the choice of qualifier |
| is not the same for all variants. This is used for the same kinds |
| of operand as [BHSDQ] above. |
| |
| - R: the operand has a [WX] qualifier and the choice of qualifier is |
| not the same for all variants. |
| |
| - P: the operand has a /[ZM] suffix and the choice of suffix is not |
| the same for all variants. |
| |
| - v: the operand has a V_[16B|8H|4S|2D] qualifier and the choice of |
| qualifier suffix is not the same for all variants. This is used for |
| the same kinds of operands as [BHSD] above. |
| |
| The _<sizes>, if present, give the subset of [BHSD] that are accepted |
| by the V entries in <operands>. */ |
| #define OP_SVE_B \ |
| { \ |
| QLF1(S_B), \ |
| } |
| #define OP_SVE_BB \ |
| { \ |
| QLF2(S_B,S_B), \ |
| } |
| #define OP_SVE_HH \ |
| { \ |
| QLF2(S_H,S_H), \ |
| } |
| /* e.g. luti2 <Zd>.B, { <Zn>.B }, <Zm>[index]. */ |
| /* The third operand is an index (e.g. immediate or bit) |
| without a type qualifier and is checked separately |
| based on operand enum. */ |
| #define OP_SVE_BBU \ |
| { \ |
| QLF3(S_B,S_B,NIL), \ |
| } |
| /* e.g. luti2 <Zd>.H, { <Zn>.H }, <Zm>[index]. */ |
| /* The third operand is an index (e.g. immediate or bit) |
| without a type qualifier and is checked separately |
| based on operand enum. */ |
| #define OP_SVE_HHU \ |
| { \ |
| QLF3(S_H,S_H,NIL), \ |
| } |
| #define OP_SVE_BBB \ |
| { \ |
| QLF3(S_B,S_B,S_B), \ |
| } |
| #define OP_SVE_BBBU \ |
| { \ |
| QLF4(S_B,S_B,S_B,NIL), \ |
| } |
| #define OP_SVE_BMB \ |
| { \ |
| QLF3(S_B,P_M,S_B), \ |
| } |
| #define OP_SVE_BPB \ |
| { \ |
| QLF3(S_B,P_Z,S_B), \ |
| QLF3(S_B,P_M,S_B), \ |
| } |
| #define OP_SVE_BU \ |
| { \ |
| QLF2(S_B,NIL), \ |
| } |
| #define OP_SVE_BUB \ |
| { \ |
| QLF3(S_B,NIL,S_B), \ |
| } |
| #define OP_SVE_BUBB \ |
| { \ |
| QLF4(S_B,NIL,S_B,S_B), \ |
| } |
| #define OP_SVE_BUU \ |
| { \ |
| QLF3(S_B,NIL,NIL), \ |
| } |
| #define OP_SVE_BZ \ |
| { \ |
| QLF2(S_B,P_Z), \ |
| } |
| #define OP_SVE_BZB \ |
| { \ |
| QLF3(S_B,P_Z,S_B), \ |
| } |
| #define OP_SVE_NN_BHSD \ |
| { \ |
| QLF3(NIL,NIL,S_B), \ |
| QLF3(NIL,NIL,S_H), \ |
| QLF3(NIL,NIL,S_S), \ |
| QLF3(NIL,NIL,S_D) \ |
| } |
| #define OP_SVE_BZBB \ |
| { \ |
| QLF4(S_B,P_Z,S_B,S_B), \ |
| } |
| #define OP_SVE_BZU \ |
| { \ |
| QLF3(S_B,P_Z,NIL), \ |
| } |
| #define OP_SVE_D \ |
| { \ |
| QLF1(S_D), \ |
| } |
| #define OP_SVE_DD \ |
| { \ |
| QLF2(S_D,S_D), \ |
| } |
| #define OP_SVE_DDD \ |
| { \ |
| QLF3(S_D,S_D,S_D), \ |
| } |
| #define OP_SVE_DHH \ |
| { \ |
| QLF3(S_D,S_H,S_H), \ |
| } |
| #define OP_SVE_DMMD \ |
| { \ |
| QLF4(S_D,P_M,P_M,S_D), \ |
| } |
| #define OP_SVE_DMMDD \ |
| { \ |
| QLF5(S_D,P_M,P_M,S_D,S_D) \ |
| } |
| #define OP_SVE_DMMHH \ |
| { \ |
| QLF5(S_D,P_M,P_M,S_H,S_H) \ |
| } |
| #define OP_SVE_DDDD \ |
| { \ |
| QLF4(S_D,S_D,S_D,S_D), \ |
| } |
| #define OP_SVE_DMD \ |
| { \ |
| QLF3(S_D,P_M,S_D), \ |
| } |
| #define OP_SVE_DMH \ |
| { \ |
| QLF3(S_D,P_M,S_H), \ |
| } |
| #define OP_SVE_DMS \ |
| { \ |
| QLF3(S_D,P_M,S_S), \ |
| } |
| #define OP_SVE_DU \ |
| { \ |
| QLF2(S_D,NIL), \ |
| } |
| #define OP_SVE_DUD \ |
| { \ |
| QLF3(S_D,NIL,S_D), \ |
| } |
| #define OP_SVE_DUU \ |
| { \ |
| QLF3(S_D,NIL,NIL), \ |
| } |
| #define OP_SVE_DUV_BHS \ |
| { \ |
| QLF3(S_D,NIL,S_B), \ |
| QLF3(S_D,NIL,S_H), \ |
| QLF3(S_D,NIL,S_S), \ |
| } |
| #define OP_SVE_DUV_BHSD \ |
| { \ |
| QLF3(S_D,NIL,S_B), \ |
| QLF3(S_D,NIL,S_H), \ |
| QLF3(S_D,NIL,S_S), \ |
| QLF3(S_D,NIL,S_D), \ |
| } |
| #define OP_SVE_DZD \ |
| { \ |
| QLF3(S_D,P_Z,S_D), \ |
| } |
| #define OP_SVE_DZU \ |
| { \ |
| QLF3(S_D,P_Z,NIL), \ |
| } |
| #define OP_SVE_HB \ |
| { \ |
| QLF2(S_H,S_B), \ |
| } |
| #define OP_SVE_BH \ |
| { \ |
| QLF2(S_B,S_H), \ |
| } |
| #define OP_SVE_BS \ |
| { \ |
| QLF2(S_B,S_S), \ |
| } |
| #define OP_SVE_HHH \ |
| { \ |
| QLF3(S_H,S_H,S_H), \ |
| } |
| #define OP_SVE_HHHU \ |
| { \ |
| QLF4(S_H,S_H,S_H,NIL), \ |
| } |
| #define OP_SVE_HMH \ |
| { \ |
| QLF3(S_H,P_M,S_H), \ |
| } |
| #define OP_SVE_HMD \ |
| { \ |
| QLF3(S_H,P_M,S_D), \ |
| } |
| #define OP_SVE_HMMBB \ |
| { \ |
| QLF5(S_H,P_M,P_M,S_B,S_B) \ |
| } |
| #define OP_SVE_HMS \ |
| { \ |
| QLF3(S_H,P_M,S_S), \ |
| } |
| #define OP_SVE_HS \ |
| { \ |
| QLF2(S_H,S_S), \ |
| } |
| #define OP_SVE_HSU \ |
| { \ |
| QLF3(S_H,S_S,NIL), \ |
| } |
| #define OP_SVE_HU \ |
| { \ |
| QLF2(S_H,NIL), \ |
| } |
| #define OP_SVE_HUU \ |
| { \ |
| QLF3(S_H,NIL,NIL), \ |
| } |
| #define OP_SVE_HZU \ |
| { \ |
| QLF3(S_H,P_Z,NIL), \ |
| } |
| #define OP_SVE_QMQ \ |
| { \ |
| QLF3(S_Q,P_M,S_Q), \ |
| } |
| #define OP_SVE_QQ \ |
| { \ |
| QLF2(S_Q,S_Q), \ |
| } |
| #define OP_SVE_QQQ \ |
| { \ |
| QLF3(S_Q,S_Q,S_Q), \ |
| } |
| #define OP_SVE_QUU \ |
| { \ |
| QLF3(S_Q,NIL,NIL), \ |
| } |
| #define OP_SVE_QZU \ |
| { \ |
| QLF3(S_Q,P_Z,NIL), \ |
| } |
| #define OP_SVE_RR \ |
| { \ |
| QLF2(W,W), \ |
| QLF2(X,X), \ |
| } |
| #define OP_SVE_RURV_BHSD \ |
| { \ |
| QLF4(W,NIL,W,S_B), \ |
| QLF4(W,NIL,W,S_H), \ |
| QLF4(W,NIL,W,S_S), \ |
| QLF4(X,NIL,X,S_D), \ |
| } |
| #define OP_SVE_RUV_BHSD \ |
| { \ |
| QLF3(W,NIL,S_B), \ |
| QLF3(W,NIL,S_H), \ |
| QLF3(W,NIL,S_S), \ |
| QLF3(X,NIL,S_D), \ |
| } |
| #define OP_SVE_SMD \ |
| { \ |
| QLF3(S_S,P_M,S_D), \ |
| } |
| #define OP_SVE_SMMBB \ |
| { \ |
| QLF5(S_S,P_M,P_M,S_B,S_B) \ |
| } |
| #define OP_SVE_SMMHH \ |
| { \ |
| QLF5(S_S,P_M,P_M,S_H,S_H), \ |
| } |
| #define OP_SVE_SMMS \ |
| { \ |
| QLF4(S_S,P_M,P_M,S_S), \ |
| } |
| #define OP_SVE_SMMSS \ |
| { \ |
| QLF5(S_S,P_M,P_M,S_S,S_S) \ |
| } |
| #define OP_SVE_SSS \ |
| { \ |
| QLF3(S_S,S_S,S_S), \ |
| } |
| #define OP_SVE_SSSU \ |
| { \ |
| QLF4(S_S,S_S,S_S,NIL), \ |
| } |
| #define OP_SVE_SMH \ |
| { \ |
| QLF3(S_S,P_M,S_H), \ |
| } |
| #define OP_SVE_SHH \ |
| { \ |
| QLF3(S_S,S_H,S_H), \ |
| } |
| #define OP_SVE_SMS \ |
| { \ |
| QLF3(S_S,P_M,S_S), \ |
| } |
| #define OP_SVE_SS \ |
| { \ |
| QLF2(S_S,S_S), \ |
| } |
| #define OP_SVE_SU \ |
| { \ |
| QLF2(S_S,NIL), \ |
| } |
/* e.g. movt ZT0{[<offs>, MUL VL]}, <Zt> */
/* The second operand doesn't have a qualifier and
   is checked separately during encoding.  */
| #define OP_SVE_SU_Q \ |
| { \ |
| QLF2(S_Q,NIL), \ |
| } |
| #define OP_SVE_SUS \ |
| { \ |
| QLF3(S_S,NIL,S_S), \ |
| } |
/* Per the OP_SVE_<operands> naming convention above, "SMSS" denotes
   (S_S, P_M, S_S, S_S).  The previous body used S_H qualifiers, which
   contradicted the name (compare OP_SVE_SMS and OP_SVE_SMMSS, which
   correctly use S_S throughout).  */
#define OP_SVE_SMSS \
{ \
  QLF4(S_S,P_M,S_S,S_S), \
}
| #define OP_SVE_SUU \ |
| { \ |
| QLF3(S_S,NIL,NIL), \ |
| } |
| #define OP_SVE_SZS \ |
| { \ |
| QLF3(S_S,P_Z,S_S), \ |
| } |
| #define OP_SVE_QZD \ |
| { \ |
| QLF3(S_Q,P_Z,S_D), \ |
| } |
| #define OP_SVE_QUD \ |
| { \ |
| QLF3(S_Q,NIL,S_D), \ |
| } |
| #define OP_SVE_SBB \ |
| { \ |
| QLF3(S_S,S_B,S_B), \ |
| } |
| #define OP_SVE_SBBU \ |
| { \ |
| QLF4(S_S,S_B,S_B,NIL), \ |
| } |
| #define OP_SVE_DSS \ |
| { \ |
| QLF3(S_D,S_S,S_S), \ |
| } |
| #define OP_SVE_DHHU \ |
| { \ |
| QLF4(S_D,S_H,S_H,NIL), \ |
| } |
| #define OP_SVE_SZU \ |
| { \ |
| QLF3(S_S,P_Z,NIL), \ |
| } |
| #define OP_SVE_UB \ |
| { \ |
| QLF2(NIL,S_B), \ |
| } |
| #define OP_SVE_UD \ |
| { \ |
| QLF2(NIL,S_D), \ |
| } |
| #define OP_SVE_UH \ |
| { \ |
| QLF2(NIL,S_H), \ |
| } |
| #define OP_SVE_US \ |
| { \ |
| QLF2(NIL,S_S), \ |
| } |
| #define OP_SVE_UUD \ |
| { \ |
| QLF3(NIL,NIL,S_D), \ |
| } |
| #define OP_SVE_UUS \ |
| { \ |
| QLF3(NIL,NIL,S_S), \ |
| } |
| #define OP_SVE_UX \ |
| { \ |
| QLF2(NIL,X), \ |
| } |
| #define OP_SVE_UXU \ |
| { \ |
| QLF3(NIL,X,NIL), \ |
| } |
| #define OP_SVE_VMR_BHSD \ |
| { \ |
| QLF3(S_B,P_M,W), \ |
| QLF3(S_H,P_M,W), \ |
| QLF3(S_S,P_M,W), \ |
| QLF3(S_D,P_M,X), \ |
| } |
| #define OP_SVE_VMU_HSD \ |
| { \ |
| QLF3(S_H,P_M,NIL), \ |
| QLF3(S_S,P_M,NIL), \ |
| QLF3(S_D,P_M,NIL), \ |
| } |
| #define OP_SVE_VMVD_BHS \ |
| { \ |
| QLF4(S_B,P_M,S_B,S_D), \ |
| QLF4(S_H,P_M,S_H,S_D), \ |
| QLF4(S_S,P_M,S_S,S_D), \ |
| } |
| #define OP_SVE_VMVU_BHSD \ |
| { \ |
| QLF4(S_B,P_M,S_B,NIL), \ |
| QLF4(S_H,P_M,S_H,NIL), \ |
| QLF4(S_S,P_M,S_S,NIL), \ |
| QLF4(S_D,P_M,S_D,NIL), \ |
| } |
| #define OP_SVE_VMVU_HSD \ |
| { \ |
| QLF4(S_H,P_M,S_H,NIL), \ |
| QLF4(S_S,P_M,S_S,NIL), \ |
| QLF4(S_D,P_M,S_D,NIL), \ |
| } |
| #define OP_SVE_VMVV_BHSD \ |
| { \ |
| QLF4(S_B,P_M,S_B,S_B), \ |
| QLF4(S_H,P_M,S_H,S_H), \ |
| QLF4(S_S,P_M,S_S,S_S), \ |
| QLF4(S_D,P_M,S_D,S_D), \ |
| } |
| #define OP_SVE_VMVV_HSD \ |
| { \ |
| QLF4(S_H,P_M,S_H,S_H), \ |
| QLF4(S_S,P_M,S_S,S_S), \ |
| QLF4(S_D,P_M,S_D,S_D), \ |
| } |
| #define OP_SVE_VMVV_SD \ |
| { \ |
| QLF4(S_S,P_M,S_S,S_S), \ |
| QLF4(S_D,P_M,S_D,S_D), \ |
| } |
| #define OP_SVE_VMVV_D \ |
| { \ |
| QLF4(S_D,P_M,S_D,S_D), \ |
| } |
| #define OP_SVE_VMVVU_HSD \ |
| { \ |
| QLF5(S_H,P_M,S_H,S_H,NIL), \ |
| QLF5(S_S,P_M,S_S,S_S,NIL), \ |
| QLF5(S_D,P_M,S_D,S_D,NIL), \ |
| } |
| #define OP_SVE_VMV_BHSD \ |
| { \ |
| QLF3(S_B,P_M,S_B), \ |
| QLF3(S_H,P_M,S_H), \ |
| QLF3(S_S,P_M,S_S), \ |
| QLF3(S_D,P_M,S_D), \ |
| } |
| #define OP_SVE_VMV_BHSDQ \ |
| { \ |
| QLF3(S_B,P_M,S_B), \ |
| QLF3(S_H,P_M,S_H), \ |
| QLF3(S_S,P_M,S_S), \ |
| QLF3(S_D,P_M,S_D), \ |
| QLF3(S_Q,P_M,S_Q) \ |
| } |
| #define OP_SVE_VMV_HSD \ |
| { \ |
| QLF3(S_H,P_M,S_H), \ |
| QLF3(S_S,P_M,S_S), \ |
| QLF3(S_D,P_M,S_D), \ |
| } |
| #define OP_SVE_VMV_HSD_BHS \ |
| { \ |
| QLF3(S_H,P_M,S_B), \ |
| QLF3(S_S,P_M,S_H), \ |
| QLF3(S_D,P_M,S_S), \ |
| } |
| #define OP_SVE_VVU_BH_SD \ |
| { \ |
| QLF3(S_B,S_S,NIL), \ |
| QLF3(S_H,S_D,NIL), \ |
| } |
| #define OP_SVE_VVU_HSD_BHS \ |
| { \ |
| QLF3(S_H,S_B,NIL), \ |
| QLF3(S_S,S_H,NIL), \ |
| QLF3(S_D,S_S,NIL), \ |
| } |
| #define OP_SVE_vUS_BHSD_BHSD \ |
| { \ |
| QLF3(V_16B,NIL,S_B), \ |
| QLF3(V_8H,NIL,S_H), \ |
| QLF3(V_4S,NIL,S_S), \ |
| QLF3(V_2D,NIL,S_D), \ |
| } |
| #define OP_SVE_vUS_HSD_HSD \ |
| { \ |
| QLF3(V_8H,NIL,S_H), \ |
| QLF3(V_4S,NIL,S_S), \ |
| QLF3(V_2D,NIL,S_D), \ |
| } |
| #define OP_SVE_VMV_SD \ |
| { \ |
| QLF3(S_S,P_M,S_S), \ |
| QLF3(S_D,P_M,S_D), \ |
| } |
| #define OP_SVE_VM_HSD \ |
| { \ |
| QLF2(S_H,P_M), \ |
| QLF2(S_S,P_M), \ |
| QLF2(S_D,P_M), \ |
| } |
| #define OP_SVE_VPU_BHSD \ |
| { \ |
| QLF3(S_B,P_Z,NIL), \ |
| QLF3(S_B,P_M,NIL), \ |
| QLF3(S_H,P_Z,NIL), \ |
| QLF3(S_H,P_M,NIL), \ |
| QLF3(S_S,P_Z,NIL), \ |
| QLF3(S_S,P_M,NIL), \ |
| QLF3(S_D,P_Z,NIL), \ |
| QLF3(S_D,P_M,NIL), \ |
| } |
| #define OP_SVE_VPV_BHSD \ |
| { \ |
| QLF3(S_B,P_Z,S_B), \ |
| QLF3(S_B,P_M,S_B), \ |
| QLF3(S_H,P_Z,S_H), \ |
| QLF3(S_H,P_M,S_H), \ |
| QLF3(S_S,P_Z,S_S), \ |
| QLF3(S_S,P_M,S_S), \ |
| QLF3(S_D,P_Z,S_D), \ |
| QLF3(S_D,P_M,S_D), \ |
| } |
| #define OP_SVE_VRR_BHSD \ |
| { \ |
| QLF3(S_B,W,W), \ |
| QLF3(S_H,W,W), \ |
| QLF3(S_S,W,W), \ |
| QLF3(S_D,X,X), \ |
| } |
| #define OP_SVE_VRU_BHSD \ |
| { \ |
| QLF3(S_B,W,NIL), \ |
| QLF3(S_H,W,NIL), \ |
| QLF3(S_S,W,NIL), \ |
| QLF3(S_D,X,NIL), \ |
| } |
| #define OP_SVE_VR_BHSD \ |
| { \ |
| QLF2(S_B,W), \ |
| QLF2(S_H,W), \ |
| QLF2(S_S,W), \ |
| QLF2(S_D,X), \ |
| } |
| #define OP_SVE_VUR_BHSD \ |
| { \ |
| QLF3(S_B,NIL,W), \ |
| QLF3(S_H,NIL,W), \ |
| QLF3(S_S,NIL,W), \ |
| QLF3(S_D,NIL,X), \ |
| } |
/* e.g. luti4 { <Zd1>.B-<Zd4>.B }, ZT0, { <Zn1>-<Zn2> } */
/* The second and third operands don't have qualifiers and
   are checked separately during encoding.  */
| #define OP_SVE_VUU_B \ |
| { \ |
| QLF3(S_B,NIL,NIL), \ |
| } |
| #define OP_SVE_VUU_BH \ |
| { \ |
| QLF3(S_B,NIL,NIL), \ |
| QLF3(S_H,NIL,NIL), \ |
| } |
| #define OP_SVE_VUU_BHS \ |
| { \ |
| QLF3(S_B,NIL,NIL), \ |
| QLF3(S_H,NIL,NIL), \ |
| QLF3(S_S,NIL,NIL), \ |
| } |
| #define OP_SVE_VUU_BHSD \ |
| { \ |
| QLF3(S_B,NIL,NIL), \ |
| QLF3(S_H,NIL,NIL), \ |
| QLF3(S_S,NIL,NIL), \ |
| QLF3(S_D,NIL,NIL), \ |
| } |
| #define OP_SVE_VUVV_BHSD \ |
| { \ |
| QLF4(S_B,NIL,S_B,S_B), \ |
| QLF4(S_H,NIL,S_H,S_H), \ |
| QLF4(S_S,NIL,S_S,S_S), \ |
| QLF4(S_D,NIL,S_D,S_D), \ |
| } |
| #define OP_SVE_VUU_HS \ |
| { \ |
| QLF3(S_H,NIL,NIL), \ |
| QLF3(S_S,NIL,NIL), \ |
| } |
| #define OP_SVE_VUVV_HSD \ |
| { \ |
| QLF4(S_H,NIL,S_H,S_H), \ |
| QLF4(S_S,NIL,S_S,S_S), \ |
| QLF4(S_D,NIL,S_D,S_D), \ |
| } |
| #define OP_SVE_VUV_BHSD \ |
| { \ |
| QLF3(S_B,NIL,S_B), \ |
| QLF3(S_H,NIL,S_H), \ |
| QLF3(S_S,NIL,S_S), \ |
| QLF3(S_D,NIL,S_D), \ |
| } |
| #define OP_SVE_VUV_HSD \ |
| { \ |
| QLF3(S_H,NIL,S_H), \ |
| QLF3(S_S,NIL,S_S), \ |
| QLF3(S_D,NIL,S_D), \ |
| } |
| #define OP_SVE_VUV_SD \ |
| { \ |
| QLF3(S_S,NIL,S_S), \ |
| QLF3(S_D,NIL,S_D), \ |
| } |
| #define OP_SVE_VU_BHSD \ |
| { \ |
| QLF2(S_B,NIL), \ |
| QLF2(S_H,NIL), \ |
| QLF2(S_S,NIL), \ |
| QLF2(S_D,NIL), \ |
| } |
/* V (vector with varying element size) followed by an unqualified operand,
   for the H/S/D element sizes.  Note: this macro was previously defined
   twice with identical bodies (legal as an identical redefinition, but
   clearly accidental); the duplicate has been removed.  */
#define OP_SVE_VU_HSD \
{ \
  QLF2(S_H,NIL), \
  QLF2(S_S,NIL), \
  QLF2(S_D,NIL), \
}
| #define OP_SVE_Vv_HSD \ |
| { \ |
| QLF2(S_H,S_H), \ |
| QLF2(S_S,S_S), \ |
| QLF2(S_D,S_D), \ |
| QLF2(S_H,NIL), \ |
| QLF2(S_S,NIL), \ |
| QLF2(S_D,NIL), \ |
| } |
| #define OP_SVE_VVD_BHS \ |
| { \ |
| QLF3(S_B,S_B,S_D), \ |
| QLF3(S_H,S_H,S_D), \ |
| QLF3(S_S,S_S,S_D), \ |
| } |
| #define OP_SVE_VVU_BHSD \ |
| { \ |
| QLF3(S_B,S_B,NIL), \ |
| QLF3(S_H,S_H,NIL), \ |
| QLF3(S_S,S_S,NIL), \ |
| QLF3(S_D,S_D,NIL), \ |
| } |
| #define OP_SVE_VVVU_H \ |
| { \ |
| QLF4(S_H,S_H,S_H,NIL), \ |
| } |
| #define OP_SVE_VVVU_S \ |
| { \ |
| QLF4(S_S,S_S,S_S,NIL), \ |
| } |
| #define OP_SVE_VVVU_SD_BH \ |
| { \ |
| QLF4(S_S,S_B,S_B,NIL), \ |
| QLF4(S_D,S_H,S_H,NIL), \ |
| } |
| #define OP_SVE_VVVU_HSD \ |
| { \ |
| QLF4(S_H,S_H,S_H,NIL), \ |
| QLF4(S_S,S_S,S_S,NIL), \ |
| QLF4(S_D,S_D,S_D,NIL), \ |
| } |
| #define OP_SVE_VVVU_BHSD \ |
| { \ |
| QLF4(S_B,S_B,S_B,NIL), \ |
| QLF4(S_H,S_H,S_H,NIL), \ |
| QLF4(S_S,S_S,S_S,NIL), \ |
| QLF4(S_D,S_D,S_D,NIL), \ |
| } |
| #define OP_SVE_VVV_BHSD \ |
| { \ |
| QLF3(S_B,S_B,S_B), \ |
| QLF3(S_H,S_H,S_H), \ |
| QLF3(S_S,S_S,S_S), \ |
| QLF3(S_D,S_D,S_D), \ |
| } |
| #define OP_SVE_VVV_D \ |
| { \ |
| QLF3(S_D,S_D,S_D), \ |
| } |
| #define OP_SVE_VVV_D_H \ |
| { \ |
| QLF3(S_D,S_H,S_H), \ |
| } |
| #define OP_SVE_VVV_H \ |
| { \ |
| QLF3(S_H,S_H,S_H), \ |
| } |
| #define OP_SVE_VVV_HSD \ |
| { \ |
| QLF3(S_H,S_H,S_H), \ |
| QLF3(S_S,S_S,S_S), \ |
| QLF3(S_D,S_D,S_D), \ |
| } |
| #define OP_SVE_VVV_S \ |
| { \ |
| QLF3(S_S,S_S,S_S), \ |
| } |
| #define OP_SVE_VVV_HD_BS \ |
| { \ |
| QLF3(S_H,S_B,S_B), \ |
| QLF3(S_D,S_S,S_S), \ |
| } |
| #define OP_SVE_VVV_S_B \ |
| { \ |
| QLF3(S_S,S_B,S_B), \ |
| } |
| #define OP_SVE_VVV_H_B \ |
| { \ |
| QLF3(S_H,S_B,S_B), \ |
| } |
| #define OP_SVE_VVV_Q_D \ |
| { \ |
| QLF3(S_Q,S_D,S_D), \ |
| } |
| #define OP_SVE_VVV_HSD_BHS \ |
| { \ |
| QLF3(S_H,S_B,S_B), \ |
| QLF3(S_S,S_H,S_H), \ |
| QLF3(S_D,S_S,S_S), \ |
| } |
| #define OP_SVE_VVV_HSD_BHS2 \ |
| { \ |
| QLF3(S_H,S_H,S_B), \ |
| QLF3(S_S,S_S,S_H), \ |
| QLF3(S_D,S_D,S_S), \ |
| } |
| #define OP_SVE_VVV_BHS_HSD \ |
| { \ |
| QLF3(S_B,S_H,S_H), \ |
| QLF3(S_H,S_S,S_S), \ |
| QLF3(S_S,S_D,S_D), \ |
| } |
| #define OP_SVE_VV_D \ |
| { \ |
| QLF2(S_D, S_D) \ |
| } |
| #define OP_SVE_VV_BHS_HSD \ |
| { \ |
| QLF2(S_B,S_H), \ |
| QLF2(S_H,S_S), \ |
| QLF2(S_S,S_D), \ |
| } |
| #define OP_SVE_VVV_SD_BH \ |
| { \ |
| QLF3(S_S,S_B,S_B), \ |
| QLF3(S_D,S_H,S_H), \ |
| } |
| #define OP_SVE_VVV_SD \ |
| { \ |
| QLF3(S_S,S_S,S_S), \ |
| QLF3(S_D,S_D,S_D), \ |
| } |
| #define OP_SVE_VV_BHSD \ |
| { \ |
| QLF2(S_B,S_B), \ |
| QLF2(S_H,S_H), \ |
| QLF2(S_S,S_S), \ |
| QLF2(S_D,S_D), \ |
| } |
| #define OP_SVE_VV_BHSDQ \ |
| { \ |
| QLF2(S_B,S_B), \ |
| QLF2(S_H,S_H), \ |
| QLF2(S_S,S_S), \ |
| QLF2(S_D,S_D), \ |
| QLF2(S_Q,S_Q), \ |
| } |
| #define OP_SVE_VV_BH_SD \ |
| { \ |
| QLF2(S_B,S_S), \ |
| QLF2(S_H,S_D), \ |
| } |
| #define OP_SVE_VV_HSD \ |
| { \ |
| QLF2(S_H,S_H), \ |
| QLF2(S_S,S_S), \ |
| QLF2(S_D,S_D), \ |
| } |
| #define OP_SVE_VVU_BHS_HSD \ |
| { \ |
| QLF3(S_B,S_H,NIL), \ |
| QLF3(S_H,S_S,NIL), \ |
| QLF3(S_S,S_D,NIL), \ |
| } |
| #define OP_SVE_VV_HSD_BHS \ |
| { \ |
| QLF2(S_H,S_B), \ |
| QLF2(S_S,S_H), \ |
| QLF2(S_D,S_S), \ |
| } |
| #define OP_SVE_VV_SD \ |
| { \ |
| QLF2(S_S,S_S), \ |
| QLF2(S_D,S_D), \ |
| } |
| #define OP_SVE_VWW_BHSD \ |
| { \ |
| QLF3(S_B,W,W), \ |
| QLF3(S_H,W,W), \ |
| QLF3(S_S,W,W), \ |
| QLF3(S_D,W,W), \ |
| } |
| #define OP_SVE_VXX_BHSD \ |
| { \ |
| QLF3(S_B,X,X), \ |
| QLF3(S_H,X,X), \ |
| QLF3(S_S,X,X), \ |
| QLF3(S_D,X,X), \ |
| } |
| #define OP_SVE_VXXU_BHSD \ |
| { \ |
| QLF4(S_B,X,X,NIL), \ |
| QLF4(S_H,X,X,NIL), \ |
| QLF4(S_S,X,X,NIL), \ |
| QLF4(S_D,X,X,NIL), \ |
| } |
| #define OP_SVE_VZVD_BHS \ |
| { \ |
| QLF4(S_B,P_Z,S_B,S_D), \ |
| QLF4(S_H,P_Z,S_H,S_D), \ |
| QLF4(S_S,P_Z,S_S,S_D), \ |
| } |
| #define OP_SVE_VZVU_BHSD \ |
| { \ |
| QLF4(S_B,P_Z,S_B,NIL), \ |
| QLF4(S_H,P_Z,S_H,NIL), \ |
| QLF4(S_S,P_Z,S_S,NIL), \ |
| QLF4(S_D,P_Z,S_D,NIL), \ |
| } |
| #define OP_SVE_VZVV_BHSD \ |
| { \ |
| QLF4(S_B,P_Z,S_B,S_B), \ |
| QLF4(S_H,P_Z,S_H,S_H), \ |
| QLF4(S_S,P_Z,S_S,S_S), \ |
| QLF4(S_D,P_Z,S_D,S_D), \ |
| } |
| #define OP_SVE_VZVV_HSD \ |
| { \ |
| QLF4(S_H,P_Z,S_H,S_H), \ |
| QLF4(S_S,P_Z,S_S,S_S), \ |
| QLF4(S_D,P_Z,S_D,S_D), \ |
| } |
| #define OP_SVE_VZVV_SD \ |
| { \ |
| QLF4(S_S,P_Z,S_S,S_S), \ |
| QLF4(S_D,P_Z,S_D,S_D), \ |
| } |
| #define OP_SVE_VZVV_BH \ |
| { \ |
| QLF4(S_B,P_Z,S_B,S_B), \ |
| QLF4(S_H,P_Z,S_H,S_H), \ |
| } |
| #define OP_SVE_VZV_SD \ |
| { \ |
| QLF3(S_S,P_Z,S_S), \ |
| QLF3(S_D,P_Z,S_D), \ |
| } |
| #define OP_SVE_VZV_HSD \ |
| { \ |
| QLF3(S_H,P_Z,S_H), \ |
| QLF3(S_S,P_Z,S_S), \ |
| QLF3(S_D,P_Z,S_D), \ |
| } |
| #define OP_SVE_V_BHSD \ |
| { \ |
| QLF1(S_B), \ |
| QLF1(S_H), \ |
| QLF1(S_S), \ |
| QLF1(S_D), \ |
| } |
| #define OP_SVE_V_HSD \ |
| { \ |
| QLF1(S_H), \ |
| QLF1(S_S), \ |
| QLF1(S_D), \ |
| } |
| #define OP_SVE_WU \ |
| { \ |
| QLF2(W,NIL), \ |
| } |
| #define OP_SVE_WV_BHSD \ |
| { \ |
| QLF2(W,S_B), \ |
| QLF2(W,S_H), \ |
| QLF2(W,S_S), \ |
| QLF2(W,S_D), \ |
| } |
| #define OP_SVE_XU \ |
| { \ |
| QLF2(X,NIL), \ |
| } |
| #define OP_SVE_XUV_BHSD \ |
| { \ |
| QLF3(X,NIL,S_B), \ |
| QLF3(X,NIL,S_H), \ |
| QLF3(X,NIL,S_S), \ |
| QLF3(X,NIL,S_D), \ |
| } |
| #define OP_SVE_XVW_BHSD \ |
| { \ |
| QLF3(X,S_B,W), \ |
| QLF3(X,S_H,W), \ |
| QLF3(X,S_S,W), \ |
| QLF3(X,S_D,W), \ |
| } |
| #define OP_SVE_XV_BHSD \ |
| { \ |
| QLF2(X,S_B), \ |
| QLF2(X,S_H), \ |
| QLF2(X,S_S), \ |
| QLF2(X,S_D), \ |
| } |
| #define OP_SVE_XWU \ |
| { \ |
| QLF3(X,W,NIL), \ |
| } |
| #define OP_SVE_XXU \ |
| { \ |
| QLF3(X,X,NIL), \ |
| } |
| |
| #define QL_V3_BSS_LOWER \ |
| { \ |
| QLF3(V_8B, V_4S, V_4S), \ |
| } |
| |
| #define QL_V3_BSS_FULL \ |
| { \ |
| QLF3(V_16B, V_4S, V_4S), \ |
| } |
| |
| #define QL_V3_BHH \ |
| { \ |
| QLF3(V_8B, V_4H, V_4H), \ |
| QLF3(V_16B, V_8H, V_8H), \ |
| } |
| |
| /* e.g. BF1CVTL <Vd>.8H, <Vn>.8B. */ |
| #define QL_V2_HB_LOWER \ |
| { \ |
| QLF2(V_8H, V_8B), \ |
| } |
| |
| /* e.g. BF1CVTL2 <Vd>.8H, <Vn>.16B. */ |
| #define QL_V2_HB_FULL \ |
| { \ |
| QLF2(V_8H, V_16B), \ |
| } |
| |
| /* e.g. UDOT <Vd>.2S, <Vn>.8B, <Vm>.8B. */ |
| #define QL_V3DOT \ |
| { \ |
| QLF3(V_2S, V_8B, V_8B), \ |
| QLF3(V_4S, V_16B, V_16B),\ |
| } |
| |
| /* e.g. UDOT <Vd>.2S, <Vn>.8B, <Vm>.4B[<index>]. */ |
| #define QL_V2DOT \ |
| { \ |
| QLF3(V_2S, V_8B, S_4B),\ |
| QLF3(V_4S, V_16B, S_4B),\ |
| } |
| |
| /* e.g. FDOT <Vd>.4H, <Vn>.8B, <Vm>.8B. */ |
| #define QL_V3DOTH \ |
| { \ |
| QLF3(V_4H, V_8B, V_8B), \ |
| QLF3(V_8H, V_16B, V_16B),\ |
| } |
| |
| /* e.g. FDOT <Vd>.4H, <Vn>.8B, <Vm>.2B[<index>]. */ |
| #define QL_V2DOTH \ |
| { \ |
| QLF3(V_4H, V_8B, S_2B),\ |
| QLF3(V_8H, V_16B, S_2B),\ |
| } |
| |
| /* e.g. SHA512H <Qd>, <Qn>, <Vm>.2D. */ |
| #define QL_SHA512UPT \ |
| { \ |
| QLF3(S_Q, S_Q, V_2D), \ |
| } |
| |
/* e.g. SHA512SU0 <Vd>.2D, <Vn>.2D.  */
| #define QL_V2SAME2D \ |
| { \ |
| QLF2(V_2D, V_2D), \ |
| } |
| |
/* e.g. SHA512SU1 <Vd>.2D, <Vn>.2D, <Vm>.2D.  */
| #define QL_V3SAME2D \ |
| { \ |
| QLF3(V_2D, V_2D, V_2D), \ |
| } |
| |
| /* e.g. EOR3 <Vd>.16B, <Vn>.16B, <Vm>.16B, <Va>.16B. */ |
| #define QL_V4SAME16B \ |
| { \ |
| QLF4(V_16B, V_16B, V_16B, V_16B), \ |
| } |
| |
| /* e.g. SM3SS1 <Vd>.4S, <Vn>.4S, <Vm>.4S, <Va>.4S. */ |
| #define QL_V4SAME4S \ |
| { \ |
| QLF4(V_4S, V_4S, V_4S, V_4S), \ |
| } |
| |
| /* e.g. XAR <Vd>.2D, <Vn>.2D, <Vm>.2D, #<imm6>. */ |
| #define QL_XAR \ |
| { \ |
| QLF4(V_2D, V_2D, V_2D, imm_0_63), \ |
| } |
| |
| /* e.g. SM3TT1A <Vd>.4S, <Vn>.4S, <Vm>.S[<imm2>]. */ |
| #define QL_SM3TT \ |
| { \ |
| QLF3(V_4S, V_4S, S_S),\ |
| } |
| |
| /* e.g. FMLAL <Vd>.2S, <Vn>.2H, <Vm>.2H. */ |
| #define QL_V3FML2S \ |
| { \ |
| QLF3(V_2S, V_2H, V_2H),\ |
| } |
| |
| /* e.g. FMLAL <Vd>.4S, <Vn>.4H, <Vm>.4H. */ |
| #define QL_V3FML4S \ |
| { \ |
| QLF3(V_4S, V_4H, V_4H),\ |
| } |
| |
| /* e.g. FMLAL <Vd>.2S, <Vn>.2H, <Vm>.H[<index>]. */ |
| #define QL_V2FML2S \ |
| { \ |
| QLF3(V_2S, V_2H, S_H),\ |
| } |
| |
| /* e.g. FMLAL <Vd>.4S, <Vn>.4H, <Vm>.H[<index>]. */ |
| #define QL_V2FML4S \ |
| { \ |
| QLF3(V_4S, V_4H, S_H),\ |
| } |
| |
| /* e.g. FMLALB <Vd>.8H, <Vn>.16B, <Vm>.16B. */ |
| #define QL_V3FML8H \ |
| { \ |
| QLF3(V_8H, V_16B, V_16B),\ |
| } |
| |
| /* e.g. FMLALB <Vd>.8H, <Vn>.16B, <Vm>.B. */ |
| #define QL_V2FML8H \ |
| { \ |
| QLF3(V_8H, V_16B, S_B),\ |
| } |
| |
| /* e.g. FMLALLBB <Vd>.4S, <Vn>.16B, <Vm>.16B. */ |
| #define QL_V3FMLL4S \ |
| { \ |
| QLF3(V_4S, V_16B, V_16B),\ |
| } |
| |
| /* e.g. FMLALLBB <Vd>.4S, <Vn>.16B, <Vm>.B. */ |
| #define QL_V2FMLL4S \ |
| { \ |
| QLF3(V_4S, V_16B, S_B),\ |
| } |
| |
| /* e.g. RMIF <Xn>, #<shift>, #<mask>. */ |
| #define QL_RMIF \ |
| { \ |
| QLF3(X, imm_0_63, imm_0_15),\ |
| } |
| |
| /* e.g. SETF8 <Wn>. */ |
| #define QL_SETF \ |
| { \ |
| QLF1(W), \ |
| } |
| |
| /* e.g. STLURB <Wt>, [<Xn|SP>{,#<simm>}]. */ |
| #define QL_STLW \ |
| { \ |
| QLF2(W, NIL), \ |
| } |
| |
| /* e.g. STLURB <Xt>, [<Xn|SP>{,#<simm>}]. */ |
| #define QL_STLX \ |
| { \ |
| QLF2(X, NIL), \ |
| } |
| |
| /* e.g. BFDOT <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Tb> */ |
| #define QL_BFDOT64 \ |
| { \ |
| QLF3(V_2S, V_4H, V_4H),\ |
| QLF3(V_4S, V_8H, V_8H),\ |
| } |
| |
| /* e.g. BFDOT <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.2H[<index>] */ |
| #define QL_BFDOT64I \ |
| { \ |
| QLF3(V_2S, V_4H, S_2H),\ |
| QLF3(V_4S, V_8H, S_2H),\ |
| } |
| |
| /* e.g. SMMLA <Vd>.4S, <Vn>.16B, <Vm>.16B */ |
| #define QL_MMLA64 \ |
| { \ |
| QLF3(V_4S, V_16B, V_16B),\ |
| } |
| |
/* e.g. BFMMLA <Vd>.4S, <Vn>.8H, <Vm>.8H */
| #define QL_BFMMLA \ |
| { \ |
| QLF3(V_4S, V_8H, V_8H),\ |
| } |
| |
| /* e.g. BFCVT <Hd>, <Sn> */ |
| #define QL_BFCVT64 \ |
| { \ |
| QLF2(S_H,S_S), \ |
| } |
| |
/* e.g. BFCVTN <Vd>.4H, <Vn>.4S */
| #define QL_BFCVTN64 \ |
| { \ |
| QLF2(V_4H,V_4S), \ |
| } |
| |
/* e.g. BFCVTN2 <Vd>.8H, <Vn>.4S */
| #define QL_BFCVTN2_64 \ |
| { \ |
| QLF2(V_8H,V_4S), \ |
| } |
| |
/* e.g. BFMLAL2 <Vd>.4S, <Vn>.8H, <Vm>.H[<index>] */
| #define QL_V3BFML4S \ |
| { \ |
| QLF3(V_4S, V_8H, S_H), \ |
| } |
| |
| /* Opcode table. |
| |
| Any SVE or SVE2 feature must include AARCH64_FEATURE_{SVE|SVE2} in its |
| bitmask, even if this is implied by other selected feature bits. This |
| allows verify_constraints to identify SVE instructions when selecting an |
| error message for MOVPRFX constraint violations. */ |
| |
| static const aarch64_feature_set aarch64_feature_v8 = |
| AARCH64_FEATURE (V8); |
| static const aarch64_feature_set aarch64_feature_fp = |
| AARCH64_FEATURE (FP); |
| static const aarch64_feature_set aarch64_feature_simd = |
| AARCH64_FEATURE (SIMD); |
| static const aarch64_feature_set aarch64_feature_crc = |
| AARCH64_FEATURE (CRC); |
| static const aarch64_feature_set aarch64_feature_lse = |
| AARCH64_FEATURE (LSE); |
| static const aarch64_feature_set aarch64_feature_lse128 = |
| AARCH64_FEATURES (2, LSE, LSE128); |
| static const aarch64_feature_set aarch64_feature_lor = |
| AARCH64_FEATURE (LOR); |
| static const aarch64_feature_set aarch64_feature_rdma = |
| AARCH64_FEATURE (RDMA); |
| static const aarch64_feature_set aarch64_feature_fp_f16 = |
| AARCH64_FEATURES (2, F16, FP); |
| static const aarch64_feature_set aarch64_feature_simd_f16 = |
| AARCH64_FEATURES (2, F16, SIMD); |
| static const aarch64_feature_set aarch64_feature_sve = |
| AARCH64_FEATURE (SVE); |
| static const aarch64_feature_set aarch64_feature_pauth = |
| AARCH64_FEATURE (PAUTH); |
| static const aarch64_feature_set aarch64_feature_compnum = |
| AARCH64_FEATURE (COMPNUM); |
| static const aarch64_feature_set aarch64_feature_jscvt = |
| AARCH64_FEATURE (JSCVT); |
| static const aarch64_feature_set aarch64_feature_rcpc = |
| AARCH64_FEATURE (RCPC); |
| static const aarch64_feature_set aarch64_feature_rcpc2 = |
| AARCH64_FEATURE (RCPC2); |
| static const aarch64_feature_set aarch64_feature_dotprod = |
| AARCH64_FEATURE (DOTPROD); |
| static const aarch64_feature_set aarch64_feature_sha2 = |
| AARCH64_FEATURES (2, V8, SHA2); |
| static const aarch64_feature_set aarch64_feature_aes = |
| AARCH64_FEATURES (2, V8, AES); |
| static const aarch64_feature_set aarch64_feature_sm4 = |
| AARCH64_FEATURES (3, SM4, SIMD, FP); |
| static const aarch64_feature_set aarch64_feature_sha3 = |
| AARCH64_FEATURES (4, SHA2, SHA3, SIMD, FP); |
| static const aarch64_feature_set aarch64_feature_fp_16_v8_2a = |
| AARCH64_FEATURES (3, F16_FML, F16, FP); |
| static const aarch64_feature_set aarch64_feature_flagmanip = |
| AARCH64_FEATURE (FLAGMANIP); |
| static const aarch64_feature_set aarch64_feature_frintts = |
| AARCH64_FEATURE (FRINTTS); |
| static const aarch64_feature_set aarch64_feature_sb = |
| AARCH64_FEATURE (SB); |
| static const aarch64_feature_set aarch64_feature_predres = |
| AARCH64_FEATURE (PREDRES); |
| static const aarch64_feature_set aarch64_feature_predres2 = |
| AARCH64_FEATURES (2, PREDRES, PREDRES2); |
| static const aarch64_feature_set aarch64_feature_memtag = |
| AARCH64_FEATURE (MEMTAG); |
| static const aarch64_feature_set aarch64_feature_bfloat16 = |
| AARCH64_FEATURE (BFLOAT16); |
| static const aarch64_feature_set aarch64_feature_bfloat16_sve = |
| AARCH64_FEATURES (2, BFLOAT16, SVE); |
| static const aarch64_feature_set aarch64_feature_tme = |
| AARCH64_FEATURE (TME); |
| static const aarch64_feature_set aarch64_feature_sve2 = |
| AARCH64_FEATURE (SVE2); |
| static const aarch64_feature_set aarch64_feature_sve2aes = |
| AARCH64_FEATURES (2, SVE2, SVE2_AES); |
| static const aarch64_feature_set aarch64_feature_sve2sha3 = |
| AARCH64_FEATURES (2, SVE2, SVE2_SHA3); |
| static const aarch64_feature_set aarch64_feature_sve2sm4 = |
| AARCH64_FEATURES (2, SVE2, SVE2_SM4); |
| static const aarch64_feature_set aarch64_feature_sve2bitperm = |
| AARCH64_FEATURES (2, SVE2, SVE2_BITPERM); |
| static const aarch64_feature_set aarch64_feature_sme = |
| AARCH64_FEATURES (2, SVE2, SME); |
| static const aarch64_feature_set aarch64_feature_sme_f64f64 = |
| AARCH64_FEATURES (3, SVE2, SME, SME_F64F64); |
| static const aarch64_feature_set aarch64_feature_sme_i16i64 = |
| AARCH64_FEATURES (3, SVE2, SME, SME_I16I64); |
| static const aarch64_feature_set aarch64_feature_sme2 = |
| AARCH64_FEATURES (3, SVE2, SME, SME2); |
| static const aarch64_feature_set aarch64_feature_sme2_i16i64 = |
| AARCH64_FEATURES (2, SME2, SME_I16I64); |
| static const aarch64_feature_set aarch64_feature_sme2_f64f64 = |
| AARCH64_FEATURES (2, SME2, SME_F64F64); |
| static const aarch64_feature_set aarch64_feature_i8mm = |
| AARCH64_FEATURE (I8MM); |
| static const aarch64_feature_set aarch64_feature_i8mm_sve = |
| AARCH64_FEATURES (2, I8MM, SVE); |
| static const aarch64_feature_set aarch64_feature_f32mm_sve = |
| AARCH64_FEATURES (2, F32MM, SVE); |
| static const aarch64_feature_set aarch64_feature_f64mm_sve = |
| AARCH64_FEATURES (2, F64MM, SVE); |
| static const aarch64_feature_set aarch64_feature_v8r = |
| AARCH64_FEATURE (V8R); |
| static const aarch64_feature_set aarch64_feature_ls64 = |
| AARCH64_FEATURE (LS64); |
| static const aarch64_feature_set aarch64_feature_flagm = |
| AARCH64_FEATURE (FLAGM); |
| static const aarch64_feature_set aarch64_feature_xs = |
| AARCH64_FEATURE (XS); |
| static const aarch64_feature_set aarch64_feature_wfxt = |
| AARCH64_FEATURE (WFXT); |
| static const aarch64_feature_set aarch64_feature_mops = |
| AARCH64_FEATURE (MOPS); |
| static const aarch64_feature_set aarch64_feature_mops_memtag = |
| AARCH64_FEATURES (2, MOPS, MEMTAG); |
| static const aarch64_feature_set aarch64_feature_hbc = |
| AARCH64_FEATURE (HBC); |
| static const aarch64_feature_set aarch64_feature_cssc = |
| AARCH64_FEATURE (CSSC); |
| static const aarch64_feature_set aarch64_feature_chk = |
| AARCH64_FEATURE (CHK); |
| static const aarch64_feature_set aarch64_feature_gcs = |
| AARCH64_FEATURE (GCS); |
| static const aarch64_feature_set aarch64_feature_ite = |
| AARCH64_FEATURE (ITE); |
| static const aarch64_feature_set aarch64_feature_d128 = |
| AARCH64_FEATURE (D128); |
| static const aarch64_feature_set aarch64_feature_the = |
| AARCH64_FEATURE (THE); |
| static const aarch64_feature_set aarch64_feature_d128_the = |
| AARCH64_FEATURES (2, D128, THE); |
| static const aarch64_feature_set aarch64_feature_b16b16_sve2 = |
| AARCH64_FEATURES (2, B16B16, SVE2); |
| static const aarch64_feature_set aarch64_feature_sme2p1 = |
| AARCH64_FEATURE (SME2p1); |
| static const aarch64_feature_set aarch64_feature_sve2p1 = |
| AARCH64_FEATURE (SVE2p1); |
| static const aarch64_feature_set aarch64_feature_rcpc3 = |
| AARCH64_FEATURE (RCPC3); |
| static const aarch64_feature_set aarch64_feature_cpa = |
| AARCH64_FEATURE (CPA); |
| static const aarch64_feature_set aarch64_feature_cpa_sve = |
| AARCH64_FEATURES (2, CPA, SVE); |
| static const aarch64_feature_set aarch64_feature_faminmax = |
| AARCH64_FEATURE (FAMINMAX); |
| static const aarch64_feature_set aarch64_feature_faminmax_sve2 = |
| AARCH64_FEATURES (2, FAMINMAX, SVE2); |
| static const aarch64_feature_set aarch64_feature_faminmax_sme2 = |
| AARCH64_FEATURES (3, SVE2, FAMINMAX, SME2); |
| static const aarch64_feature_set aarch64_feature_fp8 = |
| AARCH64_FEATURE (FP8); |
| static const aarch64_feature_set aarch64_feature_fp8_sve2 = |
| AARCH64_FEATURES (2, FP8, SVE2); |
| static const aarch64_feature_set aarch64_feature_fp8_sme2 = |
| AARCH64_FEATURES (2, FP8, SME2); |
| static const aarch64_feature_set aarch64_feature_lut = |
| AARCH64_FEATURE (LUT); |
| static const aarch64_feature_set aarch64_feature_lut_sve2 = |
| AARCH64_FEATURES (2, LUT, SVE2); |
| static const aarch64_feature_set aarch64_feature_brbe = |
| AARCH64_FEATURE (BRBE); |
| static const aarch64_feature_set aarch64_feature_sme_lutv2 = |
| AARCH64_FEATURES (3, SME_LUTv2, SME2, SME2p1); |
| static const aarch64_feature_set aarch64_feature_fp8fma = |
| AARCH64_FEATURE (FP8FMA); |
| static const aarch64_feature_set aarch64_feature_fp8dot4 = |
| AARCH64_FEATURE (FP8DOT4); |
| static const aarch64_feature_set aarch64_feature_fp8dot2 = |
| AARCH64_FEATURE (FP8DOT2); |
| static const aarch64_feature_set aarch64_feature_fp8fma_sve = |
| AARCH64_FEATURES (2, FP8FMA_SVE, SVE); |
| static const aarch64_feature_set aarch64_feature_fp8dot4_sve = |
| AARCH64_FEATURES (2, FP8DOT4_SVE, SVE); |
| static const aarch64_feature_set aarch64_feature_fp8dot2_sve = |
| AARCH64_FEATURES (2, FP8DOT2_SVE, SVE); |
| static const aarch64_feature_set aarch64_feature_sme_f8f32 = |
| AARCH64_FEATURES (2, SME_F8F32, SME2); |
| static const aarch64_feature_set aarch64_feature_sme_f8f16 = |
| AARCH64_FEATURES (2, SME_F8F32, SME2); |
| static const aarch64_feature_set aarch64_feature_sme_f16f16_f8f16 = |
| AARCH64_FEATURES (2, SME_F16F16_F8F16, SME2); |
| |
/* Short aliases for pointers to the feature sets above; these are used
   as the "avariant" (architecture variant) field in the instruction
   entry macros that follow.  */

/* Base architecture and FP/SIMD extensions.  */
#define CORE &aarch64_feature_v8
#define FP &aarch64_feature_fp
#define SIMD &aarch64_feature_simd
#define CRC &aarch64_feature_crc
#define LSE &aarch64_feature_lse
#define LSE128 &aarch64_feature_lse128
#define LOR &aarch64_feature_lor
#define RDMA &aarch64_feature_rdma
#define FP_F16 &aarch64_feature_fp_f16
#define SIMD_F16 &aarch64_feature_simd_f16
#define SVE &aarch64_feature_sve
#define PAUTH &aarch64_feature_pauth
#define COMPNUM &aarch64_feature_compnum
#define JSCVT &aarch64_feature_jscvt
#define RCPC &aarch64_feature_rcpc
#define RCPC2 &aarch64_feature_rcpc2
/* Cryptographic extensions.  */
#define SHA2 &aarch64_feature_sha2
#define AES &aarch64_feature_aes
#define SHA3 &aarch64_feature_sha3
#define SM4 &aarch64_feature_sm4
#define FP_F16_V8_2A &aarch64_feature_fp_16_v8_2a
#define DOTPROD &aarch64_feature_dotprod
#define FLAGMANIP &aarch64_feature_flagmanip
#define FRINTTS &aarch64_feature_frintts
#define SB &aarch64_feature_sb
#define PREDRES &aarch64_feature_predres
#define PREDRES2 &aarch64_feature_predres2
#define MEMTAG &aarch64_feature_memtag
#define TME &aarch64_feature_tme
/* SVE2 and its optional sub-extensions.  */
#define SVE2 &aarch64_feature_sve2
#define SVE2_AES &aarch64_feature_sve2aes
#define SVE2_SHA3 &aarch64_feature_sve2sha3
#define SVE2_SM4 &aarch64_feature_sve2sm4
#define SVE2_BITPERM &aarch64_feature_sve2bitperm
/* SME and its optional sub-extensions.  */
#define SME &aarch64_feature_sme
#define SME_F64F64 &aarch64_feature_sme_f64f64
#define SME_I16I64 &aarch64_feature_sme_i16i64
#define SME2 &aarch64_feature_sme2
#define SME2_I16I64 &aarch64_feature_sme2_i16i64
#define SME2_F64F64 &aarch64_feature_sme2_f64f64
/* BFloat16 and matrix-multiply extensions.  */
#define BFLOAT16_SVE &aarch64_feature_bfloat16_sve
#define BFLOAT16 &aarch64_feature_bfloat16
#define I8MM_SVE &aarch64_feature_i8mm_sve
#define F32MM_SVE &aarch64_feature_f32mm_sve
#define F64MM_SVE &aarch64_feature_f64mm_sve
#define I8MM &aarch64_feature_i8mm
#define ARMV8R &aarch64_feature_v8r
#define LS64 &aarch64_feature_ls64
#define FLAGM &aarch64_feature_flagm
#define XS &aarch64_feature_xs
#define WFXT &aarch64_feature_wfxt
#define MOPS &aarch64_feature_mops
#define MOPS_MEMTAG &aarch64_feature_mops_memtag
#define HBC &aarch64_feature_hbc
#define CSSC &aarch64_feature_cssc
#define CHK &aarch64_feature_chk
#define GCS &aarch64_feature_gcs
#define ITE &aarch64_feature_ite
#define D128 &aarch64_feature_d128
#define THE &aarch64_feature_the
#define D128_THE &aarch64_feature_d128_the
#define B16B16_SVE2 &aarch64_feature_b16b16_sve2
#define SME2p1 &aarch64_feature_sme2p1
#define SVE2p1 &aarch64_feature_sve2p1
#define RCPC3 &aarch64_feature_rcpc3
#define CPA &aarch64_feature_cpa
#define CPA_SVE &aarch64_feature_cpa_sve
/* FP8 (8-bit floating-point) family.  */
#define FAMINMAX &aarch64_feature_faminmax
#define FAMINMAX_SVE2 &aarch64_feature_faminmax_sve2
#define FAMINMAX_SME2 &aarch64_feature_faminmax_sme2
#define FP8 &aarch64_feature_fp8
#define FP8_SVE2 &aarch64_feature_fp8_sve2
#define FP8_SME2 &aarch64_feature_fp8_sme2
#define LUT &aarch64_feature_lut
#define LUT_SVE2 &aarch64_feature_lut_sve2
#define BRBE &aarch64_feature_brbe
#define LUTv2_SME2 &aarch64_feature_sme_lutv2
#define FP8FMA &aarch64_feature_fp8fma
#define FP8DOT4 &aarch64_feature_fp8dot4
#define FP8DOT2 &aarch64_feature_fp8dot2
#define FP8FMA_SVE &aarch64_feature_fp8fma_sve
#define FP8DOT4_SVE &aarch64_feature_fp8dot4_sve
#define FP8DOT2_SVE &aarch64_feature_fp8dot2_sve
#define SME_F8F32 &aarch64_feature_sme_f8f32
#define SME_F8F16 &aarch64_feature_sme_f8f16
#define SME_F16F16_F8F16 &aarch64_feature_sme_f16f16_f8f16
| |
/* Per-feature wrappers that expand to one aarch64_opcode table entry.
   The positional fields appear to be (NOTE(review): confirm against the
   aarch64_opcode struct in include/opcode/aarch64.h):
     { name, opcode, mask, iclass, op, avariant, operands, qualifiers_list,
       flags, constraints, tied_operand, verifier }.
   Variants ending in C additionally take a CONSTRAINTS argument; SVE/SME
   variants also take a TIED operand index and force F_STRICT qualifier
   matching.  */
#define CORE_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, OP, CORE, OPS, QUALS, FLAGS, 0, 0, NULL }
#define __FP_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, OP, FP, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SIMD_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, OP, SIMD, OPS, QUALS, FLAGS, 0, 0, NULL }
/* Like SIMD_INSN, but with an explicit per-entry VERIFIER function.  */
#define _SIMD_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,VERIFIER) \
  { NAME, OPCODE, MASK, CLASS, OP, SIMD, OPS, QUALS, FLAGS, 0, 0, VERIFIER }
#define _CRC_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, CRC, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _LSE_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, LSE, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _LSE128_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, LSE128, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _LOR_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, LOR, OPS, QUALS, FLAGS, 0, 0, NULL }
#define RDMA_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, RDMA, OPS, QUALS, FLAGS, 0, 0, NULL }
#define FF16_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, FP_F16, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SF16_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, SIMD_F16, OPS, QUALS, FLAGS, 0, 0, NULL }
/* SVE entries always force F_STRICT (exact qualifier matching) and carry
   a TIED operand index.  */
#define _SVE_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SVE, OPS, QUALS, \
    FLAGS | F_STRICT, 0, TIED, NULL }
#define _SVE_INSNC(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,CONSTRAINTS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SVE, OPS, QUALS, \
    FLAGS | F_STRICT, CONSTRAINTS, TIED, NULL }
#define PAUTH_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, PAUTH, OPS, QUALS, FLAGS, 0, 0, NULL }
#define CNUM_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, OP, COMPNUM, OPS, QUALS, FLAGS, 0, 0, NULL }
#define JSCVT_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, JSCVT, OPS, QUALS, FLAGS, 0, 0, NULL }
#define RCPC_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, RCPC, OPS, QUALS, FLAGS, 0, 0, NULL }
#define RCPC2_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, RCPC2, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SHA2_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, SHA2, OPS, QUALS, FLAGS, 0, 0, NULL }
#define AES_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, AES, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SHA3_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, SHA3, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SM4_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, SM4, OPS, QUALS, FLAGS, 0, 0, NULL }
#define FP16_V8_2A_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, FP_F16_V8_2A, OPS, QUALS, FLAGS, 0, 0, NULL }
#define DOT_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, DOTPROD, OPS, QUALS, FLAGS, 0, 0, NULL }
#define FLAGMANIP_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, FLAGMANIP, OPS, QUALS, FLAGS, 0, 0, NULL }
#define FRINTTS_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, FRINTTS, OPS, QUALS, FLAGS, 0, 0, NULL }
#define SB_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, SB, OPS, QUALS, FLAGS, 0, 0, NULL }
#define PREDRES_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, PREDRES, OPS, QUALS, FLAGS, 0, 0, NULL }
#define MEMTAG_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, MEMTAG, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _TME_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, OP, TME, OPS, QUALS, FLAGS, 0, 0, NULL }
/* SVE2 / SME families: like the SVE macros, all force F_STRICT.  */
#define SVE2_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SVE2, OPS, QUALS, \
    FLAGS | F_STRICT, 0, TIED, NULL }
#define SME2p1_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SME2p1, OPS, QUALS, \
    FLAGS | F_STRICT, 0, TIED, NULL }
#define SVE2_INSNC(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,CONSTRAINTS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SVE2, OPS, QUALS, \
    FLAGS | F_STRICT, CONSTRAINTS, TIED, NULL }
#define B16B16_SVE2_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, B16B16_SVE2, OPS, QUALS, \
    FLAGS | F_STRICT, 0, TIED, NULL }
#define B16B16_SVE2_INSNC(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,CONSTRAINTS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, B16B16_SVE2, OPS, QUALS, \
    FLAGS | F_STRICT, CONSTRAINTS, TIED, NULL }
#define SVE2p1_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SVE2p1, OPS, QUALS, \
    FLAGS | F_STRICT, 0, TIED, NULL }
#define SVE2p1_INSNC(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,CONSTRAINTS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SVE2p1, OPS, QUALS, \
    FLAGS | F_STRICT, CONSTRAINTS, TIED, NULL }
#define SVE2AES_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SVE2_AES, OPS, QUALS, \
    FLAGS | F_STRICT, 0, TIED, NULL }
#define SVE2SHA3_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SVE2_SHA3, OPS, QUALS, \
    FLAGS | F_STRICT, 0, TIED, NULL }
#define SVE2SM4_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SVE2_SM4, OPS, QUALS, \
    FLAGS | F_STRICT, 0, TIED, NULL }
#define SVE2SM4_INSNC(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,CONSTRAINTS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SVE2_SM4, OPS, QUALS, \
    FLAGS | F_STRICT, CONSTRAINTS, TIED, NULL }
#define SME_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SME, OPS, QUALS, \
    F_STRICT | FLAGS, 0, TIED, NULL }
#define SME_F64F64_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SME_F64F64, OPS, QUALS, \
    F_STRICT | FLAGS, 0, TIED, NULL }
#define SME_I16I64_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SME_I16I64, OPS, QUALS, \
    F_STRICT | FLAGS, 0, TIED, NULL }
#define SME_INSNC(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,CONSTRAINTS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SME, OPS, QUALS, \
    F_STRICT | FLAGS, CONSTRAINTS, TIED, NULL }
#define SME2_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SME2, OPS, QUALS, \
    F_STRICT | FLAGS, 0, TIED, NULL }
#define SME2_INSNC(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,CONSTRAINTS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SME2, OPS, QUALS, \
    FLAGS | F_STRICT, CONSTRAINTS, TIED, NULL }
#define SME2_I16I64_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SME2_I16I64, OPS, QUALS, \
    F_STRICT | FLAGS, 0, TIED, NULL }
#define SME2_F64F64_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SME2_F64F64, OPS, QUALS, \
    F_STRICT | FLAGS, 0, TIED, NULL }
#define SVE2BITPERM_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, OP, SVE2_BITPERM, OPS, QUALS, \
    FLAGS | F_STRICT, 0, TIED, NULL }
/* BFloat16 / matrix-multiply entries; note the non-SVE variants do not
   force F_STRICT.  */
#define BFLOAT16_SVE_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, BFLOAT16_SVE, OPS, QUALS, FLAGS, 0, 0, NULL }
#define BFLOAT16_SVE_INSNC(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS, CONSTRAINTS, TIED) \
  { NAME, OPCODE, MASK, CLASS, 0, BFLOAT16_SVE, OPS, QUALS, FLAGS | F_STRICT, \
    CONSTRAINTS, TIED, NULL }
#define BFLOAT16_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, BFLOAT16, OPS, QUALS, FLAGS, 0, 0, NULL }
#define INT8MATMUL_SVE_INSNC(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS, CONSTRAINTS, TIED) \
  { NAME, OPCODE, MASK, CLASS, 0, I8MM_SVE, OPS, QUALS, FLAGS, CONSTRAINTS, TIED, NULL }
#define INT8MATMUL_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, I8MM, OPS, QUALS, FLAGS, 0, 0, NULL }
#define F64MATMUL_SVE_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS,TIED) \
  { NAME, OPCODE, MASK, CLASS, 0, F64MM_SVE, OPS, QUALS, FLAGS, 0, TIED, NULL }
#define F64MATMUL_SVE_INSNC(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS, CONSTRAINTS, TIED) \
  { NAME, OPCODE, MASK, CLASS, 0, F64MM_SVE, OPS, QUALS, FLAGS, CONSTRAINTS, TIED, NULL }
#define F32MATMUL_SVE_INSNC(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS, CONSTRAINTS, TIED) \
  { NAME, OPCODE, MASK, CLASS, 0, F32MM_SVE, OPS, QUALS, FLAGS, CONSTRAINTS, TIED, NULL }
#define V8R_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, ARMV8R, OPS, QUALS, FLAGS, 0, 0, NULL }
#define XS_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, XS, OPS, QUALS, FLAGS, 0, 0, NULL }
#define WFXT_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, WFXT, OPS, QUALS, FLAGS, 0, 0, NULL }
#define _LS64_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, LS64, OPS, QUALS, FLAGS, 0, 0, NULL }
#define FLAGM_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
  { NAME, OPCODE, MASK, CLASS, 0, FLAGM, OPS, QUALS, FLAGS, 0, 0, NULL }
| #define MOPS_INSN(NAME, OPCODE, MASK, CLASS, OPS, QUALS, FLAGS, CONSTRAINTS, VERIFIER) \ |
| { NAME,
|