@@ -5561,6 +5561,9 @@ parse_operands (char *str, const aarch64_opcode *opcode)
case AARCH64_OPND_SVE_UIMM7:
case AARCH64_OPND_SVE_UIMM8:
case AARCH64_OPND_SVE_UIMM8_53:
+ case AARCH64_OPND_IMM_ROT1:
+ case AARCH64_OPND_IMM_ROT2:
+ case AARCH64_OPND_IMM_ROT3:
po_imm_nc_or_fail ();
info->imm.value = val;
break;
new file mode 100644
@@ -0,0 +1,33 @@
+#as: -march=armv8.3-a
+#objdump: -dr
+
+.*: file format .*
+
+Disassembly of section \.text:
+
+0000000000000000 <.*>:
+[^:]+: 6ec3c441 fcmla v1.2d, v2.2d, v3.2d, #0
+[^:]+: 6ec3cc41 fcmla v1.2d, v2.2d, v3.2d, #90
+[^:]+: 6ec3d441 fcmla v1.2d, v2.2d, v3.2d, #180
+[^:]+: 6ec3dc41 fcmla v1.2d, v2.2d, v3.2d, #270
+[^:]+: 2e83cc41 fcmla v1.2s, v2.2s, v3.2s, #90
+[^:]+: 6e83cc41 fcmla v1.4s, v2.4s, v3.4s, #90
+[^:]+: 2e43cc41 fcmla v1.4h, v2.4h, v3.4h, #90
+[^:]+: 6e43cc41 fcmla v1.8h, v2.8h, v3.8h, #90
+[^:]+: 6f831041 fcmla v1.4s, v2.4s, v3.s\[0\], #0
+[^:]+: 6f833041 fcmla v1.4s, v2.4s, v3.s\[0\], #90
+[^:]+: 6f835041 fcmla v1.4s, v2.4s, v3.s\[0\], #180
+[^:]+: 6f837041 fcmla v1.4s, v2.4s, v3.s\[0\], #270
+[^:]+: 6f833841 fcmla v1.4s, v2.4s, v3.s\[1\], #90
+[^:]+: 2f433041 fcmla v1.4h, v2.4h, v3.h\[0\], #90
+[^:]+: 2f633041 fcmla v1.4h, v2.4h, v3.h\[1\], #90
+[^:]+: 6f433041 fcmla v1.8h, v2.8h, v3.h\[0\], #90
+[^:]+: 6f633041 fcmla v1.8h, v2.8h, v3.h\[1\], #90
+[^:]+: 6f433841 fcmla v1.8h, v2.8h, v3.h\[2\], #90
+[^:]+: 6f633841 fcmla v1.8h, v2.8h, v3.h\[3\], #90
+[^:]+: 6ec3e441 fcadd v1.2d, v2.2d, v3.2d, #90
+[^:]+: 6ec3f441 fcadd v1.2d, v2.2d, v3.2d, #270
+[^:]+: 2e83e441 fcadd v1.2s, v2.2s, v3.2s, #90
+[^:]+: 6e83e441 fcadd v1.4s, v2.4s, v3.4s, #90
+[^:]+: 2e43e441 fcadd v1.4h, v2.4h, v3.4h, #90
+[^:]+: 6e43e441 fcadd v1.8h, v2.8h, v3.8h, #90
new file mode 100644
@@ -0,0 +1,36 @@
+/* Test file for ARMv8.3 complex arithmetic instructions. */
+ .text
+
+ /* Three-same operands FCMLA. */
+ fcmla v1.2d, v2.2d, v3.2d, #0
+ fcmla v1.2d, v2.2d, v3.2d, #90
+ fcmla v1.2d, v2.2d, v3.2d, #180
+ fcmla v1.2d, v2.2d, v3.2d, #270
+
+ fcmla v1.2s, v2.2s, v3.2s, #90
+ fcmla v1.4s, v2.4s, v3.4s, #90
+ fcmla v1.4h, v2.4h, v3.4h, #90
+ fcmla v1.8h, v2.8h, v3.8h, #90
+
+ /* Indexed element FCMLA. */
+ fcmla v1.4s, v2.4s, v3.s[0], #0
+ fcmla v1.4s, v2.4s, v3.s[0], #90
+ fcmla v1.4s, v2.4s, v3.s[0], #180
+ fcmla v1.4s, v2.4s, v3.s[0], #270
+ fcmla v1.4s, v2.4s, v3.s[1], #90
+
+ fcmla v1.4h, v2.4h, v3.h[0], #90
+ fcmla v1.4h, v2.4h, v3.h[1], #90
+ fcmla v1.8h, v2.8h, v3.h[0], #90
+ fcmla v1.8h, v2.8h, v3.h[1], #90
+ fcmla v1.8h, v2.8h, v3.h[2], #90
+ fcmla v1.8h, v2.8h, v3.h[3], #90
+
+ /* Three-same operands FCADD. */
+ fcadd v1.2d, v2.2d, v3.2d, #90
+ fcadd v1.2d, v2.2d, v3.2d, #270
+
+ fcadd v1.2s, v2.2s, v3.2s, #90
+ fcadd v1.4s, v2.4s, v3.4s, #90
+ fcadd v1.4h, v2.4h, v3.4h, #90
+ fcadd v1.8h, v2.8h, v3.8h, #90
new file mode 100644
@@ -0,0 +1,2 @@
+#as: -march=armv8.3-a -mno-verbose-error
+#error-output: illegal-fcmla.l
new file mode 100644
@@ -0,0 +1,17 @@
+[^:]*: Assembler messages:
+[^:]+:10: Error: rotate expected to be 0, 90, 180 or 270 at operand 4 -- `fcmla v0\.4s,v1\.4s,v2\.s\[0\],#-90'
+[^:]+:11: Error: rotate expected to be 0, 90, 180 or 270 at operand 4 -- `fcmla v0\.4s,v1\.4s,v2\.s\[0\],#30'
+[^:]+:12: Error: rotate expected to be 0, 90, 180 or 270 at operand 4 -- `fcmla v0\.4s,v1\.4s,v2\.s\[0\],#360'
+[^:]+:13: Error: register element index out of range 0 to 1 at operand 3 -- `fcmla v0\.4h,v1\.4h,v2\.h\[2\],#90'
+[^:]+:14: Error: register element index out of range 0 to 3 at operand 3 -- `fcmla v0\.8h,v1\.8h,v2\.h\[4\],#90'
+[^:]+:15: Error: register element index out of range 0 to 1 at operand 3 -- `fcmla v0\.4s,v1\.4s,v2\.s\[2\],#90'
+[^:]+:16: Error: operand mismatch -- `fcmla v0\.2s,v1\.2s,v2\.s\[0\],#90'
+[^:]+:17: Error: operand mismatch -- `fcmla v0\.4s,v1\.4s,v2\.d\[0\],#90'
+[^:]+:18: Error: operand mismatch -- `fcmla v0\.2d,v1\.2d,v2\.d\[0\],#0'
+[^:]+:19: Error: rotate expected to be 0, 90, 180 or 270 at operand 4 -- `fcmla v0\.4s,v1\.4s,v2\.4s,#-90'
+[^:]+:20: Error: rotate expected to be 0, 90, 180 or 270 at operand 4 -- `fcmla v0\.4s,v1\.4s,v2\.4s,#30'
+[^:]+:21: Error: rotate expected to be 0, 90, 180 or 270 at operand 4 -- `fcmla v0\.4s,v1\.4s,v2\.4s,#360'
+[^:]+:22: Error: invalid element size 8 and vector size combination s at operand 1 -- `fcmla v0\.8s,v1\.8s,v2\.8s,#0'
+[^:]+:23: Error: operand mismatch -- `fcmla v0\.1d,v1\.1d,v2\.1d,#0'
+[^:]+:24: Error: rotate expected to be 90 or 270 at operand 4 -- `fcadd v0\.4h,v1\.4h,v2\.4h,#0'
+[^:]+:25: Error: rotate expected to be 90 or 270 at operand 4 -- `fcadd v0\.4h,v1\.4h,v2\.4h,#180'
new file mode 100644
@@ -0,0 +1,25 @@
+// Test illegal ARMv8.3 FCMLA and FCADD instructions with -march=armv8.3-a.
+.text
+
+ // Good.
+ fcmla v0.4s, v1.4s, v2.s[0], #90
+ fcmla v0.4s, v1.4s, v2.4s, #90
+ fcadd v0.4h, v1.4h, v2.4h, #90
+
+ // Bad.
+ fcmla v0.4s, v1.4s, v2.s[0], #-90
+ fcmla v0.4s, v1.4s, v2.s[0], #30
+ fcmla v0.4s, v1.4s, v2.s[0], #360
+ fcmla v0.4h, v1.4h, v2.h[2], #90
+ fcmla v0.8h, v1.8h, v2.h[4], #90
+ fcmla v0.4s, v1.4s, v2.s[2], #90
+ fcmla v0.2s, v1.2s, v2.s[0], #90
+ fcmla v0.4s, v1.4s, v2.d[0], #90
+ fcmla v0.2d, v1.2d, v2.d[0], #0
+ fcmla v0.4s, v1.4s, v2.4s, #-90
+ fcmla v0.4s, v1.4s, v2.4s, #30
+ fcmla v0.4s, v1.4s, v2.4s, #360
+ fcmla v0.8s, v1.8s, v2.8s, #0
+ fcmla v0.1d, v1.1d, v2.1d, #0
+ fcadd v0.4h, v1.4h, v2.4h, #0
+ fcadd v0.4h, v1.4h, v2.4h, #180
@@ -204,6 +204,9 @@ enum aarch64_opnd
AARCH64_OPND_HALF, /* #<imm16>{, LSL #<shift>} operand in move wide. */
AARCH64_OPND_FBITS, /* FP #<fbits> operand in e.g. SCVTF */
AARCH64_OPND_IMM_MOV, /* Immediate operand for the MOV alias. */
+ AARCH64_OPND_IMM_ROT1, /* Immediate rotate operand for FCMLA. */
+ AARCH64_OPND_IMM_ROT2, /* Immediate rotate operand for indexed FCMLA. */
+ AARCH64_OPND_IMM_ROT3, /* Immediate rotate operand for FCADD. */
AARCH64_OPND_COND, /* Standard condition as the last operand. */
AARCH64_OPND_COND1, /* Same as the above, but excluding AL and NV. */
@@ -596,6 +599,8 @@ enum aarch64_op
OP_NOTS_P_P_P_Z,
OP_NOT_P_P_P_Z,
+ OP_FCMLA_ELEM, /* ARMv8.3, indexed element version. */
+
OP_TOTAL_NUM, /* Pseudo. */
};
@@ -125,19 +125,28 @@ aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
{
/* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
or SQDMLAL2 <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
+ unsigned index = info->reglane.index;
+
+ if (inst->opcode->op == OP_FCMLA_ELEM)
+ /* Complex operand takes two elements. */
+ index *= 2;
+
switch (info->qualifier)
{
case AARCH64_OPND_QLF_S_H:
/* H:L:M */
- insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
+ assert (index < 8);
+ insert_fields (code, index, 0, 3, FLD_M, FLD_L, FLD_H);
break;
case AARCH64_OPND_QLF_S_S:
/* H:L */
- insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
+ assert (index < 4);
+ insert_fields (code, index, 0, 2, FLD_L, FLD_H);
break;
case AARCH64_OPND_QLF_S_D:
/* H */
- insert_field (FLD_H, code, info->reglane.index, 0);
+ assert (index < 2);
+ insert_field (FLD_H, code, index, 0);
break;
default:
assert (0);
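Note: a complex element covers a real/imaginary pair of lanes, so the assembly-level index is doubled before it is packed into H:L:M (or H:L, or just H). A self-contained illustration of the S_H case, assuming the usual AdvSIMD indexed-element bit positions H = bit 11, L = bit 21, M = bit 20 (pack_hlm is a hypothetical helper standing in for insert_fields):

#include <assert.h>
#include <stdio.h>

/* Pack a doubled FCMLA lane index into H (bit 11), L (bit 21) and
   M (bit 20), as insert_fields does for the S_H qualifier.  */
static unsigned
pack_hlm (unsigned asm_index)
{
  unsigned hw = asm_index * 2;          /* complex element = two lanes */
  assert (hw < 8);
  return (((hw >> 2) & 1) << 11)        /* H */
         | (((hw >> 1) & 1) << 21)      /* L */
         | ((hw & 1) << 20);            /* M */
}

int
main (void)
{
  /* For "fcmla v1.8h, v2.8h, v3.h[3], #90" the doubled index is 6
     (0b110), so H=1, L=1, M=0; OR-ing into the h[0] encoding gives
     0x6f633841, the word expected by the new fcmla.d test.  */
  printf ("0x%08x\n", 0x6f433041 | pack_hlm (3));
  return 0;
}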
@@ -427,6 +436,41 @@ aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
return NULL;
}
+/* Insert field rot for the rotate immediate in
+ FCMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<T>, #<rotate>. */
+const char *
+aarch64_ins_imm_rotate (const aarch64_operand *self,
+ const aarch64_opnd_info *info,
+ aarch64_insn *code, const aarch64_inst *inst)
+{
+ uint64_t rot = info->imm.value / 90;
+
+ switch (info->type)
+ {
+ case AARCH64_OPND_IMM_ROT1:
+ case AARCH64_OPND_IMM_ROT2:
+ /* value rot
+ 0 0
+ 90 1
+ 180 2
+ 270 3 */
+ assert (rot < 4U);
+ break;
+ case AARCH64_OPND_IMM_ROT3:
+ /* value rot
+ 90 0
+ 270 1 */
+ rot = (rot - 1) / 2;
+ assert (rot < 2U);
+ break;
+ default:
+ assert (0);
+ }
+ insert_field (self->fields[0], code, rot, inst->opcode->mask);
+
+ return NULL;
+}
+
/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
const char *
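Note: for FCADD the one-bit field only has to distinguish #90 from #270, hence the extra (rot - 1) / 2 fold after the common division by 90 in the ins_imm_rotate hunk above. A standalone sketch of the forward mapping (encode_rot is illustrative, not the library function):

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

/* Forward mapping used by aarch64_ins_imm_rotate: FCMLA (2-bit rot)
   stores value/90 directly; FCADD (1-bit rot) folds 90 -> 0, 270 -> 1.  */
static uint64_t
encode_rot (int is_fcadd, uint64_t value)
{
  uint64_t rot = value / 90;
  if (is_fcadd)
    {
      rot = (rot - 1) / 2;      /* 1 -> 0, 3 -> 1 */
      assert (rot < 2U);
    }
  else
    assert (rot < 4U);
  return rot;
}

int
main (void)
{
  printf ("fcmla #180 -> rot %" PRIu64 "\n", encode_rot (0, 180)); /* 2 */
  printf ("fcadd #270 -> rot %" PRIu64 "\n", encode_rot (1, 270)); /* 1 */
  return 0;
}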
@@ -92,6 +92,7 @@ AARCH64_DECL_OPD_INSERTER (ins_sve_reglist);
AARCH64_DECL_OPD_INSERTER (ins_sve_scale);
AARCH64_DECL_OPD_INSERTER (ins_sve_shlimm);
AARCH64_DECL_OPD_INSERTER (ins_sve_shrimm);
+AARCH64_DECL_OPD_INSERTER (ins_imm_rotate);
#undef AARCH64_DECL_OPD_INSERTER
@@ -351,6 +351,14 @@ aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
default:
return 0;
}
+
+ if (inst->opcode->op == OP_FCMLA_ELEM)
+ {
+ /* Complex operand takes two elements. */
+ if (info->reglane.index & 1)
+ return 0;
+ info->reglane.index /= 2;
+ }
}
return 1;
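Note: the decode side has to reject odd raw indices -- the hardware lane number is always even for a two-lane complex element -- and halve the value to recover the assembly-level index. A sketch under the same assumptions as above (decode_complex_index is a hypothetical name):

#include <stdio.h>

/* Mirror of the decode-side adjustment: the raw H:L:M value must be
   even for a complex element; halve it, or fail as aarch64_ext_reglane
   does by returning 0.  */
static int
decode_complex_index (unsigned raw, unsigned *index)
{
  if (raw & 1)
    return 0;                   /* not a valid FCMLA encoding */
  *index = raw / 2;
  return 1;
}

int
main (void)
{
  unsigned idx;
  if (decode_complex_index (6, &idx))   /* H:L:M = 0b110 */
    printf ("raw 6 -> v.h[%u]\n", idx); /* v.h[3] */
  printf ("raw 5 -> %s\n",
          decode_complex_index (5, &idx) ? "ok" : "undefined");
  return 0;
}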
@@ -703,6 +711,40 @@ aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
return 1;
}
+/* Decode rotate immediate for FCMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<T>, #<rotate>. */
+int
+aarch64_ext_imm_rotate (const aarch64_operand *self, aarch64_opnd_info *info,
+ const aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ uint64_t rot = extract_field (self->fields[0], code, 0);
+
+ switch (info->type)
+ {
+ case AARCH64_OPND_IMM_ROT1:
+ case AARCH64_OPND_IMM_ROT2:
+ /* rot value
+ 0 0
+ 1 90
+ 2 180
+ 3 270 */
+ assert (rot < 4U);
+ break;
+ case AARCH64_OPND_IMM_ROT3:
+ /* rot value
+ 0 90
+ 1 270 */
+ assert (rot < 2U);
+ rot = 2 * rot + 1;
+ break;
+ default:
+ assert (0);
+ return 0;
+ }
+ info->imm.value = rot * 90;
+ return 1;
+}
+
/* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
int
aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
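Note: the extractor above is the exact inverse of aarch64_ins_imm_rotate; a quick standalone round-trip check over every legal rotate value (enc and dec mirror the two mappings, they are not the binutils functions):

#include <assert.h>

static unsigned
enc (int fcadd, unsigned v)
{
  unsigned r = v / 90;
  return fcadd ? (r - 1) / 2 : r;
}

static unsigned
dec (int fcadd, unsigned r)
{
  return fcadd ? (2 * r + 1) * 90 : r * 90;
}

int
main (void)
{
  static const unsigned cmla[] = { 0, 90, 180, 270 };
  static const unsigned cadd[] = { 90, 270 };
  for (unsigned i = 0; i < 4; i++)
    assert (dec (0, enc (0, cmla[i])) == cmla[i]);
  for (unsigned i = 0; i < 2; i++)
    assert (dec (1, enc (1, cadd[i])) == cadd[i]);
  return 0;
}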
@@ -114,6 +114,7 @@ AARCH64_DECL_OPD_EXTRACTOR (ext_sve_reglist);
AARCH64_DECL_OPD_EXTRACTOR (ext_sve_scale);
AARCH64_DECL_OPD_EXTRACTOR (ext_sve_shlimm);
AARCH64_DECL_OPD_EXTRACTOR (ext_sve_shrimm);
+AARCH64_DECL_OPD_EXTRACTOR (ext_imm_rotate);
#undef AARCH64_DECL_OPD_EXTRACTOR
@@ -309,7 +309,10 @@ const aarch64_field fields[] =
{ 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
{ 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
{ 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
- { 22, 1 } /* SVE_xs_22: UXTW/SXTW select (bit 22). */
+ { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
+ { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
+ { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
+ { 12, 1 }, /* rotate3: FCADD immediate rotate. */
};
enum aarch64_operand_class
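Note: the three new entries place rotate1 at bits [12:11] (vector FCMLA), rotate2 at bits [14:13] (indexed FCMLA) and rotate3 at bit [12] (FCADD). Pulling the fields back out of encodings taken from the new fcmla.d test confirms the placement (bits is a stand-in for extract_field):

#include <stdio.h>
#include <stdint.h>

/* Extract "width" bits starting at "lsb"; what extract_field does for
   a { lsb, width } entry in fields[].  */
static unsigned
bits (uint32_t insn, unsigned lsb, unsigned width)
{
  return (insn >> lsb) & ((1u << width) - 1);
}

int
main (void)
{
  /* Encodings taken from the fcmla.d expectations.  */
  printf ("fcmla v.2d, #90:    rot=%u\n", bits (0x6ec3cc41, 11, 2)); /* 1 */
  printf ("fcmla v.s[0], #180: rot=%u\n", bits (0x6f835041, 13, 2)); /* 2 */
  printf ("fcadd v.2d, #270:   rot=%u\n", bits (0x6ec3f441, 12, 1)); /* 1 */
  return 0;
}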
@@ -2097,6 +2100,28 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
}
break;
+ case AARCH64_OPND_IMM_ROT1:
+ case AARCH64_OPND_IMM_ROT2:
+ if (opnd->imm.value != 0
+ && opnd->imm.value != 90
+ && opnd->imm.value != 180
+ && opnd->imm.value != 270)
+ {
+ set_other_error (mismatch_detail, idx,
+ _("rotate expected to be 0, 90, 180 or 270"));
+ return 0;
+ }
+ break;
+
+ case AARCH64_OPND_IMM_ROT3:
+ if (opnd->imm.value != 90 && opnd->imm.value != 270)
+ {
+ set_other_error (mismatch_detail, idx,
+ _("rotate expected to be 90 or 270"));
+ return 0;
+ }
+ break;
+
case AARCH64_OPND_SHLL_IMM:
assert (idx == 2);
size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
@@ -2436,7 +2461,15 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
case AARCH64_OPND_CLASS_SIMD_ELEMENT:
/* Get the upper bound for the element index. */
- num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
+ if (opcode->op == OP_FCMLA_ELEM)
+ /* FCMLA index range depends on the vector size of the other operands
+ and is halved because complex numbers take two elements. */
+ num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
+ * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
+ else
+ num = 16;
+ num = num / aarch64_get_qualifier_esize (qualifier) - 1;
+
/* Index out-of-range. */
if (!value_in_range_p (opnd->reglane.index, 0, num))
{
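Note: a worked instance of the bound computed above, matching the "out of range" messages in illegal-fcmla.l (fcmla_max_index is illustrative only):

#include <stdio.h>

/* Upper index bound for the indexed FCMLA, as computed above:
   (bytes in the destination vector / 2) / element size - 1.  */
static int
fcmla_max_index (int nelem0, int esize0, int esize_elem)
{
  int num = nelem0 * esize0 / 2;
  return num / esize_elem - 1;
}

int
main (void)
{
  printf ("v.4h, v.h[]: index 0..%d\n", fcmla_max_index (4, 2, 2)); /* 0..1 */
  printf ("v.8h, v.h[]: index 0..%d\n", fcmla_max_index (8, 2, 2)); /* 0..3 */
  printf ("v.4s, v.s[]: index 0..%d\n", fcmla_max_index (4, 4, 4)); /* 0..1 */
  return 0;
}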
@@ -3185,6 +3218,9 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
case AARCH64_OPND_SVE_UIMM7:
case AARCH64_OPND_SVE_UIMM8:
case AARCH64_OPND_SVE_UIMM8_53:
+ case AARCH64_OPND_IMM_ROT1:
+ case AARCH64_OPND_IMM_ROT2:
+ case AARCH64_OPND_IMM_ROT3:
snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
break;
@@ -137,6 +137,9 @@ enum aarch64_field_kind
FLD_SVE_tszl_19,
FLD_SVE_xs_14,
FLD_SVE_xs_22,
+ FLD_rotate1,
+ FLD_rotate2,
+ FLD_rotate3,
};
/* Field description. */
@@ -969,6 +969,16 @@
QLF3(V_2D , V_2D , V_2D ) \
}
+/* e.g. FCMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<T>, #<rotate>. */
+#define QL_V3SAMEHSD_ROT \
+{ \
+ QLF4 (V_4H, V_4H, V_4H, NIL), \
+ QLF4 (V_8H, V_8H, V_8H, NIL), \
+ QLF4 (V_2S, V_2S, V_2S, NIL), \
+ QLF4 (V_4S, V_4S, V_4S, NIL), \
+ QLF4 (V_2D, V_2D, V_2D, NIL), \
+}
+
/* e.g. FMAXNM <Vd>.<T>, <Vn>.<T>, <Vm>.<T>. */
#define QL_V3SAMEH \
{ \
@@ -1308,6 +1318,14 @@
QLF3 (V_8H, V_8H, S_H), \
}
+/* e.g. FCMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<Ts>[<index>], #<rotate>. */
+#define QL_ELEMENT_ROT \
+{ \
+ QLF4 (V_4H, V_4H, S_H, NIL), \
+ QLF4 (V_8H, V_8H, S_H, NIL), \
+ QLF4 (V_4S, V_4S, S_S, NIL), \
+}
+
/* e.g. MOVI <Vd>.4S, #<imm8> {, LSL #<amount>}. */
#define QL_SIMD_IMM_S0W \
{ \
@@ -1920,6 +1938,8 @@ static const aarch64_feature_set aarch64_feature_v8_3 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_3, 0);
static const aarch64_feature_set aarch64_feature_fp_v8_3 =
AARCH64_FEATURE (AARCH64_FEATURE_V8_3 | AARCH64_FEATURE_FP, 0);
+static const aarch64_feature_set aarch64_feature_simd_v8_3 =
+ AARCH64_FEATURE (AARCH64_FEATURE_V8_3 | AARCH64_FEATURE_SIMD, 0);
#define CORE &aarch64_feature_v8
#define FP &aarch64_feature_fp
@@ -1937,6 +1957,7 @@ static const aarch64_feature_set aarch64_feature_fp_v8_3 =
#define SVE &aarch64_feature_sve
#define ARMV8_3 &aarch64_feature_v8_3
#define FP_V8_3 &aarch64_feature_fp_v8_3
+#define SIMD_V8_3 &aarch64_feature_simd_v8_3
#define CORE_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
{ NAME, OPCODE, MASK, CLASS, OP, CORE, OPS, QUALS, FLAGS, 0, NULL }
@@ -2104,6 +2125,7 @@ struct aarch64_opcode aarch64_opcode_table[] =
SF16_INSN ("fmulx", 0x2f009000, 0xbfc0f400, asimdelem, OP3 (Vd, Vn, Em), QL_ELEMENT_FP_H, F_SIZEQ),
RDMA_INSN ("sqrdmlah",0x2f00d000, 0xbf00f400, asimdelem, OP3 (Vd, Vn, Em), QL_ELEMENT, F_SIZEQ),
RDMA_INSN ("sqrdmlsh",0x2f00f000, 0xbf00f400, asimdelem, OP3 (Vd, Vn, Em), QL_ELEMENT, F_SIZEQ),
+ {"fcmla", 0x2f001000, 0xbf009400, asimdelem, OP_FCMLA_ELEM, SIMD_V8_3, OP4 (Vd, Vn, Em, IMM_ROT2), QL_ELEMENT_ROT, F_SIZEQ, 0, NULL},
/* AdvSIMD EXT. */
SIMD_INSN ("ext", 0x2e000000, 0xbfe08400, asimdext, 0, OP4 (Vd, Vn, Vm, IDX), QL_VEXT, F_SIZEQ),
/* AdvSIMD modified immediate. */
@@ -2347,6 +2369,8 @@ struct aarch64_opcode aarch64_opcode_table[] =
/* AdvSIMD three same extension. */
RDMA_INSN ("sqrdmlah",0x2e008400, 0xbf20fe00, asimdsame, OP3 (Vd, Vn, Vm), QL_V3SAMEHS, F_SIZEQ),
RDMA_INSN ("sqrdmlsh",0x2e008c00, 0xbf20fe00, asimdsame, OP3 (Vd, Vn, Vm), QL_V3SAMEHS, F_SIZEQ),
+ {"fcmla", 0x2e00c400, 0xbf20e400, asimdsame, 0, SIMD_V8_3, OP4 (Vd, Vn, Vm, IMM_ROT1), QL_V3SAMEHSD_ROT, F_SIZEQ, 0, NULL},
+ {"fcadd", 0x2e00e400, 0xbf20ec00, asimdsame, 0, SIMD_V8_3, OP4 (Vd, Vn, Vm, IMM_ROT3), QL_V3SAMEHSD_ROT, F_SIZEQ, 0, NULL},
/* AdvSIMD shift by immediate. */
SIMD_INSN ("sshr", 0xf000400, 0xbf80fc00, asimdshf, 0, OP3 (Vd, Vn, IMM_VLSR), QL_VSHIFT, 0),
SIMD_INSN ("ssra", 0xf001400, 0xbf80fc00, asimdshf, 0, OP3 (Vd, Vn, IMM_VLSR), QL_VSHIFT, 0),
@@ -4116,6 +4140,12 @@ struct aarch64_opcode aarch64_opcode_table[] =
Y(IMMEDIATE, fbits, "FBITS", 0, F(FLD_scale), \
"the number of bits after the binary point in the fixed-point value")\
X(IMMEDIATE, 0, 0, "IMM_MOV", 0, F(), "an immediate") \
+ Y(IMMEDIATE, imm_rotate, "IMM_ROT1", 0, F(FLD_rotate1), \
+ "a 2-bit rotation specifier for complex arithmetic operations") \
+ Y(IMMEDIATE, imm_rotate, "IMM_ROT2", 0, F(FLD_rotate2), \
+ "a 2-bit rotation specifier for complex arithmetic operations") \
+ Y(IMMEDIATE, imm_rotate, "IMM_ROT3", 0, F(FLD_rotate3), \
+ "a 1-bit rotation specifier for complex arithmetic operations") \
Y(COND, cond, "COND", 0, F(), "a condition") \
Y(COND, cond, "COND1", 0, F(), \
"one of the standard conditions, excluding AL and NV.") \