@@ -2731,3 +2731,8 @@ DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_h, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_s, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32) /* d, n, m, a, fpst, desc */
+DEF_HELPER_FLAGS_6(sve2_fmlsl_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32) /* d, n, m, a, fpst, desc */
@@ -1568,3 +1568,9 @@ SM4E 01000101 00 10001 1 11100 0 ..... ..... @rdn_rm_e0
# SVE2 crypto constructive binary operations
SM4EKEY 01000101 00 1 ..... 11110 0 ..... ..... @rd_rn_rm_e0
RAX1 01000101 00 1 ..... 11110 1 ..... ..... @rd_rn_rm_e0
+
+### SVE2 floating-point multiply-add long (size field validated in trans_*)
+FMLALB_zzzw 01100100 .. 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm  # bottom halves
+FMLALT_zzzw 01100100 .. 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm  # top halves
+FMLSLB_zzzw 01100100 .. 1 ..... 10 1 00 0 ..... ..... @rda_rn_rm  # bottom halves
+FMLSLT_zzzw 01100100 .. 1 ..... 10 1 00 1 ..... ..... @rda_rn_rm  # top halves
@@ -7622,3 +7622,43 @@ void HELPER(fmmla_d)(void *vd, void *va, void *vn, void *vm,
d[3] = float64_add(a[3], float64_add(p0, p1, status), status);
}
}
+
+/*
+ * FMLALB/FMLALT: half-precision to single-precision fused multiply-add long.
+ * sel1 (from simd_data) selects the even (bottom) or odd (top) half-elements.
+ * NOTE(review): conversions use the supplied status; confirm FZ16 handling.
+ */
+void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va,
+                               void *status, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    intptr_t sel1 = simd_data(desc) * sizeof(float16);
+    for (i = 0; i < opr_sz; i += sizeof(float32)) {
+        /* Widen the half-precision inputs before the multiply-add.  */
+        float32 nn = float16_to_float32(*(float16 *)(vn + H1_2(i + sel1)),
+                                        true, status);
+        float32 mm = float16_to_float32(*(float16 *)(vm + H1_2(i + sel1)),
+                                        true, status);
+        float32 aa = *(float32 *)(va + H1_4(i));
+        *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, 0, status);
+    }
+}
+
+/*
+ * FMLSLB/FMLSLT: as above, but the product is subtracted.  Negate the
+ * half-precision element by flipping its sign bit before widening.
+ */
+void HELPER(sve2_fmlsl_zzzw_s)(void *vd, void *vn, void *vm, void *va,
+                               void *status, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    intptr_t sel1 = simd_data(desc) * sizeof(float16);
+    for (i = 0; i < opr_sz; i += sizeof(float32)) {
+        float16 nn_16 = *(float16 *)(vn + H1_2(i + sel1)) ^ 0x8000;
+        float32 nn = float16_to_float32(nn_16, true, status);
+        float32 mm = float16_to_float32(*(float16 *)(vm + H1_2(i + sel1)),
+                                        true, status);
+        float32 aa = *(float32 *)(va + H1_4(i));
+        *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, 0, status);
+    }
+}
@@ -8253,3 +8253,64 @@ static bool trans_RAX1(DisasContext *s, arg_rrr_esz *a)
}
return true;
}
+
+/*
+ * SVE2 Floating Point Multiply-Add Long Group
+ */
+
+/*
+ * Expand a gvec operation taking rd/rn/rm/ra plus an fp-status pointer.
+ * 'data' is passed through to the helper via simd_data().
+ */
+static bool do_sve2_zzzz_fp(DisasContext *s, arg_rrrr_esz *a,
+                            gen_helper_gvec_4_ptr *fn, int data)
+{
+    if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        TCGv_ptr status = get_fpstatus_ptr(a->esz == MO_16);
+        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+                           vec_full_reg_offset(s, a->rn),
+                           vec_full_reg_offset(s, a->rm),
+                           vec_full_reg_offset(s, a->ra),
+                           status, vsz, vsz, data, fn);
+        tcg_temp_free_ptr(status);
+    }
+    return true;
+}
+
+/*
+ * Shared by FMLAL[BT] and FMLSL[BT]: only the single-precision
+ * (esz == 2) encoding is architecturally valid; 'sel' selects the
+ * bottom (false) or top (true) half-elements.
+ */
+static bool do_sve2_fmlxl_zzzw(DisasContext *s, arg_rrrr_esz *a,
+                               gen_helper_gvec_4_ptr *fn, bool sel)
+{
+    if (a->esz != 2) {
+        return false;
+    }
+    return do_sve2_zzzz_fp(s, a, fn, sel);
+}
+
+static bool trans_FMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_sve2_fmlxl_zzzw(s, a, gen_helper_sve2_fmlal_zzzw_s, false);
+}
+
+static bool trans_FMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_sve2_fmlxl_zzzw(s, a, gen_helper_sve2_fmlal_zzzw_s, true);
+}
+
+static bool trans_FMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_sve2_fmlxl_zzzw(s, a, gen_helper_sve2_fmlsl_zzzw_s, false);
+}
+
+static bool trans_FMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_sve2_fmlxl_zzzw(s, a, gen_helper_sve2_fmlsl_zzzw_s, true);
+}
Implements FMLALB, FMLALT, FMLSLB, FMLSLT.

Signed-off-by: Stephen Long <steplong@quicinc.com>
---
 target/arm/helper-sve.h    |  5 ++++
 target/arm/sve.decode      |  6 ++++++
 target/arm/sve_helper.c    | 27 ++++++++++++++++++
 target/arm/translate-sve.c | 58 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 96 insertions(+)