@@ -4079,3 +4079,52 @@ float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
     float_status *fpst = fpstp;
     return float64_muladd(a, b, c, 0, fpst);
 }
+
+/* ARMv8 VMAXNM/VMINNM: like VMAX/VMIN, except that if exactly one operand
+ * is a quiet NaN and the other is a number, the number is returned.
+ * Signalling NaNs and NaN-vs-NaN cases fall through to float*_max/min,
+ * which raise InvalidOp and propagate a quiet NaN as usual.
+ */
+float32 VFP_HELPER(maxnm, s)(float32 a, float32 b, void *fpstp)
+{
+    float_status *fpst = fpstp;
+    if (float32_is_quiet_nan(a) && !float32_is_any_nan(b)) {
+        return b;
+    } else if (float32_is_quiet_nan(b) && !float32_is_any_nan(a)) {
+        return a;
+    }
+    return float32_max(a, b, fpst);
+}
+
+float64 VFP_HELPER(maxnm, d)(float64 a, float64 b, void *fpstp)
+{
+    float_status *fpst = fpstp;
+    if (float64_is_quiet_nan(a) && !float64_is_any_nan(b)) {
+        return b;
+    } else if (float64_is_quiet_nan(b) && !float64_is_any_nan(a)) {
+        return a;
+    }
+    return float64_max(a, b, fpst);
+}
+
+float32 VFP_HELPER(minnm, s)(float32 a, float32 b, void *fpstp)
+{
+    float_status *fpst = fpstp;
+    if (float32_is_quiet_nan(a) && !float32_is_any_nan(b)) {
+        return b;
+    } else if (float32_is_quiet_nan(b) && !float32_is_any_nan(a)) {
+        return a;
+    }
+    return float32_min(a, b, fpst);
+}
+
+float64 VFP_HELPER(minnm, d)(float64 a, float64 b, void *fpstp)
+{
+    float_status *fpst = fpstp;
+    if (float64_is_quiet_nan(a) && !float64_is_any_nan(b)) {
+        return b;
+    } else if (float64_is_quiet_nan(b) && !float64_is_any_nan(a)) {
+        return a;
+    }
+    return float64_min(a, b, fpst);
+}
@@ -132,6 +132,12 @@ DEF_HELPER_2(neon_fcvt_f32_to_f16, i32, f32, env)
 DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr)
 DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr)
+/* ARMv8 VMAXNM/VMINNM helpers (ptr argument is the float_status to use) */
+DEF_HELPER_3(vfp_maxnmd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_maxnms, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_minnmd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_minnms, f32, f32, f32, ptr)
+
 DEF_HELPER_3(recps_f32, f32, f32, f32, env)
 DEF_HELPER_3(rsqrts_f32, f32, f32, f32, env)
 DEF_HELPER_2(recpe_f32, f32, f32, env)
@@ -2738,6 +2738,49 @@ static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
}
return 0;
+ } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
+ /* vmaxnm/vminnm */
+ uint32_t vmin = (insn >> 6) & 1;
+ TCGv_ptr fpst;
+ fpst = get_fpstatus_ptr(0);
+ if (dp) {
+ TCGv_i64 ftmp1, ftmp2, ftmp3;
+
+ ftmp1 = tcg_temp_new_i64();
+ ftmp2 = tcg_temp_new_i64();
+ ftmp3 = tcg_temp_new_i64();
+
+ tcg_gen_ld_f64(ftmp1, cpu_env, vfp_reg_offset(dp, rn));
+ tcg_gen_ld_f64(ftmp2, cpu_env, vfp_reg_offset(dp, rm));
+ if (vmin)
+ gen_helper_vfp_minnmd(ftmp3, ftmp1, ftmp2, fpst);
+ else
+ gen_helper_vfp_maxnmd(ftmp3, ftmp1, ftmp2, fpst);
+ tcg_gen_st_f64(ftmp3, cpu_env, vfp_reg_offset(dp, rd));
+ tcg_temp_free_i64(ftmp1);
+ tcg_temp_free_i64(ftmp2);
+ tcg_temp_free_i64(ftmp3);
+ } else {
+ TCGv_i32 ftmp1, ftmp2, ftmp3;
+
+ ftmp1 = tcg_temp_new_i32();
+ ftmp2 = tcg_temp_new_i32();
+ ftmp3 = tcg_temp_new_i32();
+
+ tcg_gen_ld_f32(ftmp1, cpu_env, vfp_reg_offset(dp, rn));
+ tcg_gen_ld_f32(ftmp2, cpu_env, vfp_reg_offset(dp, rm));
+ if (vmin)
+ gen_helper_vfp_minnms(ftmp3, ftmp1, ftmp2, fpst);
+ else
+ gen_helper_vfp_maxnms(ftmp3, ftmp1, ftmp2, fpst);
+ tcg_gen_st_f32(ftmp3, cpu_env, vfp_reg_offset(dp, rd));
+ tcg_temp_free_i32(ftmp1);
+ tcg_temp_free_i32(ftmp2);
+ tcg_temp_free_i32(ftmp3);
+ }
+
+ tcg_temp_free_ptr(fpst);
+ return 0;
}
return 1;
}
This adds support for the ARMv8 floating point VMAXNM and VMINNM
instructions.

Signed-off-by: Will Newton <will.newton@linaro.org>
---
 target-arm/helper.c    | 41 +++++++++++++++++++++++++++++++++++++++++
 target-arm/helper.h    |  5 +++++
 target-arm/translate.c | 43 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 89 insertions(+)

Changes in v6:
- New patch