@@ -2880,6 +2880,128 @@ static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
rm = VFP_SREG_M(insn);
}
+ if ((insn & 0xff800e50) == 0xfe000a00) {
+ /* vsel: unconditional (cond == 0xF) encoding space only */
+ uint32_t cc = (insn >> 20) & 3;
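+ /* cc encodes the VSEL condition: 0 = eq, 1 = vs, 2 = ge, 3 = gt;
+ * these are the only four conditions the instruction can encode. */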
+
+ /* ARMv8 VFP. */
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ return 1;
+ }
+
+ if (dp) {
+ TCGv_i64 ftmp1, ftmp2, ftmp3;
+ TCGv_i64 tmp, zero, zf, nf, vf;
+
+ zero = tcg_const_i64(0);
+
+ ftmp1 = tcg_temp_new_i64();
+ ftmp2 = tcg_temp_new_i64();
+ ftmp3 = tcg_temp_new_i64();
+
+ zf = tcg_temp_new_i64();
+ nf = tcg_temp_new_i64();
+ vf = tcg_temp_new_i64();
+
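+ /* QEMU keeps the Z flag in cpu_ZF as "zero iff Z is set", and the
+ * N and V flags in bit 31 of cpu_NF and cpu_VF, so nf and vf must
+ * be sign-extended for the signed comparisons below. */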
+ tcg_gen_extu_i32_i64(zf, cpu_ZF);
+ tcg_gen_ext_i32_i64(nf, cpu_NF);
+ tcg_gen_ext_i32_i64(vf, cpu_VF);
+
+ tcg_gen_ld_f64(ftmp1, cpu_env, vfp_reg_offset(dp, rn));
+ tcg_gen_ld_f64(ftmp2, cpu_env, vfp_reg_offset(dp, rm));
+ switch (cc) {
+ case 0: /* eq: Z */
+ tcg_gen_movcond_i64(TCG_COND_EQ, ftmp3, zf, zero,
+ ftmp1, ftmp2);
+ break;
+ case 1: /* vs: V */
+ tcg_gen_movcond_i64(TCG_COND_LT, ftmp3, vf, zero,
+ ftmp1, ftmp2);
+ break;
+ case 2: /* ge: N == V -> N ^ V == 0 */
+ tmp = tcg_temp_new_i64();
+ tcg_gen_xor_i64(tmp, vf, nf);
+ tcg_gen_movcond_i64(TCG_COND_GE, ftmp3, tmp, zero,
+ ftmp1, ftmp2);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 3: /* gt: !Z && N == V */
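+ /* Composed from two selects: pick rn only while Z is clear,
+ * then fall back to rm unless N == V. */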
+ tcg_gen_movcond_i64(TCG_COND_NE, ftmp3, zf, zero,
+ ftmp1, ftmp2);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_xor_i64(tmp, vf, nf);
+ tcg_gen_movcond_i64(TCG_COND_GE, ftmp3, tmp, zero,
+ ftmp3, ftmp2);
+ tcg_temp_free_i64(tmp);
+ break;
+ }
+ tcg_gen_st_f64(ftmp3, cpu_env, vfp_reg_offset(dp, rd));
+ tcg_temp_free_i64(ftmp1);
+ tcg_temp_free_i64(ftmp2);
+ tcg_temp_free_i64(ftmp3);
+
+ tcg_temp_free_i64(zf);
+ tcg_temp_free_i64(nf);
+ tcg_temp_free_i64(vf);
+
+ tcg_temp_free_i64(zero);
+ } else {
+ TCGv_i32 ftmp1, ftmp2, ftmp3;
+ TCGv_i32 tmp, zero;
+
+ zero = tcg_const_i32(0);
+
+ ftmp1 = tcg_temp_new_i32();
+ ftmp2 = tcg_temp_new_i32();
+ ftmp3 = tcg_temp_new_i32();
+ tcg_gen_ld_f32(ftmp1, cpu_env, vfp_reg_offset(dp, rn));
+ tcg_gen_ld_f32(ftmp2, cpu_env, vfp_reg_offset(dp, rm));
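+ /* Same condition tests as the DP case, but the 32-bit flag
+ * variables can be used directly. */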
+ switch (cc) {
+ case 0: /* eq: Z */
+ tcg_gen_movcond_i32(TCG_COND_EQ, ftmp3, cpu_ZF, zero,
+ ftmp1, ftmp2);
+ break;
+ case 1: /* vs: V */
+ tcg_gen_movcond_i32(TCG_COND_LT, ftmp3, cpu_VF, zero,
+ ftmp1, ftmp2);
+ break;
+ case 2: /* ge: N == V -> N ^ V == 0 */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
+ tcg_gen_movcond_i32(TCG_COND_GE, ftmp3, tmp, zero,
+ ftmp1, ftmp2);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 3: /* gt: !Z && N == V */
+ tcg_gen_movcond_i32(TCG_COND_NE, ftmp3, cpu_ZF, zero,
+ ftmp1, ftmp2);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
+ tcg_gen_movcond_i32(TCG_COND_GE, ftmp3, tmp, zero,
+ ftmp3, ftmp2);
+ tcg_temp_free_i32(tmp);
+ break;
+ }
+ tcg_gen_st_f32(ftmp3, cpu_env, vfp_reg_offset(dp, rd));
+ tcg_temp_free_i32(ftmp1);
+ tcg_temp_free_i32(ftmp2);
+ tcg_temp_free_i32(ftmp3);
+
+ tcg_temp_free_i32(zero);
+ }
+
+ return 0;
+ }
+
veclen = s->vec_len;
if (op == 15 && rn > 3)
veclen = 0;
This adds support for the VSEL floating point selection instruction
which was added in ARMv8.

Signed-off-by: Will Newton <will.newton@linaro.org>
---
 target-arm/translate.c | 122 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 122 insertions(+)

Changes in v4:
- Fix leak of temporaries
- Extend condition values to 64bit in the DP case
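For reference, VSELGE.F64 d0, d1, d2 roughly corresponds to:

    d0 = (N == V) ? d1 : d2;   /* ge */

with the eq (Z set), vs (V set) and gt (Z clear and N == V)
variants testing the corresponding integer condition flags.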