@@ -1124,7 +1124,6 @@ struct CPUArchState {
/* used to speed-up TLB assist handlers */

target_ulong nip; /* next instruction pointer */
- uint64_t retxh; /* high part of 128-bit helper return */

/* when a memory exception occurs, the access type is stored here */
int access_type;
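The retxh scratch field existed only because a TCG helper can return at most one 64-bit value: the 128-bit load helpers returned the low half through the normal return path and parked the high half here for the translator to fetch with a separate tcg_gen_ld_i64. Once the translator emits a native 128-bit load, nothing reads the field. A minimal standalone sketch of the retired split-return idiom (names here are illustrative, not QEMU API):

    #include <stdint.h>

    static uint64_t side_slot; /* plays the role of env->retxh */

    /* A 128-bit load squeezed through a 64-bit call interface: return
     * the low half, stash the high half in a side slot for the caller. */
    static uint64_t load128_split(const unsigned __int128 *p)
    {
        unsigned __int128 v = *p;
        side_slot = (uint64_t)(v >> 64);
        return (uint64_t)v;
    }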
@@ -810,12 +810,3 @@ DEF_HELPER_4(DSCLIQ, void, env, fprp, fprp, i32)

DEF_HELPER_1(tbegin, void, env)
DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env)
-
-#ifdef TARGET_PPC64
-DEF_HELPER_FLAGS_3(lq_le_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
-DEF_HELPER_FLAGS_3(lq_be_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
-DEF_HELPER_FLAGS_5(stq_le_parallel, TCG_CALL_NO_WG,
- void, env, tl, i64, i64, i32)
-DEF_HELPER_FLAGS_5(stq_be_parallel, TCG_CALL_NO_WG,
- void, env, tl, i64, i64, i32)
-#endif
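With the translator no longer calling them, the four helper declarations can be dropped as well. Reading a DEF_HELPER_FLAGS_N line: N counts the arguments, the type after the flags is the return type, and TCG_CALL_NO_WG tells the optimizer the helper writes no TCG globals (env->retxh was a plain env field, not a TCG global, which is why the flag was safe despite the store). Per QEMU's usual type mapping (i64 -> uint64_t, tl -> target_ulong, i32 -> uint32_t), the first declaration corresponded to the prototype below, matching the implementation removed in the next hunk:

    uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                                   uint32_t opidx);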
@@ -367,54 +367,6 @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
return i;
}

-#ifdef TARGET_PPC64
-uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
- uint32_t opidx)
-{
- Int128 ret;
-
- /* We will have raised EXCP_ATOMIC from the translator. */
- assert(HAVE_ATOMIC128);
- ret = cpu_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
- env->retxh = int128_gethi(ret);
- return int128_getlo(ret);
-}
-
-uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
- uint32_t opidx)
-{
- Int128 ret;
-
- /* We will have raised EXCP_ATOMIC from the translator. */
- assert(HAVE_ATOMIC128);
- ret = cpu_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
- env->retxh = int128_gethi(ret);
- return int128_getlo(ret);
-}
-
-void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
- uint64_t lo, uint64_t hi, uint32_t opidx)
-{
- Int128 val;
-
- /* We will have raised EXCP_ATOMIC from the translator. */
- assert(HAVE_ATOMIC128);
- val = int128_make128(lo, hi);
- cpu_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
-}
-
-void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
- uint64_t lo, uint64_t hi, uint32_t opidx)
-{
- Int128 val;
-
- /* We will have raised EXCP_ATOMIC from the translator. */
- assert(HAVE_ATOMIC128);
- val = int128_make128(lo, hi);
- cpu_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
-}
-#endif
-
/*****************************************************************************/
/* Altivec extension helpers */
#if HOST_BIG_ENDIAN
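Each removed helper asserted HAVE_ATOMIC128 because the translator only emitted the call when the host was known to provide 16-byte atomics; otherwise it bailed out through gen_helper_exit_atomic and the instruction restarted under the exclusive lock. The opidx argument is a MemOpIdx built by make_memop_idx(), and GETPC() captures the host return address so a faulting access unwinds to the right guest instruction. The host primitive the helpers ultimately relied on is a 16-byte atomic load/store; a standalone sketch of that primitive (not QEMU code; the generic __atomic builtins may route through libatomic, whereas HAVE_ATOMIC128 implied a lock-free implementation):

    #include <stdint.h>

    typedef unsigned __int128 u128;

    static u128 atomic16_load(u128 *p)
    {
        u128 v;
        __atomic_load(p, &v, __ATOMIC_SEQ_CST);
        return v;
    }

    static void atomic16_store(u128 *p, u128 v)
    {
        __atomic_store(p, &v, __ATOMIC_SEQ_CST);
    }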
@@ -3757,6 +3757,7 @@ static void gen_lqarx(DisasContext *ctx)
{
int rd = rD(ctx->opcode);
TCGv EA, hi, lo;
+ TCGv_i128 t16;

if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
(rd == rB(ctx->opcode)))) {
@@ -3772,36 +3773,9 @@ static void gen_lqarx(DisasContext *ctx)
lo = cpu_gpr[rd + 1];
hi = cpu_gpr[rd];

- if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
- if (HAVE_ATOMIC128) {
- TCGv_i32 oi = tcg_temp_new_i32();
- if (ctx->le_mode) {
- tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128 | MO_ALIGN,
- ctx->mem_idx));
- gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
- } else {
- tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128 | MO_ALIGN,
- ctx->mem_idx));
- gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
- }
- tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
- } else {
- /* Restart with exclusive lock. */
- gen_helper_exit_atomic(cpu_env);
- ctx->base.is_jmp = DISAS_NORETURN;
- return;
- }
- } else if (ctx->le_mode) {
- tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEUQ | MO_ALIGN_16);
- tcg_gen_mov_tl(cpu_reserve, EA);
- gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEUQ);
- } else {
- tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEUQ | MO_ALIGN_16);
- tcg_gen_mov_tl(cpu_reserve, EA);
- gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEUQ);
- }
+ t16 = tcg_temp_new_i128();
+ tcg_gen_qemu_ld_i128(t16, EA, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN));
+ tcg_gen_extr_i128_i64(lo, hi, t16);

tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
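On the translator side the CF_PARALLEL split collapses into one op: tcg_gen_qemu_ld_i128 lets the TCG core pick a genuinely atomic 16-byte host access when one exists and otherwise fall back itself (raising EXCP_ATOMIC where necessary), so the target no longer open-codes either path. DEF_MEMOP folds in the byte-order flag for the current endianness, and MO_ALIGN on an MO_128 access demands natural 16-byte alignment, matching the old MO_ALIGN_16. tcg_gen_extr_i128_i64(lo, hi, t16) writes the low 64 bits of t16 to its first operand and the high 64 bits to its second; on plain host integers it computes:

    #include <stdint.h>

    static void extr_i128_i64(uint64_t *lo, uint64_t *hi, unsigned __int128 v)
    {
        *lo = (uint64_t)v;          /* bits [63:0]   */
        *hi = (uint64_t)(v >> 64);  /* bits [127:64] */
    }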
@@ -72,7 +72,7 @@ static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed)
{
#if defined(TARGET_PPC64)
TCGv ea;
TCGv_i64 low_addr_gpr, high_addr_gpr;
- MemOp mop;
+ TCGv_i128 t16;

REQUIRE_INSNS_FLAGS(ctx, 64BX);
@@ -101,51 +101,14 @@ static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed)
low_addr_gpr = cpu_gpr[a->rt + 1];
high_addr_gpr = cpu_gpr[a->rt];
}
+ t16 = tcg_temp_new_i128();

- if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
- if (HAVE_ATOMIC128) {
- mop = DEF_MEMOP(MO_128);
- TCGv_i32 oi = tcg_constant_i32(make_memop_idx(mop, ctx->mem_idx));
- if (store) {
- if (ctx->le_mode) {
- gen_helper_stq_le_parallel(cpu_env, ea, low_addr_gpr,
- high_addr_gpr, oi);
- } else {
- gen_helper_stq_be_parallel(cpu_env, ea, high_addr_gpr,
- low_addr_gpr, oi);
-
- }
- } else {
- if (ctx->le_mode) {
- gen_helper_lq_le_parallel(low_addr_gpr, cpu_env, ea, oi);
- tcg_gen_ld_i64(high_addr_gpr, cpu_env,
- offsetof(CPUPPCState, retxh));
- } else {
- gen_helper_lq_be_parallel(high_addr_gpr, cpu_env, ea, oi);
- tcg_gen_ld_i64(low_addr_gpr, cpu_env,
- offsetof(CPUPPCState, retxh));
- }
- }
- } else {
- /* Restart with exclusive lock. */
- gen_helper_exit_atomic(cpu_env);
- ctx->base.is_jmp = DISAS_NORETURN;
- }
+ if (store) {
+ tcg_gen_concat_i64_i128(t16, low_addr_gpr, high_addr_gpr);
+ tcg_gen_qemu_st_i128(t16, ea, ctx->mem_idx, DEF_MEMOP(MO_128));
} else {
- mop = DEF_MEMOP(MO_UQ);
- if (store) {
- tcg_gen_qemu_st_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
- } else {
- tcg_gen_qemu_ld_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
- }
-
- gen_addr_add(ctx, ea, ea, 8);
-
- if (store) {
- tcg_gen_qemu_st_i64(high_addr_gpr, ea, ctx->mem_idx, mop);
- } else {
- tcg_gen_qemu_ld_i64(high_addr_gpr, ea, ctx->mem_idx, mop);
- }
+ tcg_gen_qemu_ld_i128(t16, ea, ctx->mem_idx, DEF_MEMOP(MO_128));
+ tcg_gen_extr_i128_i64(low_addr_gpr, high_addr_gpr, t16);
}
#else
qemu_build_not_reached();
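LQ and STQ get the same treatment: a single 16-byte access replaces both the CF_PARALLEL helper call and the non-atomic pair of 8-byte accesses, and the manual gen_addr_add(ctx, ea, ea, 8) addressing of the second half disappears with it. tcg_gen_concat_i64_i128 is the inverse of the extract used above: the first i64 operand becomes the low half of the 128-bit value. A runnable round-trip check of that pairing on host integers (illustrative only; the real code emits TCG ops, not C):

    #include <assert.h>
    #include <stdint.h>

    typedef unsigned __int128 u128;

    int main(void)
    {
        uint64_t lo = 0x1111222233334444ull, hi = 0x5555666677778888ull;
        u128 v = ((u128)hi << 64) | lo;    /* tcg_gen_concat_i64_i128 */
        assert((uint64_t)v == lo);         /* tcg_gen_extr_i128_i64: lo */
        assert((uint64_t)(v >> 64) == hi); /* tcg_gen_extr_i128_i64: hi */
        return 0;
    }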