@@ -140,7 +140,7 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
uintptr_t ret;
TranslationBlock *last_tb;
int tb_exit;
- uint8_t *tb_ptr = itb->tc_ptr;
+ uint8_t *tb_ptr = atomic_read(&itb->tc_ptr);
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
"Trace %p [" TARGET_FMT_lx "] %s\n",
@@ -291,8 +291,8 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
is executed. */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
- if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
- tb->flags != flags)) {
+ if (unlikely(!tb || atomic_read(&tb->pc) != pc || atomic_read(&tb->cs_base) != cs_base ||
+ atomic_read(&tb->flags) != flags)) {
tb = tb_htable_lookup(cpu, pc, cs_base, flags);
if (!tb) {
@@ -1198,10 +1198,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
}
gen_code_buf = tcg_ctx.code_gen_ptr;
- tb->tc_ptr = gen_code_buf;
- tb->cs_base = cs_base;
- tb->flags = flags;
- tb->cflags = cflags;
+ atomic_set(&tb->tc_ptr, gen_code_buf);
+ atomic_set(&tb->cs_base, cs_base);
+ atomic_set(&tb->flags, flags);
+ atomic_set(&tb->cflags, cflags);
#ifdef CONFIG_PROFILER
tcg_ctx.tb_count1++; /* includes aborted translations because of
To meet C11 semantics for shared data access we need to use relaxed atomic accesses. While the completion of data writes w.r.t. reads is ensured by QHT's explicit barriers when a newly generated TB is inserted, ThreadSanitizer will still complain. By using the relaxed accesses the same code gets generated, but the instrumentation does not have to worry about a potentially undefined interaction between plain loads/stores.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
 cpu-exec.c      | 6 +++---
 translate-all.c | 8 ++++----
 2 files changed, 7 insertions(+), 7 deletions(-)

-- 
2.9.3