@@ -280,7 +280,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = entry->addr_write;
+ target_ulong tlb_addr = tlb_addr_write(entry);
unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;

@@ -295,7 +295,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
mmu_idx, retaddr);
}
- tlb_addr = entry->addr_write & ~TLB_INVALID_MASK;
+ tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
}

/* Handle an IO access. */
@@ -325,7 +325,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
cannot evict the first. */
page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
entry2 = tlb_entry(env, mmu_idx, page2);
- if (!tlb_hit_page(entry2->addr_write, page2)
+ if (!tlb_hit_page(tlb_addr_write(entry2), page2)
&& !VICTIM_TLB_HIT(addr_write, page2)) {
tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
mmu_idx, retaddr);
@@ -358,7 +358,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = entry->addr_write;
+ target_ulong tlb_addr = tlb_addr_write(entry);
unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;

@@ -373,7 +373,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
mmu_idx, retaddr);
}
- tlb_addr = entry->addr_write & ~TLB_INVALID_MASK;
+ tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
}

/* Handle an IO access. */
@@ -403,7 +403,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
cannot evict the first. */
page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
entry2 = tlb_entry(env, mmu_idx, page2);
- if (!tlb_hit_page(entry2->addr_write, page2)
+ if (!tlb_hit_page(tlb_addr_write(entry2), page2)
&& !VICTIM_TLB_HIT(addr_write, page2)) {
tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
mmu_idx, retaddr);
@@ -126,6 +126,15 @@ extern __thread uintptr_t helper_retaddr;
/* The memory helpers for tcg-generated code need tcg_target_long etc. */
#include "tcg.h"

+static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
+{
+#if TCG_OVERSIZED_GUEST
+ return entry->addr_write;
+#else
+ return atomic_read(&entry->addr_write);
+#endif
+}
+
/* Find the TLB index corresponding to the mmu_idx + address pair. */
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
target_ulong addr)
@@ -439,7 +448,7 @@ static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
tlb_addr = tlbentry->addr_read;
break;
case 1:
- tlb_addr = tlbentry->addr_write;
+ tlb_addr = tlb_addr_write(tlbentry);
break;
case 2:
tlb_addr = tlbentry->addr_code;
@@ -177,7 +177,7 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
addr = ptr;
mmu_idx = CPU_MMU_INDEX;
entry = tlb_entry(env, mmu_idx, addr);
- if (unlikely(entry->addr_write !=
+ if (unlikely(tlb_addr_write(entry) !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
oi = make_memop_idx(SHIFT, mmu_idx);
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, v, oi,
@@ -258,7 +258,7 @@ static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
target_ulong page)
{
return tlb_hit_page(tlb_entry->addr_read, page) ||
- tlb_hit_page(tlb_entry->addr_write, page) ||
+ tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
tlb_hit_page(tlb_entry->addr_code, page);
}

@@ -855,7 +855,7 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);

entry = tlb_entry(env, mmu_idx, addr);
- tlb_addr = entry->addr_write;
+ tlb_addr = tlb_addr_write(entry);
if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
/* RAM access */
uintptr_t haddr = addr + entry->addend;
@@ -904,7 +904,14 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
assert_cpu_is_self(ENV_GET_CPU(env));
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
- target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
+ target_ulong cmp;
+
+ /* elt_ofs might correspond to .addr_write, so use atomic_read */
+#if TCG_OVERSIZED_GUEST
+ cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
+#else
+ cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

if (cmp == page) {
/* Found entry in victim tlb, swap tlb and iotlb. */
@@ -977,7 +984,7 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

- if (!tlb_hit(entry->addr_write, addr)) {
+ if (!tlb_hit(tlb_addr_write(entry), addr)) {
/* TLB entry is for a different page */
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
@@ -995,7 +1002,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
size_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = tlbe->addr_write;
+ target_ulong tlb_addr = tlb_addr_write(tlbe);
TCGMemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
int s_bits = mop & MO_SIZE;
@@ -1026,7 +1033,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
mmu_idx, retaddr);
}
- tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
+ tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
}

/* Notice an IO access or a needs-MMU-lookup access */
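
The core of the change above: CPUTLBEntry.addr_write can be written concurrently with these reads, so every read now goes through tlb_addr_write(), which uses atomic_read() to avoid torn loads. The one exception is TCG_OVERSIZED_GUEST (guest addresses wider than the host word), where an atomic load of the full field is not possible and the pre-existing plain read is kept. The following stand-alone sketch shows the same pattern with C11 atomics instead of QEMU's atomic_read(); guest_addr_t, tlb_entry_t, entry_addr_write() and entry_set_addr_write() are hypothetical names, and the writer side is included only to make the sketch self-contained (it is not part of this patch).

/* Minimal sketch, not QEMU code: lock-free atomic read of a TLB
 * comparator, using C11 atomics in place of QEMU's atomic_read(). */
#include <stdatomic.h>
#include <stdint.h>

typedef uint64_t guest_addr_t;          /* stand-in for target_ulong */

typedef struct {
    _Atomic guest_addr_t addr_write;    /* comparator, may be updated concurrently */
    uintptr_t addend;                   /* host addend (plain field in this sketch) */
} tlb_entry_t;

/* Reader: a relaxed atomic load is enough, since the caller only
 * compares the value against a page address; what matters is that the
 * load cannot tear.  (The TCG_OVERSIZED_GUEST case cannot do this,
 * because the field is wider than the host word, so the patch keeps a
 * plain read there.) */
static inline guest_addr_t entry_addr_write(tlb_entry_t *e)
{
    return atomic_load_explicit(&e->addr_write, memory_order_relaxed);
}

/* Writer (illustrative only, not part of this patch): the store must
 * also be atomic so a concurrent reader never observes a half-written
 * comparator. */
static inline void entry_set_addr_write(tlb_entry_t *e, guest_addr_t val)
{
    atomic_store_explicit(&e->addr_write, val, memory_order_relaxed);
}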