@@ -31,7 +31,7 @@ void tlb_unprotect_code(ram_addr_t ram_addr);
 #endif
 
 #ifndef CONFIG_USER_ONLY
-void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
+void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length);
 void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
 #endif
 
@@ -912,7 +912,7 @@ static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
  * thing actually updated is the target TLB entry ->addr_write flags.
  */
-void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
+void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length)
 {
     int mmu_idx;
 
@@ -923,12 +923,12 @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
 
         for (i = 0; i < n; i++) {
             tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
-                                         start1, length);
+                                         start, length);
         }
 
         for (i = 0; i < CPU_VTLB_SIZE; i++) {
             tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
-                                         start1, length);
+                                         start, length);
         }
     }
     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
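
Why the type changes: the value passed for start is a host virtual address, not a
guest ram_addr_t offset, so uintptr_t is the honest type for it. The caller,
tlb_reset_dirty_range_all() in physmem.c, computes that host address from the
RAMBlock mapping before fanning the call out to every vCPU. A minimal sketch of
that caller, assuming the current physmem.c helpers (qemu_get_ram_block(),
ramblock_ptr(), CPU_FOREACH()) and omitting the page-alignment and RCU details
of the real function:

    /* Sketch: the ram_addr_t range is converted to a host pointer within the
     * RAMBlock's mapping before the per-vCPU TLB walk, which is why
     * tlb_reset_dirty() takes uintptr_t after this patch. */
    static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
    {
        RAMBlock *block;
        uintptr_t start1;
        CPUState *cpu;

        /* Find the RAMBlock covering the range, then its host address. */
        block = qemu_get_ram_block(start);
        start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);

        /* Ask every vCPU to mark matching TLB entries not-dirty. */
        CPU_FOREACH(cpu) {
            tlb_reset_dirty(cpu, start1, length);
        }
    }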
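
The comment in the second hunk notes that the only state touched under
tlb_c.lock is each entry's ->addr_write flags. A plausible sketch of the
per-entry helper tlb_reset_dirty_range_locked() that both loops call,
reconstructed from that description and the usual QEMU TLB flag scheme
(TLB_INVALID_MASK, TLB_MMIO, TLB_NOTDIRTY); not copied from the tree, so
details may differ:

    static inline void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                                    uintptr_t start,
                                                    uintptr_t length)
    {
        uintptr_t addr = tlb_entry->addr_write;

        /* Only plain, directly writable entries participate. */
        if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
            addr &= TARGET_PAGE_MASK;
            addr += tlb_entry->addend;   /* guest page -> host address */
            if (addr - start < length) {
                /* Force the next write through the slow path so the
                 * dirty bitmap can be updated again. */
                tlb_entry->addr_write |= TLB_NOTDIRTY;
            }
        }
    }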