Flushing of the CPU TLB is no longer a simple count. The ratio of
self-flushes to cross-flushes, and whether they need to synchronise
across vCPUs, has more of an impact. To capture this correctly we'll
replace the simple count with trace points in a future patch.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
 cputlb.c              | 4 ----
 include/exec/cputlb.h | 1 -
 translate-all.c       | 1 -
 3 files changed, 6 deletions(-)

diff --git a/cputlb.c b/cputlb.c
--- a/cputlb.c
+++ b/cputlb.c
@@ -92,9 +92,6 @@ static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
}
}
-/* statistics */
-int tlb_flush_count;
-
/* This is OK because CPU architectures generally permit an
* implementation to drop entries from the TLB at any time, so
* flushing more entries than required is only an efficiency issue,
@@ -112,7 +109,6 @@ static void tlb_flush_nocheck(CPUState *cpu)
}
assert_cpu_is_self(cpu);
- tlb_debug("(count: %d)\n", tlb_flush_count++);
tb_lock();
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -23,7 +23,6 @@
/* cputlb.c */
void tlb_protect_code(ram_addr_t ram_addr);
void tlb_unprotect_code(ram_addr_t ram_addr);
-extern int tlb_flush_count;
#endif
#endif
diff --git a/translate-all.c b/translate-all.c
--- a/translate-all.c
+++ b/translate-all.c
@@ -1927,7 +1927,6 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
cpu_fprintf(f, "TB invalidate count %d\n",
tcg_ctx.tb_ctx.tb_phys_invalidate_count);
- cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
tcg_dump_info(f, cpu_fprintf);
tb_unlock();
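For the curious: the plan is for a later patch to replace the single
counter with per-event trace points, so self-flushes and synced
cross-vCPU flushes can be counted separately. A rough sketch of what
that could look like follows; the event names, arguments and call site
below are illustrative guesses, not the final API:

    # trace-events (hypothetical entries)
    tlb_flush_self(int vcpu) "vCPU %d flushed its own TLB"
    tlb_flush_synced(int vcpu) "vCPU %d flush synced across all vCPUs"

    /* cputlb.c: tlb_flush_nocheck(), roughly where the count was bumped */
    assert_cpu_is_self(cpu);
    trace_tlb_flush_self(cpu->cpu_index);

    tb_lock();

Individual events can then be enabled at run time (e.g. with
"-trace enable=tlb_flush_*"), which captures the self/cross flush
ratio in a way a single global count cannot.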
--
2.11.0