@@ -73,6 +73,25 @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
+/* flush_all_helper: run fn across all cpus
+ *
+ * Queue fn as async work on every CPU except the source; the caller
+ * is expected to run fn on the source CPU itself. The *_synced
+ * variants queue the source CPU's work as "safe" work and exit the
+ * cpu loop, creating a synchronisation point where all queued work
+ * has finished before execution starts again.
+ */
+static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
+ run_on_cpu_data d)
+{
+ CPUState *cpu;
+
+ CPU_FOREACH(cpu) {
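+        /* the caller runs fn on the source CPU itself */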
+ if (cpu != src) {
+ async_run_on_cpu(cpu, fn, d);
+ }
+ }
+}
+
/* statistics */
int tlb_flush_count;
@@ -128,6 +147,19 @@ void tlb_flush(CPUState *cpu)
}
}
+void tlb_flush_all_cpus(CPUState *src_cpu)
+{
+ flush_all_helper(src_cpu, tlb_flush_global_async_work, RUN_ON_CPU_NULL);
+ tlb_flush_global_async_work(src_cpu, RUN_ON_CPU_NULL);
+}
+
+void QEMU_NORETURN tlb_flush_all_cpus_synced(CPUState *src_cpu)
+{
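+    /* Queue the flush on every other vCPU, then queue our own flush
+     * as "safe" work: it only runs once every vCPU has drained its
+     * work queue, which gives us the synchronisation point.
+     */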
+    flush_all_helper(src_cpu, tlb_flush_global_async_work, RUN_ON_CPU_NULL);
+    async_safe_run_on_cpu(src_cpu, tlb_flush_global_async_work,
+                          RUN_ON_CPU_NULL);
+ cpu_loop_exit(src_cpu);
+}
+
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
CPUArchState *env = cpu->env_ptr;
@@ -178,6 +210,30 @@ void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
}
}
+void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
+{
+ const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
+
+ tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
+
+ flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
+ fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
+}
+
+void QEMU_NORETURN tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+ uint16_t idxmap)
+{
+ const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
+
+ tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
+
+ flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
+ async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
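+    /* cpu_loop_exit does not return; execution resumes from the run
+     * loop once every vCPU, including this one, has completed the
+     * flush.
+     */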
+ cpu_loop_exit(src_cpu);
+}
+
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
if (addr == (tlb_entry->addr_read &
@@ -317,14 +373,56 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
}
}
-void tlb_flush_page_all(target_ulong addr)
+void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
+ uint16_t idxmap)
{
- CPUState *cpu;
+ const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
+    target_ulong addr_and_mmu_idx;
 
- CPU_FOREACH(cpu) {
- async_run_on_cpu(cpu, tlb_flush_page_async_work,
- RUN_ON_CPU_TARGET_PTR(addr));
- }
+ tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
+
+    /* This should already be page aligned; the mmu idx bitmap is
+     * carried in the low bits of the page-aligned address.
+     */
+ addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
+ addr_and_mmu_idx |= idxmap;
+
+ flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+}
+
+void QEMU_NORETURN tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+ target_ulong addr,
+ uint16_t idxmap)
+{
+ const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
+ target_ulong addr_and_mmu_idx;
+
+ tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
+
+    /* This should already be page aligned; the mmu idx bitmap is
+     * carried in the low bits of the page-aligned address.
+     */
+ addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
+ addr_and_mmu_idx |= idxmap;
+
+ flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ cpu_loop_exit(src_cpu);
+}
+
+void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
+{
+ const run_on_cpu_func fn = tlb_flush_page_async_work;
+
+ flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
+ fn(src, RUN_ON_CPU_TARGET_PTR(addr));
+}
+
+void QEMU_NORETURN tlb_flush_page_all_cpus_synced(CPUState *src,
+ target_ulong addr)
+{
+ const run_on_cpu_func fn = tlb_flush_page_async_work;
+
+ flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
+ async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
+ cpu_loop_exit(src);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
@@ -93,6 +93,27 @@ void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
*/
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
+ * tlb_flush_page_all_cpus:
+ * @src: source CPU of the flush
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of all CPUs, for all
+ * MMU indexes.
+ */
+void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
+/**
+ * tlb_flush_page_all_cpus_synced:
+ * @src: source CPU of the flush
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of all CPUs, for all MMU indexes,
+ * like tlb_flush_page_all_cpus, except this function does not
+ * return: the run-loop is restarted once all CPUs have executed
+ * the flush.
+ */
+void QEMU_NORETURN tlb_flush_page_all_cpus_synced(CPUState *src,
+ target_ulong addr);
+/**
* tlb_flush:
* @cpu: CPU whose TLB should be flushed
*
@@ -103,6 +124,19 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr);
*/
void tlb_flush(CPUState *cpu);
/**
+ * tlb_flush_all_cpus:
+ * @src_cpu: source CPU of the flush
+ *
+ * Flush the entire TLB of all CPUs.
+ */
+void tlb_flush_all_cpus(CPUState *src_cpu);
+/**
+ * tlb_flush_all_cpus_synced:
+ * @src_cpu: source CPU of the flush
+ *
+ * Like tlb_flush_all_cpus, except this function does not return:
+ * the run-loop is restarted once all CPUs have executed the flush.
+ */
+void QEMU_NORETURN tlb_flush_all_cpus_synced(CPUState *src_cpu);
+/**
* tlb_flush_page_by_mmuidx:
* @cpu: CPU whose TLB should be flushed
* @addr: virtual address of page to be flushed
@@ -114,8 +148,34 @@ void tlb_flush(CPUState *cpu);
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
uint16_t idxmap);
/**
+ * tlb_flush_page_by_mmuidx_all_cpus:
+ * @cpu: Originating CPU of the flush
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush one page from the TLB of all CPUs, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+ uint16_t idxmap);
+/**
+ * tlb_flush_page_by_mmuidx_all_cpus_synced:
+ * @cpu: Originating CPU of the flush
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush one page from the TLB of all CPUs, for the specified MMU
+ * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except this
+ * function does not return: the run-loop is restarted once all CPUs
+ * have executed the flush.
+ */
+void QEMU_NORETURN tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ target_ulong addr,
+ uint16_t idxmap);
+/**
* tlb_flush_by_mmuidx:
* @cpu: CPU whose TLB should be flushed
* @idxmap: bitmap of MMU indexes to flush
*
* Flush all entries from the TLB of the specified CPU, for the specified
@@ -123,6 +183,27 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
*/
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
+ * tlb_flush_by_mmuidx_all_cpus:
+ * @cpu: Originating CPU of the flush
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush all entries from all TLBs of all CPUs, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
+/**
+ * tlb_flush_by_mmuidx_all_cpus_synced:
+ * @cpu: Originating CPU of the flush
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush all entries from all TLBs of all CPUs, for the specified
+ * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except this
+ * function does not return: the run-loop is restarted once all CPUs
+ * have executed the flush.
+ */
+void QEMU_NORETURN tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ uint16_t idxmap);
+/**
* tlb_set_page_with_attrs:
* @cpu: CPU to add this TLB entry for
* @vaddr: virtual address of page to add entry for
@@ -159,16 +240,26 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
uintptr_t retaddr);
-void tlb_flush_page_all(target_ulong addr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
-
+static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
+{
+}
+static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
+ target_ulong addr)
+{
+}
static inline void tlb_flush(CPUState *cpu)
{
}
-
+static inline void tlb_flush_all_cpus(CPUState *src_cpu)
+{
+}
+static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
+{
+}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
target_ulong addr, uint16_t idxmap)
{
@@ -177,6 +268,23 @@ static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
+static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
+ target_ulong addr,
+ uint16_t idxmap)
+{
+}
+static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ target_ulong addr,
+ uint16_t idxmap)
+{
+}
+static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
+{
+}
+static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ uint16_t idxmap)
+{
+}
#endif
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
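
As a usage sketch (not part of this patch): a target front end emulating
a guest broadcast TLB invalidate, e.g. an ARM "TLBI ... IS" style
operation, would call a _synced variant so the flush has completed on
every vCPU before the guest resumes. The helper name, the mmu index
values and the ENV_GET_CPU() conversion below are illustrative
assumptions, not code from this series.

/* hypothetical target helper; assumes the target's usual env ->
 * CPUState conversion and target-defined mmu index assignments
 */
static void helper_tlbi_page_broadcast(CPUArchState *env, target_ulong addr)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint16_t idxmap = (1 << 0) | (1 << 1); /* e.g. kernel and user mmu idx */

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, addr, idxmap);
    /* not reached: the run loop restarts once all flushes are done */
}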