@@ -297,7 +297,6 @@ static TranslationBlock *tb_find_slow(CPUState *cpu,
goto found;
}
-#ifdef CONFIG_USER_ONLY
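+ /* mmap_lock()/mmap_unlock() are no-op inlines in the softmmu
+ * build, so the calls below no longer need to be guarded by
+ * CONFIG_USER_ONLY and read the same in both modes.
+ */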
/* mmap_lock is needed by tb_gen_code, and mmap_lock must be
* taken outside tb_lock. Since we're momentarily dropping
* tb_lock, there's a chance that our desired tb has been
@@ -311,14 +310,11 @@ static TranslationBlock *tb_find_slow(CPUState *cpu,
mmap_unlock();
goto found;
}
-#endif
/* if no translated code available, then translate it now */
tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
-#ifdef CONFIG_USER_ONLY
mmap_unlock();
-#endif
found:
/* we add the TB in the virtual pc hash table */
@@ -523,7 +519,6 @@ static inline void cpu_handle_interrupt(CPUState *cpu,
}
if (unlikely(cpu->exit_request || replay_has_interrupt())) {
- cpu->exit_request = 0;
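+ /* exit_request is now only cleared by the thread that owns this
+ * vCPU (see cpu_exec_end and the TCG thread loops), so a request
+ * raised by another thread cannot be lost here.
+ */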
cpu->exception_index = EXCP_INTERRUPT;
cpu_loop_exit(cpu);
}
@@ -661,8 +656,5 @@ int cpu_exec(CPUState *cpu)
cc->cpu_exec_exit(cpu);
rcu_read_unlock();
- /* fail safe : never use current_cpu outside cpu_exec() */
- current_cpu = NULL;
-
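+ /* current_cpu is a TLS variable set once when the vCPU thread
+ * starts, so it no longer needs to be cleared on the way out. */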
return ret;
}
@@ -962,10 +962,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
qemu_cpu_kick(cpu);
while (!atomic_mb_read(&wi.done)) {
- CPUState *self_cpu = current_cpu;
-
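+ /* current_cpu is only ever written by its owning thread now, so
+ * nothing can clobber it across the cond_wait. */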
qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
- current_cpu = self_cpu;
}
}
@@ -1027,13 +1024,13 @@ static void flush_queued_work(CPUState *cpu)
static void qemu_wait_io_event_common(CPUState *cpu)
{
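+ /* Clear the kick flag first, with a barrier: a kick that arrives
+ * while the event is being serviced will then raise a fresh
+ * signal instead of being coalesced with one already consumed. */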
+ atomic_mb_set(&cpu->thread_kicked, false);
if (cpu->stop) {
cpu->stop = false;
cpu->stopped = true;
qemu_cond_broadcast(&qemu_pause_cond);
}
flush_queued_work(cpu);
- cpu->thread_kicked = false;
}
static void qemu_tcg_wait_io_event(CPUState *cpu)
@@ -1042,9 +1039,7 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
- CPU_FOREACH(cpu) {
- qemu_wait_io_event_common(cpu);
- }
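+ /* service only the vCPU we were woken for, not every CPU in the
+ * system */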
+ qemu_wait_io_event_common(cpu);
}
static void qemu_kvm_wait_io_event(CPUState *cpu)
@@ -1111,6 +1106,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->can_do_io = 1;
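+ /* publish current_cpu (a TLS variable) once, before signalling
+ * creation; it stays valid for the lifetime of this thread */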
+ current_cpu = cpu;
sigemptyset(&waitset);
sigaddset(&waitset, SIG_IPI);
@@ -1119,9 +1115,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
cpu->created = true;
qemu_cond_signal(&qemu_cpu_cond);
- current_cpu = cpu;
while (1) {
- current_cpu = NULL;
qemu_mutex_unlock_iothread();
do {
int sig;
@@ -1132,7 +1126,6 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
exit(1);
}
qemu_mutex_lock_iothread();
- current_cpu = cpu;
qemu_wait_io_event_common(cpu);
}
@@ -1249,7 +1242,7 @@ static void kick_tcg_thread(void *opaque)
qemu_cpu_kick_rr_cpu();
}
-static void *qemu_tcg_cpu_thread_fn(void *arg)
+static void *qemu_tcg_single_cpu_thread_fn(void *arg)
{
CPUState *cpu = arg;
QEMUTimer *kick_timer;
@@ -1331,6 +1324,69 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
return NULL;
}
+/* Multi-threaded TCG
+ *
+ * In the multi-threaded case each vCPU has its own thread. The TLS
+ * variable current_cpu can be used deep in the code to find the
+ * current CPUState for a given thread.
+ */
+
+static void *qemu_tcg_cpu_thread_fn(void *arg)
+{
+ CPUState *cpu = arg;
+
+ rcu_register_thread();
+
+ qemu_mutex_lock_iothread();
+ qemu_thread_get_self(cpu->thread);
+
+ cpu->thread_id = qemu_get_thread_id();
+ cpu->created = true;
+ cpu->can_do_io = 1;
+ current_cpu = cpu;
+ qemu_cond_signal(&qemu_cpu_cond);
+
+ /* Process any pending work: starting with exit_request set makes
+ * the first call into cpu_exec() return straight away, so work
+ * queued before this thread existed gets flushed first.
+ */
+ atomic_mb_set(&cpu->exit_request, 1);
+
+ while (1) {
+ bool should_sleep = false;
+
+ if (cpu_can_run(cpu)) {
+ int r = tcg_cpu_exec(cpu);
+ switch (r) {
+ case EXCP_DEBUG:
+ cpu_handle_guest_debug(cpu);
+ break;
+ case EXCP_HALTED:
+ /* during start-up the vCPU is reset and the thread is
+ * kicked several times. If we don't ensure we go back
+ * to sleep in the halted state we won't cleanly
+ * start up when the vCPU is enabled.
+ */
+ should_sleep = true;
+ break;
+ default:
+ /* Ignore everything else? */
+ break;
+ }
+ } else {
+ should_sleep = true;
+ }
+
+ handle_icount_deadline();
+
+ if (should_sleep) {
+ qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+ }
+
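+ /* only this thread ever clears its own exit_request */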
+ atomic_mb_set(&cpu->exit_request, 0);
+ qemu_tcg_wait_io_event(cpu);
+ }
+
+ return NULL;
+}
+
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
@@ -1355,7 +1411,7 @@ void qemu_cpu_kick(CPUState *cpu)
qemu_cond_broadcast(cpu->halt_cond);
if (tcg_enabled()) {
cpu_exit(cpu);
- /* Also ensure current RR cpu is kicked */
+ /* NOP unless doing single-thread RR */
qemu_cpu_kick_rr_cpu();
} else {
qemu_cpu_kick_thread(cpu);
@@ -1422,13 +1478,6 @@ void pause_all_vcpus(void)
if (qemu_in_vcpu_thread()) {
cpu_stop_current();
- if (!kvm_enabled()) {
- CPU_FOREACH(cpu) {
- cpu->stop = false;
- cpu->stopped = true;
- }
- return;
- }
}
while (!all_vcpus_paused()) {
@@ -1462,29 +1511,42 @@ void resume_all_vcpus(void)
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
- static QemuCond *tcg_halt_cond;
- static QemuThread *tcg_cpu_thread;
+ static QemuCond *single_tcg_halt_cond;
+ static QemuThread *single_tcg_cpu_thread;
- /* share a single thread for all cpus with TCG */
- if (!tcg_cpu_thread) {
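+ /* MTTCG: every vCPU gets its own thread; otherwise only the
+ * first vCPU to be realized creates the shared thread */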
+ if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
cpu->thread = g_malloc0(sizeof(QemuThread));
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
qemu_cond_init(cpu->halt_cond);
- tcg_halt_cond = cpu->halt_cond;
- snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
+
+ if (qemu_tcg_mttcg_enabled()) {
+ /* create a thread per vCPU with TCG (MTTCG) */
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
cpu->cpu_index);
- qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
- cpu, QEMU_THREAD_JOINABLE);
+
+ qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
+ cpu, QEMU_THREAD_JOINABLE);
+
+ } else {
+ /* share a single thread for all cpus with TCG */
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
+ qemu_thread_create(cpu->thread, thread_name,
+ qemu_tcg_single_cpu_thread_fn,
+ cpu, QEMU_THREAD_JOINABLE);
+
+ single_tcg_halt_cond = cpu->halt_cond;
+ single_tcg_cpu_thread = cpu->thread;
+ }
#ifdef _WIN32
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
while (!cpu->created) {
qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
}
- tcg_cpu_thread = cpu->thread;
} else {
- cpu->thread = tcg_cpu_thread;
- cpu->halt_cond = tcg_halt_cond;
+ /* For non-MTTCG cases we share the thread */
+ cpu->thread = single_tcg_cpu_thread;
+ cpu->halt_cond = single_tcg_halt_cond;
}
}
@@ -207,6 +207,7 @@ static inline void cpu_exec_end(CPUState *cpu)
}
exclusive_idle();
pthread_mutex_unlock(&exclusive_lock);
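+ /* whatever request forced us out of the execution loop has been
+ * handled by now; only the owning thread clears its exit_request */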
+ cpu->exit_request = false;
}
void cpu_list_lock(void)
@@ -83,7 +83,11 @@
#endif
#ifdef CONFIG_SOFTMMU
-#define assert_memory_lock() do { /* nothing */ } while (0)
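+/* in system mode the memory map is protected by tb_lock, so checking
+ * have_tb_lock is the correct assertion here */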
+#define assert_memory_lock() do { \
+ if (DEBUG_MEM_LOCKS) { \
+ g_assert(have_tb_lock); \
+ } \
+ } while (0)
#else
#define assert_memory_lock() do { \
if (DEBUG_MEM_LOCKS) { \
@@ -147,36 +151,28 @@ static void *l1_map[V_L1_SIZE];
TCGContext tcg_ctx;
/* translation block context */
-#ifdef CONFIG_USER_ONLY
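+/* per-thread flag: non-zero while this thread holds tb_lock; used by
+ * the locking assertions and now maintained in all configurations */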
__thread int have_tb_lock;
-#endif
void tb_lock(void)
{
-#ifdef CONFIG_USER_ONLY
assert(!have_tb_lock);
qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
have_tb_lock++;
-#endif
}
void tb_unlock(void)
{
-#ifdef CONFIG_USER_ONLY
assert(have_tb_lock);
have_tb_lock--;
qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
-#endif
}
void tb_lock_reset(void)
{
-#ifdef CONFIG_USER_ONLY
if (have_tb_lock) {
qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
have_tb_lock = 0;
}
-#endif
}
#ifdef DEBUG_LOCKING
@@ -185,15 +181,11 @@ void tb_lock_reset(void)
#define DEBUG_TB_LOCKS 0
#endif
-#ifdef CONFIG_SOFTMMU
-#define assert_tb_lock() do { /* nothing */ } while (0)
-#else
#define assert_tb_lock() do { \
if (DEBUG_TB_LOCKS) { \
g_assert(have_tb_lock); \
} \
} while (0)
-#endif
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);