mirror of https://gitee.com/openkylin/qemu.git
accel/tcg: Register a force_rcu notifier
A TCG vCPU doing a busy loop systematically hangs the QEMU monitor
if the user passes 'device_add' without argument. This is because
drain_call_rcu(), which is called from qmp_device_add(), cannot return
if readers don't exit read-side critical sections. That is typically
what busy-looping TCG vCPUs do:
int cpu_exec(CPUState *cpu)
{
[...]
rcu_read_lock();
[...]
while (!cpu_handle_exception(cpu, &ret)) {
// Busy loop keeps vCPU here
}
[...]
rcu_read_unlock();
return ret;
}
For MTTCG, have all vCPU threads register a force_rcu notifier that will
kick them out of the loop using async_run_on_cpu(). The notifier is called
with the rcu_registry_lock mutex held, using async_run_on_cpu() ensures
there are no deadlocks.
For RR, a single thread runs all vCPUs. Just register a single notifier
that kicks the current vCPU to the next one.
For MTTCG:
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
For RR:
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Fixes: 7bed89958b
("device_core: use drain_call_rcu in in qmp_device_add")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/650
Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211109183523.47726-3-groug@kaod.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
ef149763a8
commit
dd47a8f654
|
@ -28,6 +28,7 @@
|
||||||
#include "sysemu/tcg.h"
|
#include "sysemu/tcg.h"
|
||||||
#include "sysemu/replay.h"
|
#include "sysemu/replay.h"
|
||||||
#include "qemu/main-loop.h"
|
#include "qemu/main-loop.h"
|
||||||
|
#include "qemu/notify.h"
|
||||||
#include "qemu/guest-random.h"
|
#include "qemu/guest-random.h"
|
||||||
#include "exec/exec-all.h"
|
#include "exec/exec-all.h"
|
||||||
#include "hw/boards.h"
|
#include "hw/boards.h"
|
||||||
|
@ -35,6 +36,26 @@
|
||||||
#include "tcg-accel-ops.h"
|
#include "tcg-accel-ops.h"
|
||||||
#include "tcg-accel-ops-mttcg.h"
|
#include "tcg-accel-ops-mttcg.h"
|
||||||
|
|
||||||
|
typedef struct MttcgForceRcuNotifier {
|
||||||
|
Notifier notifier;
|
||||||
|
CPUState *cpu;
|
||||||
|
} MttcgForceRcuNotifier;
|
||||||
|
|
||||||
|
static void do_nothing(CPUState *cpu, run_on_cpu_data d)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
static void mttcg_force_rcu(Notifier *notify, void *data)
|
||||||
|
{
|
||||||
|
CPUState *cpu = container_of(notify, MttcgForceRcuNotifier, notifier)->cpu;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Called with rcu_registry_lock held, using async_run_on_cpu() ensures
|
||||||
|
* that there are no deadlocks.
|
||||||
|
*/
|
||||||
|
async_run_on_cpu(cpu, do_nothing, RUN_ON_CPU_NULL);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* In the multi-threaded case each vCPU has its own thread. The TLS
|
* In the multi-threaded case each vCPU has its own thread. The TLS
|
||||||
* variable current_cpu can be used deep in the code to find the
|
* variable current_cpu can be used deep in the code to find the
|
||||||
|
@ -43,12 +64,16 @@
|
||||||
|
|
||||||
static void *mttcg_cpu_thread_fn(void *arg)
|
static void *mttcg_cpu_thread_fn(void *arg)
|
||||||
{
|
{
|
||||||
|
MttcgForceRcuNotifier force_rcu;
|
||||||
CPUState *cpu = arg;
|
CPUState *cpu = arg;
|
||||||
|
|
||||||
assert(tcg_enabled());
|
assert(tcg_enabled());
|
||||||
g_assert(!icount_enabled());
|
g_assert(!icount_enabled());
|
||||||
|
|
||||||
rcu_register_thread();
|
rcu_register_thread();
|
||||||
|
force_rcu.notifier.notify = mttcg_force_rcu;
|
||||||
|
force_rcu.cpu = cpu;
|
||||||
|
rcu_add_force_rcu_notifier(&force_rcu.notifier);
|
||||||
tcg_register_thread();
|
tcg_register_thread();
|
||||||
|
|
||||||
qemu_mutex_lock_iothread();
|
qemu_mutex_lock_iothread();
|
||||||
|
@ -100,6 +125,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
|
||||||
|
|
||||||
tcg_cpus_destroy(cpu);
|
tcg_cpus_destroy(cpu);
|
||||||
qemu_mutex_unlock_iothread();
|
qemu_mutex_unlock_iothread();
|
||||||
|
rcu_remove_force_rcu_notifier(&force_rcu.notifier);
|
||||||
rcu_unregister_thread();
|
rcu_unregister_thread();
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
|
@ -28,6 +28,7 @@
|
||||||
#include "sysemu/tcg.h"
|
#include "sysemu/tcg.h"
|
||||||
#include "sysemu/replay.h"
|
#include "sysemu/replay.h"
|
||||||
#include "qemu/main-loop.h"
|
#include "qemu/main-loop.h"
|
||||||
|
#include "qemu/notify.h"
|
||||||
#include "qemu/guest-random.h"
|
#include "qemu/guest-random.h"
|
||||||
#include "exec/exec-all.h"
|
#include "exec/exec-all.h"
|
||||||
|
|
||||||
|
@ -133,6 +134,11 @@ static void rr_deal_with_unplugged_cpus(void)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void rr_force_rcu(Notifier *notify, void *data)
|
||||||
|
{
|
||||||
|
rr_kick_next_cpu();
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* In the single-threaded case each vCPU is simulated in turn. If
|
* In the single-threaded case each vCPU is simulated in turn. If
|
||||||
* there is more than a single vCPU we create a simple timer to kick
|
* there is more than a single vCPU we create a simple timer to kick
|
||||||
|
@ -143,10 +149,13 @@ static void rr_deal_with_unplugged_cpus(void)
|
||||||
|
|
||||||
static void *rr_cpu_thread_fn(void *arg)
|
static void *rr_cpu_thread_fn(void *arg)
|
||||||
{
|
{
|
||||||
|
Notifier force_rcu;
|
||||||
CPUState *cpu = arg;
|
CPUState *cpu = arg;
|
||||||
|
|
||||||
assert(tcg_enabled());
|
assert(tcg_enabled());
|
||||||
rcu_register_thread();
|
rcu_register_thread();
|
||||||
|
force_rcu.notify = rr_force_rcu;
|
||||||
|
rcu_add_force_rcu_notifier(&force_rcu);
|
||||||
tcg_register_thread();
|
tcg_register_thread();
|
||||||
|
|
||||||
qemu_mutex_lock_iothread();
|
qemu_mutex_lock_iothread();
|
||||||
|
@ -255,6 +264,7 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||||
rr_deal_with_unplugged_cpus();
|
rr_deal_with_unplugged_cpus();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rcu_remove_force_rcu_notifier(&force_rcu);
|
||||||
rcu_unregister_thread();
|
rcu_unregister_thread();
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue