*_run_on_cpu: introduce run_on_cpu_data type

This changes the *_run_on_cpu APIs (and helpers) to pass data in a
run_on_cpu_data type instead of a plain void *. This is needed because
we sometimes want to pass a target address (target_ulong), which does
not fit in a void * on 32-bit hosts emulating 64-bit guests.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20161027151030.20863-24-alex.bennee@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
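Editorial illustration (not part of the commit): the failure mode being fixed is pointer truncation. On a 32-bit host emulating a 64-bit guest, a guest address is 64 bits wide but void * is only 32, so boxing the address into the old void * parameter silently drops the high bits; the union gives the payload a member that is wide enough on any host. A standalone sketch, with target_addr standing in for QEMU's target_ulong/vaddr:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t target_addr;   /* stand-in for target_ulong/vaddr */

    /* Shape of the union this commit introduces (members abridged). */
    typedef union {
        void        *host_ptr;
        target_addr  target_ptr;    /* 64 bits regardless of host */
    } run_on_cpu_data;

    int main(void)
    {
        target_addr guest_pc = 0x123456789abcULL;

        /* Old scheme: squeeze the address through void *.  On a 32-bit
         * host (sizeof(void *) == 4) the high bits are silently lost. */
        void *boxed = (void *)(uintptr_t)guest_pc;
        printf("via void *: 0x%llx\n", (unsigned long long)(uintptr_t)boxed);

        /* New scheme: the union member keeps all 64 bits. */
        run_on_cpu_data data = { .target_ptr = guest_pc };
        printf("via union:  0x%llx\n", (unsigned long long)data.target_ptr);
        return 0;
    }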
This commit is contained in:
parent 12e9700d7a
commit 14e6fe12a7
@@ -109,7 +109,7 @@ void cpu_list_remove(CPUState *cpu)
 struct qemu_work_item {
     struct qemu_work_item *next;
     run_on_cpu_func func;
-    void *data;
+    run_on_cpu_data data;
     bool free, exclusive, done;
 };

@@ -129,7 +129,7 @@ static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
     qemu_cpu_kick(cpu);
 }
 
-void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
+void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                    QemuMutex *mutex)
 {
     struct qemu_work_item wi;

@@ -154,7 +154,7 @@ void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
     }
 }
 
-void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
 {
     struct qemu_work_item *wi;

@@ -296,7 +296,8 @@ void cpu_exec_end(CPUState *cpu)
     }
 }
 
-void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
+                           run_on_cpu_data data)
 {
     struct qemu_work_item *wi;

cpus.c (7 changed lines)
@@ -556,7 +556,7 @@ static const VMStateDescription vmstate_timers = {
     }
 };
 
-static void cpu_throttle_thread(CPUState *cpu, void *opaque)
+static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
 {
     double pct;
     double throttle_ratio;

@@ -587,7 +587,8 @@ static void cpu_throttle_timer_tick(void *opaque)
     }
     CPU_FOREACH(cpu) {
         if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
-            async_run_on_cpu(cpu, cpu_throttle_thread, NULL);
+            async_run_on_cpu(cpu, cpu_throttle_thread,
+                             RUN_ON_CPU_NULL);
         }
     }

@@ -914,7 +915,7 @@ void qemu_init_cpu_loop(void)
     qemu_thread_get_self(&io_thread);
 }
 
-void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
 {
     do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
 }

@@ -133,9 +133,9 @@ static void kvm_apic_vapic_base_update(APICCommonState *s)
     }
 }
 
-static void kvm_apic_put(CPUState *cs, void *data)
+static void kvm_apic_put(CPUState *cs, run_on_cpu_data data)
 {
-    APICCommonState *s = data;
+    APICCommonState *s = data.host_ptr;
     struct kvm_lapic_state kapic;
     int ret;

@@ -151,12 +151,12 @@ static void kvm_apic_put(CPUState *cs, void *data)
 
 static void kvm_apic_post_load(APICCommonState *s)
 {
-    run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
+    run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s));
 }
 
-static void do_inject_external_nmi(CPUState *cpu, void *data)
+static void do_inject_external_nmi(CPUState *cpu, run_on_cpu_data data)
 {
-    APICCommonState *s = data;
+    APICCommonState *s = data.host_ptr;
     uint32_t lvt;
     int ret;

@@ -174,7 +174,7 @@ static void do_inject_external_nmi(CPUState *cpu, void *data)
 
 static void kvm_apic_external_nmi(APICCommonState *s)
 {
-    run_on_cpu(CPU(s->cpu), do_inject_external_nmi, s);
+    run_on_cpu(CPU(s->cpu), do_inject_external_nmi, RUN_ON_CPU_HOST_PTR(s));
 }
 
 static void kvm_send_msi(MSIMessage *msg)

@@ -213,7 +213,7 @@ static void kvm_apic_reset(APICCommonState *s)
     /* Not used by KVM, which uses the CPU mp_state instead. */
     s->wait_for_sipi = 0;
 
-    run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
+    run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s));
 }
 
 static void kvm_apic_realize(DeviceState *dev, Error **errp)

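The hunks above all follow the one mechanical pattern this commit applies wherever a host pointer is passed: wrap with RUN_ON_CPU_HOST_PTR() at the call site, unwrap through the .host_ptr member in the worker. A condensed sketch of the pattern, assuming the run_on_cpu()/run_on_cpu_data declarations from include/qom/cpu.h as modified by this commit (MyState and my_worker are placeholder names, not actual QEMU identifiers):

    /* Worker: receives the union and unwraps the host pointer. */
    static void my_worker(CPUState *cs, run_on_cpu_data data)
    {
        MyState *s = data.host_ptr;   /* previously: MyState *s = data; */
        /* ... operate on s in the context of vCPU cs ... */
    }

    /* Call site: wrap the pointer instead of passing it raw. */
    run_on_cpu(cs, my_worker, RUN_ON_CPU_HOST_PTR(s));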
@@ -487,10 +487,9 @@ typedef struct VAPICEnableTPRReporting {
     bool enable;
 } VAPICEnableTPRReporting;
 
-static void vapic_do_enable_tpr_reporting(CPUState *cpu, void *data)
+static void vapic_do_enable_tpr_reporting(CPUState *cpu, run_on_cpu_data data)
 {
-    VAPICEnableTPRReporting *info = data;
-
+    VAPICEnableTPRReporting *info = data.host_ptr;
     apic_enable_tpr_access_reporting(info->apic, info->enable);
 }

@@ -505,7 +504,7 @@ static void vapic_enable_tpr_reporting(bool enable)
     CPU_FOREACH(cs) {
         cpu = X86_CPU(cs);
         info.apic = cpu->apic_state;
-        run_on_cpu(cs, vapic_do_enable_tpr_reporting, &info);
+        run_on_cpu(cs, vapic_do_enable_tpr_reporting, RUN_ON_CPU_HOST_PTR(&info));
     }
 }

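An editorial lifetime note on the hunk above: &info points at a stack variable, which is safe here only because run_on_cpu() is synchronous and does not return until the worker has executed. The asynchronous variants must not be handed stack addresses. A sketch of the distinction, with placeholder names (MyInfo, my_worker, my_counter_worker are hypothetical):

    /* Synchronous: run_on_cpu() blocks until my_worker has run, so a
     * stack-allocated payload outlives the work item. */
    MyInfo info = { .enable = true };
    run_on_cpu(cs, my_worker, RUN_ON_CPU_HOST_PTR(&info));

    /* Asynchronous: the work item outlives this frame, so pass the
     * payload by value (host_int) or with heap/static lifetime. */
    async_run_on_cpu(cs, my_counter_worker, RUN_ON_CPU_HOST_INT(42));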
@@ -738,9 +737,9 @@ static void vapic_realize(DeviceState *dev, Error **errp)
     nb_option_roms++;
 }
 
-static void do_vapic_enable(CPUState *cs, void *data)
+static void do_vapic_enable(CPUState *cs, run_on_cpu_data data)
 {
-    VAPICROMState *s = data;
+    VAPICROMState *s = data.host_ptr;
     X86CPU *cpu = X86_CPU(cs);
 
     static const uint8_t enabled = 1;

@@ -762,7 +761,7 @@ static void kvmvapic_vm_state_change(void *opaque, int running,
 
     if (s->state == VAPIC_ACTIVE) {
         if (smp_cpus == 1) {
-            run_on_cpu(first_cpu, do_vapic_enable, s);
+            run_on_cpu(first_cpu, do_vapic_enable, RUN_ON_CPU_HOST_PTR(s));
         } else {
             zero = g_malloc0(s->rom_state.vapic_size);
             cpu_physical_memory_write(s->vapic_paddr, zero,

@@ -84,11 +84,11 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env,
     env->tlb_dirty = true;
 }
 
-static void spin_kick(CPUState *cs, void *data)
+static void spin_kick(CPUState *cs, run_on_cpu_data data)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
     CPUPPCState *env = &cpu->env;
-    SpinInfo *curspin = data;
+    SpinInfo *curspin = data.host_ptr;
     hwaddr map_size = 64 * 1024 * 1024;
     hwaddr map_start;

@@ -147,7 +147,7 @@ static void spin_write(void *opaque, hwaddr addr, uint64_t value,
 
     if (!(ldq_p(&curspin->addr) & 1)) {
         /* run CPU */
-        run_on_cpu(cpu, spin_kick, curspin);
+        run_on_cpu(cpu, spin_kick, RUN_ON_CPU_HOST_PTR(curspin));
     }
 }

@@ -2148,7 +2148,7 @@ static void spapr_machine_finalizefn(Object *obj)
     g_free(spapr->kvm_type);
 }
 
-static void ppc_cpu_do_nmi_on_cpu(CPUState *cs, void *arg)
+static void ppc_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg)
 {
     cpu_synchronize_state(cs);
     ppc_cpu_do_system_reset(cs);

@@ -2159,7 +2159,7 @@ static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
     CPUState *cs;
 
     CPU_FOREACH(cs) {
-        async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, NULL);
+        async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, RUN_ON_CPU_NULL);
     }
 }

@@ -18,9 +18,9 @@ struct SPRSyncState {
     target_ulong mask;
 };
 
-static void do_spr_sync(CPUState *cs, void *arg)
+static void do_spr_sync(CPUState *cs, run_on_cpu_data arg)
 {
-    struct SPRSyncState *s = arg;
+    struct SPRSyncState *s = arg.host_ptr;
     PowerPCCPU *cpu = POWERPC_CPU(cs);
     CPUPPCState *env = &cpu->env;

@@ -37,7 +37,7 @@ static void set_spr(CPUState *cs, int spr, target_ulong value,
         .value = value,
         .mask = mask
     };
-    run_on_cpu(cs, do_spr_sync, &s);
+    run_on_cpu(cs, do_spr_sync, RUN_ON_CPU_HOST_PTR(&s));
 }
 
 static bool has_spr(PowerPCCPU *cpu, int spr)

@@ -911,10 +911,10 @@ typedef struct {
     Error *err;
 } SetCompatState;
 
-static void do_set_compat(CPUState *cs, void *arg)
+static void do_set_compat(CPUState *cs, run_on_cpu_data arg)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
-    SetCompatState *s = arg;
+    SetCompatState *s = arg.host_ptr;
 
     cpu_synchronize_state(cs);
     ppc_set_compat(cpu, s->cpu_version, &s->err);

@@ -1017,7 +1017,7 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
             .err = NULL,
         };
 
-        run_on_cpu(cs, do_set_compat, &s);
+        run_on_cpu(cs, do_set_compat, RUN_ON_CPU_HOST_PTR(&s));
 
         if (s.err) {
             error_report_err(s.err);

@@ -231,7 +231,25 @@ struct kvm_run;
 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
 
 /* work queue */
-typedef void (*run_on_cpu_func)(CPUState *cpu, void *data);
+
+/* The union type allows passing of 64 bit target pointers on 32 bit
+ * hosts in a single parameter
+ */
+typedef union {
+    int           host_int;
+    unsigned long host_ulong;
+    void         *host_ptr;
+    vaddr         target_ptr;
+} run_on_cpu_data;
+
+#define RUN_ON_CPU_HOST_PTR(p)    ((run_on_cpu_data){.host_ptr = (p)})
+#define RUN_ON_CPU_HOST_INT(i)    ((run_on_cpu_data){.host_int = (i)})
+#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
+#define RUN_ON_CPU_TARGET_PTR(v)  ((run_on_cpu_data){.target_ptr = (v)})
+#define RUN_ON_CPU_NULL           RUN_ON_CPU_HOST_PTR(NULL)
+
+typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
+
 struct qemu_work_item;
 
 /**

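The RUN_ON_CPU_* helpers above are C99 compound literals with designated initializers, so a wrapped value can be constructed directly in argument position with no named temporary. A standalone sketch of the same construction (vaddr replaced by uint64_t so it compiles outside QEMU):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t vaddr;         /* stand-in for QEMU's vaddr */

    typedef union {
        int host_int;
        unsigned long host_ulong;
        void *host_ptr;
        vaddr target_ptr;
    } run_on_cpu_data;

    #define RUN_ON_CPU_HOST_INT(i)   ((run_on_cpu_data){.host_int = (i)})
    #define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)})

    int main(void)
    {
        /* Each macro expands to a temporary union with one member set;
         * the worker reads back through the matching member. */
        run_on_cpu_data a = RUN_ON_CPU_HOST_INT(42);
        run_on_cpu_data b = RUN_ON_CPU_TARGET_PTR(0xdeadbeefcafeULL);
        printf("%d 0x%llx\n", a.host_int, (unsigned long long)b.target_ptr);
        return 0;
    }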
@@ -637,7 +655,7 @@ bool cpu_is_stopped(CPUState *cpu);
  *
  * Used internally in the implementation of run_on_cpu.
  */
-void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
+void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                    QemuMutex *mutex);
 
 /**

@@ -648,7 +666,7 @@ void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
  *
  * Schedules the function @func for execution on the vCPU @cpu.
  */
-void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 
 /**
  * async_run_on_cpu:

@@ -658,7 +676,7 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
  *
  * Schedules the function @func for execution on the vCPU @cpu asynchronously.
  */
-void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 
 /**
  * async_safe_run_on_cpu:

@@ -672,7 +690,7 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
  * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
  * BQL.
  */
-void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
+void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 
 /**
  * qemu_get_cpu:

kvm-all.c (20 changed lines)
@@ -1856,7 +1856,7 @@ void kvm_flush_coalesced_mmio_buffer(void)
     s->coalesced_flush_in_progress = false;
 }
 
-static void do_kvm_cpu_synchronize_state(CPUState *cpu, void *arg)
+static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
 {
     if (!cpu->kvm_vcpu_dirty) {
         kvm_arch_get_registers(cpu);

@@ -1867,11 +1867,11 @@ static void do_kvm_cpu_synchronize_state(CPUState *cpu, void *arg)
 void kvm_cpu_synchronize_state(CPUState *cpu)
 {
     if (!cpu->kvm_vcpu_dirty) {
-        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, NULL);
+        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
     }
 }
 
-static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, void *arg)
+static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
 {
     kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
     cpu->kvm_vcpu_dirty = false;

@@ -1879,10 +1879,10 @@ static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, void *arg)
 
 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
 {
-    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, NULL);
+    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
 }
 
-static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, void *arg)
+static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
 {
     kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
     cpu->kvm_vcpu_dirty = false;

@@ -1890,7 +1890,7 @@ static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, void *arg)
 
 void kvm_cpu_synchronize_post_init(CPUState *cpu)
 {
-    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, NULL);
+    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
 }
 
 int kvm_cpu_exec(CPUState *cpu)

@@ -2218,9 +2218,10 @@ struct kvm_set_guest_debug_data {
     int err;
 };
 
-static void kvm_invoke_set_guest_debug(CPUState *cpu, void *data)
+static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
 {
-    struct kvm_set_guest_debug_data *dbg_data = data;
+    struct kvm_set_guest_debug_data *dbg_data =
+        (struct kvm_set_guest_debug_data *) data.host_ptr;
 
     dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
                                    &dbg_data->dbg);

@@ -2237,7 +2238,8 @@ int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
     }
     kvm_arch_update_guest_debug(cpu, &data.dbg);
 
-    run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
+    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
+               RUN_ON_CPU_HOST_PTR(&data));
     return data.err;
 }

@@ -1121,9 +1121,9 @@ typedef struct MCEInjectionParams {
     int flags;
 } MCEInjectionParams;
 
-static void do_inject_x86_mce(CPUState *cs, void *data)
+static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
 {
-    MCEInjectionParams *params = data;
+    MCEInjectionParams *params = data.host_ptr;
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *cenv = &cpu->env;
     uint64_t *banks = cenv->mce_banks + 4 * params->bank;

@@ -1230,7 +1230,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
         return;
     }
 
-    run_on_cpu(cs, do_inject_x86_mce, &params);
+    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
     if (flags & MCE_INJECT_BROADCAST) {
         CPUState *other_cs;

@@ -1243,7 +1243,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
         if (other_cs == cs) {
             continue;
         }
-        run_on_cpu(other_cs, do_inject_x86_mce, &params);
+        run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
     }
 }

@@ -183,7 +183,7 @@ static int kvm_get_tsc(CPUState *cs)
     return 0;
 }
 
-static inline void do_kvm_synchronize_tsc(CPUState *cpu, void *arg)
+static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
 {
     kvm_get_tsc(cpu);
 }

@@ -194,7 +194,7 @@ void kvm_synchronize_all_tsc(void)
 
     if (kvm_enabled()) {
         CPU_FOREACH(cpu) {
-            run_on_cpu(cpu, do_kvm_synchronize_tsc, NULL);
+            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
         }
     }
 }

@@ -164,7 +164,7 @@ static void s390_cpu_machine_reset_cb(void *opaque)
 {
     S390CPU *cpu = opaque;
 
-    run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, NULL);
+    run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
 }
 #endif

@@ -220,7 +220,7 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp)
     s390_cpu_gdb_init(cs);
     qemu_init_vcpu(cs);
 #if !defined(CONFIG_USER_ONLY)
-    run_on_cpu(cs, s390_do_cpu_full_reset, NULL);
+    run_on_cpu(cs, s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
 #else
     cpu_reset(cs);
 #endif

@@ -502,13 +502,13 @@ static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
 #define decode_basedisp_rs decode_basedisp_s
 
 /* helper functions for run_on_cpu() */
-static inline void s390_do_cpu_reset(CPUState *cs, void *arg)
+static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg)
 {
     S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
 
     scc->cpu_reset(cs);
 }
-static inline void s390_do_cpu_full_reset(CPUState *cs, void *arg)
+static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg)
 {
     cpu_reset(cs);
 }

@@ -1607,7 +1607,7 @@ int kvm_s390_cpu_restart(S390CPU *cpu)
 {
     SigpInfo si = {};
 
-    run_on_cpu(CPU(cpu), sigp_restart, &si);
+    run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
     DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
     return 0;
 }

@@ -1683,31 +1683,31 @@ static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
 
     switch (order) {
     case SIGP_START:
-        run_on_cpu(CPU(dst_cpu), sigp_start, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_STOP:
-        run_on_cpu(CPU(dst_cpu), sigp_stop, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_RESTART:
-        run_on_cpu(CPU(dst_cpu), sigp_restart, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_STOP_STORE_STATUS:
-        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_STORE_STATUS_ADDR:
-        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_STORE_ADTL_STATUS:
-        run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_SET_PREFIX:
-        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_INITIAL_CPU_RESET:
-        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_CPU_RESET:
-        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
         break;
     default:
         DPRINTF("KVM: unknown SIGP: 0x%x\n", order);

@@ -126,7 +126,7 @@ static int modified_clear_reset(S390CPU *cpu)
     pause_all_vcpus();
     cpu_synchronize_all_states();
     CPU_FOREACH(t) {
-        run_on_cpu(t, s390_do_cpu_full_reset, NULL);
+        run_on_cpu(t, s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
     }
     s390_cmma_reset();
     subsystem_reset();

@@ -145,7 +145,7 @@ static int load_normal_reset(S390CPU *cpu)
     pause_all_vcpus();
     cpu_synchronize_all_states();
     CPU_FOREACH(t) {
-        run_on_cpu(t, s390_do_cpu_reset, NULL);
+        run_on_cpu(t, s390_do_cpu_reset, RUN_ON_CPU_NULL);
     }
     s390_cmma_reset();
     subsystem_reset();

@@ -917,16 +917,14 @@ static void page_flush_tb(void)
 }
 
 /* flush all the translation blocks */
-static void do_tb_flush(CPUState *cpu, void *data)
+static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
 {
-    unsigned tb_flush_req = (unsigned) (uintptr_t) data;
-
     tb_lock();
 
-    /* If it's already been done on request of another CPU,
+    /* If it is already been done on request of another CPU,
      * just retry.
      */
-    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_req) {
+    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
         goto done;
     }

@@ -967,8 +965,9 @@ done:
 void tb_flush(CPUState *cpu)
 {
     if (tcg_enabled()) {
-        uintptr_t tb_flush_req = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
-        async_safe_run_on_cpu(cpu, do_tb_flush, (void *) tb_flush_req);
+        unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
+        async_safe_run_on_cpu(cpu, do_tb_flush,
+                              RUN_ON_CPU_HOST_INT(tb_flush_count));
     }
 }

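The translate-all.c hunks use the integer member rather than a pointer: the TB flush count is snapshotted by the requester and re-checked by the worker, so concurrent flush requests from several vCPUs collapse into a single flush. A minimal sketch of that generation-check idiom, assuming QEMU's run_on_cpu_data type and async_safe_run_on_cpu(); the names flush_count, do_flush, and request_flush are hypothetical:

    static unsigned flush_count;    /* generation counter (hypothetical) */

    static void do_flush(CPUState *cpu, run_on_cpu_data snapshot)
    {
        /* Someone else already flushed after our snapshot: nothing to do. */
        if (flush_count != snapshot.host_int) {
            return;
        }
        /* ... perform the flush ... */
        flush_count++;
    }

    void request_flush(CPUState *cpu)
    {
        unsigned now = atomic_mb_read(&flush_count);
        async_safe_run_on_cpu(cpu, do_flush, RUN_ON_CPU_HOST_INT(now));
    }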